Merge "drm/msm/sde: fix null parameter checks in drm driver"
diff --git a/Documentation/arm/msm/remote_debug_drv.txt b/Documentation/arm/msm/remote_debug_drv.txt
new file mode 100644
index 0000000..13a35f4
--- /dev/null
+++ b/Documentation/arm/msm/remote_debug_drv.txt
@@ -0,0 +1,468 @@
+Introduction
+============
+
+The goal of this debug feature is to provide a reliable, responsive,
+accurate and secure debug capability to developers interested in
+debugging MSM subsystem processor images without the use of a hardware
+debugger.
+
+The Debug Agent along with the Remote Debug Driver implements a shared
+memory based transport mechanism that allows for a debugger (ex. GDB)
+running on a host PC to communicate with a remote stub running on
+peripheral subsystems such as the ADSP, MODEM etc.
+
+The diagram below depicts end to end the components involved to
+support remote debugging:
+
+
+:               :
+:    HOST (PC)  :  MSM
+:  ,--------,   :   ,-------,
+:  |        |   :   | Debug |                         ,--------,
+:  |Debugger|<--:-->| Agent |                         | Remote |
+:  |        |   :   |  App  |                  +----->| Debug  |
+:  `--------`   :   |-------|    ,--------,    |      | Stub   |
+:               :   | Remote|    |        |<---+      `--------`
+:               :   | Debug |<-->|--------|
+:               :   | Driver|    |        |<---+      ,--------,
+:               :   `-------`    `--------`    |      | Remote |
+:               :       LA         Shared      +----->| Debug  |
+:               :                  Memory             | Stub   |
+:               :                                     `--------`
+:               :                               Peripheral Subsystems
+:               :                                 (ADSP, MODEM, ...)
+
+
+Debugger:       Debugger application running on the host PC that
+                communicates with the remote stub.
+                Examples: GDB, LLDB
+
+Debug Agent:    Software that runs on the Linux Android platform
+                that provides connectivity from the MSM to the
+                host PC. This involves two portions:
+                1) A user mode Debug Agent application that discovers
+                processes running on the subsystems and creates
+                TCP/IP sockets for the host to connect to. In addition
+                to this, it creates an info (or meta) port that
+                users can connect to in order to discover the various
+                processes and their corresponding debug ports.
+                2) The Remote Debug Driver (described below), which
+                the Debug Agent application uses to exchange the
+                debug payload with the subsystems over shared memory.
+
+Remote Debug    A character device driver that the Debug
+Driver:         Agent uses to transport the payload received from the
+                host to the debug stub running on the subsystem
+                processor over shared memory and vice versa.
+
+Shared Memory:  Shared memory from the SMEM pool that is accessible
+                from the Applications Processor (AP) and the
+                subsystem processors.
+
+Remote Debug    Privileged code that runs in the kernels of the
+Stub:           subsystem processors that receives debug commands
+                from the debugger running on the host and
+                acts on these commands. These commands include reading
+                and writing to registers and memory belonging to the
+                subsystem's address space, setting breakpoints,
+                single stepping etc.
+
+Hardware description
+====================
+
+The Remote Debug Driver interfaces with the Remote Debug stubs
+running on the subsystem processors and does not drive or
+manage any hardware resources.
+
+Software description
+====================
+
+The debugger and the remote stubs use the Remote Serial Protocol (RSP)
+to communicate with each other. This is a protocol widely used by both
+software and hardware debuggers. RSP is an ASCII based protocol
+and is used when it is not possible to run a GDB server on the target
+under debug.
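+
+For illustration only (this is standard RSP framing, not something specific
+to this driver), an RSP packet has the form "$<payload>#<checksum>", where
+the checksum is the modulo-256 sum of the payload bytes printed as two hex
+digits. A minimal C sketch of that checksum:
+
+  static unsigned char rsp_checksum(const char *payload, unsigned int len)
+  {
+          unsigned int sum = 0;
+          unsigned int i;
+
+          for (i = 0; i < len; i++)
+                  sum += (unsigned char)payload[i];
+
+          return (unsigned char)(sum % 256);
+  }
+
+  /* Example: rsp_checksum("qSupported", 10) returns 0x37, so the packet
+   * sent on the wire is "$qSupported#37".
+   */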
+
+The Debug Agent application along with the Remote Debug Driver
+is responsible for establishing a bi-directional connection from
+the debugger application running on the host to the remote debug
+stub running on a subsystem. The Debug Agent establishes connectivity
+to the host PC via TCP/IP sockets.
+
+This feature uses ADB port forwarding to establish connectivity
+between the debugger running on the host and the target under debug.
+
+Please note the Debug Agent does not expose HLOS memory to the
+remote subsystem processors.
+
+Design
+======
+
+Here is the overall flow:
+
+1) When the Debug Agent application starts up, it opens up a shared memory
+based transport channel to the various subsystem processor images.
+
+2) The Debug Agent application sends messages across to the remote stubs
+to discover the various processes that are running on the subsystem and
+creates debug sockets for each of them.
+
+3) Whenever a process running on a subsystem exits, the Debug Agent
+is notified by the stub so that the debug port and other resources
+can be reclaimed.
+
+4) The Debug Agent uses the services of the Remote Debug Driver to
+transport payload from the host debugger to the remote stub and vice versa.
+
+5) Communication between the Remote Debug Driver and the Remote Debug stub
+running on the subsystem processor is done over shared memory (see figure).
+SMEM services are used to allocate the shared memory that will
+be readable and writeable by the AP and the subsystem image under debug.
+
+A separate SMEM allocation takes place for each subsystem processor
+involved in remote debugging. The remote stub running on each of the
+subsystems allocates a SMEM buffer using a unique identifier so that both
+the AP and subsystem get the same physical block of memory. It should be
+noted that subsystem images can be restarted at any time.
+However, when a subsystem comes back up, its stub uses the same unique
+SMEM identifier to allocate the SMEM block. This does not result in a
+new allocation; rather, the same block of memory that was allocated during
+the first bootup is provided back to the stub running on the subsystem.
+
+An 8KB chunk of shared memory is allocated and used for communication
+per subsystem. For multi-process capable subsystems, a 16KB chunk of shared
+memory is allocated to allow for simultaneous debugging of more than one
+process running on a single subsystem.
+
+The shared memory is used as a circular ring buffer in each direction.
+Thus we have a bi-directional shared memory channel between the AP
+and a subsystem. We call this SMQ. Each memory channel contains a header,
+data and a control mechanism that is used to synchronize read and write
+of data between the AP and the remote subsystem.
+
+Overall SMQ memory view:
+:
+:    +------------------------------------------------+
+:    | SMEM buffer                                    |
+:    |-----------------------+------------------------|
+:    |Producer: LA           | Producer: Remote       |
+:    |Consumer: Remote       |           subsystem    |
+:    |          subsystem    | Consumer: LA           |
+:    |                       |                        |
+:    |               Producer|                Consumer|
+:    +-----------------------+------------------------+
+:    |                       |
+:    |                       |
+:    |                       +--------------------------------------+
+:    |                                                              |
+:    |                                                              |
+:    v                                                              v
+:    +--------------------------------------------------------------+
+:    |   Header  |       Data      |            Control             |
+:    +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+:    |           | b | b | b |     | S  |n |n |     | S |n |n |     |
+:    |  Producer | l | l | l |     | M  |o |o |     | M |o |o |     |
+:    |    Ver    | o | o | o |     | Q  |d |d |     | Q |d |d |     |
+:    |-----------| c | c | c | ... |    |e |e | ... |   |e |e | ... |
+:    |           | k | k | k |     | O  |  |  |     | I |  |  |     |
+:    |  Consumer |   |   |   |     | u  |0 |1 |     | n |0 |1 |     |
+:    |    Ver    | 0 | 1 | 2 |     | t  |  |  |     |   |  |  |     |
+:    +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+:                                       |           |
+:                                       +           |
+:                                                   |
+:                          +------------------------+
+:                          |
+:                          v
+:                        +----+----+----+----+
+:                        | SMQ Nodes         |
+:                        |----|----|----|----|
+:                 Node # |  0 |  1 |  2 | ...|
+:                        |----|----|----|----|
+: Starting Block Index # |  0 |  3 |  8 | ...|
+:                        |----|----|----|----|
+:            # of blocks |  3 |  5 |  1 | ...|
+:                        +----+----+----+----+
+:
+
+Header: Contains version numbers for software compatibility to ensure
+that both producers and consumers on the AP and subsystems know how to
+read from and write to the queue.
+Both the producer and consumer versions are 1.
+:     +---------+-------------------+
+:     | Size    | Field             |
+:     +---------+-------------------+
+:     | 1 byte  | Producer Version  |
+:     +---------+-------------------+
+:     | 1 byte  | Consumer Version  |
+:     +---------+-------------------+
+
+
+Data: The data portion contains multiple blocks [0..N] of a fixed size.
+The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1.
+Payload sent from the debug agent app is split (if necessary) and placed
+in these blocks. The first data block is placed at the next 8 byte aligned
+address after the header.
+
+The number of blocks for a given SMEM allocation is derived as follows:
+  Number of Blocks = ((Total Size - Alignment - Size of Header
+                      - Size of SMQIn - Size of SMQOut)/(SM_BLOCKSIZE))
+
+The producer maintains a private block map of each of these blocks to
+determine which of the blocks in the queue are in use and which are free.
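+
+As an illustration of the formula above, here is a C sketch of the block
+count derivation (the helper name and its parameters are hypothetical and
+not taken from the driver sources):
+
+  #define SM_BLOCKSIZE 128        /* block size for header version #1 */
+
+  static unsigned int smq_num_blocks(unsigned int total_size,
+                                     unsigned int alignment,
+                                     unsigned int header_size,
+                                     unsigned int smqin_size,
+                                     unsigned int smqout_size)
+  {
+          return (total_size - alignment - header_size -
+                  smqin_size - smqout_size) / SM_BLOCKSIZE;
+  }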
+
+Control:
+The control portion contains a list of nodes [0..N] where N is the number
+of available data blocks. Each node identifies the data
+block indexes that contain a particular debug message to be transferred,
+and the number of blocks it took to hold the contents of the message.
+
+Each node has the following structure:
+:     +---------+--------------------+
+:     | Size    | Field              |
+:     +---------+--------------------+
+:     | 2 bytes |Starting Block Index|
+:     +---------+--------------------+
+:     | 2 bytes |Number of Blocks    |
+:     +---------+--------------------+
+
+The producer and the consumer update different parts of the control channel
+(SMQOut / SMQIn) respectively. Each of these control data structures contains
+information about the last node that was written / read, and the actual nodes
+that were written/read.
+
+SMQOut Structure (R/W by producer, R by consumer):
+:     +---------+-------------------+
+:     | Size    | Field             |
+:     +---------+-------------------+
+:     | 4 bytes | Magic Init Number |
+:     +---------+-------------------+
+:     | 4 bytes | Reset             |
+:     +---------+-------------------+
+:     | 4 bytes | Last Sent Index   |
+:     +---------+-------------------+
+:     | 4 bytes | Index Free Read   |
+:     +---------+-------------------+
+
+SMQIn Structure (R/W by consumer, R by producer):
+:     +---------+-------------------+
+:     | Size    | Field             |
+:     +---------+-------------------+
+:     | 4 bytes | Magic Init Number |
+:     +---------+-------------------+
+:     | 4 bytes | Reset ACK         |
+:     +---------+-------------------+
+:     | 4 bytes | Last Read Index   |
+:     +---------+-------------------+
+:     | 4 bytes | Index Free Write  |
+:     +---------+-------------------+
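+
+A minimal C sketch of how the header and the two control structures could be
+laid out, based on the field sizes in the tables above. The struct and field
+names are illustrative only and are not taken from the driver sources
+(assumes <stdint.h> fixed-width types):
+
+  struct smq_header {
+          uint8_t producer_version;       /* currently 1 */
+          uint8_t consumer_version;       /* currently 1 */
+  };
+
+  struct smq_node {
+          uint16_t start_block_index;     /* first data block of the message */
+          uint16_t num_blocks;            /* blocks holding the message */
+  };
+
+  struct smq_out {                        /* R/W by producer, R by consumer */
+          uint32_t magic_init;            /* Magic Init Number (0xFF00FF01) */
+          uint32_t reset;                 /* Reset */
+          uint32_t last_sent_index;       /* Last Sent Index */
+          uint32_t index_free_read;       /* Index Free Read */
+  };
+
+  struct smq_in {                         /* R/W by consumer, R by producer */
+          uint32_t magic_init;            /* Magic Init Number (0xFF00FF02) */
+          uint32_t reset_ack;             /* Reset ACK */
+          uint32_t last_read_index;       /* Last Read Index */
+          uint32_t index_free_write;      /* Index Free Write */
+  };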
+
+Magic Init Number:
+Both SMQ Out and SMQ In initialize this field with a predefined magic
+number so as to make sure that both the consumer and producer blocks
+have fully initialized and have valid data in the shared memory control area.
+  Producer Magic #: 0xFF00FF01
+  Consumer Magic #: 0xFF00FF02
+
+SMQ Out's Last Sent Index and Index Free Read:
+  Only a producer can write to these indexes and they are updated whenever
+  there is new payload to be inserted into the SMQ in order to be sent to a
+  consumer.
+
+  The number of blocks required for the SMQ allocation is determined as:
+   (payload size + SM_BLOCKSIZE - 1) / SM_BLOCKSIZE
+
+  The private block map is searched for a large enough continuous set of blocks
+  and the user data is copied into the data blocks.
+
+  The starting index of the free block(s) is updated in the SMQOut's Last Sent
+  Index. This update keeps track of which index was last written to and the
+  producer uses it to determine where the next allocation could be done.
+
+  On every allocation, a producer updates the Index Free Read from its
+  collaborating consumer's Index Free Write field (if they are unequal).
+  This index value indicates that the consumer has read all blocks associated
+  with that allocation on the SMQ and that the producer can reuse these blocks
+  for subsequent allocations since this is a circular queue.
+
+  At cold boot and restart, these indexes are initialized to zero and all
+  blocks are marked as available for allocation.
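+
+  A compact C sketch of the producer-side arithmetic and free-block search,
+  for illustration only (the helper names are hypothetical, and a simple
+  linear scan is shown where the driver's Interface section describes a
+  next fit allocator; SM_BLOCKSIZE is as defined in the earlier sketch):
+
+    /* blocks needed to hold one message, rounded up to whole blocks */
+    static unsigned int smq_blocks_needed(unsigned int payload_size)
+    {
+            return (payload_size + SM_BLOCKSIZE - 1) / SM_BLOCKSIZE;
+    }
+
+    /* find 'needed' contiguous free blocks in the producer's private block
+     * map (0 = free); returns the starting block index, or -1 if the queue
+     * currently has no large enough run
+     */
+    static int smq_find_free_run(const unsigned char *block_map,
+                                 unsigned int num_blocks,
+                                 unsigned int needed)
+    {
+            unsigned int start = 0, run = 0, i;
+
+            for (i = 0; i < num_blocks; i++) {
+                    if (block_map[i] == 0) {
+                            if (run == 0)
+                                    start = i;
+                            if (++run == needed)
+                                    return (int)start;
+                    } else {
+                            run = 0;
+                    }
+            }
+            return -1;
+    }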
+
+SMQ In's Last Read Index and Index Free Write:
+  These indexes are written to only by a consumer and are updated whenever
+  there is new payload to be read from the SMQ. The Last Read Index keeps
+  track of which index was last read by the consumer and using this, it
+  determines where the next read should be done.
+  After completing a read, Last Read Index is incremented to the
+  next block index. A consumer updates Index Free Write to the starting
+  index of an allocation whenever it has completed processing the blocks.
+  This is an optimization that avoids an additional copy of data from the
+  queue into a client's data buffer; the data in the queue itself can be
+  used directly.
+  Once Index Free Write is updated, the collaborating producer (on the next
+  data allocation) reads the updated Index Free Write value and it then
+  updates its corresponding SMQ Out's Index Free Read and marks the blocks
+  associated with that index as available for allocation. At cold boot and
+  restart, these indexes are initialized to zero.
+
+SMQ Out Reset# and SMQ In Reset ACK #:
+  Since subsystems can restart at any time, the data blocks and control channel
+  can be in an inconsistent state when a producer or consumer comes up.
+  We use Reset and Reset ACK to manage this. At cold boot, the producer
+  initializes the Reset# to a known number, e.g. 1. On every subsequent reset
+  that the producer undergoes, the Reset# is simply incremented by 1. All the
+  producer indexes are reset.
+  When the producer notifies the consumer of data availability, the consumer
+  reads the producer's Reset# and copies it into its SMQ In Reset ACK#
+  field when they differ. When that occurs, the consumer resets its
+  indexes to 0.
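+
+  As a minimal sketch of the consumer-side check (reusing the illustrative
+  struct names from the layout sketch earlier in this document):
+
+    static void smq_check_reset(struct smq_out *out, struct smq_in *in)
+    {
+            if (in->reset_ack != out->reset) {
+                    /* producer cold booted or restarted: drop the
+                     * consumer's read state and acknowledge the reset
+                     */
+                    in->last_read_index = 0;
+                    in->index_free_write = 0;
+                    in->reset_ack = out->reset;
+            }
+    }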
+
+6) Asynchronous notifications between a producer and consumer are
+done using the SMP2P service which is interrupt based.
+
+Power Management
+================
+
+None
+
+SMP/multi-core
+==============
+
+The driver uses completion to wake up the Debug Agent client threads.
+
+Security
+========
+
+From the perspective of the subsystem, the AP is untrusted. The remote
+stubs consult the secure debug fuses to determine whether or not
+remote debugging is enabled on the subsystem.
+
+If the hardware debug fuses indicate that debugging is disabled, the
+remote stubs will not be functional on the subsystem. Writes to the
+queue will only be done if the driver sees that the remote stub has been
+initialized on the subsystem.
+
+Therefore, even if untrusted software running on the AP requests
+the services of the Remote Debug Driver and injects RSP messages
+into the shared memory buffer, these RSP messages will be discarded and
+an appropriate error code will be returned to the invoking application.
+
+Performance
+===========
+
+During operation, the Remote Debug Driver copies RSP messages
+asynchronously sent from the host debugger to the remote stub and vice
+versa. The debug messages are ASCII based and relatively short
+(<25 bytes) and may occasionally go up to a maximum of 700 bytes
+depending on the command the user requested. Thus we do not
+anticipate any major performance impact. Moreover, in a typical
+functional debug scenario performance should not be a concern.
+
+Interface
+=========
+
+The Remote Debug Driver is a character based device that manages
+a piece of shared memory that is used as a bi-directional
+single producer/consumer circular queue using a next fit allocator.
+Every subsystem has its own shared memory buffer that is managed
+like a separate device.
+
+The driver distinguishes each subsystem processor's buffer by
+registering a node with a different minor number.
+
+For each subsystem that is supported, the driver exposes a user space
+interface through the following node:
+    - /dev/rdbg-<subsystem>
+    Ex. /dev/rdbg-adsp (for the ADSP subsystem)
+
+The standard open(), close(), read() and write() API set is
+implemented.
+
+The open() syscall will fail if a subsystem is not present or supported
+by the driver or a shared memory buffer cannot be allocated for the
+AP to subsystem communication. It will also fail if the subsystem has
+not initialized the queue on its side. Here are the error codes returned
+in case a call to open() fails:
+ENODEV - memory was not yet allocated for the device
+EEXIST - device is already opened
+ENOMEM - SMEM allocation failed
+ECOMM - Subsystem queue is not yet set up
+ENOMEM - Failure to initialize SMQ
+
+read() is a blocking call that will return with the number of bytes written
+by the subsystem whenever the subsystem sends it some payload. Here are the
+error codes returned in case a call to read() fails:
+EINVAL - Invalid input
+ENODEV - Device has not been opened yet
+ERESTARTSYS - call to wait_for_completion_interruptible is interrupted
+ENODATA - call to smq_receive failed
+
+write() attempts to send user mode payload out to the subsystem. It can fail
+if the SMQ is full. The number of bytes written is returned to the user.
+Here are the error codes returned in case a call to write() fails:
+EINVAL - Invalid input
+ECOMM - SMQ send failed
+
+In the close() syscall, the control information state of the SMQ is
+initialized to zero thereby preventing any further communication between
+the AP and the subsystem. Here is the error code returned in case
+a call to close() fails:
+ENODEV - device wasn't opened/initialized
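+
+As a user space sketch of the sequence above, assuming the /dev/rdbg-adsp
+node mentioned earlier and using a placeholder RSP packet (in practice the
+payload comes from the host debugger via the Debug Agent):
+
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          char reply[1024];
+          ssize_t n;
+          int fd = open("/dev/rdbg-adsp", O_RDWR);
+
+          if (fd < 0) {
+                  perror("open");         /* see error codes above */
+                  return 1;
+          }
+
+          /* forward one RSP packet to the remote stub */
+          n = write(fd, "$qSupported#37", 14);
+          if (n < 0)
+                  perror("write");
+
+          /* block until the stub sends back a payload */
+          n = read(fd, reply, sizeof(reply));
+          if (n > 0)
+                  printf("stub replied with %zd bytes\n", n);
+
+          close(fd);
+          return 0;
+  }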
+
+The Remote Debug driver uses SMP2P for bi-directional AP to subsystem
+notification. Notifications are sent to indicate that there are new
+debug messages available for processing. Each subsystem that is
+supported will need to add a device tree entry per the usage
+specification of the SMP2P driver.
+
+In case the remote stub becomes non-operational or the security configuration
+on the subsystem does not permit debugging, any messages put in the SMQ will
+not be responded to. It is the responsibility of the Debug Agent app and the
+host debugger application, such as GDB, to time out and notify the user that
+remote debugging is not available.
+
+Driver parameters
+=================
+
+None
+
+Config options
+==============
+
+The driver is configured with a device tree entry to map an SMP2P entry
+to the device. The SMP2P entry name used is "rdbg". Please see
+Documentation/arm/msm/msm_smp2p.txt for information about the
+device tree entry required to configure SMP2P.
+
+The driver uses the SMEM allocation type SMEM_LC_DEBUGGER to allocate memory
+for the queue that is used to share data with the subsystems.
+
+Dependencies
+============
+
+The Debug Agent driver requires services of SMEM to
+allocate shared memory buffers.
+
+SMP2P is used as a bi-directional notification
+mechanism between the AP and a subsystem processor.
+
+User space utilities
+====================
+
+This driver is meant to be used in conjunction with the user mode
+Remote Debug Agent application.
+
+Other
+=====
+
+None
+
+Known issues
+============
+For targets with an external subsystem, we cannot use
+shared memory for communication and would have to use the prevailing
+transport mechanisms that exist between the AP and the external subsystem.
+
+This driver cannot be leveraged for such targets.
+
+To do
+=====
+
+None
diff --git a/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt b/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt
new file mode 100644
index 0000000..e63d09b
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt
@@ -0,0 +1,44 @@
+* Memory Share Driver (MEMSHARE)
+
+The Memshare driver implements a Kernel QMI service on the
+LA-APSS, which is responsible for providing contiguous physical
+memory to MPSS for use cases when the modem requires additional
+memory (e.g. GPS).
+
+Required properties for Memshare
+
+-Root Node-
+
+- compatible:	Must be "qcom,memshare"
+
+Required properties for child nodes:
+
+- compatible:	Must be "qcom,memshare-peripheral"
+
+- qcom,peripheral-size:	Indicates the size (in bytes) required for that child.
+
+- qcom,client-id:	Indicates the client id of the child node.
+
+- label:	Indicates the peripheral information for the node. Should be one of
+  the following:
+  - modem	/* Represent Modem Peripheral */
+  - adsp	/* Represent ADSP Peripheral */
+  - wcnss	/* Represent WCNSS Peripheral */
+
+Optional properties for child nodes:
+
+- qcom,allocate-boot-time:	Indicates whether the client needs boot-time memory allocation.
+
+Example:
+
+qcom,memshare {
+	compatible = "qcom,memshare";
+
+	qcom,client_1 {
+		compatible = "qcom,memshare-peripheral";
+		qcom,peripheral-size = <0x200000>;
+		qcom,client-id = <0>;
+		qcom,allocate-boot-time;
+		label = "modem";
+	};
+};
diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt b/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
index ae476d0..797dbcc 100644
--- a/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
+++ b/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
@@ -28,9 +28,6 @@
 	- qcom,default-level: The default low power level that a cluster is
 	programmed. The SPM of the corresponding device is configured at this
 	low power mode by default.
-	- qcom,cpu: List of CPU phandles to identify the CPUs associated with
-	this cluster. This property is required if and only if the cluster
-	node contains a qcom,pm-cpu node.
 
 	qcom,pm-cluster contains qcom,pm-cluster-level nodes which identify
 	the various low power modes that the cluster can enter. The
@@ -103,9 +100,13 @@
 					power collapse (PC)
 
 [Node bindings for qcom,pm-cpu]
-qcom,pm-cpu contains the low power modes that a cpu could enter. Currently it
-doesn't have any required properties and is a container for
-qcom,pm-cpu-levels.
+qcom,pm-cpu contains the low power modes that a CPU could enter and the CPUs
+that share these parameters. It contains the following properties.
+	- qcom,cpu: List of CPU phandles to identify the CPUs associated with
+	this cluster.
+	- qcom,pm-cpu-levels: The different low power modes that a CPU could
+	enter. The following section explains the required properties of this
+	node.
 
 [Node bindings for qcom,pm-cpu-levels]
  Required properties:
@@ -184,7 +185,6 @@
 			label = "a53";
 			qcom,spm-device-names = "l2";
 			qcom,default-level=<0>;
-			qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
 
 			qcom,pm-cluster-level@0{
 				reg = <0>;
@@ -210,6 +210,7 @@
 			qcom,pm-cpu {
 				#address-cells = <1>;
 				#size-cells = <0>;
+				qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
 				qcom,pm-cpu-level@0 {
 					reg = <0>;
 					qcom,spm-cpu-mode = "wfi";
@@ -255,7 +256,6 @@
 			label = "a57";
 			qcom,spm-device-names = "l2";
 			qcom,default-level=<0>;
-			qcom,cpu = <&CPU4 &CPU5 &CPU6 &CPU7>;
 
 			qcom,pm-cluster-level@0{
 				reg = <0>;
@@ -281,6 +281,7 @@
 			qcom,pm-cpu {
 				#address-cells = <1>;
 				#size-cells = <0>;
+				qcom,cpu = <&CPU4 &CPU5 &CPU6 &CPU7>;
 				qcom,pm-cpu-level@0 {
 					reg = <0>;
 					qcom,spm-cpu-mode = "wfi";
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt b/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt
index 0a5c0b3..5fb3e65 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt
@@ -13,6 +13,7 @@
 - qcom,irq-mask : the bitmask to trigger an interrupt.
 - interrupt : the receiving interrupt line.
 - mbox-desc-offset : offset of mailbox descriptor from start of the msgram.
+- priority : the priority of this mailbox compared to other mailboxes.
 - #mbox-cells: Common mailbox binding property to identify the number of cells
 		required for the mailbox specifier, should be 1.
 
@@ -33,6 +34,7 @@
 		qcom,irq-mask = <0x1>;
 		interrupt = <0 389 1>;
 		mbox-desc-offset = <0x100>;
+		priority = <1>;
 		mbox-offset = <0x500>;
 		mbox-size = <0x400>;
 		#mbox-cells = <1>;
diff --git a/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt b/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt
new file mode 100644
index 0000000..ce2d8bd
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt
@@ -0,0 +1,17 @@
+Qualcomm Technologies, Inc. Remote Debugger (RDBG) driver
+
+Required properties:
+-compatible : Should be one of
+	To communicate with adsp
+		qcom,smp2pgpio_client_rdbg_2_in (inbound)
+		qcom,smp2pgpio_client_rdbg_2_out (outbound)
+	To communicate with modem
+		qcom,smp2pgpio_client_rdbg_1_in (inbound)
+		qcom,smp2pgpio_client_rdbg_1_out (outbound)
+-gpios : the relevant gpio pins of the entry.
+
+Example:
+	qcom,smp2pgpio_client_rdbg_2_in {
+		compatible = "qcom,smp2pgpio_client_rdbg_2_in";
+		gpios = <&smp2pgpio_rdbg_2_in 0 0>;
+	};
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 863a169..d3222fb 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -178,6 +178,9 @@
 - qcom,sde-te-size:		A u32 value indicates the te block address range.
 - qcom,sde-te2-size:		A u32 value indicates the te2 block address range.
 - qcom,sde-dsc-off:	 	A u32 offset indicates the dsc block offset on pingpong.
+- qcom,sde-dither-off:		A u32 offset indicates the dither block offset on pingpong.
+- qcom,sde-dither-version:	A u32 value indicates the dither block version.
+- qcom,sde-dither-size:		A u32 value indicates the dither block address range.
 - qcom,sde-sspp-vig-blocks:	A node that lists the blocks inside the VIG hardware. The
 				block entries will contain the offset and version (if needed)
 				of each feature block. The presence of a block entry
@@ -361,17 +364,23 @@
 				* Current values of src & dst are defined at
 				include/linux/msm-bus-board.h
 
+SMMU Subnodes:
+- smmu_sde_****:		Child nodes representing sde smmu virtual
+				devices
+
 Subnode properties:
-- compatible :          Compatible name used in smmu v2.
-                        smmu_v2 names should be:
-                        "qcom,smmu-mdp-unsec"   - smmu context bank device for
-                                                unsecure mdp domain.
-                        "qcom,smmu-rot-unsec"   - smmu context bank device for
-                                                unsecure rotation domain.
-                        "qcom,smmu-mdp-sec"     - smmu context bank device for
-                                                secure mdp domain.
-                        "qcom,smmu-rot-sec"     - smmu context bank device for
-                                                secure rotation domain.
+- compatible:			Compatible names used for smmu devices.
+				names should be:
+				"qcom,smmu_sde_unsec": smmu context bank device
+				for unsecure sde real time domain.
+				"qcom,smmu_sde_sec": smmu context bank device
+				for secure sde real time domain.
+				"qcom,smmu_sde_nrt_unsec": smmu context bank device
+				for unsecure sde non-real time domain.
+				"qcom,smmu_sde_nrt_sec": smmu context bank device
+				for secure sde non-real time domain.
+
+
 Please refer to ../../interrupt-controller/interrupts.txt for a general
 description of interrupt bindings.
 
@@ -673,4 +682,14 @@
               <1 590 0 160000>,
               <1 590 0 320000>;
         };
+
+    smmu_kms_unsec: qcom,smmu_kms_unsec_cb {
+        compatible = "qcom,smmu_sde_unsec";
+        iommus = <&mmss_smmu 0>;
+    };
+
+    smmu_kms_sec: qcom,smmu_kms_sec_cb {
+        compatible = "qcom,smmu_sde_sec";
+        iommus = <&mmss_smmu 1>;
+    };
   };
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
index c9aaa00..54365b1 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
@@ -13,16 +13,16 @@
   property defined.
 - gpios : should contain phandle to gpio controller node and array of
   #gpio-cells specifying specific gpio (controller specific)
-- qcom,gpio-req-tbl-num : should contain index to gpios specific to this sensor
-- qcom,gpio-req-tbl-flags : should contain direction of gpios present in
-  qcom,gpio-req-tbl-num property (in the same order)
-- qcom,gpio-req-tbl-label : should contain name of gpios present in
-  qcom,gpio-req-tbl-num property (in the same order)
+- gpio-req-tbl-num : should contain index to gpios specific to this sensor
+- gpio-req-tbl-flags : should contain direction of gpios present in
+  gpio-req-tbl-num property (in the same order)
+- gpio-req-tbl-label : should contain name of gpios present in
+  gpio-req-tbl-num property (in the same order)
 - clock-names: name of the clocks required for the device
 - clock-rates: clock rate in Hz
 
 Optional properties:
-- qcom,cam-vreg-name : name of the voltage regulators required for the device.
+- regulator-names : name of the voltage regulators required for the device.
 - gdscr-supply : should contain gdsr regulator used for cci clocks.
 - mmagic-supply : should contain mmagic regulator used for mmagic clocks.
 
@@ -43,18 +43,16 @@
 * Qualcomm Technologies, Inc. CCI clock settings
 
 Optional properties:
-- qcom,hw-thigh : should contain high period of the SCL clock in terms of CCI
-    clock cycle
-- qcom,hw-tlow : should contain high period of the SCL clock in terms of CCI
-    clock cycle
-- qcom,hw-tsu-sto : should contain setup time for STOP condition
-- qcom,hw-tsu-sta : should contain setup time for Repeated START condition
-- qcom,hw-thd-dat : should contain hold time for the data
-- qcom,hw-thd-sta : should contain hold time for START condition
-- qcom,hw-tbuf : should contain free time between a STOP and a START condition
-- qcom,hw-scl-stretch-en : should contain enable or disable clock stretching
-- qcom,hw-trdhld : should contain internal hold time for SDA
-- qcom,hw-tsp : should contain filtering of glitches
+- hw-thigh : should contain high period of the SCL clock in terms of CCI clock cycle
+- hw-tlow : should contain low period of the SCL clock in terms of CCI clock cycle
+- hw-tsu-sto : should contain setup time for STOP condition
+- hw-tsu-sta : should contain setup time for Repeated START condition
+- hw-thd-dat : should contain hold time for the data
+- hw-thd-sta : should contain hold time for START condition
+- hw-tbuf : should contain free time between a STOP and a START condition
+- hw-scl-stretch-en : should contain enable or disable clock stretching
+- hw-trdhld : should contain internal hold time for SDA
+- hw-tsp : should contain filtering of glitches
 
 * Qualcomm Technologies, Inc. MSM Sensor
 
@@ -64,7 +62,7 @@
 - compatible : should be manufacturer name followed by sensor name
   - "qcom,camera"
 - reg : should contain i2c slave address of the device
-- qcom,csiphy-sd-index : should contain csiphy instance that will used to
+- csiphy-sd-index : should contain csiphy instance that will be used to
   receive sensor data
   - 0, 1, 2
 - cam_vdig-supply : should contain regulator from which digital voltage is
@@ -72,67 +70,69 @@
 - cam_vana-supply : should contain regulator from which analog voltage is
   supplied
 - cam_vio-supply : should contain regulator from which IO voltage is supplied
-- qcom,cam-vreg-name : should contain names of all regulators needed by this
+- regulator-names : should contain names of all regulators needed by this
   sensor
   - "cam_vdig", "cam_vana", "cam_vio", "cam_vaf"
-- qcom,cam-vreg-min-voltage : should contain minimum voltage level for
-  regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-max-voltage : should contain maximum voltage level for
-  regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-op-mode : should contain optimum voltage level for regulators
-  mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,sensor-position-roll : should contain sensor rotational angle with respect
+- rgltr-cntrl-support : This is a boolean property. It is required if the
+  node uses regulator control parameters, e.g. rgltr-min-voltage.
+- rgltr-min-voltage : should contain minimum voltage level for
+  regulators mentioned in regulator-names property (in the same order)
+- rgltr-max-voltage : should contain maximum voltage level for
+  regulators mentioned in regulator-names property (in the same order)
+- rgltr-load-current : should contain the load current in microamps for the
+  regulators mentioned in regulator-names property (in the same order)
+- sensor-position-roll : should contain sensor rotational angle with respect
   to axis of reference
   - 0, 90, 180, 360
-- qcom,sensor-position-pitch : should contain sensor rotational angle with respect
+- sensor-position-pitch : should contain sensor rotational angle with respect
   to axis of reference
   - 0, 90, 180, 360
-- qcom,sensor-position-yaw : should contain sensor rotational angle  with respect
+- sensor-position-yaw : should contain sensor rotational angle  with respect
   to axis of reference
   - 0, 90, 180, 360
 Optional properties:
-- qcom,slave-id : should contain i2c slave address, device id address, expected
+- slave-id : should contain i2c slave address, device id address, expected
   id read value and device id mask
-- qcom,sensor-name : should contain unique sensor name to differentiate from
+- sensor-name : should contain unique sensor name to differentiate from
   other sensor
   - "s5k3l1yx"
-- qcom,sensor-mode : should contain sensor mode supported
+- sensor-mode : should contain sensor mode supported
   - 0 -> back camera 2D
   - 1 -> front camera 2D
   - 2 -> back camera 3D
   - 3 -> back camera int 3D
-- qcom,sensor-type : should contain format of data that sensor streams
+- sensor-type : should contain format of data that sensor streams
   - 0 -> bayer format
   - 1 -> yuv format
 - qcom,secure : should be enabled to operate the camera in secure mode
   - 0, 1
-- qcom,gpio-no-mux : should contain field to indicate whether gpio mux table is
+- gpio-no-mux : should contain field to indicate whether gpio mux table is
   available
   - 1 if gpio mux is not available, 0 otherwise
 - cam_vaf-supply : should contain regulator from which AF voltage is supplied
 - gpios : should contain phandle to gpio controller node and array of
-   #gpio-cells specifying specific gpio (controller specific)
-- qcom,gpio-reset : should contain index to gpio used by sensors reset_n
-- qcom,gpio-standby : should contain index to gpio used by sensors standby_n
-- qcom,gpio-vio : should contain index to gpio used by sensors io vreg enable
-- qcom,gpio-vana : should contain index to gpio used by sensors analog vreg enable
-- qcom,gpio-vdig : should contain index to gpio used by sensors digital vreg enable
-- qcom,gpio-vaf : should contain index to gpio used by sensors af vreg enable
-- qcom,gpio-af-pwdm : should contain index to gpio used by sensors af pwdm_n
-- qcom,gpio-req-tbl-num : should contain index to gpios specific to this sensor
-- qcom,gpio-req-tbl-flags : should contain direction of gpios present in
-  qcom,gpio-req-tbl-num property (in the same order)
-- qcom,gpio-req-tbl-label : should contain name of gpios present in
-  qcom,gpio-req-tbl-num property (in the same order)
-- qcom,gpio-set-tbl-num : should contain index of gpios that need to be
+    #gpio-cells specifying specific gpio (controller specific)
+- gpio-reset : should contain index to gpio used by sensors reset_n
+- gpio-standby : should contain index to gpio used by sensors standby_n
+- gpio-vio : should contain index to gpio used by sensors io vreg enable
+- gpio-vana : should contain index to gpio used by sensors analog vreg enable
+- gpio-vdig : should contain index to gpio used by sensors digital vreg enable
+- gpio-vaf : should contain index to gpio used by sensors af vreg enable
+- gpio-af-pwdm : should contain index to gpio used by sensors af pwdm_n
+- gpio-req-tbl-num : should contain index to gpios specific to this sensor
+- gpio-req-tbl-flags : should contain direction of gpios present in
+  gpio-req-tbl-num property (in the same order)
+- gpio-req-tbl-label : should contain name of gpios present in
+  gpio-req-tbl-num property (in the same order)
+- gpio-set-tbl-num : should contain index of gpios that need to be
   configured by msm
-- qcom,gpio-set-tbl-flags : should contain value to be configured for the gpios
-  present in qcom,gpio-set-tbl-num property (in the same order)
-- qcom,gpio-set-tbl-delay : should contain amount of delay after configuring
+- gpio-set-tbl-flags : should contain value to be configured for the gpios
+  present in gpio-set-tbl-num property (in the same order)
+- gpio-set-tbl-delay : should contain amount of delay after configuring
   gpios as specified in gpio_set_tbl_flags property (in the same order)
-- qcom,csi-phy-sel : should contain CSIPHY core instance from which CSID should
+- csi-phy-sel : should contain CSIPHY core instance from which CSID should
   receive data
-- qcom,actuator-cam-name : should contain actuator cam name associated with
+- actuator-cam-name : should contain actuator cam name associated with
   this sensor
   - If actuator does not exist, this property should not be initialized
   - If actuator exist, this field should indicate the index of actuator to
@@ -141,39 +141,40 @@
   for actuator
 - qcom,actuator-vcm-enable : should contain value to be set for actuator vcm
   gpio
-- qcom,sensor-position : should contain the mount angle of the camera sensor
+- sensor-position : should contain the mount angle of the camera sensor
   - 0 -> back camera
   - 1 -> front camera
-- qcom,cci-master : should contain i2c master id to be used for this camera
+- cci-master : should contain i2c master id to be used for this camera
   sensor
   - 0 -> MASTER 0
   - 1 -> MASTER 1
-- qcom,actuator-src : if auto focus is supported by this sensor, this
+- actuator-src : if auto focus is supported by this sensor, this
   property should contain phandle of respective actuator node
-- qcom,led-flash-src : if LED flash is supported by this sensor, this
+- led-flash-src : if LED flash is supported by this sensor, this
   property should contain phandle of respective LED flash node
 - qcom,vdd-cx-supply : should contain regulator from which cx voltage is
   supplied
 - qcom,vdd-cx-name : should contain names of cx regulator
-- qcom,eeprom-src : if eeprom memory is supported by this sensor, this
+- eeprom-src : if eeprom memory is supported by this sensor, this
   property should contain phandle of respective eeprom nodes
-- qcom,ois-src : if optical image stabilization is supported by this sensor,
+- ois-src : if optical image stabilization is supported by this sensor,
   this property should contain phandle of respective ois node
-- qcom,ir-led-src : if ir led is supported by this sensor, this property
+- ir-led-src : if ir led is supported by this sensor, this property
   should contain phandle of respective ir-led node
 - qcom,ir-cut-src : if ir cut is supported by this sensor, this property
   should contain phandle of respective ir-cut node
 - qcom,special-support-sensors: if only some special sensors are supported
   on this board, add sensor name in this property.
-- qcom,clock-rates: clock rate in Hz.
-- qcom,clock-cntl-support: Says whether clock control support is present or not
-- qcom,clock-control: The valid fields are "NO_SET_RATE", "INIT_RATE" and
+- clock-rates: clock rate in Hz.
+- clock-cntl-level: specifies the clock control levels supported by this
+  node (e.g. svs, nominal, turbo).
+- clock-cntl-support: Says whether clock control support is present or not
+- clock-control: The valid fields are "NO_SET_RATE", "INIT_RATE" and
   "SET_RATE". "NO_SET_RATE" the corresponding clock is enabled without setting
   the rate assuming some other driver has already set it to appropriate rate.
   "INIT_RATE" clock rate is not queried assuming some other driver has set
   the clock rate and ispif will set the the clock to this rate.
   "SET_RATE" clock is enabled and the rate is set to the value specified
-  in the property qcom,clock-rates.
+  in the property clock-rates.
 
 * Qualcomm Technologies, Inc. MSM ACTUATOR
 
@@ -184,20 +185,22 @@
   data field which is 0x0
 - compatible :
   - "qcom,actuator"
-- qcom,cci-master : should contain i2c master id to be used for this camera
+- cci-master : should contain i2c master id to be used for this camera
   sensor
   - 0 -> MASTER 0
   - 1 -> MASTER 1
 Optional properties:
-- qcom,cam-vreg-name : should contain names of all regulators needed by this
+- regulator-names : should contain names of all regulators needed by this
   actuator
   - "cam_vaf"
-- qcom,cam-vreg-min-voltage : should contain minimum voltage level in mcrovolts
-  for regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-max-voltage : should contain maximum voltage level in mcrovolts
-  for regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-op-mode : should contain the maximum current in microamps
-  required from the regulators mentioned in the qcom,cam-vreg-name property
+- rgltr-cntrl-support : This is a boolean property. It is required if the
+  node uses regulator control parameters, e.g. rgltr-min-voltage.
+- rgltr-min-voltage : should contain minimum voltage level in microvolts
+  for regulators mentioned in regulator-names property (in the same order)
+- rgltr-max-voltage : should contain maximum voltage level in microvolts
+  for regulators mentioned in regulator-names property (in the same order)
+- rgltr-load-current : should contain the maximum current in microamps
+  required from the regulators mentioned in the regulator-names property
   (in the same order).
 - cam_vaf-supply : should contain regulator from which AF voltage is supplied
 
@@ -210,22 +213,24 @@
   data field which is 0x0
 - compatible :
   - "qcom,ois"
-- qcom,cci-master : should contain i2c master id to be used for this camera
+- cci-master : should contain i2c master id to be used for this camera
   sensor
   - 0 -> MASTER 0
   - 1 -> MASTER 1
-- qcom,clock-rates: clock rate in Hz.
+- clock-rates: clock rate in Hz.
 
 Optional properties:
-- qcom,cam-vreg-name : should contain names of all regulators needed by this
+- regulator-names : should contain names of all regulators needed by this
   ois
   - "cam_vaf"
-- qcom,cam-vreg-min-voltage : should contain minimum voltage level in mcrovolts
-  for regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-max-voltage : should contain maximum voltage level in mcrovolts
-  for regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-op-mode : should contain the maximum current in microamps
-  required from the regulators mentioned in the qcom,cam-vreg-name property
+- rgltr-cntrl-support : This is a boolean property. It is required if the
+  node uses regulator control parameters, e.g. rgltr-min-voltage.
+- rgltr-min-voltage : should contain minimum voltage level in microvolts
+  for regulators mentioned in regulator-names property (in the same order)
+- rgltr-max-voltage : should contain maximum voltage level in microvolts
+  for regulators mentioned in regulator-names property (in the same order)
+- rgltr-load-current : should contain the maximum current in microamps
+  required from the regulators mentioned in the regulator-names property
   (in the same order).
 - cam_vaf-supply : should contain regulator from which ois voltage is supplied
 
@@ -234,9 +239,9 @@
 led_flash0: qcom,camera-flash@0 {
     cell-index = <0>;
     compatible = "qcom,camera-flash";
-    qcom,flash-source = <&pmi8994_flash0 &pmi8994_flash1>;
-    qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
-    qcom,switch-source = <&pmi8998_switch>;
+    flash-source = <&pmi8994_flash0 &pmi8994_flash1>;
+    torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+    switch-source = <&pmi8998_switch>;
     status = "ok";
 }
 
@@ -248,70 +253,71 @@
     interrupts = <0 50 0>;
     interrupt-names = "cci";
     clock-names = "camnoc_axi_clk", "soc_ahb_clk",
-         "slow_ahb_src_clk", "cpas_ahb_clk",
-         "cci_clk", "cci_clk_src";
-    qcom,clock-rates = <0 0 80000000 0 0 37500000>;
+	"slow_ahb_src_clk", "cpas_ahb_clk",
+	"cci_clk", "cci_clk_src";
+    clock-rates = <0 0 80000000 0 0 37500000>;
+	clock-cntl-level = "turbo";
     gpios = <&tlmm 17 0>,
            <&tlmm 18 0>,
            <&tlmm 19 0>,
            <&tlmm 20 0>;
-    qcom,gpio-tbl-num = <0 1 2 3>;
-    qcom,gpio-tbl-flags = <1 1 1 1>;
-    qcom,gpio-tbl-label = "CCI_I2C_DATA0",
+    gpio-tbl-num = <0 1 2 3>;
+    gpio-tbl-flags = <1 1 1 1>;
+    gpio-tbl-label = "CCI_I2C_DATA0",
                     "CCI_I2C_CLK0",
                     "CCI_I2C_DATA1",
                     "CCI_I2C_CLK1";
     i2c_freq_100Khz: qcom,i2c_standard_mode {
-         qcom,hw-thigh = <78>;
-         qcom,hw-tlow = <114>;
-         qcom,hw-tsu-sto = <28>;
-         qcom,hw-tsu-sta = <28>;
-         qcom,hw-thd-dat = <10>;
-         qcom,hw-thd-sta = <77>;
-         qcom,hw-tbuf = <118>;
-         qcom,hw-scl-stretch-en = <0>;
-         qcom,hw-trdhld = <6>;
-         qcom,hw-tsp = <1>;
+         hw-thigh = <78>;
+         hw-tlow = <114>;
+         hw-tsu-sto = <28>;
+         hw-tsu-sta = <28>;
+         hw-thd-dat = <10>;
+         hw-thd-sta = <77>;
+         hw-tbuf = <118>;
+         hw-scl-stretch-en = <0>;
+         hw-trdhld = <6>;
+         hw-tsp = <1>;
          status = "ok";
     };
     i2c_freq_400Khz: qcom,i2c_fast_mode {
-         qcom,hw-thigh = <20>;
-         qcom,hw-tlow = <28>;
-         qcom,hw-tsu-sto = <21>;
-         qcom,hw-tsu-sta = <21>;
-         qcom,hw-thd-dat = <13>;
-         qcom,hw-thd-sta = <18>;
-         qcom,hw-tbuf = <25>;
-         qcom,hw-scl-stretch-en = <0>;
-         qcom,hw-trdhld = <6>;
-         qcom,hw-tsp = <3>;
+         hw-thigh = <20>;
+         hw-tlow = <28>;
+         hw-tsu-sto = <21>;
+         hw-tsu-sta = <21>;
+         hw-thd-dat = <13>;
+         hw-thd-sta = <18>;
+         hw-tbuf = <25>;
+         hw-scl-stretch-en = <0>;
+         hw-trdhld = <6>;
+         hw-tsp = <3>;
          status = "ok";
     };
     i2c_freq_custom: qcom,i2c_custom_mode {
-         qcom,hw-thigh = <15>;
-         qcom,hw-tlow = <28>;
-         qcom,hw-tsu-sto = <21>;
-         qcom,hw-tsu-sta = <21>;
-         qcom,hw-thd-dat = <13>;
-         qcom,hw-thd-sta = <18>;
-         qcom,hw-tbuf = <25>;
-         qcom,hw-scl-stretch-en = <1>;
-         qcom,hw-trdhld = <6>;
-         qcom,hw-tsp = <3>;
+         hw-thigh = <15>;
+         hw-tlow = <28>;
+         hw-tsu-sto = <21>;
+         hw-tsu-sta = <21>;
+         hw-thd-dat = <13>;
+         hw-thd-sta = <18>;
+         hw-tbuf = <25>;
+         hw-scl-stretch-en = <1>;
+         hw-trdhld = <6>;
+         hw-tsp = <3>;
          status = "ok";
     };
     i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
-         qcom,hw-thigh = <16>;
-         qcom,hw-tlow = <22>;
-         qcom,hw-tsu-sto = <17>;
-         qcom,hw-tsu-sta = <18>;
-         qcom,hw-thd-dat = <16>;
-         qcom,hw-thd-sta = <15>;
-         qcom,hw-tbuf = <19>;
-         qcom,hw-scl-stretch-en = <1>;
-         qcom,hw-trdhld = <3>;
-         qcom,hw-tsp = <3>;
-         qcom,cci-clk-src = <37500000>;
+         hw-thigh = <16>;
+         hw-tlow = <22>;
+         hw-tsu-sto = <17>;
+         hw-tsu-sta = <18>;
+         hw-thd-dat = <16>;
+         hw-thd-sta = <15>;
+         hw-tbuf = <19>;
+         hw-scl-stretch-en = <1>;
+         hw-trdhld = <3>;
+         hw-tsp = <3>;
+         cci-clk-src = <37500000>;
          status = "ok";
     };
 
@@ -319,34 +325,36 @@
          cell-index = <0>;
          reg = <0x0>;
          compatible = "qcom,actuator";
-         qcom,cci-master = <0>;
+         cci-master = <0>;
          cam_vaf-supply = <&pmi8998_bob>;
-         qcom,cam-vreg-name = "cam_vaf";
-         qcom,cam-vreg-min-voltage = <2800000>;
-         qcom,cam-vreg-max-voltage = <2800000>;
-         qcom,cam-vreg-op-mode = <100000>;
+         regulator-names = "cam_vaf";
+	 rgltr-cntrl-support;
+         rgltr-min-voltage = <2800000>;
+         rgltr-max-voltage = <2800000>;
+         rgltr-load-current = <100000>;
     };
 
     qcom,cam-sensor@0 {
          cell-index = <0>;
          compatible = "qcom,camera";
          reg = <0x0>;
-         qcom,csiphy-sd-index = <0>;
-         qcom,sensor-position-roll = <90>;
-         qcom,sensor-position-pitch = <0>;
-         qcom,sensor-position-yaw = <180>;
-         qcom,secure = <1>;
-         qcom,led-flash-src = <&led_flash0>;
-         qcom,actuator-src = <&actuator0>;
-         qcom,eeprom-src = <&eeprom0>;
+         csiphy-sd-index = <0>;
+         sensor-position-roll = <90>;
+         sensor-position-pitch = <0>;
+         sensor-position-yaw = <180>;
+         secure = <1>;
+         led-flash-src = <&led_flash0>;
+         actuator-src = <&actuator0>;
+         eeprom-src = <&eeprom0>;
          cam_vdig-supply = <&pm845_s3>;
          cam_vio-supply = <&pm845_lvs1>;
          cam_vana-supply = <&pmi8998_bob>;
-         qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
-         qcom,cam-vreg-min-voltage = <0 3312000 1352000>;
-         qcom,cam-vreg-max-voltage = <0 3312000 1352000>;
-         qcom,cam-vreg-op-mode = <0 80000 105000>;
-         qcom,gpio-no-mux = <0>;
+         regulator-names = "cam_vdig", "cam_vio", "cam_vana";
+	 rgltr-cntrl-support;
+         rgltr-min-voltage = <0 3312000 1352000>;
+         rgltr-max-voltage = <0 3312000 1352000>;
+         rgltr-load-current = <0 80000 105000>;
+         gpio-no-mux = <0>;
          pinctrl-names = "cam_default", "cam_suspend";
          pinctrl-0 = <&cam_sensor_mclk0_active
                    &cam_sensor_rear_active>;
@@ -355,19 +363,20 @@
          gpios = <&tlmm 13 0>,
               <&tlmm 80 0>,
               <&tlmm 79 0>;
-         qcom,gpio-reset = <1>;
-         qcom,gpio-standby = <2>;
-         qcom,gpio-req-tbl-num = <0 1 2>;
-         qcom,gpio-req-tbl-flags = <1 0 0>;
-         qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+         gpio-reset = <1>;
+         gpio-standby = <2>;
+         gpio-req-tbl-num = <0 1 2>;
+         gpio-req-tbl-flags = <1 0 0>;
+         gpio-req-tbl-label = "CAMIF_MCLK0",
                          "CAM_RESET0",
                          "CAM_VANA";
-         qcom,sensor-position = <0>;
-         qcom,sensor-mode = <0>;
-         qcom,cci-master = <0>;
+         sensor-position = <0>;
+         sensor-mode = <0>;
+         cci-master = <0>;
          status = "ok";
          clocks = <&clock_mmss clk_mclk0_clk_src>,
                <&clock_mmss clk_camss_mclk0_clk>;
          clock-names = "cam_src_clk", "cam_clk";
+         clock-cntl-level = "turbo";
     };
 };
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
index d62910a..3dc661f 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
@@ -118,6 +118,12 @@
   Value type: <string>
   Definition: List of Clients supported by CDM HW node.
 
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: List of strings corresponding to the clock-rates levels.
+  Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
 Example:
 	qcom,cpas-cdm0@ac48000 {
 		cell-index = <0>;
@@ -143,5 +149,6 @@
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
 		qcom,clock-rates = <0 80000000 80000000 80000000 80000000 80000000>;
 		cdm-client-names = "ife";
+		clock-cntl-level = "turbo";
 		status = "ok";
 	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
index 62a51cf..99f3ba2 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
@@ -97,6 +97,12 @@
   Value type: <u32>
   Definition: List of clocks rates.
 
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: List of strings corresponding to the clock-rates levels.
+  Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
 - qcom,msm-bus,name
 - qcom,msm-bus,num-cases
 - qcom,msm-bus,num-paths
@@ -191,6 +197,7 @@
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
 		src-clock-name = "slow_ahb_clk_src";
 		clock-rates = <0 0 0 0 80000000 0>;
+		clock-cntl-level = "turbo";
 		qcom,msm-bus,name = "cam_ahb";
 		qcom,msm-bus,num-cases = <4>;
 		qcom,msm-bus,num-paths = <1>;
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt b/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt
index e8a74b3..dd8668c 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt
@@ -6,13 +6,15 @@
   - "qcom,csiphy-v5.01"
 - reg : offset and length of the register set for the device
   for the csiphy operating in compatible mode.
+- reg-cam-base : offset of csiphy in camera hw block
 - reg-names : should specify relevant names to each reg property defined.
 - interrupts : should contain the csiphy interrupt.
 - interrupt-names : should specify relevant names to each interrupts
   property defined.
 - clock-names: name of the clocks required for the device
-- qcom,clock-rates: clock rate in Hz
+- clock-rates: clock rate in Hz
   - 0 if appropriate clock is required but doesn't have to apply the rate
+- clock-cntl-level: specifies the clock control levels supported by this
+  node (e.g. svs, nominal, turbo).
 
 Example:
 
@@ -20,6 +22,7 @@
      cell-index = <0>;
      compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
      reg = <0xac65000 0x200>;
+     reg-cam-base = <0x65000>;
      reg-names = "csiphy";
      interrupts = <0 477 0>;
      interrupt-names = "csiphy";
@@ -28,7 +31,9 @@
               "cphy_rx_clk_src", "csiphy0_clk",
               "csi0phytimer_clk_src", "csi0phytimer_clk",
               "ife_0_csid_clk", "ife_0_csid_clk_src";
-     qcom,clock-rates =
+     clock-rates =
            <0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+     clock-cntl-level = "turbo";
+     regulator-names = "gdscr";
      status = "ok";
 };
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt b/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
index c560a05..28a0920 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
@@ -111,6 +111,12 @@
   Value type: <phandle>
   Definition: List of clocks used for CDM HW.
 
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: List of strings corresponding to the clock-rates levels.
+  Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
 - clock-rates
   Usage: required
   Value type: <u32>
@@ -157,6 +163,7 @@
 			<&clock_camcc CAM_CC_ICP_TS_CLK>;
 
 	clock-rates = <0 0 0 80000000 0 0 0 0 600000000 0 0>;
+	clock-cntl-level = "turbo";
 	fw_name = "CAMERA_ICP.elf";
 };
 
@@ -177,6 +184,7 @@
 			<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
 
 	clock-rates = <80000000 400000000 0 0 600000000>;
+	clock-cntl-level = "turbo";
 };
 
 qcom,ipe1 {
@@ -195,7 +203,8 @@
 			<&clock_camcc CAM_CC_IPE_1_CLK>,
 			<&clock_camcc CAM_CC_IPE_1_CLK_SRC>;
 
-		clock-rates = <80000000 400000000 0 0 600000000>;
+	clock-rates = <80000000 400000000 0 0 600000000>;
+	clock-cntl-level = "turbo";
 };
 
 bps: qcom,bps {
@@ -215,5 +224,6 @@
 			<&clock_camcc CAM_CC_BPS_CLK_SRC>;
 
 	clock-rates = <80000000 400000000 0 0 600000000>;
+	clock-cntl-level = "turbo";
 };
 
diff --git a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
index 6ac06c1..5b6bd97 100644
--- a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
+++ b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
@@ -42,6 +42,8 @@
                    see:
                    Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
 - interrupt-names: Corresponding interrupt name to the interrupts property
+- qcom,can-sleep:  Boolean flag indicating that processes waiting on SPMI
+		   transactions may sleep
 
 Each child node of SPMI slave id represents a function of the PMIC. In the
 example below the rtc device node represents a peripheral of pm8941
diff --git a/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt b/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt
new file mode 100644
index 0000000..1398309
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt
@@ -0,0 +1,82 @@
+* TSPP (QTI Transport Stream Packet Processor)
+
+Hardware driver for QTI TSIF 12seg wrapper core, which consists of a TSPP, a
+BAM (Bus access manager, used for DMA) and two TSIF inputs.
+
+The TSPP driver is responsible for:
+ - TSPP/TSIF hardware configuration (using SPS driver to configure BAM hardware)
+ - TSIF GPIO/Clocks configuration
+ - Memory resource management
+ - Handling TSIF/TSPP interrupts and BAM events
+ - TSPP Power management
+
+Required properties:
+- compatible : Should be "qcom,msm_tspp"
+- reg : Specifies the base physical addresses and sizes of TSIF, TSPP & BAM registers.
+- reg-names : Specifies the register names of TSIF, TSPP & BAM base registers.
+- interrupts : Specifies the interrupts associated with TSIF 12 seg core.
+- interrupt-names: Specifies interrupt names for TSIF, TSPP & BAM interrupts.
+- clock-names: Specifies the clock names used for interface & reference clocks.
+- clocks: GCC_TSIF_AHB_CLK clock for interface clock & GCC_TSIF_REF_CLK clock for reference clock.
+- qcom,msm-bus,name: Should be "tsif"
+- qcom,msm-bus,num-cases: Number of use cases for bus scaling
+- qcom,msm-bus,num-paths: Number of paths for source and destination ports
+- qcom,msm-bus,vectors-KBps: Vectors for the bus topology.
+- pinctrl-names: Names for the TSIF mode configuration to specify which TSIF interface is active.
+
+Optional properties:
+  - qcom,lpass-timer-tts : Indicates that time stamps are added to TS packets from the LPASS timer.
+                           By default, time stamps are added from the TSIF internal counter.
+
+Example:
+
+        tspp: msm_tspp@0x8880000 {
+                compatible = "qcom,msm_tspp";
+                reg = <0x088a7000 0x200>, /* MSM_TSIF0_PHYS */
+                      <0x088a8000 0x200>, /* MSM_TSIF1_PHYS */
+                      <0x088a9000 0x1000>, /* MSM_TSPP_PHYS  */
+                      <0x08884000 0x23000>; /* MSM_TSPP_BAM_PHYS */
+                reg-names = "MSM_TSIF0_PHYS",
+                        "MSM_TSIF1_PHYS",
+                        "MSM_TSPP_PHYS",
+                        "MSM_TSPP_BAM_PHYS";
+                interrupts = <0 121 0>, /* TSIF_TSPP_IRQ */
+                        <0 119 0>, /* TSIF0_IRQ */
+                        <0 120 0>, /* TSIF1_IRQ */
+                        <0 122 0>; /* TSIF_BAM_IRQ */
+                interrupt-names = "TSIF_TSPP_IRQ",
+                        "TSIF0_IRQ",
+                        "TSIF1_IRQ",
+                        "TSIF_BAM_IRQ";
+
+                clock-names = "iface_clk", "ref_clk";
+                clocks = <&clock_gcc GCC_TSIF_AHB_CLK>,
+                        <&clock_gcc GCC_TSIF_REF_CLK>;
+
+                qcom,msm-bus,name = "tsif";
+                qcom,msm-bus,num-cases = <2>;
+                qcom,msm-bus,num-paths = <1>;
+                qcom,msm-bus,vectors-KBps =
+                                <82 512 0 0>, /* No vote */
+                                <82 512 12288 24576>;
+                                /* Max. bandwidth, 2xTSIF, each max of 96Mbps */
+
+                pinctrl-names = "disabled",
+                        "tsif0-mode1", "tsif0-mode2",
+                        "tsif1-mode1", "tsif1-mode2",
+                        "dual-tsif-mode1", "dual-tsif-mode2";
+
+                pinctrl-0 = <>;                         /* disabled */
+                pinctrl-1 = <&tsif0_signals_active>;    /* tsif0-mode1 */
+                pinctrl-2 = <&tsif0_signals_active
+                        &tsif0_sync_active>;            /* tsif0-mode2 */
+                pinctrl-3 = <&tsif1_signals_active>;    /* tsif1-mode1 */
+                pinctrl-4 = <&tsif1_signals_active
+                        &tsif1_sync_active>;            /* tsif1-mode2 */
+                pinctrl-5 = <&tsif0_signals_active
+                        &tsif1_signals_active>;         /* dual-tsif-mode1 */
+                pinctrl-6 = <&tsif0_signals_active
+                        &tsif0_sync_active
+                        &tsif1_signals_active
+                        &tsif1_sync_active>;            /* dual-tsif-mode2 */
+        };
diff --git a/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt b/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
index 95cc85a..7711b8b 100644
--- a/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
+++ b/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
@@ -65,6 +65,17 @@
 		 and follow appropriate steps to ensure communication on the bus
 		 can be resumed after subsytem restart. By default slimbus driver
 		 register with ADSP subsystem.
+ - qcom,iommu-s1-bypass: Boolean flag to bypass IOMMU stage 1 translation.
+
+Optional subnodes:
+qcom,iommu_slim_ctrl_cb : Child node representing the Slimbus controller
+                          context bank.
+
+Subnode Required properties:
+- compatible : Must be "qcom,slim-ctrl-cb";
+- iommus : A list of phandle and IOMMU specifier pairs that
+           describe the IOMMU master interfaces of the device.
+
 Example:
 	slim@fe12f000 {
 		cell-index = <1>;
@@ -78,4 +89,9 @@
 		qcom,rxreg-access;
 		qcom,apps-ch-pipes = <0x60000000>;
 		qcom,ea-pc = <0x30>;
+
+		iommu_slim_ctrl_cb: qcom,iommu_slim_ctrl_cb {
+			compatible = "qcom,iommu-slim-ctrl-cb";
+			iommus = <&apps_smmu 0x1 0x0>;
+		};
 	};
diff --git a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt
index ceac719..2131c33 100644
--- a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt
+++ b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt
@@ -35,6 +35,19 @@
 		    the corresponding addresses are specified in the reg
 		    property.
 
+- clocks
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Clock tuple consisting of a phandle to a clock controller
+		    device and the clock ID number for the SPMI debug controller
+		    clock.
+
+- clock-names
+	Usage:      required if clocks property is specified
+	Value type: <string>
+	Definition: Defines the name of the clock defined in the "clocks"
+		    property.  This must be "core_clk".
+
 - #address-cells
 	Usage:      required
 	Value type: <u32>
@@ -57,6 +70,8 @@
 	compatible = "qcom,spmi-pmic-arb-debug";
 	reg = <0x6b22000 0x60>, <0x7820A8 4>;
 	reg-names = "core", "fuse";
+	clocks = <&clock_aop QDSS_CLK>;
+	clock-names = "core_clk";
 	qcom,fuse-disable-bit = <12>;
 	#address-cells = <2>;
 	#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt b/Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt
new file mode 100644
index 0000000..51c5eac
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt
@@ -0,0 +1,113 @@
+QMI thermal mitigation (TMD) cooling devices.
+
+The QMI TMD cooling device is used for various mitigations on remote
+subsystems, including remote processor mitigation, rail voltage restriction etc.
+This cooling device uses the kernel QTI QMI interface to send the mitigation
+request to the remote subsystem.
+
+Each child node of the QMI TMD devicetree node represents a remote
+subsystem, and each child of that subsystem node represents a separate
+cooling device. At least one remote subsystem node is required, and each
+subsystem node requires at least one cooling device node.
+
+Properties:
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: should be "qcom,qmi_cooling_devices"
+
+
+Subsystem properties:
+- qcom,instance-id:
+	Usage: required
+	Value type: <integer>
+	Definition: Remote subsystem QMI server instance id to be used for
+			communicating with QMI.
+
+	At least one child node is required. The child node name and its alias
+	are used as the cooling device name and phandle for that cooling device.
+
+	cooling device node properties:
+	-qcom,qmi-dev-name:
+		Usage: required
+		Value type: <string>
+		Definition: Remote subsystem device identifier. The strings
+			below are the only acceptable device names:
+			"pa" -> for pa cooling device,
+			"cpuv_restriction_cold" -> for vdd restriction,
+			"cx_vdd_limit" -> for vdd limit,
+			"modem" -> for processor passive cooling device,
+			"modem_current" -> for current limiting device,
+			"modem_bw" ->  for bus bandwidth limiting device,
+			"cpr_cold" -> for cpr restriction.
+
+	-#cooling-cells:
+		Usage: required
+		Value type: <integer>
+		Definition: Must be 2. Needed for of_thermal as cooling device
+			identifier. Please refer to
+			<devicetree/bindings/thermal/thermal.txt> for more
+			details.
+Example:
+
+	qmi-tmd-devices {
+		compatible = "qcom,qmi_cooling_devices";
+
+		modem {
+			qcom,instance-id = <0x0>;
+
+			modem_pa: modem_pa {
+				qcom,qmi-dev-name = "pa";
+				#cooling-cells = <2>;
+			};
+
+			modem_proc: modem_proc {
+				qcom,qmi-dev-name = "modem";
+				#cooling-cells = <2>;
+			};
+
+			modem_vdd: modem_vdd {
+				qcom,qmi-dev-name = "cpuv_restriction_cold";
+				#cooling-cells = <2>;
+			};
+
+			modem_current: modem_current {
+				qcom,qmi-dev-name = "modem_current";
+				#cooling-cells = <2>;
+			};
+
+			modem_cpr_cold: modem_cpr_cold {
+				qcom,qmi-dev-name = "cpr_cold";
+				#cooling-cells = <2>;
+			};
+		};
+
+		adsp {
+			qcom,instance-id = <0x1>;
+
+			adsp_vdd: adsp_vdd {
+				qcom,qmi-dev-name = "cpuv_restriction_cold";
+				#cooling-cells = <2>;
+			};
+		};
+
+		cdsp {
+			qcom,instance-id = <0x43>;
+
+			cdsp_vdd: cdsp_vdd {
+				qcom,qmi-dev-name = "cpuv_restriction_cold";
+				#cooling-cells = <2>;
+			};
+		};
+
+		slpi {
+			qcom,instance-id = <0x53>;
+
+			slpi_vdd: slpi_vdd {
+				qcom,qmi-dev-name = "cpuv_restriction_cold";
+				#cooling-cells = <2>;
+			};
+		};
+	};
+
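+For reference, a cooling device defined above is consumed from a thermal
+zone's cooling-maps, as described in
+<devicetree/bindings/thermal/thermal.txt>. The sketch below is illustrative
+only; the thermal zone, trip phandle and cooling state limits are
+assumptions, not part of this binding:
+
+	cooling-maps {
+		vdd_cdev {
+			trip = <&mdm_vdd_trip>;
+			cooling-device = <&modem_vdd 0 0>;
+		};
+	};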
diff --git a/Documentation/devicetree/bindings/thermal/qti-rpmh-reg-cdev.txt b/Documentation/devicetree/bindings/thermal/qti-rpmh-reg-cdev.txt
new file mode 100644
index 0000000..b7734ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/qti-rpmh-reg-cdev.txt
@@ -0,0 +1,44 @@
+RPMh regulator cooling device.
+
+The RPMh regulator cooling device is used to place a voltage floor
+restriction on a rail. This cooling device uses a QMP AOP mailbox to send
+the messages that apply and clear the voltage floor restriction.
+
+The cooling device node should be a child of the regulator devicetree node
+on which it is trying to place the floor restriction.
+
+Properties:
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: shall be "qcom,rpmh-reg-cdev"
+
+- qcom,reg-resource-name:
+	Usage: required
+	Value type: <string>
+	Definition: The regulator resource name to be used for communicating
+			with RPMh. This value should be one of the below
+			resource names:
+			cx -> For CX rail,
+			mx -> For MX rail,
+			ebi -> For EBI rail.
+
+- mboxes:
+	Usage: required
+	Value type: <phandle>
+	Definition: A phandle to the QMP AOP mailbox that is used
+			for sending the floor restriction message.
+
+- #cooling-cells: Must be 2. Please refer to
+			<devicetree/bindings/thermal/thermal.txt> for more
+			details.
+
+Example:
+
+	vdd_cx: rpmh-cx-regulator-cdev {
+		compatible = "qcom,rpmh-reg-cdev";
+		mboxes = <&qmp_aop 0>;
+		qcom,reg-resource-name = "cx";
+		#cooling-cells = <2>;
+	};
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 8527965..2b576cc 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3948,6 +3948,13 @@
 	spia_pedr=
 	spia_peddr=
 
+	stack_guard_gap=	[MM]
+			override the default stack gap protection. The value
+			is in page units and it defines how many pages prior
+			to (for stacks growing down) resp. after (for stacks
+			growing up) the main stack are reserved for no other
+			mapping. Default value is 256 pages.
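+			For example, stack_guard_gap=512 reserves 512 pages
+			(2 MB with 4 KB pages) of gap below a stack that
+			grows down.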
+
 	stacktrace	[FTRACE]
 			Enabled the stack tracer on boot up.
 
diff --git a/Makefile b/Makefile
index 9e428c5..aebb186 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 32
+SUBLEVEL = 35
 EXTRAVERSION =
 NAME = Roaring Lionus
 
@@ -655,6 +655,12 @@
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS	+= $(call cc-option,--param=allow-store-data-races=0)
 
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
 include scripts/Makefile.gcc-plugins
 
 ifdef CONFIG_READABLE_ASM
@@ -800,12 +806,6 @@
 # use the deterministic mode of AR if available
 KBUILD_ARFLAGS := $(call ar-option,D)
 
-# check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
-	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
-	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
-endif
-
 include scripts/Makefile.kasan
 include scripts/Makefile.extrawarn
 include scripts/Makefile.ubsan
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 689dd86..8b90d25 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -71,14 +71,14 @@
 	GET_CPU_ID  r5
 	cmp	r5, 0
 	mov.nz	r0, r5
-#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
-	; Non-Master can proceed as system would be booted sufficiently
-	jnz	first_lines_of_secondary
-#else
+	bz	.Lmaster_proceed
+
 	; Non-Masters wait for Master to boot enough and bring them up
-	jnz	arc_platform_smp_wait_to_boot
-#endif
-	; Master falls thru
+	; when they resume, tail-call to entry point
+	mov	blink, @first_lines_of_secondary
+	j	arc_platform_smp_wait_to_boot
+
+.Lmaster_proceed:
 #endif
 
 	; Clear BSS before updating any globals
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 88674d9..2afbafa 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -90,22 +90,37 @@
  */
 static volatile int wake_flag;
 
+#ifdef CONFIG_ISA_ARCOMPACT
+
+#define __boot_read(f)		f
+#define __boot_write(f, v)	f = v
+
+#else
+
+#define __boot_read(f)		arc_read_uncached_32(&f)
+#define __boot_write(f, v)	arc_write_uncached_32(&f, v)
+
+#endif
+
 static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
 {
 	BUG_ON(cpu == 0);
-	wake_flag = cpu;
+
+	__boot_write(wake_flag, cpu);
 }
 
 void arc_platform_smp_wait_to_boot(int cpu)
 {
-	while (wake_flag != cpu)
+	/* for halt-on-reset, we've waited already */
+	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+		return;
+
+	while (__boot_read(wake_flag) != cpu)
 		;
 
-	wake_flag = 0;
-	__asm__ __volatile__("j @first_lines_of_secondary	\n");
+	__boot_write(wake_flag, 0);
 }
 
-
 const char *arc_platform_smp_cpuinfo(void)
 {
 	return plat_smp_ops.info ? : "";
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 2e06d56..cf4ae69 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -64,7 +64,7 @@
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index ea316c4..d3f1768 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -64,8 +64,8 @@
 CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 18e59fe..7f479cd 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -56,8 +56,8 @@
 CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index 1674c11..c2252c0 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -175,8 +175,6 @@
 # CONFIG_NET_VENDOR_INTEL is not set
 CONFIG_KS8851=y
 # CONFIG_NET_VENDOR_MICROCHIP is not set
-CONFIG_ECM_IPA=y
-CONFIG_RNDIS_IPA=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
@@ -270,6 +268,8 @@
 CONFIG_STAGING=y
 CONFIG_GSI=y
 CONFIG_IPA3=y
+CONFIG_ECM_IPA=y
+CONFIG_RNDIS_IPA=y
 CONFIG_IPA_UT=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 38a531f..e8fa052 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -167,8 +167,6 @@
 # CONFIG_NET_VENDOR_INTEL is not set
 CONFIG_KS8851=y
 # CONFIG_NET_VENDOR_MICROCHIP is not set
-CONFIG_ECM_IPA=y
-CONFIG_RNDIS_IPA=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
@@ -266,6 +264,8 @@
 CONFIG_STAGING=y
 CONFIG_GSI=y
 CONFIG_IPA3=y
+CONFIG_ECM_IPA=y
+CONFIG_RNDIS_IPA=y
 CONFIG_IPA_UT=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 66353ca..641334e 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -89,7 +89,7 @@
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -140,7 +140,7 @@
 			addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-				(!vma || addr + len <= vma->vm_start))
+				(!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 64b250d..3c8eaf8 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -18,10 +18,12 @@
 ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
 	dtbo-$(CONFIG_ARCH_SDM845) += \
 		sdm845-cdp-overlay.dtbo \
-		sdm845-mtp-overlay.dtbo
+		sdm845-mtp-overlay.dtbo \
+		sdm845-qrd-overlay.dtbo
 
 sdm845-cdp-overlay.dtbo-base := sdm845.dtb
 sdm845-mtp-overlay.dtbo-base := sdm845.dtb
+sdm845-qrd-overlay.dtbo-base := sdm845.dtb
 endif
 
 dtb-$(CONFIG_ARCH_SDM670) += sdm670-rumi.dtb \
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
index 061f1d9..6534cdc 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
@@ -40,7 +40,7 @@
 		qcom,mdss-dsi-lane-3-state;
 		qcom,mdss-dsi-dma-trigger = "trigger_sw";
 		qcom,mdss-dsi-mdp-trigger = "none";
-		qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>;
+		qcom,mdss-dsi-reset-sequence = <1 100>, <0 100>, <1 100>;
 		qcom,mdss-dsi-te-pin-select = <1>;
 		qcom,mdss-dsi-wr-mem-start = <0x2c>;
 		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
diff --git a/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi b/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi
new file mode 100644
index 0000000..2b8f22e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi
@@ -0,0 +1,75 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	smp2pgpio_rdbg_2_in: qcom,smp2pgpio-rdbg-2-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "rdbg";
+		qcom,remote-pid = <2>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_rdbg_2_in {
+		compatible = "qcom,smp2pgpio_client_rdbg_2_in";
+		gpios = <&smp2pgpio_rdbg_2_in 0 0>;
+	};
+
+	smp2pgpio_rdbg_2_out: qcom,smp2pgpio-rdbg-2-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "rdbg";
+		qcom,remote-pid = <2>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_rdbg_2_out {
+		compatible = "qcom,smp2pgpio_client_rdbg_2_out";
+		gpios = <&smp2pgpio_rdbg_2_out 0 0>;
+	};
+
+	smp2pgpio_rdbg_1_in: qcom,smp2pgpio-rdbg-1-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "rdbg";
+		qcom,remote-pid = <1>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_rdbg_1_in {
+		compatible = "qcom,smp2pgpio_client_rdbg_1_in";
+		gpios = <&smp2pgpio_rdbg_1_in 0 0>;
+	};
+
+	smp2pgpio_rdbg_1_out: qcom,smp2pgpio-rdbg-1-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "rdbg";
+		qcom,remote-pid = <1>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_rdbg_1_out {
+		compatible = "qcom,smp2pgpio_client_rdbg_1_out";
+		gpios = <&smp2pgpio_rdbg_1_out 0 0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index 09ce9d2..86e2948 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -19,5 +19,1103 @@
 		#gpio-cells = <2>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
+
+		/* QUPv3 South SE mappings */
+		/* SE 0 pin mappings */
+		qupv3_se0_i2c_pins: qupv3_se0_i2c_pins {
+			qupv3_se0_i2c_active: qupv3_se0_i2c_active {
+				mux {
+					pins = "gpio0", "gpio1";
+					function = "qup0";
+				};
+
+				config {
+					pins = "gpio0", "gpio1";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se0_i2c_sleep: qupv3_se0_i2c_sleep {
+				mux {
+					pins = "gpio0", "gpio1";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio0", "gpio1";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se0_spi_pins: qupv3_se0_spi_pins {
+			qupv3_se0_spi_active: qupv3_se0_spi_active {
+				mux {
+					pins = "gpio0", "gpio1", "gpio2",
+								"gpio3";
+					function = "qup0";
+				};
+
+				config {
+					pins = "gpio0", "gpio1", "gpio2",
+								"gpio3";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se0_spi_sleep: qupv3_se0_spi_sleep {
+				mux {
+					pins = "gpio0", "gpio1", "gpio2",
+								"gpio3";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio0", "gpio1", "gpio2",
+								"gpio3";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 1 pin mappings */
+		qupv3_se1_i2c_pins: qupv3_se1_i2c_pins {
+			qupv3_se1_i2c_active: qupv3_se1_i2c_active {
+				mux {
+					pins = "gpio17", "gpio18";
+					function = "qup1";
+				};
+
+				config {
+					pins = "gpio17", "gpio18";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se1_i2c_sleep: qupv3_se1_i2c_sleep {
+				mux {
+					pins = "gpio17", "gpio18";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio17", "gpio18";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se1_spi_pins: qupv3_se1_spi_pins {
+			qupv3_se1_spi_active: qupv3_se1_spi_active {
+				mux {
+					pins = "gpio17", "gpio18", "gpio19",
+								"gpio20";
+					function = "qup1";
+				};
+
+				config {
+					pins = "gpio17", "gpio18", "gpio19",
+								"gpio20";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se1_spi_sleep: qupv3_se1_spi_sleep {
+				mux {
+					pins = "gpio17", "gpio18", "gpio19",
+								"gpio20";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio17", "gpio18", "gpio19",
+								"gpio20";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 2 pin mappings */
+		qupv3_se2_i2c_pins: qupv3_se2_i2c_pins {
+			qupv3_se2_i2c_active: qupv3_se2_i2c_active {
+				mux {
+					pins = "gpio27", "gpio28";
+					function = "qup2";
+				};
+
+				config {
+					pins = "gpio27", "gpio28";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se2_i2c_sleep: qupv3_se2_i2c_sleep {
+				mux {
+					pins = "gpio27", "gpio28";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio27", "gpio28";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se2_spi_pins: qupv3_se2_spi_pins {
+			qupv3_se2_spi_active: qupv3_se2_spi_active {
+				mux {
+					pins = "gpio27", "gpio28", "gpio29",
+								"gpio30";
+					function = "qup2";
+				};
+
+				config {
+					pins = "gpio27", "gpio28", "gpio29",
+								"gpio30";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se2_spi_sleep: qupv3_se2_spi_sleep {
+				mux {
+					pins = "gpio27", "gpio28", "gpio29",
+								"gpio30";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio27", "gpio28", "gpio29",
+								"gpio30";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 3 pin mappings */
+		qupv3_se3_i2c_pins: qupv3_se3_i2c_pins {
+			qupv3_se3_i2c_active: qupv3_se3_i2c_active {
+				mux {
+					pins = "gpio41", "gpio42";
+					function = "qup3";
+				};
+
+				config {
+					pins = "gpio41", "gpio42";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se3_i2c_sleep: qupv3_se3_i2c_sleep {
+				mux {
+					pins = "gpio41", "gpio42";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio41", "gpio42";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se3_spi_pins: qupv3_se3_spi_pins {
+			qupv3_se3_spi_active: qupv3_se3_spi_active {
+				mux {
+					pins = "gpio41", "gpio42", "gpio43",
+								"gpio44";
+					function = "qup3";
+				};
+
+				config {
+					pins = "gpio41", "gpio42", "gpio43",
+								"gpio44";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se3_spi_sleep: qupv3_se3_spi_sleep {
+				mux {
+					pins = "gpio41", "gpio42", "gpio43",
+								"gpio44";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio41", "gpio42", "gpio43",
+								"gpio44";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 4 pin mappings */
+		qupv3_se4_i2c_pins: qupv3_se4_i2c_pins {
+			qupv3_se4_i2c_active: qupv3_se4_i2c_active {
+				mux {
+					pins = "gpio89", "gpio90";
+					function = "qup4";
+				};
+
+				config {
+					pins = "gpio89", "gpio90";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se4_i2c_sleep: qupv3_se4_i2c_sleep {
+				mux {
+					pins = "gpio89", "gpio90";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio89", "gpio90";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se4_spi_pins: qupv3_se4_spi_pins {
+			qupv3_se4_spi_active: qupv3_se4_spi_active {
+				mux {
+					pins = "gpio89", "gpio90", "gpio91",
+								"gpio92";
+					function = "qup4";
+				};
+
+				config {
+					pins = "gpio89", "gpio90", "gpio91",
+								"gpio92";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se4_spi_sleep: qupv3_se4_spi_sleep {
+				mux {
+					pins = "gpio89", "gpio90", "gpio91",
+								"gpio92";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio89", "gpio90", "gpio91",
+								"gpio92";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 5 pin mappings */
+		qupv3_se5_i2c_pins: qupv3_se5_i2c_pins {
+			qupv3_se5_i2c_active: qupv3_se5_i2c_active {
+				mux {
+					pins = "gpio85", "gpio86";
+					function = "qup5";
+				};
+
+				config {
+					pins = "gpio85", "gpio86";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se5_i2c_sleep: qupv3_se5_i2c_sleep {
+				mux {
+					pins = "gpio85", "gpio86";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio85", "gpio86";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se5_spi_pins: qupv3_se5_spi_pins {
+			qupv3_se5_spi_active: qupv3_se5_spi_active {
+				mux {
+					pins = "gpio85", "gpio86", "gpio87",
+								"gpio88";
+					function = "qup5";
+				};
+
+				config {
+					pins = "gpio85", "gpio86", "gpio87",
+								"gpio88";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se5_spi_sleep: qupv3_se5_spi_sleep {
+				mux {
+					pins = "gpio85", "gpio86", "gpio87",
+								"gpio88";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio85", "gpio86", "gpio87",
+								"gpio88";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 6 pin mappings */
+		qupv3_se6_i2c_pins: qupv3_se6_i2c_pins {
+			qupv3_se6_i2c_active: qupv3_se6_i2c_active {
+				mux {
+					pins = "gpio45", "gpio46";
+					function = "qup6";
+				};
+
+				config {
+					pins = "gpio45", "gpio46";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se6_i2c_sleep: qupv3_se6_i2c_sleep {
+				mux {
+					pins = "gpio45", "gpio46";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio45", "gpio46";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se6_4uart_pins: qupv3_se6_4uart_pins {
+			qupv3_se6_4uart_active: qupv3_se6_4uart_active {
+				mux {
+					pins = "gpio45", "gpio46", "gpio47",
+								"gpio48";
+					function = "qup6";
+				};
+
+				config {
+					pins = "gpio45", "gpio46", "gpio47",
+								"gpio48";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se6_4uart_sleep: qupv3_se6_4uart_sleep {
+				mux {
+					pins = "gpio45", "gpio46", "gpio47",
+								"gpio48";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio45", "gpio46", "gpio47",
+								"gpio48";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se6_spi_pins: qupv3_se6_spi_pins {
+			qupv3_se6_spi_active: qupv3_se6_spi_active {
+				mux {
+					pins = "gpio45", "gpio46", "gpio47",
+								"gpio48";
+					function = "qup6";
+				};
+
+				config {
+					pins = "gpio45", "gpio46", "gpio47",
+								"gpio48";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se6_spi_sleep: qupv3_se6_spi_sleep {
+				mux {
+					pins = "gpio45", "gpio46", "gpio47",
+								"gpio48";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio45", "gpio46", "gpio47",
+								"gpio48";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 7 pin mappings */
+		qupv3_se7_i2c_pins: qupv3_se7_i2c_pins {
+			qupv3_se7_i2c_active: qupv3_se7_i2c_active {
+				mux {
+					pins = "gpio93", "gpio94";
+					function = "qup7";
+				};
+
+				config {
+					pins = "gpio93", "gpio94";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se7_i2c_sleep: qupv3_se7_i2c_sleep {
+				mux {
+					pins = "gpio93", "gpio94";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio93", "gpio94";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se7_4uart_pins: qupv3_se7_4uart_pins {
+			qupv3_se7_4uart_active: qupv3_se7_4uart_active {
+				mux {
+					pins = "gpio93", "gpio94", "gpio95",
+								"gpio96";
+					function = "qup7";
+				};
+
+				config {
+					pins = "gpio93", "gpio94", "gpio95",
+								"gpio96";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se7_4uart_sleep: qupv3_se7_4uart_sleep {
+				mux {
+					pins = "gpio93", "gpio94", "gpio95",
+								"gpio96";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio93", "gpio94", "gpio95",
+								"gpio96";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se7_spi_pins: qupv3_se7_spi_pins {
+			qupv3_se7_spi_active: qupv3_se7_spi_active {
+				mux {
+					pins = "gpio93", "gpio94", "gpio95",
+								"gpio96";
+					function = "qup7";
+				};
+
+				config {
+					pins = "gpio93", "gpio94", "gpio95",
+								"gpio96";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se7_spi_sleep: qupv3_se7_spi_sleep {
+				mux {
+					pins = "gpio93", "gpio94", "gpio95",
+								"gpio96";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio93", "gpio94", "gpio95",
+								"gpio96";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* QUPv3 North instances */
+		/* SE 8 pin mappings */
+		qupv3_se8_i2c_pins: qupv3_se8_i2c_pins {
+			qupv3_se8_i2c_active: qupv3_se8_i2c_active {
+				mux {
+					pins = "gpio65", "gpio66";
+					function = "qup8";
+				};
+
+				config {
+					pins = "gpio65", "gpio66";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se8_i2c_sleep: qupv3_se8_i2c_sleep {
+				mux {
+					pins = "gpio65", "gpio66";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio65", "gpio66";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se8_spi_pins: qupv3_se8_spi_pins {
+			qupv3_se8_spi_active: qupv3_se8_spi_active {
+				mux {
+					pins = "gpio65", "gpio66", "gpio67",
+								"gpio68";
+					function = "qup8";
+				};
+
+				config {
+					pins = "gpio65", "gpio66", "gpio67",
+								"gpio68";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se8_spi_sleep: qupv3_se8_spi_sleep {
+				mux {
+					pins = "gpio65", "gpio66", "gpio67",
+								"gpio68";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio65", "gpio66", "gpio67",
+								"gpio68";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 9 pin mappings */
+		qupv3_se9_i2c_pins: qupv3_se9_i2c_pins {
+			qupv3_se9_i2c_active: qupv3_se9_i2c_active {
+				mux {
+					pins = "gpio6", "gpio7";
+					function = "qup9";
+				};
+
+				config {
+					pins = "gpio6", "gpio7";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se9_i2c_sleep: qupv3_se9_i2c_sleep {
+				mux {
+					pins = "gpio6", "gpio7";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio6", "gpio7";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se9_2uart_pins: qupv3_se9_2uart_pins {
+			qupv3_se9_2uart_active: qupv3_se9_2uart_active {
+				mux {
+					pins = "gpio4", "gpio5";
+					function = "qup9";
+				};
+
+				config {
+					pins = "gpio4", "gpio5";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se9_2uart_sleep: qupv3_se9_2uart_sleep {
+				mux {
+					pins = "gpio4", "gpio5";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio4", "gpio5";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se9_spi_pins: qupv3_se9_spi_pins {
+			qupv3_se9_spi_active: qupv3_se9_spi_active {
+				mux {
+					pins = "gpio4", "gpio5", "gpio6",
+								"gpio7";
+					function = "qup9";
+				};
+
+				config {
+					pins = "gpio4", "gpio5", "gpio6",
+								"gpio7";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se9_spi_sleep: qupv3_se9_spi_sleep {
+				mux {
+					pins = "gpio4", "gpio5", "gpio6",
+								"gpio7";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio4", "gpio5", "gpio6",
+								"gpio7";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 10 pin mappings */
+		qupv3_se10_i2c_pins: qupv3_se10_i2c_pins {
+			qupv3_se10_i2c_active: qupv3_se10_i2c_active {
+				mux {
+					pins = "gpio55", "gpio56";
+					function = "qup10";
+				};
+
+				config {
+					pins = "gpio55", "gpio56";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se10_i2c_sleep: qupv3_se10_i2c_sleep {
+				mux {
+					pins = "gpio55", "gpio56";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio55", "gpio56";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se10_2uart_pins: qupv3_se10_2uart_pins {
+			qupv3_se10_2uart_active: qupv3_se10_2uart_active {
+				mux {
+					pins = "gpio53", "gpio54";
+					function = "qup10";
+				};
+
+				config {
+					pins = "gpio53", "gpio54";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se10_2uart_sleep: qupv3_se10_2uart_sleep {
+				mux {
+					pins = "gpio53", "gpio54";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio53", "gpio54";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se10_spi_pins: qupv3_se10_spi_pins {
+			qupv3_se10_spi_active: qupv3_se10_spi_active {
+				mux {
+					pins = "gpio53", "gpio54", "gpio55",
+								"gpio56";
+					function = "qup10";
+				};
+
+				config {
+					pins = "gpio53", "gpio54", "gpio55",
+								"gpio56";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se10_spi_sleep: qupv3_se10_spi_sleep {
+				mux {
+					pins = "gpio53", "gpio54", "gpio55",
+								"gpio56";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio53", "gpio54", "gpio55",
+								"gpio56";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 11 pin mappings */
+		qupv3_se11_i2c_pins: qupv3_se11_i2c_pins {
+			qupv3_se11_i2c_active: qupv3_se11_i2c_active {
+				mux {
+					pins = "gpio31", "gpio32";
+					function = "qup11";
+				};
+
+				config {
+					pins = "gpio31", "gpio32";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se11_i2c_sleep: qupv3_se11_i2c_sleep {
+				mux {
+					pins = "gpio31", "gpio32";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio31", "gpio32";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se11_spi_pins: qupv3_se11_spi_pins {
+			qupv3_se11_spi_active: qupv3_se11_spi_active {
+				mux {
+					pins = "gpio31", "gpio32", "gpio33",
+								"gpio34";
+					function = "qup11";
+				};
+
+				config {
+					pins = "gpio31", "gpio32", "gpio33",
+								"gpio34";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se11_spi_sleep: qupv3_se11_spi_sleep {
+				mux {
+					pins = "gpio31", "gpio32", "gpio33",
+								"gpio34";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio31", "gpio32", "gpio33",
+								"gpio34";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 12 pin mappings */
+		qupv3_se12_i2c_pins: qupv3_se12_i2c_pins {
+			qupv3_se12_i2c_active: qupv3_se12_i2c_active {
+				mux {
+					pins = "gpio49", "gpio50";
+					function = "qup12";
+				};
+
+				config {
+					pins = "gpio49", "gpio50";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se12_i2c_sleep: qupv3_se12_i2c_sleep {
+				mux {
+					pins = "gpio49", "gpio50";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio49", "gpio50";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se12_spi_pins: qupv3_se12_spi_pins {
+			qupv3_se12_spi_active: qupv3_se12_spi_active {
+				mux {
+					pins = "gpio49", "gpio50", "gpio51",
+								"gpio52";
+					function = "qup12";
+				};
+
+				config {
+					pins = "gpio49", "gpio50", "gpio51",
+								"gpio52";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se12_spi_sleep: qupv3_se12_spi_sleep {
+				mux {
+					pins = "gpio49", "gpio50", "gpio51",
+								"gpio52";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio49", "gpio50", "gpio51",
+								"gpio52";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 13 pin mappings */
+		qupv3_se13_i2c_pins: qupv3_se13_i2c_pins {
+			qupv3_se13_i2c_active: qupv3_se13_i2c_active {
+				mux {
+					pins = "gpio105", "gpio106";
+					function = "qup13";
+				};
+
+				config {
+					pins = "gpio105", "gpio106";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se13_i2c_sleep: qupv3_se13_i2c_sleep {
+				mux {
+					pins = "gpio105", "gpio106";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio105", "gpio106";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se13_spi_pins: qupv3_se13_spi_pins {
+			qupv3_se13_spi_active: qupv3_se13_spi_active {
+				mux {
+					pins = "gpio105", "gpio106", "gpio107",
+								"gpio108";
+					function = "qup13";
+				};
+
+				config {
+					pins = "gpio105", "gpio106", "gpio107",
+								"gpio108";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se13_spi_sleep: qupv3_se13_spi_sleep {
+				mux {
+					pins = "gpio105", "gpio106", "gpio107",
+								"gpio108";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio105", "gpio106", "gpio107",
+								"gpio108";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 14 pin mappings */
+		qupv3_se14_i2c_pins: qupv3_se14_i2c_pins {
+			qupv3_se14_i2c_active: qupv3_se14_i2c_active {
+				mux {
+					pins = "gpio33", "gpio34";
+					function = "qup14";
+				};
+
+				config {
+					pins = "gpio33", "gpio34";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se14_i2c_sleep: qupv3_se14_i2c_sleep {
+				mux {
+					pins = "gpio33", "gpio34";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio33", "gpio34";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se14_spi_pins: qupv3_se14_spi_pins {
+			qupv3_se14_spi_active: qupv3_se14_spi_active {
+				mux {
+					pins = "gpio31", "gpio32", "gpio33",
+								"gpio34";
+					function = "qup14";
+				};
+
+				config {
+					pins = "gpio31", "gpio32", "gpio33",
+								"gpio34";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se14_spi_sleep: qupv3_se14_spi_sleep {
+				mux {
+					pins = "gpio31", "gpio32", "gpio33",
+								"gpio34";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio31", "gpio32", "gpio33",
+								"gpio34";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		/* SE 15 pin mappings */
+		qupv3_se15_i2c_pins: qupv3_se15_i2c_pins {
+			qupv3_se15_i2c_active: qupv3_se15_i2c_active {
+				mux {
+					pins = "gpio81", "gpio82";
+					function = "qup15";
+				};
+
+				config {
+					pins = "gpio81", "gpio82";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se15_i2c_sleep: qupv3_se15_i2c_sleep {
+				mux {
+					pins = "gpio81", "gpio82";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio81", "gpio82";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se15_spi_pins: qupv3_se15_spi_pins {
+			qupv3_se15_spi_active: qupv3_se15_spi_active {
+				mux {
+					pins = "gpio81", "gpio82", "gpio83",
+								"gpio84";
+					function = "qup15";
+				};
+
+				config {
+					pins = "gpio81", "gpio82", "gpio83",
+								"gpio84";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se15_spi_sleep: qupv3_se15_spi_sleep {
+				mux {
+					pins = "gpio81", "gpio82", "gpio83",
+								"gpio84";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio81", "gpio82", "gpio83",
+								"gpio84";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
new file mode 100644
index 0000000..8501d80
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
@@ -0,0 +1,198 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,lpm-levels {
+		compatible = "qcom,lpm-levels";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,pm-cluster@0 {
+			reg = <0>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			label = "L3";
+			qcom,psci-mode-shift = <4>;
+			qcom,psci-mode-mask = <0xfff>;
+
+			qcom,pm-cluster-level@0 { /* D1 */
+				reg = <0>;
+				label = "l3-wfi";
+				qcom,psci-mode = <0x1>;
+				qcom,latency-us = <51>;
+				qcom,ss-power = <452>;
+				qcom,energy-overhead = <69355>;
+				qcom,time-overhead = <99>;
+			};
+
+			qcom,pm-cluster-level@1 { /* D2 */
+				reg = <1>;
+				label = "l3-dyn-ret";
+				qcom,psci-mode = <0x2>;
+				qcom,latency-us = <659>;
+				qcom,ss-power = <434>;
+				qcom,energy-overhead = <465725>;
+				qcom,time-overhead = <976>;
+				qcom,min-child-idx = <1>;
+			};
+
+			qcom,pm-cluster-level@2 { /* D4, D3 is not supported */
+				reg = <2>;
+				label = "l3-pc";
+				qcom,psci-mode = <0x4>;
+				qcom,latency-us = <3201>;
+				qcom,ss-power = <408>;
+				qcom,energy-overhead = <2421840>;
+				qcom,time-overhead = <5376>;
+				qcom,min-child-idx = <2>;
+				qcom,is-reset;
+			};
+
+			qcom,pm-cluster-level@3 { /* Cx off */
+				reg = <3>;
+				label = "cx-off";
+				qcom,psci-mode = <0x224>;
+				qcom,latency-us = <5562>;
+				qcom,ss-power = <308>;
+				qcom,energy-overhead = <2521840>;
+				qcom,time-overhead = <6376>;
+				qcom,min-child-idx = <3>;
+				qcom,is-reset;
+				qcom,notify-rpm;
+			};
+
+			qcom,pm-cluster-level@4 { /* AOSS sleep */
+				reg = <4>;
+				label = "llcc-off";
+				qcom,psci-mode = <0xC24>;
+				qcom,latency-us = <6562>;
+				qcom,ss-power = <108>;
+				qcom,energy-overhead = <2621840>;
+				qcom,time-overhead = <7376>;
+				qcom,min-child-idx = <3>;
+				qcom,is-reset;
+				qcom,notify-rpm;
+			};
+
+			qcom,pm-cpu@0 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				qcom,psci-mode-shift = <0>;
+				qcom,psci-mode-mask = <0xf>;
+				qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4
+									&CPU5>;
+
+				qcom,pm-cpu-level@0 { /* C1 */
+					reg = <0>;
+					qcom,spm-cpu-mode = "wfi";
+					qcom,psci-cpu-mode = <0x1>;
+					qcom,latency-us = <43>;
+					qcom,ss-power = <454>;
+					qcom,energy-overhead = <38639>;
+					qcom,time-overhead = <83>;
+				};
+
+				qcom,pm-cpu-level@1 { /* C2D */
+					reg = <1>;
+					qcom,spm-cpu-mode = "ret";
+					qcom,psci-cpu-mode = <0x2>;
+					qcom,latency-us = <119>;
+					qcom,ss-power = <449>;
+					qcom,energy-overhead = <78456>;
+					qcom,time-overhead = <167>;
+				};
+
+				qcom,pm-cpu-level@2 {  /* C3 */
+					reg = <2>;
+					qcom,spm-cpu-mode = "pc";
+					qcom,psci-cpu-mode = <0x3>;
+					qcom,latency-us = <461>;
+					qcom,ss-power = <436>;
+					qcom,energy-overhead = <418225>;
+					qcom,time-overhead = <885>;
+					qcom,is-reset;
+					qcom,use-broadcast-timer;
+				};
+
+				qcom,pm-cpu-level@3 {  /* C4 */
+					reg = <3>;
+					qcom,spm-cpu-mode = "rail-pc";
+					qcom,psci-cpu-mode = <0x4>;
+					qcom,latency-us = <531>;
+					qcom,ss-power = <400>;
+					qcom,energy-overhead = <428225>;
+					qcom,time-overhead = <1000>;
+					qcom,is-reset;
+					qcom,use-broadcast-timer;
+				};
+			};
+
+			qcom,pm-cpu@1 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				qcom,psci-mode-shift = <0>;
+				qcom,psci-mode-mask = <0xf>;
+				qcom,cpu = <&CPU6 &CPU7>;
+
+				qcom,pm-cpu-level@0 { /* C1 */
+					reg = <0>;
+					qcom,spm-cpu-mode = "wfi";
+					qcom,psci-cpu-mode = <0x1>;
+					qcom,latency-us = <43>;
+					qcom,ss-power = <454>;
+					qcom,energy-overhead = <38639>;
+					qcom,time-overhead = <83>;
+				};
+
+				qcom,pm-cpu-level@1 { /* C2D */
+					reg = <1>;
+					qcom,spm-cpu-mode = "ret";
+					qcom,psci-cpu-mode = <0x2>;
+					qcom,latency-us = <116>;
+					qcom,ss-power = <449>;
+					qcom,energy-overhead = <78456>;
+					qcom,time-overhead = <167>;
+				};
+
+				qcom,pm-cpu-level@2 {  /* C3 */
+					reg = <2>;
+					qcom,spm-cpu-mode = "pc";
+					qcom,psci-cpu-mode = <0x3>;
+					qcom,latency-us = <621>;
+					qcom,ss-power = <436>;
+					qcom,energy-overhead = <418225>;
+					qcom,time-overhead = <885>;
+					qcom,is-reset;
+					qcom,use-broadcast-timer;
+				};
+
+				qcom,pm-cpu-level@3 {  /* C4 */
+					reg = <3>;
+					qcom,spm-cpu-mode = "rail-pc";
+					qcom,psci-cpu-mode = <0x4>;
+					qcom,latency-us = <1061>;
+					qcom,ss-power = <400>;
+					qcom,energy-overhead = <428225>;
+					qcom,time-overhead = <1000>;
+					qcom,is-reset;
+					qcom,use-broadcast-timer;
+				};
+			};
+		};
+	};
+
+	qcom,rpm-stats@c300000 {
+		compatible = "qcom,rpm-stats";
+		reg = <0xc300000 0x1000>, <0xc3f0004 0x4>;
+		reg-names = "phys_addr_base", "offset_addr";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
new file mode 100644
index 0000000..0fb455f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
@@ -0,0 +1,699 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/msm/msm-bus-ids.h>
+
+&soc {
+	/* QUPv3 South instances */
+	qupv3_0: qcom,qupv3_0_geni_se@8c0000 {
+		compatible = "qcom,qupv3-geni-se";
+		reg = <0x8c0000 0x6000>;
+		qcom,bus-mas-id = <MSM_BUS_MASTER_BLSP_1>;
+		qcom,bus-slv-id = <MSM_BUS_SLAVE_EBI_CH0>;
+		qcom,iommu-s1-bypass;
+
+		iommu_qupv3_0_geni_se_cb: qcom,iommu_qupv3_0_geni_se_cb {
+			compatible = "qcom,qupv3-geni-se-cb";
+			iommus = <&apps_smmu 0x003 0x0>;
+		};
+	};
+
+	/*
+	 * HS UART instances. HS UART usecases can be supported on these
+	 * instances only.
+	 */
+	qupv3_se6_4uart: qcom,qup_uart@0x898000 {
+		compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart";
+		reg = <0x898000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S6_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se6_4uart_active>;
+		pinctrl-1 = <&qupv3_se6_4uart_sleep>;
+		interrupts-extended = <&intc GIC_SPI 607 0>,
+				<&tlmm 48 0>;
+		status = "disabled";
+		qcom,wakeup-byte = <0xFD>;
+		qcom,wrapper-core = <&qupv3_0>;
+	};
+
+	qupv3_se7_4uart: qcom,qup_uart@0x89c000 {
+		compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart";
+		reg = <0x89c000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S7_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se7_4uart_active>;
+		pinctrl-1 = <&qupv3_se7_4uart_sleep>;
+		interrupts-extended = <&intc GIC_SPI 608 0>,
+				<&tlmm 96 0>;
+		status = "disabled";
+		qcom,wakeup-byte = <0xFD>;
+		qcom,wrapper-core = <&qupv3_0>;
+	};
+
+	/* I2C */
+	qupv3_se0_i2c: i2c@880000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0x880000 0x4000>;
+		interrupts = <GIC_SPI 601 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se0_i2c_active>;
+		pinctrl-1 = <&qupv3_se0_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	qupv3_se1_i2c: i2c@884000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0x884000 0x4000>;
+		interrupts = <GIC_SPI 602 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S1_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se1_i2c_active>;
+		pinctrl-1 = <&qupv3_se1_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	qupv3_se2_i2c: i2c@888000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0x888000 0x4000>;
+		interrupts = <GIC_SPI 603 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S2_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se2_i2c_active>;
+		pinctrl-1 = <&qupv3_se2_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	qupv3_se3_i2c: i2c@88c000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0x88c000 0x4000>;
+		interrupts = <GIC_SPI 604 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S3_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se3_i2c_active>;
+		pinctrl-1 = <&qupv3_se3_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	qupv3_se4_i2c: i2c@890000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0x890000 0x4000>;
+		interrupts = <GIC_SPI 605 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S4_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se4_i2c_active>;
+		pinctrl-1 = <&qupv3_se4_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	qupv3_se5_i2c: i2c@894000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0x894000 0x4000>;
+		interrupts = <GIC_SPI 606 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S5_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se5_i2c_active>;
+		pinctrl-1 = <&qupv3_se5_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	qupv3_se6_i2c: i2c@898000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0x898000 0x4000>;
+		interrupts = <GIC_SPI 607 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S6_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se6_i2c_active>;
+		pinctrl-1 = <&qupv3_se6_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	qupv3_se7_i2c: i2c@89c000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0x89c000 0x4000>;
+		interrupts = <GIC_SPI 608 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S7_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se7_i2c_active>;
+		pinctrl-1 = <&qupv3_se7_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	/* SPI */
+	qupv3_se0_spi: spi@880000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x880000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se0_spi_active>;
+		pinctrl-1 = <&qupv3_se0_spi_sleep>;
+		interrupts = <GIC_SPI 601 0>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	qupv3_se1_spi: spi@884000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x884000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S1_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se1_spi_active>;
+		pinctrl-1 = <&qupv3_se1_spi_sleep>;
+		interrupts = <GIC_SPI 602 0>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	qupv3_se2_spi: spi@888000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x888000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S2_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se2_spi_active>;
+		pinctrl-1 = <&qupv3_se2_spi_sleep>;
+		interrupts = <GIC_SPI 603 0>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	qupv3_se3_spi: spi@88c000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x88c000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S3_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se3_spi_active>;
+		pinctrl-1 = <&qupv3_se3_spi_sleep>;
+		interrupts = <GIC_SPI 604 0>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	qupv3_se4_spi: spi@890000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x890000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S4_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se4_spi_active>;
+		pinctrl-1 = <&qupv3_se4_spi_sleep>;
+		interrupts = <GIC_SPI 605 0>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	qupv3_se5_spi: spi@894000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x894000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S5_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se5_spi_active>;
+		pinctrl-1 = <&qupv3_se5_spi_sleep>;
+		interrupts = <GIC_SPI 606 0>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	qupv3_se6_spi: spi@898000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x898000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S6_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se6_spi_active>;
+		pinctrl-1 = <&qupv3_se6_spi_sleep>;
+		interrupts = <GIC_SPI 607 0>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	qupv3_se7_spi: spi@89c000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x89c000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S7_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se7_spi_active>;
+		pinctrl-1 = <&qupv3_se7_spi_sleep>;
+		interrupts = <GIC_SPI 608 0>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	/* QUPv3 North Instances */
+	qupv3_1: qcom,qupv3_1_geni_se@ac0000 {
+		compatible = "qcom,qupv3-geni-se";
+		reg = <0xac0000 0x6000>;
+		qcom,bus-mas-id = <MSM_BUS_MASTER_BLSP_2>;
+		qcom,bus-slv-id = <MSM_BUS_SLAVE_EBI_CH0>;
+		qcom,iommu-s1-bypass;
+
+		iommu_qupv3_1_geni_se_cb: qcom,iommu_qupv3_1_geni_se_cb {
+			compatible = "qcom,qupv3-geni-se-cb";
+			iommus = <&apps_smmu 0x6c3 0x0>;
+		};
+	};
+
+	/* 2-wire UART */
+
+	/* Debug UART Instance for CDP/MTP platform */
+	qupv3_se9_2uart: qcom,qup_uart@0xa84000 {
+		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		reg = <0xa84000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se9_2uart_active>;
+		pinctrl-1 = <&qupv3_se9_2uart_sleep>;
+		interrupts = <GIC_SPI 354 0>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	/* Debug UART Instance for RUMI platform */
+	qupv3_se10_2uart: qcom,qup_uart@0xa88000 {
+		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		reg = <0xa88000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se10_2uart_active>;
+		pinctrl-1 = <&qupv3_se10_2uart_sleep>;
+		interrupts = <GIC_SPI 355 0>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	/* I2C */
+	qupv3_se8_i2c: i2c@a80000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0xa80000 0x4000>;
+		interrupts = <GIC_SPI 353 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S0_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se8_i2c_active>;
+		pinctrl-1 = <&qupv3_se8_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	qupv3_se9_i2c: i2c@a84000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0xa84000 0x4000>;
+		interrupts = <GIC_SPI 354 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se9_i2c_active>;
+		pinctrl-1 = <&qupv3_se9_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	qupv3_se10_i2c: i2c@a88000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0xa88000 0x4000>;
+		interrupts = <GIC_SPI 355 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se10_i2c_active>;
+		pinctrl-1 = <&qupv3_se10_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	qupv3_se11_i2c: i2c@a8c000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0xa8c000 0x4000>;
+		interrupts = <GIC_SPI 356 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S3_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se11_i2c_active>;
+		pinctrl-1 = <&qupv3_se11_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	qupv3_se12_i2c: i2c@a90000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0xa90000 0x4000>;
+		interrupts = <GIC_SPI 357 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se12_i2c_active>;
+		pinctrl-1 = <&qupv3_se12_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	qupv3_se13_i2c: i2c@a94000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0xa94000 0x4000>;
+		interrupts = <GIC_SPI 358 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S5_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se13_i2c_active>;
+		pinctrl-1 = <&qupv3_se13_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	qupv3_se14_i2c: i2c@a98000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0xa98000 0x4000>;
+		interrupts = <GIC_SPI 359 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S6_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se14_i2c_active>;
+		pinctrl-1 = <&qupv3_se14_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	qupv3_se15_i2c: i2c@a9c000 {
+		compatible = "qcom,i2c-geni";
+		reg = <0xa9c000 0x4000>;
+		interrupts = <GIC_SPI 360 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S7_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se15_i2c_active>;
+		pinctrl-1 = <&qupv3_se15_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	/* SPI */
+	qupv3_se8_spi: spi@a80000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa80000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S0_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se8_spi_active>;
+		pinctrl-1 = <&qupv3_se8_spi_sleep>;
+		interrupts = <GIC_SPI 353 0>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	qupv3_se9_spi: spi@a84000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa84000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se9_spi_active>;
+		pinctrl-1 = <&qupv3_se9_spi_sleep>;
+		interrupts = <GIC_SPI 354 0>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	qupv3_se10_spi: spi@a88000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa88000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se10_spi_active>;
+		pinctrl-1 = <&qupv3_se10_spi_sleep>;
+		interrupts = <GIC_SPI 355 0>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	qupv3_se11_spi: spi@a8c000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa8c000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S3_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se11_spi_active>;
+		pinctrl-1 = <&qupv3_se11_spi_sleep>;
+		interrupts = <GIC_SPI 356 0>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	qupv3_se12_spi: spi@a90000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa90000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se12_spi_active>;
+		pinctrl-1 = <&qupv3_se12_spi_sleep>;
+		interrupts = <GIC_SPI 357 0>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	qupv3_se13_spi: spi@a94000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa94000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S5_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se13_spi_active>;
+		pinctrl-1 = <&qupv3_se13_spi_sleep>;
+		interrupts = <GIC_SPI 358 0>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	qupv3_se14_spi: spi@a98000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa98000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S6_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se14_spi_active>;
+		pinctrl-1 = <&qupv3_se14_spi_sleep>;
+		interrupts = <GIC_SPI 359 0>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+	qupv3_se15_spi: spi@a9c000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa9c000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S7_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se15_spi_active>;
+		pinctrl-1 = <&qupv3_se15_spi_sleep>;
+		interrupts = <GIC_SPI 360 0>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
index 6ea92ee..17b90c7 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
@@ -9,3 +9,71 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+
+/{
+	aliases {
+		serial0 = &qupv3_se10_2uart;
+		serial1 = &qupv3_se9_2uart;
+		spi0 = &qupv3_se8_spi;
+		i2c0 = &qupv3_se10_i2c;
+		i2c1 = &qupv3_se3_i2c;
+		hsuart0 = &qupv3_se6_4uart;
+	};
+
+};
+
+&qupv3_se9_2uart {
+	status = "disabled";
+};
+
+&qupv3_se8_spi {
+	status = "disabled";
+};
+
+&qupv3_se10_2uart {
+	status = "ok";
+};
+
+&qupv3_se3_i2c {
+	status = "disabled";
+};
+
+&qupv3_se10_i2c {
+	status = "disabled";
+};
+
+&qupv3_se6_4uart {
+	status = "disabled";
+};
+
+&ufsphy_mem {
+	compatible = "qcom,ufs-phy-qrbtc-sdm845";
+
+	vdda-phy-supply = <&pm660l_l1>; /* 0.88v */
+	vdda-pll-supply = <&pm660_l1>; /* 1.2v */
+	vdda-phy-max-microamp = <62900>;
+	vdda-pll-max-microamp = <18300>;
+
+	status = "ok";
+};
+
+&ufshc_mem {
+	limit-tx-hs-gear = <1>;
+	limit-rx-hs-gear = <1>;
+	scsi-cmd-timeout = <300000>;
+
+	vdd-hba-supply = <&ufs_phy_gdsc>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm660l_l4>;
+	vccq2-supply = <&pm660_l8>;
+	vcc-max-microamp = <600000>;
+	vccq2-max-microamp = <600000>;
+
+	qcom,vddp-ref-clk-supply = <&pm660_l1>;
+	qcom,vddp-ref-clk-max-microamp = <100>;
+
+	qcom,disable-lpm;
+	rpm-level = <0>;
+	spm-level = <0>;
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi b/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi
new file mode 100644
index 0000000..022f705
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi
@@ -0,0 +1,202 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+&soc {
+	qcom,smp2p-modem@1799000c {
+		compatible = "qcom,smp2p";
+		reg = <0x1799000c 0x4>;
+		qcom,remote-pid = <1>;
+		qcom,irq-bitmask = <0x4000>;
+		interrupts = <GIC_SPI 451 IRQ_TYPE_EDGE_RISING>;
+	};
+
+	qcom,smp2p-adsp@1799000c {
+		compatible = "qcom,smp2p";
+		reg = <0x1799000c 0x4>;
+		qcom,remote-pid = <2>;
+		qcom,irq-bitmask = <0x200>;
+		interrupts = <GIC_SPI 157 IRQ_TYPE_EDGE_RISING>;
+	};
+
+	qcom,smp2p-cdsp@1799000c {
+		compatible = "qcom,smp2p";
+		reg = <0x1799000c 0x4>;
+		qcom,remote-pid = <5>;
+		qcom,irq-bitmask = <0x40>;
+		interrupts = <GIC_SPI 576 IRQ_TYPE_EDGE_RISING>;
+	};
+
+
+	smp2pgpio_smp2p_15_in: qcom,smp2pgpio-smp2p-15-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <15>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_15_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_15_in";
+		gpios = <&smp2pgpio_smp2p_15_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_15_out: qcom,smp2pgpio-smp2p-15-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <15>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_15_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_15_out";
+		gpios = <&smp2pgpio_smp2p_15_out 0 0>;
+	};
+
+	smp2pgpio_smp2p_1_in: qcom,smp2pgpio-smp2p-1-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <1>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_1_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_1_in";
+		gpios = <&smp2pgpio_smp2p_1_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_1_out: qcom,smp2pgpio-smp2p-1-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <1>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_1_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_1_out";
+		gpios = <&smp2pgpio_smp2p_1_out 0 0>;
+	};
+
+	smp2pgpio_smp2p_2_in: qcom,smp2pgpio-smp2p-2-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <2>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_2_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_2_in";
+		gpios = <&smp2pgpio_smp2p_2_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_2_out: qcom,smp2pgpio-smp2p-2-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <2>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_2_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_2_out";
+		gpios = <&smp2pgpio_smp2p_2_out 0 0>;
+	};
+
+	smp2pgpio_sleepstate_2_out: qcom,smp2pgpio-sleepstate-gpio-2-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "sleepstate";
+		qcom,remote-pid = <2>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio-sleepstate-2-out {
+		compatible = "qcom,smp2pgpio-sleepstate-out";
+		gpios = <&smp2pgpio_sleepstate_2_out 0 0>;
+	};
+
+	smp2pgpio_smp2p_5_in: qcom,smp2pgpio-smp2p-5-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <5>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_5_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_5_in";
+		gpios = <&smp2pgpio_smp2p_5_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_5_out: qcom,smp2pgpio-smp2p-5-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <5>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_5_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_5_out";
+		gpios = <&smp2pgpio_smp2p_5_out 0 0>;
+	};
+
+	/* ssr - inbound entry from lpass */
+	smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "slave-kernel";
+		qcom,remote-pid = <2>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - outbound entry to lpass */
+	smp2pgpio_ssr_smp2p_2_out: qcom,smp2pgpio-ssr-smp2p-2-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "master-kernel";
+		qcom,remote-pid = <2>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index bb5217e..d663bcd 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -19,6 +19,7 @@
 #include <dt-bindings/clock/qcom,videocc-sdm845.h>
 #include <dt-bindings/clock/qcom,cpucc-sdm845.h>
 #include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/soc/qcom,tcs-mbox.h>
 #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
 
 / {
@@ -27,7 +28,9 @@
 	qcom,msm-id = <336 0x0>;
 	interrupt-parent = <&intc>;
 
-	aliases { };
+	aliases {
+		ufshc1 = &ufshc_mem; /* Embedded UFS slot */
+	};
 
 	cpus {
 		#address-cells = <2>;
@@ -315,37 +318,37 @@
 		pil_cdsp_mem: cdsp_regions@93300000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x93300000 0 0x600000>;
+			reg = <0 0x93300000 0 0x800000>;
 		};
 
-		pil_mba_mem: pil_mba_region@0x93900000 {
+		pil_mba_mem: pil_mba_region@0x93b00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x93900000 0 0x200000>;
+			reg = <0 0x93b00000 0 0x200000>;
 		};
 
-		pil_adsp_mem: pil_adsp_region@93b00000 {
+		pil_adsp_mem: pil_adsp_region@93d00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x93b00000 0 0x1e00000>;
+			reg = <0 0x93d00000 0 0x1e00000>;
 		};
 
-		pil_ipa_fw_mem: pil_ipa_fw_region@95900000 {
+		pil_ipa_fw_mem: pil_ipa_fw_region@95b00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x95900000 0 0x10000>;
+			reg = <0 0x95b00000 0 0x10000>;
 		};
 
-		pil_ipa_gsi_mem: pil_ipa_gsi_region@95910000 {
+		pil_ipa_gsi_mem: pil_ipa_gsi_region@95b10000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x95910000 0 0x5000>;
+			reg = <0 0x95b10000 0 0x5000>;
 		};
 
-		pil_gpu_mem: pil_gpu_region@95915000 {
+		pil_gpu_mem: pil_gpu_region@95b15000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x95915000 0 0x1000>;
+			reg = <0 0x95b15000 0 0x1000>;
 		};
 
 		adsp_mem: adsp_region {
@@ -394,6 +397,10 @@
 
 #include "sdm670-ion.dtsi"
 
+#include "sdm670-smp2p.dtsi"
+
+#include "sdm670-qupv3.dtsi"
+
 &soc {
 	#address-cells = <1>;
 	#size-cells = <1>;
@@ -546,6 +553,30 @@
 		#reset-cells = <1>;
 	};
 
+	slim_aud: slim@62dc0000 {
+		cell-index = <1>;
+		compatible = "qcom,slim-ngd";
+		reg = <0x62dc0000 0x2c000>,
+			<0x62d84000 0x2a000>;
+		reg-names = "slimbus_physical", "slimbus_bam_physical";
+		interrupts = <0 163 0>, <0 164 0>;
+		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+		qcom,apps-ch-pipes = <0x780000>;
+		qcom,ea-pc = <0x290>;
+		status = "disabled";
+	};
+
+	slim_qca: slim@62e40000 {
+		cell-index = <3>;
+		compatible = "qcom,slim-ngd";
+		reg = <0x62e40000 0x2c000>,
+			<0x62e04000 0x20000>;
+		reg-names = "slimbus_physical", "slimbus_bam_physical";
+		interrupts = <0 291 0>, <0 292 0>;
+		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+		status = "disabled";
+	};
+
 	wdog: qcom,wdt@17980000{
 		compatible = "qcom,msm-watchdog";
 		reg = <0x17980000 0x1000>;
@@ -679,6 +710,225 @@
 				  "l3-scu-faultirq";
 	};
 
+	qcom,ipc-spinlock@1f40000 {
+		compatible = "qcom,ipc-spinlock-sfpb";
+		reg = <0x1f40000 0x8000>;
+		qcom,num-locks = <8>;
+	};
+
+	qcom,smem@86000000 {
+		compatible = "qcom,smem";
+		reg = <0x86000000 0x200000>,
+			<0x17911008 0x4>,
+			<0x778000 0x7000>,
+			<0x1fd4000 0x8>;
+		reg-names = "smem", "irq-reg-base", "aux-mem1",
+			"smem_targ_info_reg";
+		qcom,mpu-enabled;
+	};
+
+	qmp_aop: mailbox@1799000c {
+		compatible = "qcom,qmp-mbox";
+		label = "aop";
+		reg = <0xc300000 0x100000>,
+			<0x1799000c 0x4>;
+		reg-names = "msgram", "irq-reg-base";
+		qcom,irq-mask = <0x1>;
+		interrupts = <0 389 1>;
+		mbox-desc-offset = <0x0>;
+		#mbox-cells = <1>;
+	};
+
+	qcom,glink-smem-native-xprt-modem@86000000 {
+		compatible = "qcom,glink-smem-native-xprt";
+		reg = <0x86000000 0x200000>,
+			<0x1799000c 0x4>;
+		reg-names = "smem", "irq-reg-base";
+		qcom,irq-mask = <0x1000>;
+		interrupts = <GIC_SPI 449 IRQ_TYPE_EDGE_RISING>;
+		label = "mpss";
+	};
+
+	qcom,glink-smem-native-xprt-adsp@86000000 {
+		compatible = "qcom,glink-smem-native-xprt";
+		reg = <0x86000000 0x200000>,
+			<0x1799000c 0x4>;
+		reg-names = "smem", "irq-reg-base";
+		qcom,irq-mask = <0x100>;
+		interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+		label = "lpass";
+		qcom,qos-config = <&glink_qos_adsp>;
+		qcom,ramp-time = <0xaf>;
+	};
+
+	glink_qos_adsp: qcom,glink-qos-config-adsp {
+		compatible = "qcom,glink-qos-config";
+		qcom,flow-info = <0x3c 0x0>,
+				<0x3c 0x0>,
+				<0x3c 0x0>,
+				<0x3c 0x0>;
+		qcom,mtu-size = <0x800>;
+		qcom,tput-stats-cycle = <0xa>;
+	};
+
+	glink_spi_xprt_wdsp: qcom,glink-spi-xprt-wdsp {
+		compatible = "qcom,glink-spi-xprt";
+		label = "wdsp";
+		qcom,remote-fifo-config = <&glink_fifo_wdsp>;
+		qcom,qos-config = <&glink_qos_wdsp>;
+		qcom,ramp-time = <0x10>,
+				     <0x20>,
+				     <0x30>,
+				     <0x40>;
+	};
+
+	glink_fifo_wdsp: qcom,glink-fifo-config-wdsp {
+		compatible = "qcom,glink-fifo-config";
+		qcom,out-read-idx-reg = <0x12000>;
+		qcom,out-write-idx-reg = <0x12004>;
+		qcom,in-read-idx-reg = <0x1200C>;
+		qcom,in-write-idx-reg = <0x12010>;
+	};
+
+	glink_qos_wdsp: qcom,glink-qos-config-wdsp {
+		compatible = "qcom,glink-qos-config";
+		qcom,flow-info = <0x80 0x0>,
+				 <0x70 0x1>,
+				 <0x60 0x2>,
+				 <0x50 0x3>;
+		qcom,mtu-size = <0x800>;
+		qcom,tput-stats-cycle = <0xa>;
+	};
+
+	qcom,glink-smem-native-xprt-cdsp@86000000 {
+		compatible = "qcom,glink-smem-native-xprt";
+		reg = <0x86000000 0x200000>,
+			<0x1799000c 0x4>;
+		reg-names = "smem", "irq-reg-base";
+		qcom,irq-mask = <0x10>;
+		interrupts = <GIC_SPI 574 IRQ_TYPE_EDGE_RISING>;
+		label = "cdsp";
+	};
+
+	glink_mpss: qcom,glink-ssr-modem {
+		compatible = "qcom,glink_ssr";
+		label = "modem";
+		qcom,edge = "mpss";
+		qcom,notify-edges = <&glink_lpass>, <&glink_cdsp>;
+		qcom,xprt = "smem";
+	};
+
+	glink_lpass: qcom,glink-ssr-adsp {
+		compatible = "qcom,glink_ssr";
+		label = "adsp";
+		qcom,edge = "lpass";
+		qcom,notify-edges = <&glink_mpss>, <&glink_cdsp>;
+		qcom,xprt = "smem";
+	};
+
+	glink_cdsp: qcom,glink-ssr-cdsp {
+		compatible = "qcom,glink_ssr";
+		label = "cdsp";
+		qcom,edge = "cdsp";
+		qcom,notify-edges = <&glink_mpss>, <&glink_lpass>;
+		qcom,xprt = "smem";
+	};
+
+	qcom,ipc_router {
+		compatible = "qcom,ipc_router";
+		qcom,node-id = <1>;
+	};
+
+	qcom,ipc_router_modem_xprt {
+		compatible = "qcom,ipc_router_glink_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "mpss";
+		qcom,glink-xprt = "smem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+	};
+
+	qcom,ipc_router_q6_xprt {
+		compatible = "qcom,ipc_router_glink_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "lpass";
+		qcom,glink-xprt = "smem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+	};
+
+	qcom,ipc_router_cdsp_xprt {
+		compatible = "qcom,ipc_router_glink_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "cdsp";
+		qcom,glink-xprt = "smem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+	};
+
+	qcom,glink_pkt {
+		compatible = "qcom,glinkpkt";
+
+		qcom,glinkpkt-at-mdm0 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DS";
+			qcom,glinkpkt-dev-name = "at_mdm0";
+		};
+
+		qcom,glinkpkt-loopback_cntl {
+			qcom,glinkpkt-transport = "lloop";
+			qcom,glinkpkt-edge = "local";
+			qcom,glinkpkt-ch-name = "LOCAL_LOOPBACK_CLNT";
+			qcom,glinkpkt-dev-name = "glink_pkt_loopback_ctrl";
+		};
+
+		qcom,glinkpkt-loopback_data {
+			qcom,glinkpkt-transport = "lloop";
+			qcom,glinkpkt-edge = "local";
+			qcom,glinkpkt-ch-name = "glink_pkt_lloop_CLNT";
+			qcom,glinkpkt-dev-name = "glink_pkt_loopback";
+		};
+
+		qcom,glinkpkt-apr-apps2 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "adsp";
+			qcom,glinkpkt-ch-name = "apr_apps2";
+			qcom,glinkpkt-dev-name = "apr_apps2";
+		};
+
+		qcom,glinkpkt-data40-cntl {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA40_CNTL";
+			qcom,glinkpkt-dev-name = "smdcntl8";
+		};
+
+		qcom,glinkpkt-data1 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA1";
+			qcom,glinkpkt-dev-name = "smd7";
+		};
+
+		qcom,glinkpkt-data4 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA4";
+			qcom,glinkpkt-dev-name = "smd8";
+		};
+
+		qcom,glinkpkt-data11 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA11";
+			qcom,glinkpkt-dev-name = "smd11";
+		};
+	};
+
 	qcom,chd_sliver {
 		compatible = "qcom,core-hang-detect";
 		label = "silver";
@@ -741,6 +991,29 @@
 		};
 	};
 
+	cmd_db: qcom,cmd-db@c3f000c {
+		compatible = "qcom,cmd-db";
+		reg = <0xc3f000c 0x8>;
+	};
+
+	apps_rsc: mailbox@179e0000 {
+		compatible = "qcom,tcs-drv";
+		label = "apps_rsc";
+		reg = <0x179e0000 0x100>, <0x179e0d00 0x3000>;
+		interrupts = <0 5 0>;
+		#mbox-cells = <1>;
+		qcom,drv-id = <2>;
+		qcom,tcs-config = <ACTIVE_TCS  2>,
+				<SLEEP_TCS     3>,
+				<WAKE_TCS      3>,
+				<CONTROL_TCS   1>;
+	};
+
+	system_pm {
+		compatible = "qcom,system-pm";
+		mboxes = <&apps_rsc 0>;
+	};
+
 	dcc: dcc_v2@10a2000 {
 		compatible = "qcom,dcc_v2";
 		reg = <0x10a2000 0x1000>,
@@ -766,11 +1039,105 @@
 		#interrupt-cells = <4>;
 		cell-index = <0>;
 	};
+
+	ufsphy_mem: ufsphy_mem@1d87000 {
+		reg = <0x1d87000 0xe00>; /* PHY regs */
+		reg-names = "phy_mem";
+		#phy-cells = <0>;
+
+		lanes-per-direction = <1>;
+
+		clock-names = "ref_clk_src",
+			"ref_clk",
+			"ref_aux_clk";
+		clocks = <&clock_rpmh RPMH_CXO_CLK>,
+			<&clock_gcc GCC_UFS_MEM_CLKREF_CLK>,
+			<&clock_gcc GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK>;
+
+		status = "disabled";
+	};
+
+	ufshc_mem: ufshc@1d84000 {
+		compatible = "qcom,ufshc";
+		reg = <0x1d84000 0x3000>;
+		interrupts = <0 265 0>;
+		phys = <&ufsphy_mem>;
+		phy-names = "ufsphy";
+
+		lanes-per-direction = <1>;
+		dev-ref-clk-freq = <0>; /* 19.2 MHz */
+
+		clock-names =
+			"core_clk",
+			"bus_aggr_clk",
+			"iface_clk",
+			"core_clk_unipro",
+			"core_clk_ice",
+			"ref_clk",
+			"tx_lane0_sync_clk",
+			"rx_lane0_sync_clk";
+		clocks =
+			<&clock_gcc GCC_UFS_PHY_AXI_HW_CTL_CLK>,
+			<&clock_gcc GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK>,
+			<&clock_gcc GCC_UFS_PHY_AHB_CLK>,
+			<&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK>,
+			<&clock_gcc GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK>,
+			<&clock_rpmh RPMH_CXO_CLK>,
+			<&clock_gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+			<&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>;
+		freq-table-hz =
+			<50000000 200000000>,
+			<0 0>,
+			<0 0>,
+			<37500000 150000000>,
+			<75000000 300000000>,
+			<0 0>,
+			<0 0>,
+			<0 0>;
+
+		resets = <&clock_gcc GCC_UFS_PHY_BCR>;
+		reset-names = "core_reset";
+
+		status = "disabled";
+	};
+
+	qcom,lpass@62400000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x62400000 0x00100>;
+		interrupts = <0 162 1>;
+
+		vdd_cx-supply = <&pm660l_l9_level>;
+		qcom,proxy-reg-names = "vdd_cx";
+		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 100000>;
+
+		clocks = <&clock_rpmh RPMH_CXO_CLK>;
+		clock-names = "xo";
+		qcom,proxy-clock-names = "xo";
+
+		qcom,pas-id = <1>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,smem-id = <423>;
+		qcom,sysmon-id = <1>;
+		qcom,ssctl-instance-id = <0x14>;
+		qcom,firmware-name = "adsp";
+		memory-region = <&pil_adsp_mem>;
+
+		/* GPIO inputs from lpass */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_2_in 1 0>;
+		qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_2_in 3 0>;
+
+		/* GPIO output to lpass */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
+		status = "ok";
+	};
 };
 
 #include "sdm670-pinctrl.dtsi"
 #include "msm-arm-smmu-sdm670.dtsi"
 #include "msm-gdsc-sdm845.dtsi"
+#include "sdm670-pm.dtsi"
 
 &usb30_prim_gdsc {
 	status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
index 4b7a680..94d74e2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
@@ -22,7 +22,7 @@
 	qcom,board-id = <1 1>;
 };
 
-&dsi_dual_nt35597_truly_cmd_display {
+&dsi_nt35597_truly_dsc_cmd_display {
 	/delete-property/ qcom,dsi-display-active;
 };
 
@@ -41,6 +41,17 @@
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
 
+&dsi_sharp_4k_dsc_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
 &dsi_sharp_4k_dsc_video_display {
 	qcom,dsi-display-active;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
index 67c3bcd..fca87e1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
@@ -22,7 +22,7 @@
 	qcom,board-id = <8 1>;
 };
 
-&dsi_dual_nt35597_truly_cmd_display {
+&dsi_nt35597_truly_dsc_cmd_display {
 	/delete-property/ qcom,dsi-display-active;
 };
 
@@ -41,6 +41,17 @@
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
 
+&dsi_sharp_4k_dsc_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
 &dsi_sharp_4k_dsc_video_display {
 	qcom,dsi-display-active;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi
new file mode 100644
index 0000000..9208302
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm845-wcd.dtsi"
+#include "msm-wsa881x.dtsi"
+#include <dt-bindings/clock/qcom,audio-ext-clk.h>
+
+&snd_934x {
+	qcom,audio-routing =
+		"AIF4 VI", "MCLK",
+		"RX_BIAS", "MCLK",
+		"MADINPUT", "MCLK",
+		"hifi amp", "LINEOUT1",
+		"hifi amp", "LINEOUT2",
+		"AMIC2", "MIC BIAS2",
+		"MIC BIAS2", "Headset Mic",
+		"AMIC3", "MIC BIAS2",
+		"MIC BIAS2", "ANCRight Headset Mic",
+		"AMIC4", "MIC BIAS2",
+		"MIC BIAS2", "ANCLeft Headset Mic",
+		"AMIC5", "MIC BIAS3",
+		"MIC BIAS3", "Handset Mic",
+		"DMIC0", "MIC BIAS1",
+		"MIC BIAS1", "Digital Mic0",
+		"DMIC1", "MIC BIAS1",
+		"MIC BIAS1", "Digital Mic1",
+		"DMIC2", "MIC BIAS3",
+		"MIC BIAS3", "Digital Mic2",
+		"DMIC3", "MIC BIAS3",
+		"MIC BIAS3", "Digital Mic3",
+		"DMIC4", "MIC BIAS4",
+		"MIC BIAS4", "Digital Mic4",
+		"DMIC5", "MIC BIAS4",
+		"MIC BIAS4", "Digital Mic5",
+		"SpkrLeft IN", "SPK1 OUT",
+		"SpkrRight IN", "SPK2 OUT";
+
+	qcom,msm-mbhc-hphl-swh = <1>;
+	qcom,msm-mbhc-gnd-swh = <1>;
+	qcom,hph-en0-gpio = <&tavil_hph_en0>;
+	qcom,hph-en1-gpio = <&tavil_hph_en1>;
+	qcom,tavil-mclk-clk-freq = <9600000>;
+
+	asoc-codec = <&stub_codec>;
+	asoc-codec-names = "msm-stub-codec.1";
+
+	qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
+	qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
+	pinctrl-names = "aud_active", "aud_sleep";
+	pinctrl-0 = <&wcd_usbc_analog_en2_active>;
+	pinctrl-1 = <&wcd_usbc_analog_en2_idle>;
+
+	qcom,wsa-max-devs = <2>;
+	qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
+			<&wsa881x_0213>, <&wsa881x_0214>;
+	qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+				  "SpkrLeft", "SpkrRight";
+};
+
+&soc {
+	wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl@49 {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&wcd_usbc_analog_en1_active>;
+		pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
+	};
+
+	wcd9xxx_intc: wcd9xxx-irq {
+		status = "ok";
+		compatible = "qcom,wcd9xxx-irq";
+		interrupt-controller;
+		#interrupt-cells = <1>;
+		interrupt-parent = <&tlmm>;
+		qcom,gpio-connect = <&tlmm 54 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&wcd_intr_default>;
+	};
+
+	clock_audio_lnbb: audio_ext_clk_lnbb {
+		status = "ok";
+		compatible = "qcom,audio-ref-clk";
+		clock-names = "osr_clk";
+		clocks = <&clock_rpmh RPMH_LN_BB_CLK2>;
+		qcom,node_has_rpm_clock;
+		#clock-cells = <1>;
+	};
+
+	wcd_rst_gpio: msm_cdc_pinctrl@64 {
+		compatible = "qcom,msm-cdc-pinctrl";
+		qcom,cdc-rst-n-gpio = <&tlmm 64 0>;
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&cdc_reset_active>;
+		pinctrl-1 = <&cdc_reset_sleep>;
+	};
+
+	qcom,wcd-dsp-glink {
+		compatible = "qcom,wcd-dsp-glink";
+	};
+
+	qcom,wcd-dsp-mgr {
+		compatible = "qcom,wcd-dsp-mgr";
+		qcom,wdsp-components = <&wcd934x_cdc 0>,
+				       <&wcd_spi_0 1>,
+				       <&glink_spi_xprt_wdsp 2>;
+		qcom,img-filename = "cpe_9340";
+	};
+};
+
+&slim_aud {
+	wcd934x_cdc: tavil_codec {
+		compatible = "qcom,tavil-slim-pgd";
+		elemental-addr = [00 01 50 02 17 02];
+
+		interrupt-parent = <&wcd9xxx_intc>;
+		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+			      17 18 19 20 21 22 23 24 25 26 27 28 29
+			      30 31>;
+
+		qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
+
+		clock-names = "wcd_clk";
+		clocks = <&clock_audio_lnbb AUDIO_PMIC_LNBB_CLK>;
+
+		cdc-vdd-buck-supply = <&pm8998_s4>;
+		qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-buck-current = <650000>;
+
+		cdc-buck-sido-supply = <&pm8998_s4>;
+		qcom,cdc-buck-sido-voltage = <1800000 1800000>;
+		qcom,cdc-buck-sido-current = <250000>;
+
+		cdc-vdd-tx-h-supply = <&pm8998_s4>;
+		qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-tx-h-current = <25000>;
+
+		cdc-vdd-rx-h-supply = <&pm8998_s4>;
+		qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-rx-h-current = <25000>;
+
+		cdc-vddpx-1-supply = <&pm8998_s4>;
+		qcom,cdc-vddpx-1-voltage = <1800000 1800000>;
+		qcom,cdc-vddpx-1-current = <10000>;
+
+		qcom,cdc-static-supplies = "cdc-vdd-buck",
+					   "cdc-buck-sido",
+					   "cdc-vdd-tx-h",
+					   "cdc-vdd-rx-h",
+					   "cdc-vddpx-1";
+
+		qcom,cdc-micbias1-mv = <1800>;
+		qcom,cdc-micbias2-mv = <1800>;
+		qcom,cdc-micbias3-mv = <1800>;
+		qcom,cdc-micbias4-mv = <1800>;
+
+		qcom,cdc-mclk-clk-rate = <9600000>;
+		qcom,cdc-slim-ifd = "tavil-slim-ifd";
+		qcom,cdc-slim-ifd-elemental-addr = [00 00 50 02 17 02];
+		qcom,cdc-dmic-sample-rate = <4800000>;
+		qcom,cdc-mad-dmic-rate = <600000>;
+
+		qcom,wdsp-cmpnt-dev-name = "tavil_codec";
+
+		wcd_spi_0: wcd_spi {
+			compatible = "qcom,wcd-spi-v2";
+			qcom,master-bus-num = <0>;
+			qcom,chip-select = <0>;
+			qcom,max-frequency = <9600000>;
+			qcom,mem-base-addr = <0x100000>;
+		};
+
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
index ad046e9..dd82ad7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
@@ -12,9 +12,6 @@
  */
 
 #include "msm-audio-lpass.dtsi"
-#include "sdm845-wcd.dtsi"
-#include "msm-wsa881x.dtsi"
-#include <dt-bindings/clock/qcom,audio-ext-clk.h>
 
 &msm_audio_ion {
 	iommus = <&apps_smmu 0x1821 0x0>;
@@ -31,7 +28,7 @@
 		qcom,clk-mult = <10>;
 	};
 
-	sound-tavil {
+	snd_934x: sound-tavil {
 		compatible = "qcom,sdm845-asoc-snd-tavil";
 		qcom,model = "sdm845-tavil-snd-card";
 		qcom,wcn-btfm;
@@ -48,43 +45,6 @@
 			    "lpaif_tert_mode_muxsel",
 			    "lpaif_quat_mode_muxsel";
 
-		qcom,audio-routing =
-			"AIF4 VI", "MCLK",
-			"RX_BIAS", "MCLK",
-			"MADINPUT", "MCLK",
-			"hifi amp", "LINEOUT1",
-			"hifi amp", "LINEOUT2",
-			"AMIC2", "MIC BIAS2",
-			"MIC BIAS2", "Headset Mic",
-			"AMIC3", "MIC BIAS2",
-			"MIC BIAS2", "ANCRight Headset Mic",
-			"AMIC4", "MIC BIAS2",
-			"MIC BIAS2", "ANCLeft Headset Mic",
-			"AMIC5", "MIC BIAS3",
-			"MIC BIAS3", "Handset Mic",
-			"DMIC0", "MIC BIAS1",
-			"MIC BIAS1", "Digital Mic0",
-			"DMIC1", "MIC BIAS1",
-			"MIC BIAS1", "Digital Mic1",
-			"DMIC2", "MIC BIAS3",
-			"MIC BIAS3", "Digital Mic2",
-			"DMIC3", "MIC BIAS3",
-			"MIC BIAS3", "Digital Mic3",
-			"DMIC4", "MIC BIAS4",
-			"MIC BIAS4", "Digital Mic4",
-			"DMIC5", "MIC BIAS4",
-			"MIC BIAS4", "Digital Mic5",
-			"SpkrLeft IN", "SPK1 OUT",
-			"SpkrRight IN", "SPK2 OUT";
-
-		qcom,msm-mbhc-hphl-swh = <1>;
-		qcom,msm-mbhc-gnd-swh = <1>;
-		qcom,hph-en0-gpio = <&tavil_hph_en0>;
-		qcom,hph-en1-gpio = <&tavil_hph_en1>;
-		qcom,tavil-mclk-clk-freq = <9600000>;
-
-		qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
-
 		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
 				<&loopback>, <&compress>, <&hostless>,
 				<&afe>, <&lsm>, <&routing>, <&compr>,
@@ -136,65 +96,6 @@
 				"msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
 				"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
 				"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
-		asoc-codec = <&stub_codec>;
-		asoc-codec-names = "msm-stub-codec.1";
-		qcom,wsa-max-devs = <2>;
-		qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
-				<&wsa881x_0213>, <&wsa881x_0214>;
-		qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
-					  "SpkrLeft", "SpkrRight";
-
-		qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
-		pinctrl-names = "aud_active", "aud_sleep";
-		pinctrl-0 = <&wcd_usbc_analog_en2_active>;
-		pinctrl-1 = <&wcd_usbc_analog_en2_idle>;
-	};
-
-	wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl@49 {
-		compatible = "qcom,msm-cdc-pinctrl";
-		pinctrl-names = "aud_active", "aud_sleep";
-		pinctrl-0 = <&wcd_usbc_analog_en1_active>;
-		pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
-	};
-
-	wcd9xxx_intc: wcd9xxx-irq {
-		status = "ok";
-		compatible = "qcom,wcd9xxx-irq";
-		interrupt-controller;
-		#interrupt-cells = <1>;
-		interrupt-parent = <&tlmm>;
-		qcom,gpio-connect = <&tlmm 54 0>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&wcd_intr_default>;
-	};
-
-	clock_audio_lnbb: audio_ext_clk_lnbb {
-		status = "ok";
-		compatible = "qcom,audio-ref-clk";
-		clock-names = "osr_clk";
-		clocks = <&clock_rpmh RPMH_LN_BB_CLK2>;
-		qcom,node_has_rpm_clock;
-		#clock-cells = <1>;
-	};
-
-	wcd_rst_gpio: msm_cdc_pinctrl@64 {
-		compatible = "qcom,msm-cdc-pinctrl";
-		qcom,cdc-rst-n-gpio = <&tlmm 64 0>;
-		pinctrl-names = "aud_active", "aud_sleep";
-		pinctrl-0 = <&cdc_reset_active>;
-		pinctrl-1 = <&cdc_reset_sleep>;
-	};
-
-	qocm,wcd-dsp-glink {
-		compatible = "qcom,wcd-dsp-glink";
-	};
-
-	qcom,wcd-dsp-mgr {
-		compatible = "qcom,wcd-dsp-mgr";
-		qcom,wdsp-components = <&wcd934x_cdc 0>,
-				       <&wcd_spi_0 1>,
-				       <&glink_spi_xprt_wdsp 2>;
-		qcom,img-filename = "cpe_9340";
 	};
 };
 
@@ -203,66 +104,4 @@
 		compatible = "qcom,msm-dai-slim";
 		elemental-addr = [ff ff ff fe 17 02];
 	};
-
-	wcd934x_cdc: tavil_codec {
-		compatible = "qcom,tavil-slim-pgd";
-		elemental-addr = [00 01 50 02 17 02];
-
-		interrupt-parent = <&wcd9xxx_intc>;
-		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
-			      17 18 19 20 21 22 23 24 25 26 27 28 29
-			      30 31>;
-
-		qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
-
-		clock-names = "wcd_clk";
-		clocks = <&clock_audio_lnbb AUDIO_PMIC_LNBB_CLK>;
-
-		cdc-vdd-buck-supply = <&pm8998_s4>;
-		qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
-		qcom,cdc-vdd-buck-current = <650000>;
-
-		cdc-buck-sido-supply = <&pm8998_s4>;
-		qcom,cdc-buck-sido-voltage = <1800000 1800000>;
-		qcom,cdc-buck-sido-current = <250000>;
-
-		cdc-vdd-tx-h-supply = <&pm8998_s4>;
-		qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
-		qcom,cdc-vdd-tx-h-current = <25000>;
-
-		cdc-vdd-rx-h-supply = <&pm8998_s4>;
-		qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
-		qcom,cdc-vdd-rx-h-current = <25000>;
-
-		cdc-vddpx-1-supply = <&pm8998_s4>;
-		qcom,cdc-vddpx-1-voltage = <1800000 1800000>;
-		qcom,cdc-vddpx-1-current = <10000>;
-
-		qcom,cdc-static-supplies = "cdc-vdd-buck",
-					   "cdc-buck-sido",
-					   "cdc-vdd-tx-h",
-					   "cdc-vdd-rx-h",
-					   "cdc-vddpx-1";
-
-		qcom,cdc-micbias1-mv = <1800>;
-		qcom,cdc-micbias2-mv = <1800>;
-		qcom,cdc-micbias3-mv = <1800>;
-		qcom,cdc-micbias4-mv = <1800>;
-
-		qcom,cdc-mclk-clk-rate = <9600000>;
-		qcom,cdc-slim-ifd = "tavil-slim-ifd";
-		qcom,cdc-slim-ifd-elemental-addr = [00 00 50 02 17 02];
-		qcom,cdc-dmic-sample-rate = <4800000>;
-		qcom,cdc-mad-dmic-rate = <600000>;
-
-		qcom,wdsp-cmpnt-dev-name = "tavil_codec";
-
-		wcd_spi_0: wcd_spi {
-			compatible = "qcom,wcd-spi-v2";
-			qcom,master-bus-num = <0>;
-			qcom,chip-select = <0>;
-			qcom,max-frequency = <9600000>;
-			qcom,mem-base-addr = <0x100000>;
-		};
-	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
index b33b525..3ce5611 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
@@ -680,7 +680,14 @@
 			qcom,qport = <10>;
 			qcom,connections = <&slv_qns_a2noc_snoc>;
 			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,ap-owned;
 			qcom,prio = <2>;
+			qcom,node-qos-clks {
+				clocks =
+				<&clock_gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>;
+				clock-names =
+				"clk-usb3-prim-axi-no-rate";
+			};
 		};
 
 		mas_xm_usb3_1: mas-xm-usb3-1 {
@@ -691,7 +698,14 @@
 			qcom,qport = <11>;
 			qcom,connections = <&slv_qns_a2noc_snoc>;
 			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,ap-owned;
 			qcom,prio = <2>;
+			qcom,node-qos-clks {
+				clocks =
+				<&clock_gcc GCC_AGGRE_USB3_SEC_AXI_CLK>;
+				clock-names =
+				"clk-usb3-sec-axi-no-rate";
+			};
 		};
 
 		mas_qxm_camnoc_hf0_uncomp: mas-qxm-camnoc-hf0-uncomp {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
index a715025..5db4c35 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
@@ -16,9 +16,9 @@
 		cell-index = <0>;
 		reg = <0x00 0x00>;
 		compatible = "qcom,camera-flash";
-		qcom,flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
-		qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
-		qcom,switch-source = <&pmi8998_switch0>;
+		flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+		torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+		switch-source = <&pmi8998_switch0>;
 		status = "ok";
 	};
 
@@ -26,9 +26,9 @@
 		cell-index = <1>;
 		reg = <0x01 0x00>;
 		compatible = "qcom,camera-flash";
-		qcom,flash-source = <&pmi8998_flash2>;
-		qcom,torch-source = <&pmi8998_torch2>;
-		qcom,switch-source = <&pmi8998_switch1>;
+		flash-source = <&pmi8998_flash2>;
+		torch-source = <&pmi8998_torch2>;
+		switch-source = <&pmi8998_switch1>;
 		status = "ok";
 	};
 
@@ -78,36 +78,39 @@
 		cell-index = <0>;
 		reg = <0x0>;
 		compatible = "qcom,actuator";
-		qcom,cci-master = <0>;
+		cci-master = <0>;
 		cam_vaf-supply = <&actuator_regulator>;
-		qcom,cam-vreg-name = "cam_vaf";
-		qcom,cam-vreg-min-voltage = <2800000>;
-		qcom,cam-vreg-max-voltage = <2800000>;
-		qcom,cam-vreg-op-mode = <0>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
 	};
 
 	actuator_front: qcom,actuator@1 {
 		cell-index = <1>;
 		reg = <0x1>;
 		compatible = "qcom,actuator";
-		qcom,cci-master = <1>;
+		cci-master = <1>;
 		cam_vaf-supply = <&actuator_regulator>;
-		qcom,cam-vreg-name = "cam_vaf";
-		qcom,cam-vreg-min-voltage = <2800000>;
-		qcom,cam-vreg-max-voltage = <2800000>;
-		qcom,cam-vreg-op-mode = <0>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
 	};
 
 	ois_rear: qcom,ois@0 {
 		cell-index = <0>;
 		reg = <0x0>;
 		compatible = "qcom,ois";
-		qcom,cci-master = <0>;
+		cci-master = <0>;
 		cam_vaf-supply = <&actuator_regulator>;
-		qcom,cam-vreg-name = "cam_vaf";
-		qcom,cam-vreg-min-voltage = <2800000>;
-		qcom,cam-vreg-max-voltage = <2800000>;
-		qcom,cam-vreg-op-mode = <0>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
 		status = "disabled";
 	};
 
@@ -119,12 +122,13 @@
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_rear_ldo>;
 		cam_clk-supply = <&titan_top_gdsc>;
-		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 			"cam_clk";
-		qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
-		qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
-		qcom,cam-vreg-op-mode = <0 80000 105000 0>;
-		qcom,gpio-no-mux = <0>;
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_active
 				&cam_sensor_rear_active>;
@@ -134,22 +138,22 @@
 			<&tlmm 80 0>,
 			<&tlmm 79 0>,
 			<&tlmm 27 0>;
-		qcom,gpio-reset = <1>;
-		qcom,gpio-vana = <2>;
-		qcom,gpio-vaf = <3>;
-		qcom,gpio-req-tbl-num = <0 1 2 3>;
-		qcom,gpio-req-tbl-flags = <1 0 0 0>;
-		qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-vaf = <3>;
+		gpio-req-tbl-num = <0 1 2 3>;
+		gpio-req-tbl-flags = <1 0 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
 					"CAM_RESET0",
 					"CAM_VANA0",
 					"CAM_VAF";
-		qcom,sensor-position = <0>;
-		qcom,sensor-mode = <0>;
-		qcom,cci-master = <0>;
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
 		status = "ok";
 		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
 		clock-names = "cam_clk";
-		qcom,clock-rates = <24000000>;
+		clock-rates = <24000000>;
 	};
 
 	eeprom_rear_aux: qcom,eeprom@1 {
@@ -160,12 +164,13 @@
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_clk-supply = <&titan_top_gdsc>;
-		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
 			"cam_clk";
-		qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
-		qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
-		qcom,cam-vreg-op-mode = <105000 0 80000 0>;
-		qcom,gpio-no-mux = <0>;
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1050000 0 3312000 0>;
+		rgltr-max-voltage = <1050000 0 3600000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
+		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
 				 &cam_sensor_rear2_active>;
@@ -174,20 +179,20 @@
 		gpios = <&tlmm 15 0>,
 			<&tlmm 9 0>,
 			<&tlmm 8 0>;
-		qcom,gpio-reset = <1>;
-		qcom,gpio-vana = <2>;
-		qcom,gpio-req-tbl-num = <0 1 2>;
-		qcom,gpio-req-tbl-flags = <1 0 0>;
-		qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
 					"CAM_RESET1",
 					"CAM_VANA1";
-		qcom,sensor-position = <0>;
-		qcom,sensor-mode = <0>;
-		qcom,cci-master = <1>;
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <1>;
 		status = "ok";
 		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
 		clock-names = "cam_clk";
-		qcom,clock-rates = <24000000>;
+		clock-rates = <24000000>;
 	};
 
 	eeprom_front: qcom,eeprom@2 {
@@ -198,12 +203,13 @@
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_ldo>;
 		cam_clk-supply = <&titan_top_gdsc>;
-		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 			"cam_clk";
-		qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
-		qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
-		qcom,cam-vreg-op-mode = <0 80000 105000 0>;
-		qcom,gpio-no-mux = <0>;
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk1_active
 				 &cam_sensor_front_active>;
@@ -213,46 +219,47 @@
 			<&tlmm 28 0>,
 			<&tlmm 8 0>,
 			<&tlmm 27 0>;
-		qcom,gpio-reset = <1>;
-		qcom,gpio-vana = <2>;
-		qcom,gpio-vaf = <3>;
-		qcom,gpio-req-tbl-num = <0 1 2 3>;
-		qcom,gpio-req-tbl-flags = <1 0 0 0>;
-		qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-vaf = <3>;
+		gpio-req-tbl-num = <0 1 2 3>;
+		gpio-req-tbl-flags = <1 0 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
 					"CAM_RESET2",
 					"CAM_VANA2",
 					"CAM_VAF";
-		qcom,sensor-position = <1>;
-		qcom,sensor-mode = <0>;
-		qcom,cci-master = <1>;
+		sensor-position = <1>;
+		sensor-mode = <0>;
+		cci-master = <1>;
 		status = "ok";
 		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
 		clock-names = "cam_clk";
-		qcom,clock-rates = <24000000>;
+		clock-rates = <24000000>;
 	};
 
 	qcom,cam-sensor@0 {
 		cell-index = <0>;
 		compatible = "qcom,cam-sensor";
 		reg = <0x0>;
-		qcom,csiphy-sd-index = <0>;
-		qcom,sensor-position-roll = <90>;
-		qcom,sensor-position-pitch = <0>;
-		qcom,sensor-position-yaw = <180>;
-		qcom,led-flash-src = <&led_flash_rear>;
-		qcom,actuator-src = <&actuator_rear>;
-		qcom,ois-src = <&ois_rear>;
-		qcom,eeprom-src = <&eeprom_rear>;
+		csiphy-sd-index = <0>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		led-flash-src = <&led_flash_rear>;
+		actuator-src = <&actuator_rear>;
+		ois-src = <&ois_rear>;
+		eeprom-src = <&eeprom_rear>;
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_rear_ldo>;
 		cam_clk-supply = <&titan_top_gdsc>;
-		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 			"cam_clk";
-		qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
-		qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
-		qcom,cam-vreg-op-mode = <0 80000 105000 0>;
-		qcom,gpio-no-mux = <0>;
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_active
 				&cam_sensor_rear_active>;
@@ -261,40 +268,42 @@
 		gpios = <&tlmm 13 0>,
 			<&tlmm 80 0>,
 			<&tlmm 79 0>;
-		qcom,gpio-reset = <1>;
-		qcom,gpio-vana = <2>;
-		qcom,gpio-req-tbl-num = <0 1 2>;
-		qcom,gpio-req-tbl-flags = <1 0 0>;
-		qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
 					"CAM_RESET0",
 					"CAM_VANA";
-		qcom,sensor-mode = <0>;
-		qcom,cci-master = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
 		status = "ok";
 		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
 		clock-names = "cam_clk";
-		qcom,clock-rates = <24000000>;
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
 	};
 
 	qcom,cam-sensor@1 {
 		cell-index = <1>;
 		compatible = "qcom,cam-sensor";
 		reg = <0x1>;
-		qcom,csiphy-sd-index = <1>;
-		qcom,sensor-position-roll = <90>;
-		qcom,sensor-position-pitch = <0>;
-		qcom,sensor-position-yaw = <180>;
-		qcom,eeprom-src = <&eeprom_rear_aux>;
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		eeprom-src = <&eeprom_rear_aux>;
 		cam_vdig-supply = <&camera_ldo>;
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_clk-supply = <&titan_top_gdsc>;
-		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
 			"cam_clk";
-		qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
-		qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
-		qcom,cam-vreg-op-mode = <105000 0 80000 0>;
-		qcom,gpio-no-mux = <0>;
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1050000 0 3312000 0>;
+		rgltr-max-voltage = <1050000 0 3600000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
+		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
 				&cam_sensor_rear2_active>;
@@ -303,42 +312,44 @@
 		gpios = <&tlmm 15 0>,
 			<&tlmm 9 0>,
 			<&tlmm 8 0>;
-		qcom,gpio-reset = <1>;
-		qcom,gpio-vana = <2>;
-		qcom,gpio-req-tbl-num = <0 1 2>;
-		qcom,gpio-req-tbl-flags = <1 0 0>;
-		qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
 					"CAM_RESET1",
 					"CAM_VANA1";
-		qcom,sensor-mode = <0>;
-		qcom,cci-master = <1>;
+		sensor-mode = <0>;
+		cci-master = <1>;
 		status = "ok";
 		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
 		clock-names = "cam_clk";
-		qcom,clock-rates = <24000000>;
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
 	};
 
 	qcom,cam-sensor@2 {
 		cell-index = <2>;
 		compatible = "qcom,cam-sensor";
 		reg = <0x02>;
-		qcom,csiphy-sd-index = <2>;
-		qcom,sensor-position-roll = <90>;
-		qcom,sensor-position-pitch = <0>;
-		qcom,sensor-position-yaw = <0>;
-		qcom,eeprom-src = <&eeprom_front>;
-		qcom,actuator-src = <&actuator_front>;
-		qcom,led-flash-src = <&led_flash_front>;
+		csiphy-sd-index = <2>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		eeprom-src = <&eeprom_front>;
+		actuator-src = <&actuator_front>;
+		led-flash-src = <&led_flash_front>;
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_ldo>;
 		cam_clk-supply = <&titan_top_gdsc>;
-		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 			"cam_clk";
-		qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
-		qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
-		qcom,cam-vreg-op-mode = <0 80000 105000 0>;
-		qcom,gpio-no-mux = <0>;
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk1_active
 				 &cam_sensor_front_active>;
@@ -347,18 +358,19 @@
 		gpios = <&tlmm 14 0>,
 			<&tlmm 28 0>,
 			<&tlmm 8 0>;
-		qcom,gpio-reset = <1>;
-		qcom,gpio-vana = <2>;
-		qcom,gpio-req-tbl-num = <0 1 2>;
-		qcom,gpio-req-tbl-flags = <1 0 0>;
-		qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
 					"CAM_RESET2",
 					"CAM_VANA1";
-		qcom,sensor-mode = <0>;
-		qcom,cci-master = <1>;
+		sensor-mode = <0>;
+		cci-master = <1>;
 		status = "ok";
 		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
 		clock-names = "cam_clk";
-		qcom,clock-rates = <24000000>;
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index a715025..5db4c35 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -16,9 +16,9 @@
 		cell-index = <0>;
 		reg = <0x00 0x00>;
 		compatible = "qcom,camera-flash";
-		qcom,flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
-		qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
-		qcom,switch-source = <&pmi8998_switch0>;
+		flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+		torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+		switch-source = <&pmi8998_switch0>;
 		status = "ok";
 	};
 
@@ -26,9 +26,9 @@
 		cell-index = <1>;
 		reg = <0x01 0x00>;
 		compatible = "qcom,camera-flash";
-		qcom,flash-source = <&pmi8998_flash2>;
-		qcom,torch-source = <&pmi8998_torch2>;
-		qcom,switch-source = <&pmi8998_switch1>;
+		flash-source = <&pmi8998_flash2>;
+		torch-source = <&pmi8998_torch2>;
+		switch-source = <&pmi8998_switch1>;
 		status = "ok";
 	};
 
@@ -78,36 +78,39 @@
 		cell-index = <0>;
 		reg = <0x0>;
 		compatible = "qcom,actuator";
-		qcom,cci-master = <0>;
+		cci-master = <0>;
 		cam_vaf-supply = <&actuator_regulator>;
-		qcom,cam-vreg-name = "cam_vaf";
-		qcom,cam-vreg-min-voltage = <2800000>;
-		qcom,cam-vreg-max-voltage = <2800000>;
-		qcom,cam-vreg-op-mode = <0>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
 	};
 
 	actuator_front: qcom,actuator@1 {
 		cell-index = <1>;
 		reg = <0x1>;
 		compatible = "qcom,actuator";
-		qcom,cci-master = <1>;
+		cci-master = <1>;
 		cam_vaf-supply = <&actuator_regulator>;
-		qcom,cam-vreg-name = "cam_vaf";
-		qcom,cam-vreg-min-voltage = <2800000>;
-		qcom,cam-vreg-max-voltage = <2800000>;
-		qcom,cam-vreg-op-mode = <0>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
 	};
 
 	ois_rear: qcom,ois@0 {
 		cell-index = <0>;
 		reg = <0x0>;
 		compatible = "qcom,ois";
-		qcom,cci-master = <0>;
+		cci-master = <0>;
 		cam_vaf-supply = <&actuator_regulator>;
-		qcom,cam-vreg-name = "cam_vaf";
-		qcom,cam-vreg-min-voltage = <2800000>;
-		qcom,cam-vreg-max-voltage = <2800000>;
-		qcom,cam-vreg-op-mode = <0>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
 		status = "disabled";
 	};
 
@@ -119,12 +122,13 @@
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_rear_ldo>;
 		cam_clk-supply = <&titan_top_gdsc>;
-		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 			"cam_clk";
-		qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
-		qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
-		qcom,cam-vreg-op-mode = <0 80000 105000 0>;
-		qcom,gpio-no-mux = <0>;
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_active
 				&cam_sensor_rear_active>;
@@ -134,22 +138,22 @@
 			<&tlmm 80 0>,
 			<&tlmm 79 0>,
 			<&tlmm 27 0>;
-		qcom,gpio-reset = <1>;
-		qcom,gpio-vana = <2>;
-		qcom,gpio-vaf = <3>;
-		qcom,gpio-req-tbl-num = <0 1 2 3>;
-		qcom,gpio-req-tbl-flags = <1 0 0 0>;
-		qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-vaf = <3>;
+		gpio-req-tbl-num = <0 1 2 3>;
+		gpio-req-tbl-flags = <1 0 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
 					"CAM_RESET0",
 					"CAM_VANA0",
 					"CAM_VAF";
-		qcom,sensor-position = <0>;
-		qcom,sensor-mode = <0>;
-		qcom,cci-master = <0>;
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
 		status = "ok";
 		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
 		clock-names = "cam_clk";
-		qcom,clock-rates = <24000000>;
+		clock-rates = <24000000>;
 	};
 
 	eeprom_rear_aux: qcom,eeprom@1 {
@@ -160,12 +164,13 @@
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_clk-supply = <&titan_top_gdsc>;
-		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
 			"cam_clk";
-		qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
-		qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
-		qcom,cam-vreg-op-mode = <105000 0 80000 0>;
-		qcom,gpio-no-mux = <0>;
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1050000 0 3312000 0>;
+		rgltr-max-voltage = <1050000 0 3600000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
+		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
 				 &cam_sensor_rear2_active>;
@@ -174,20 +179,20 @@
 		gpios = <&tlmm 15 0>,
 			<&tlmm 9 0>,
 			<&tlmm 8 0>;
-		qcom,gpio-reset = <1>;
-		qcom,gpio-vana = <2>;
-		qcom,gpio-req-tbl-num = <0 1 2>;
-		qcom,gpio-req-tbl-flags = <1 0 0>;
-		qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
 					"CAM_RESET1",
 					"CAM_VANA1";
-		qcom,sensor-position = <0>;
-		qcom,sensor-mode = <0>;
-		qcom,cci-master = <1>;
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <1>;
 		status = "ok";
 		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
 		clock-names = "cam_clk";
-		qcom,clock-rates = <24000000>;
+		clock-rates = <24000000>;
 	};
 
 	eeprom_front: qcom,eeprom@2 {
@@ -198,12 +203,13 @@
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_ldo>;
 		cam_clk-supply = <&titan_top_gdsc>;
-		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 			"cam_clk";
-		qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
-		qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
-		qcom,cam-vreg-op-mode = <0 80000 105000 0>;
-		qcom,gpio-no-mux = <0>;
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk1_active
 				 &cam_sensor_front_active>;
@@ -213,46 +219,47 @@
 			<&tlmm 28 0>,
 			<&tlmm 8 0>,
 			<&tlmm 27 0>;
-		qcom,gpio-reset = <1>;
-		qcom,gpio-vana = <2>;
-		qcom,gpio-vaf = <3>;
-		qcom,gpio-req-tbl-num = <0 1 2 3>;
-		qcom,gpio-req-tbl-flags = <1 0 0 0>;
-		qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-vaf = <3>;
+		gpio-req-tbl-num = <0 1 2 3>;
+		gpio-req-tbl-flags = <1 0 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
 					"CAM_RESET2",
 					"CAM_VANA2",
 					"CAM_VAF";
-		qcom,sensor-position = <1>;
-		qcom,sensor-mode = <0>;
-		qcom,cci-master = <1>;
+		sensor-position = <1>;
+		sensor-mode = <0>;
+		cci-master = <1>;
 		status = "ok";
 		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
 		clock-names = "cam_clk";
-		qcom,clock-rates = <24000000>;
+		clock-rates = <24000000>;
 	};
 
 	qcom,cam-sensor@0 {
 		cell-index = <0>;
 		compatible = "qcom,cam-sensor";
 		reg = <0x0>;
-		qcom,csiphy-sd-index = <0>;
-		qcom,sensor-position-roll = <90>;
-		qcom,sensor-position-pitch = <0>;
-		qcom,sensor-position-yaw = <180>;
-		qcom,led-flash-src = <&led_flash_rear>;
-		qcom,actuator-src = <&actuator_rear>;
-		qcom,ois-src = <&ois_rear>;
-		qcom,eeprom-src = <&eeprom_rear>;
+		csiphy-sd-index = <0>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		led-flash-src = <&led_flash_rear>;
+		actuator-src = <&actuator_rear>;
+		ois-src = <&ois_rear>;
+		eeprom-src = <&eeprom_rear>;
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_rear_ldo>;
 		cam_clk-supply = <&titan_top_gdsc>;
-		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 			"cam_clk";
-		qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
-		qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
-		qcom,cam-vreg-op-mode = <0 80000 105000 0>;
-		qcom,gpio-no-mux = <0>;
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_active
 				&cam_sensor_rear_active>;
@@ -261,40 +268,42 @@
 		gpios = <&tlmm 13 0>,
 			<&tlmm 80 0>,
 			<&tlmm 79 0>;
-		qcom,gpio-reset = <1>;
-		qcom,gpio-vana = <2>;
-		qcom,gpio-req-tbl-num = <0 1 2>;
-		qcom,gpio-req-tbl-flags = <1 0 0>;
-		qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
 					"CAM_RESET0",
 					"CAM_VANA";
-		qcom,sensor-mode = <0>;
-		qcom,cci-master = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
 		status = "ok";
 		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
 		clock-names = "cam_clk";
-		qcom,clock-rates = <24000000>;
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
 	};
 
 	qcom,cam-sensor@1 {
 		cell-index = <1>;
 		compatible = "qcom,cam-sensor";
 		reg = <0x1>;
-		qcom,csiphy-sd-index = <1>;
-		qcom,sensor-position-roll = <90>;
-		qcom,sensor-position-pitch = <0>;
-		qcom,sensor-position-yaw = <180>;
-		qcom,eeprom-src = <&eeprom_rear_aux>;
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		eeprom-src = <&eeprom_rear_aux>;
 		cam_vdig-supply = <&camera_ldo>;
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_clk-supply = <&titan_top_gdsc>;
-		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
 			"cam_clk";
-		qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
-		qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
-		qcom,cam-vreg-op-mode = <105000 0 80000 0>;
-		qcom,gpio-no-mux = <0>;
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1050000 0 3312000 0>;
+		rgltr-max-voltage = <1050000 0 3600000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
+		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
 				&cam_sensor_rear2_active>;
@@ -303,42 +312,44 @@
 		gpios = <&tlmm 15 0>,
 			<&tlmm 9 0>,
 			<&tlmm 8 0>;
-		qcom,gpio-reset = <1>;
-		qcom,gpio-vana = <2>;
-		qcom,gpio-req-tbl-num = <0 1 2>;
-		qcom,gpio-req-tbl-flags = <1 0 0>;
-		qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
 					"CAM_RESET1",
 					"CAM_VANA1";
-		qcom,sensor-mode = <0>;
-		qcom,cci-master = <1>;
+		sensor-mode = <0>;
+		cci-master = <1>;
 		status = "ok";
 		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
 		clock-names = "cam_clk";
-		qcom,clock-rates = <24000000>;
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
 	};
 
 	qcom,cam-sensor@2 {
 		cell-index = <2>;
 		compatible = "qcom,cam-sensor";
 		reg = <0x02>;
-		qcom,csiphy-sd-index = <2>;
-		qcom,sensor-position-roll = <90>;
-		qcom,sensor-position-pitch = <0>;
-		qcom,sensor-position-yaw = <0>;
-		qcom,eeprom-src = <&eeprom_front>;
-		qcom,actuator-src = <&actuator_front>;
-		qcom,led-flash-src = <&led_flash_front>;
+		csiphy-sd-index = <2>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		eeprom-src = <&eeprom_front>;
+		actuator-src = <&actuator_front>;
+		led-flash-src = <&led_flash_front>;
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_ldo>;
 		cam_clk-supply = <&titan_top_gdsc>;
-		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 			"cam_clk";
-		qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
-		qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
-		qcom,cam-vreg-op-mode = <0 80000 105000 0>;
-		qcom,gpio-no-mux = <0>;
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk1_active
 				 &cam_sensor_front_active>;
@@ -347,18 +358,19 @@
 		gpios = <&tlmm 14 0>,
 			<&tlmm 28 0>,
 			<&tlmm 8 0>;
-		qcom,gpio-reset = <1>;
-		qcom,gpio-vana = <2>;
-		qcom,gpio-req-tbl-num = <0 1 2>;
-		qcom,gpio-req-tbl-flags = <1 0 0>;
-		qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
 					"CAM_RESET2",
 					"CAM_VANA1";
-		qcom,sensor-mode = <0>;
-		qcom,cci-master = <1>;
+		sensor-mode = <0>;
+		cci-master = <1>;
 		status = "ok";
 		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
 		clock-names = "cam_clk";
-		qcom,clock-rates = <24000000>;
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index 91b8738..cbd495a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -22,12 +22,13 @@
 		compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
 		reg = <0x0ac65000 0x1000>;
 		reg-names = "csiphy";
+		reg-cam-base = <0x65000>;
 		interrupts = <0 477 0>;
 		interrupt-names = "csiphy";
 		gdscr-supply = <&titan_top_gdsc>;
-		qcom,cam-vreg-name = "gdscr";
-		qcom,csi-vdd-voltage = <1200000>;
-		qcom,mipi-csi-vdd-supply = <&pm8998_l26>;
+		regulator-names = "gdscr";
+		csi-vdd-voltage = <1200000>;
+		mipi-csi-vdd-supply = <&pm8998_l26>;
 		clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
 			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
@@ -48,8 +49,9 @@
 			"csi0phytimer_clk",
 			"ife_0_csid_clk",
 			"ife_0_csid_clk_src";
-		qcom,clock-rates =
-			<0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+		clock-cntl-level = "turbo";
+		clock-rates =
+			<0 0 0 0 320000000 0 269333333 0 0 384000000>;
 		status = "ok";
 	};
 
@@ -58,12 +60,13 @@
 		compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
 		reg = <0xac66000 0x1000>;
 		reg-names = "csiphy";
+		reg-cam-base = <0x66000>;
 		interrupts = <0 478 0>;
 		interrupt-names = "csiphy";
 		gdscr-supply = <&titan_top_gdsc>;
-		qcom,cam-vreg-name = "gdscr";
-		qcom,csi-vdd-voltage = <1200000>;
-		qcom,mipi-csi-vdd-supply = <&pm8998_l26>;
+		regulator-names = "gdscr";
+		csi-vdd-voltage = <1200000>;
+		mipi-csi-vdd-supply = <&pm8998_l26>;
 		clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
 			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
@@ -84,8 +87,9 @@
 			"csi1phytimer_clk",
 			"ife_1_csid_clk",
 			"ife_1_csid_clk_src";
-		qcom,clock-rates =
-			<0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+		clock-cntl-level = "turbo";
+		clock-rates =
+			<0 0 0 0 320000000 0 269333333 0 0 384000000>;
 
 		status = "ok";
 	};
@@ -95,12 +99,13 @@
 		compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
 		reg = <0xac67000 0x1000>;
 		reg-names = "csiphy";
+		reg-cam-base = <0x67000>;
 		interrupts = <0 479 0>;
 		interrupt-names = "csiphy";
 		gdscr-supply = <&titan_top_gdsc>;
-		qcom,cam-vreg-name = "gdscr";
-		qcom,csi-vdd-voltage = <1200000>;
-		qcom,mipi-csi-vdd-supply = <&pm8998_l26>;
+		regulator-names = "gdscr";
+		csi-vdd-voltage = <1200000>;
+		mipi-csi-vdd-supply = <&pm8998_l26>;
 		clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
 			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
@@ -121,23 +126,25 @@
 			"csi2phytimer_clk",
 			"ife_lite_csid_clk",
 			"ife_lite_csid_clk_src";
-		qcom,clock-rates =
-			<0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+		clock-cntl-level = "turbo";
+		clock-rates =
+			<0 0 0 0 320000000 0 269333333 0 0 384000000>;
 		status = "ok";
 	};
 
 	cam_cci: qcom,cci@ac4a000 {
 		cell-index = <0>;
 		compatible = "qcom,cci";
-		reg = <0xac4a000 0x4000>;
 		#address-cells = <1>;
 		#size-cells = <0>;
+		reg = <0xac4a000 0x4000>;
 		reg-names = "cci";
-		interrupts = <0 460 0>;
+		reg-cam-base = <0x4a000>;
 		interrupt-names = "cci";
+		interrupts = <0 460 0>;
 		status = "ok";
 		gdscr-supply = <&titan_top_gdsc>;
-		qcom,cam-vreg-name = "gdscr";
+		regulator-names = "gdscr";
 		clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
 			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
@@ -150,17 +157,19 @@
 			"cpas_ahb_clk",
 			"cci_clk",
 			"cci_clk_src";
-		qcom,clock-rates = <0 0 80000000 0 0 37500000>;
-		pinctrl-names = "cci_default", "cci_suspend";
+		src-clock-name = "cci_clk_src";
+		clock-cntl-level = "turbo";
+		clock-rates = <0 0 0 0 0 37500000>;
+		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cci0_active &cci1_active>;
 		pinctrl-1 = <&cci0_suspend &cci1_suspend>;
 		gpios = <&tlmm 17 0>,
 			<&tlmm 18 0>,
 			<&tlmm 19 0>,
 			<&tlmm 20 0>;
-		qcom,gpio-tbl-num = <0 1 2 3>;
-		qcom,gpio-tbl-flags = <1 1 1 1>;
-		qcom,gpio-tbl-label = "CCI_I2C_DATA0",
+		gpio-req-tbl-num = <0 1 2 3>;
+		gpio-req-tbl-flags = <1 1 1 1>;
+		gpio-req-tbl-label = "CCI_I2C_DATA0",
 					"CCI_I2C_CLK0",
 					"CCI_I2C_DATA1",
 					"CCI_I2C_CLK1";
@@ -353,14 +362,28 @@
 			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
 		src-clock-name = "slow_ahb_clk_src";
-		clock-rates = <0 0 0 80000000 0 0>;
+		clock-rates = <0 0 0 0 0 0>,
+			<0 0 0 19200000 0 0>,
+			<0 0 0 60000000 0 0>,
+			<0 0 0 66660000 0 0>,
+			<0 0 0 73840000 0 0>,
+			<0 0 0 80000000 0 0>,
+			<0 0 0 80000000 0 0>;
+		clock-cntl-level = "suspend", "minsvs", "lowsvs", "svs",
+			"svs_l1", "nominal", "turbo";
 		qcom,msm-bus,name = "cam_ahb";
-		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-cases = <7>;
 		qcom,msm-bus,num-paths = <1>;
 		qcom,msm-bus,vectors-KBps =
 			<MSM_BUS_MASTER_AMPSS_M0
 			MSM_BUS_SLAVE_CAMERA_CFG 0 0>,
 			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 180000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 180000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
+			<MSM_BUS_MASTER_AMPSS_M0
 			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
 			<MSM_BUS_MASTER_AMPSS_M0
 			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>,
@@ -378,7 +401,7 @@
 			RPMH_REGULATOR_LEVEL_TURBO
 			RPMH_REGULATOR_LEVEL_TURBO_L1>;
 		vdd-corner-ahb-mapping = "suspend", "suspend",
-			"svs", "svs", "svs", "svs",
+			"minsvs", "lowsvs", "svs", "svs_l1",
 			"nominal", "nominal", "nominal",
 			"turbo", "turbo";
 		client-id-based;
@@ -508,6 +531,7 @@
 			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
 		clock-rates = <0 0 0 0 0>;
+		clock-cntl-level = "svs";
 		cdm-client-names = "ife";
 		status = "ok";
 	};
@@ -555,7 +579,8 @@
 			<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 			<&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
-		clock-rates = <0 0 0 0 0 0 500000000 0 0 0 60000000 0 0>;
+		clock-rates = <0 0 0 0 0 0 500000000 0 0 0 600000000 0 0>;
+		clock-cntl-level = "turbo";
 		src-clock-name = "ife_csid_clk_src";
 		status = "ok";
 	};
@@ -590,6 +615,7 @@
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 			<&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
 		clock-rates = <0 0 0 0 0 0 600000000 0 0>;
+		clock-cntl-level = "turbo";
 		src-clock-name = "ife_clk_src";
 		clock-names-option =  "ife_dsp_clk";
 		clocks-option = <&clock_camcc CAM_CC_IFE_0_DSP_CLK>;
@@ -634,7 +660,8 @@
 			<&clock_camcc CAM_CC_IFE_1_CLK_SRC>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 			<&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
-		clock-rates = <0 0 0 0 0 0 500000000 0 0 0 60000000 0 0>;
+		clock-rates = <0 0 0 0 0 0 500000000 0 0 0 600000000 0 0>;
+		clock-cntl-level = "turbo";
 		src-clock-name = "ife_csid_clk_src";
 		status = "ok";
 	};
@@ -669,6 +696,7 @@
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 			<&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
 		clock-rates = <0 0 0 0 0 0 600000000 0 0>;
+		clock-cntl-level = "turbo";
 		src-clock-name = "ife_clk_src";
 		clock-names-option =  "ife_dsp_clk";
 		clocks-option = <&clock_camcc CAM_CC_IFE_1_DSP_CLK>;
@@ -710,7 +738,8 @@
 			<&clock_camcc CAM_CC_IFE_LITE_CLK>,
 			<&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
-		clock-rates = <0 0 0 0 0 0 384000000 0 0 0 40400000 0>;
+		clock-rates = <0 0 0 0 0 0 384000000 0 0 0 404000000 0>;
+		clock-cntl-level = "turbo";
 		src-clock-name = "ife_csid_clk_src";
 		status = "ok";
 	};
@@ -741,7 +770,8 @@
 			<&clock_camcc CAM_CC_IFE_LITE_CLK>,
 			<&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
-		qcom,clock-rates = <0 0 0 0 0 0 404000000 0>;
+		clock-rates = <0 0 0 0 0 0 404000000 0>;
+		clock-cntl-level = "turbo";
 		src-clock-name = "ife_clk_src";
 		status = "ok";
 	};
@@ -790,6 +820,7 @@
 				<&clock_camcc CAM_CC_ICP_CLK_SRC>;
 
 		clock-rates = <0 0 400000000 0 0 0 0 0 600000000>;
+		clock-cntl-level = "turbo";
 		fw_name = "CAMERA_ICP.elf";
 		status = "ok";
 	};
@@ -811,6 +842,7 @@
 				<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
 
 		clock-rates = <0 0 0 0 600000000>;
+		clock-cntl-level = "turbo";
 		status = "ok";
 	};
 
@@ -831,6 +863,7 @@
 				<&clock_camcc CAM_CC_IPE_1_CLK_SRC>;
 
 		clock-rates = <0 0 0 0 600000000>;
+		clock-cntl-level = "turbo";
 		status = "ok";
 	};
 
@@ -851,6 +884,7 @@
 				<&clock_camcc CAM_CC_BPS_CLK_SRC>;
 
 		clock-rates = <0 0 0 0 600000000>;
+		clock-cntl-level = "turbo";
 		status = "ok";
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp-audio-overlay.dtsi
new file mode 100644
index 0000000..68f2e51
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp-audio-overlay.dtsi
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm845-audio-overlay.dtsi"
+
+&soc {
+	sound-tavil {
+		qcom,us-euro-gpios = <&tavil_us_euro_sw>;
+	};
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
index 7d7c9cf..4747c99 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
@@ -19,6 +19,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm845-cdp.dtsi"
+#include "sdm845-cdp-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM845 v1 CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index 0430ea4..dee2ec2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -37,10 +37,6 @@
 };
 
 &soc {
-	sound-tavil {
-		qcom,us-euro-gpios = <&tavil_us_euro_sw>;
-	};
-
 	gpio_keys {
 		compatible = "gpio-keys";
 		label = "gpio-keys";
@@ -300,7 +296,7 @@
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
 
-&dsi_dual_nt35597_truly_cmd_display {
+&dsi_nt35597_truly_dsc_cmd_display {
 	qcom,dsi-display-active;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index 04a332e..d2189a7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -245,6 +245,8 @@
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
 
+		qcom,msr-fix-req;
+
 		port {
 			tpdm_swao1_out_tpda_swao: endpoint {
 				remote-endpoint = <&tpda_swao_in_tpdm_swao1>;
@@ -819,6 +821,8 @@
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
 
+		qcom,msr-fix-req;
+
 		port {
 			tpdm_lpass_out_funnel_lpass: endpoint {
 				remote-endpoint = <&funnel_lpass_in_tpdm_lpass>;
@@ -837,6 +841,8 @@
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
 
+		qcom,msr-fix-req;
+
 		port {
 			tpdm_center_out_tpda: endpoint {
 				remote-endpoint = <&tpda_in_tpdm_center>;
@@ -855,6 +861,8 @@
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
 
+		qcom,msr-fix-req;
+
 		port {
 			tpdm_north_out_tpda: endpoint {
 				remote-endpoint = <&tpda_in_tpdm_north>;
@@ -1090,6 +1098,8 @@
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
 
+		qcom,msr-fix-req;
+
 		port {
 			tpdm_mm_out_funnel_dl_mm: endpoint {
 				remote-endpoint = <&funnel_dl_mm_in_tpdm_mm>;
@@ -1181,6 +1191,8 @@
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
 
+		qcom,msr-fix-req;
+
 		port {
 			tpdm_turing_out_funnel_turing: endpoint {
 				remote-endpoint =
@@ -1235,6 +1247,8 @@
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
 
+		qcom,msr-fix-req;
+
 		port {
 			tpdm_ddr_out_funnel_ddr_0: endpoint {
 				remote-endpoint = <&funnel_ddr_0_in_tpdm_ddr>;
@@ -1376,7 +1390,6 @@
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
-		qcom,msr-fix-req;
 
 		port{
 			tpdm_spss_out_tpda_spss: endpoint {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
index 2579819..829dfcc 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -33,10 +33,10 @@
 			qcom,ion-heap-type = "DMA";
 		};
 
-		qcom,ion-heap@13 { /* SPSS HEAP */
+		qcom,ion-heap@13 { /* SECURE SPSS HEAP */
 			reg = <13>;
-			memory-region = <&sp_mem>;
-			qcom,ion-heap-type = "DMA";
+			memory-region = <&secure_sp_mem>;
+			qcom,ion-heap-type = "HYP_CMA";
 		};
 
 		qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
index e299744..52c0f05 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
@@ -19,6 +19,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm845-mtp.dtsi"
+#include "sdm845-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM845 v1 MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index c3217e7..fb31b05 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -150,7 +150,7 @@
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
 
-&dsi_dual_nt35597_truly_cmd_display {
+&dsi_nt35597_truly_dsc_cmd_display {
 	qcom,dsi-display-active;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index dc58f9c..2a7b6d1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -2796,6 +2796,66 @@
 				bias-disable;
 			};
 		};
+
+		tsif0_signals_active: tsif0_signals_active {
+			tsif1_clk {
+				pins = "gpio89"; /* TSIF0 CLK */
+				function = "tsif1_clk";
+			};
+			tsif1_en {
+				pins = "gpio90"; /* TSIF0 Enable */
+				function = "tsif1_en";
+			};
+			tsif1_data {
+				pins = "gpio91"; /* TSIF0 DATA */
+				function = "tsif1_data";
+			};
+			signals_cfg {
+				pins = "gpio89", "gpio90", "gpio91";
+				drive-strength = <2>;	/* 2 mA */
+				bias-pull-down;		/* pull down */
+			};
+		};
+
+		/* sync signal is only used if configured to mode-2 */
+		tsif0_sync_active: tsif0_sync_active {
+			tsif1_sync {
+				pins = "gpio12";	/* TSIF0 SYNC */
+				function = "tsif1_sync";
+				drive-strength = <2>;	/* 2 mA */
+				bias-pull-down;		/* pull down */
+			};
+		};
+
+		tsif1_signals_active: tsif1_signals_active {
+			tsif2_clk {
+				pins = "gpio93"; /* TSIF1 CLK */
+				function = "tsif2_clk";
+			};
+			tsif2_en {
+				pins = "gpio94"; /* TSIF1 Enable */
+				function = "tsif2_en";
+			};
+			tsif2_data {
+				pins = "gpio95"; /* TSIF1 DATA */
+				function = "tsif2_data";
+			};
+			signals_cfg {
+				pins = "gpio93", "gpio94", "gpio95";
+				drive-strength = <2>;	/* 2 mA */
+				bias-pull-down;		/* pull down */
+			};
+		};
+
+		/* sync signal is only used if configured to mode-2 */
+		tsif1_sync_active: tsif1_sync_active {
+			tsif2_sync {
+				pins = "gpio96";	/* TSIF1 SYNC */
+				function = "tsif2_sync";
+				drive-strength = <2>;	/* 2 mA */
+				bias-pull-down;		/* pull down */
+			};
+		};
 	};
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
index 6806145..6215771 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
@@ -23,8 +23,6 @@
 			#size-cells = <0>;
 			label = "L3";
 			qcom,spm-device-names = "L3";
-			qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 &CPU5 &CPU6
-				&CPU7>;
 			qcom,psci-mode-shift = <4>;
 			qcom,psci-mode-mask = <0xfff>;
 
@@ -86,12 +84,64 @@
 				qcom,is-reset;
 				qcom,notify-rpm;
 			};
-
-			qcom,pm-cpu {
+			qcom,pm-cpu@0 {
 				#address-cells = <1>;
 				#size-cells = <0>;
 				qcom,psci-mode-shift = <0>;
 				qcom,psci-mode-mask = <0xf>;
+				qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
+
+				qcom,pm-cpu-level@0 { /* C1 */
+					reg = <0>;
+					qcom,spm-cpu-mode = "wfi";
+					qcom,psci-cpu-mode = <0x1>;
+					qcom,latency-us = <43>;
+					qcom,ss-power = <454>;
+					qcom,energy-overhead = <38639>;
+					qcom,time-overhead = <83>;
+				};
+
+				qcom,pm-cpu-level@1 { /* C2D */
+					reg = <1>;
+					qcom,psci-cpu-mode = <0x2>;
+					qcom,spm-cpu-mode = "ret";
+					qcom,latency-us = <86>;
+					qcom,ss-power = <449>;
+					qcom,energy-overhead = <78456>;
+					qcom,time-overhead = <167>;
+				};
+
+				qcom,pm-cpu-level@2 {  /* C3 */
+					reg = <2>;
+					qcom,spm-cpu-mode = "pc";
+					qcom,psci-cpu-mode = <0x3>;
+					qcom,latency-us = <612>;
+					qcom,ss-power = <436>;
+					qcom,energy-overhead = <418225>;
+					qcom,time-overhead = <885>;
+					qcom,is-reset;
+					qcom,use-broadcast-timer;
+				};
+
+				qcom,pm-cpu-level@3 {  /* C4 */
+					reg = <3>;
+					qcom,spm-cpu-mode = "rail-pc";
+					qcom,psci-cpu-mode = <0x4>;
+					qcom,latency-us = <700>;
+					qcom,ss-power = <400>;
+					qcom,energy-overhead = <428225>;
+					qcom,time-overhead = <1000>;
+					qcom,is-reset;
+					qcom,use-broadcast-timer;
+				};
+			};
+
+			qcom,pm-cpu@1 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				qcom,psci-mode-shift = <0>;
+				qcom,psci-mode-mask = <0xf>;
+				qcom,cpu = <&CPU4 &CPU5 &CPU6 &CPU7>;
 
 				qcom,pm-cpu-level@0 { /* C1 */
 					reg = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd-audio-overlay.dtsi
new file mode 100644
index 0000000..2ee9031
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd-audio-overlay.dtsi
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm845-audio-overlay.dtsi"
+
+&soc {
+	sound-tavil {
+		qcom,wsa-max-devs = <1>;
+		qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
+		qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight";
+
+		qcom,msm-mbhc-usbc-audio-supported = <1>;
+
+		qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&wcd_usbc_analog_en2_active>;
+		pinctrl-1 = <&wcd_usbc_analog_en2_idle>;
+	};
+};
+
+&wcd934x_cdc {
+	wcd_pinctrl@5 {
+		us_euro_sw_wcd_active {
+			mux {
+				pins = "gpio1";
+			};
+
+			config {
+				pins = "gpio1";
+				/delete-property/ output-high;
+				bias-high-impedance;
+			};
+		};
+
+		us_euro_sw_wcd_sleep {
+			mux {
+				pins = "gpio1";
+			};
+
+			config {
+				pins = "gpio1";
+				/delete-property/ output-low;
+				bias-high-impedance;
+			};
+		};
+	};
+
+	swr_master {
+		wsa881x@20170211 {
+			compatible = "qcom,wsa881x";
+			reg = <0x00 0x20170211>;
+			qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
+		};
+
+		wsa881x@21170213 {
+			compatible = "qcom,wsa881x";
+			reg = <0x00 0x21170213>;
+			qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
+		};
+	};
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts
new file mode 100644
index 0000000..5729d76
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts
@@ -0,0 +1,29 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-qrd.dtsi"
+#include "sdm845-qrd-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM845 v1 QRD";
+	compatible = "qcom,sdm845-qrd", "qcom,sdm845", "qcom,qrd";
+	qcom,msm-id = <321 0x0>;
+	qcom,board-id = <11 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index f14293b..7982625 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -40,14 +40,6 @@
 		#include "fg-gen3-batterydata-itech-3000mah.dtsi"
 		#include "fg-gen3-batterydata-ascent-3450mah.dtsi"
 	};
-
-	aliases {
-		serial0 = &qupv3_se9_2uart;
-		spi0 = &qupv3_se8_spi;
-		i2c0 = &qupv3_se10_i2c;
-		i2c1 = &qupv3_se3_i2c;
-		hsuart0 = &qupv3_se6_4uart;
-	};
 };
 
 &qupv3_se9_2uart {
@@ -102,49 +94,6 @@
 	#cooling-cells = <2>;
 };
 
-&soc {
-	sound-tavil {
-		qcom,wsa-max-devs = <1>;
-		qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
-		qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight";
-
-		qcom,msm-mbhc-usbc-audio-supported = <1>;
-
-		qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
-		pinctrl-names = "aud_active", "aud_sleep";
-		pinctrl-0 = <&wcd_usbc_analog_en2_active>;
-		pinctrl-1 = <&wcd_usbc_analog_en2_idle>;
-	};
-};
-
-&wcd934x_cdc {
-	wcd: wcd_pinctrl@5 {
-		us_euro_sw_wcd_active: us_euro_sw_wcd_active {
-			mux {
-				pins = "gpio1";
-			};
-
-			config {
-				pins = "gpio1";
-				/delete-property/ output-high;
-				bias-high-impedance;
-			};
-		};
-
-		us_euro_sw_wcd_sleep: us_euro_sw_wcd_sleep {
-			mux {
-				pins = "gpio1";
-			};
-
-			config {
-				pins = "gpio1";
-				/delete-property/ output-low;
-				bias-high-impedance;
-			};
-		};
-	};
-};
-
 &ufsphy_mem {
 	compatible = "qcom,ufs-phy-qmp-v3";
 
@@ -256,6 +205,18 @@
 	qcom,mdss-dsi-panel-orientation = "180";
 };
 
+&dsi_sharp_4k_dsc_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+	qcom,mdss-dsi-panel-orientation = "180";
+};
+
 &dsi_sharp_4k_dsc_video_display {
 	qcom,dsi-display-active;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index 7befe3b..b826768 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -131,19 +131,19 @@
 				qcom,cpr-voltage-floor =
 					/* Speed bin 0 */
 					<568000  568000  568000  568000  568000
-					 568000  568000  568000  568000  584000
-					 584000  584000  632000  632000  632000
-					 632000  672000  996000  996000>,
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  996000  996000>,
 					/* Speed bin 1 */
 					<568000  568000  568000  568000  568000
-					 568000  568000  568000  568000  584000
-					 584000  584000  632000  632000  632000
-					 632000  672000  712000  712000>,
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000>,
 					/* Speed bin 2 */
 					<568000  568000  568000  568000  568000
-					 568000  568000  568000  568000  584000
-					 584000  584000  632000  632000  632000
-					 632000  672000  712000  712000>;
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
 					<32000  32000  32000  32000  32000
@@ -192,10 +192,62 @@
 					 1950 2632>;
 
 				qcom,cpr-open-loop-voltage-fuse-adjustment =
-					<100000 100000 100000 100000>;
+					/* Speed bin 0 */
+					<100000 100000 100000 100000>,
+					<     0      0      0 100000>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					/* Speed bin 1 */
+					<100000 100000 100000 100000>,
+					<     0      0      0 100000>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					/* Speed bin 2 */
+					<100000 100000 100000 100000>,
+					<     0      0      0 100000>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>;
 
 				qcom,cpr-closed-loop-voltage-fuse-adjustment =
-					<100000 100000 100000 100000>;
+					/* Speed bin 0 */
+					<100000 100000 100000 100000>,
+					<     0      0      0 100000>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					/* Speed bin 1 */
+					<100000 100000 100000 100000>,
+					<     0      0      0 100000>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					/* Speed bin 2 */
+					<100000 100000 100000 100000>,
+					<     0      0      0 100000>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>,
+					<     0      0      0      0>;
 
 				qcom,allow-voltage-interpolation;
 				qcom,allow-quotient-interpolation;
@@ -265,16 +317,16 @@
 				qcom,cpr-voltage-floor =
 					/* Speed bin 0 */
 					<568000  568000  568000  568000  568000
-					 584000  584000  632000  672000  996000
+					 568000  568000  568000  568000  996000
 					 996000>,
 					/* Speed bin 1 */
 					<568000  568000  568000  568000  568000
-					 584000  584000  632000  672000  712000
-					 712000>,
+					 568000  568000  568000  568000  568000
+					 568000>,
 					/* Speed bin 2 */
 					<568000  568000  568000  568000  568000
-					 584000  584000  632000  672000  712000
-					 712000  712000  712000>;
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
 					/* Speed bin 0 */
@@ -323,10 +375,62 @@
 					 2501 2095>;
 
 				qcom,cpr-open-loop-voltage-fuse-adjustment =
-					<100000 100000 100000 100000>;
+					/* Speed bin 0 */
+					<100000 100000 100000 100000>,
+					<     0  24000   4000 100000>,
+					<     0  24000   4000      0>,
+					<     0  24000   4000      0>,
+					<     0  24000   4000      0>,
+					<     0  24000   4000      0>,
+					<     0  24000   4000      0>,
+					<     0  24000   4000      0>,
+					/* Speed bin 1 */
+					<100000 100000 100000 100000>,
+					<     0  24000   4000 100000>,
+					<     0  24000   4000  20000>,
+					<     0  24000   4000  20000>,
+					<     0  24000   4000  20000>,
+					<     0  24000   4000  20000>,
+					<     0  24000   4000  20000>,
+					<     0  24000   4000  20000>,
+					/* Speed bin 2 */
+					<100000 100000 100000 100000>,
+					<     0  24000   4000 100000>,
+					<     0  24000   4000  40000>,
+					<     0  24000   4000  40000>,
+					<     0  24000   4000  40000>,
+					<     0  24000   4000  40000>,
+					<     0  24000   4000  40000>,
+					<     0  24000   4000  40000>;
 
 				qcom,cpr-closed-loop-voltage-fuse-adjustment =
-					<100000 100000 100000 100000>;
+					/* Speed bin 0 */
+					<100000 100000 100000 100000>,
+					<     0  29000   6000 100000>,
+					<     0  29000   6000      0>,
+					<     0  29000   6000      0>,
+					<     0  29000   6000      0>,
+					<     0  29000   6000      0>,
+					<     0  29000   6000      0>,
+					<     0  29000   6000      0>,
+					/* Speed bin 1 */
+					<100000 100000 100000 100000>,
+					<     0  29000   6000 100000>,
+					<     0  29000   6000  20000>,
+					<     0  29000   6000  20000>,
+					<     0  29000   6000  20000>,
+					<     0  29000   6000  20000>,
+					<     0  29000   6000  20000>,
+					<     0  29000   6000  20000>,
+					/* Speed bin 2 */
+					<100000 100000 100000 100000>,
+					<     0  29000   6000 100000>,
+					<     0  29000   6000  40000>,
+					<     0  29000   6000  40000>,
+					<     0  29000   6000  40000>,
+					<     0  29000   6000  40000>,
+					<     0  29000   6000  40000>,
+					<     0  29000   6000  40000>;
 
 				qcom,allow-voltage-interpolation;
 				qcom,allow-quotient-interpolation;
@@ -453,22 +557,22 @@
 					/* Speed bin 0 */
 					<568000  568000  568000  568000  568000
 					 568000  568000  568000  568000  568000
-					 584000  584000  632000  632000  632000
-					 632000  632000  672000  712000  712000
-					 772000  772000>,
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000>,
 					/* Speed bin 1 */
 					<568000  568000  568000  568000  568000
 					 568000  568000  568000  568000  568000
-					 584000  584000  632000  632000  632000
-					 632000  632000  672000  712000  712000
-					 772000  772000  772000  772000>,
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000>,
 					/* Speed bin 2 */
 					<568000  568000  568000  568000  568000
 					 568000  568000  568000  568000  568000
-					 584000  584000  632000  632000  632000
-					 632000  632000  672000  712000  712000
-					 772000  772000  772000  772000
-					 772000>;
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000
+					 568000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
 					/* Speed bin 0 */
@@ -622,6 +726,13 @@
 			regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
 			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
 		};
+
+		ebi_cdev: regulator-cdev {
+			compatible = "qcom,rpmh-reg-cdev";
+			mboxes = <&qmp_aop 0>;
+			qcom,reg-resource-name = "ebi";
+			#cooling-cells = <2>;
+		};
 	};
 
 	rpmh-regulator-smpa2 {
@@ -718,6 +829,13 @@
 			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
 			qcom,min-dropout-voltage-level = <(-1)>;
 		};
+
+		cx_cdev: regulator-cdev {
+			compatible = "qcom,rpmh-reg-cdev";
+			mboxes = <&qmp_aop 0>;
+			qcom,reg-resource-name = "cx";
+			#cooling-cells = <2>;
+		};
 	};
 
 	rpmh-regulator-ldoa1 {
@@ -786,6 +904,13 @@
 			regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
 			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
 		};
+
+		mx_cdev: regulator-cdev {
+			compatible = "qcom,rpmh-reg-cdev";
+			mboxes = <&qmp_aop 0>;
+			qcom,reg-resource-name = "mx";
+			#cooling-cells = <2>;
+		};
 	};
 
 	rpmh-regulator-ldoa5 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 726a63f..bde64b9 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -399,19 +399,20 @@
 			 <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
 			 <&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
 			 <&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
-			 <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
 			 <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
 			 <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
 			 <&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>;
 		clock-names = "core_aux_clk", "core_usb_ref_clk_src",
 			"core_usb_ref_clk", "core_usb_cfg_ahb_clk",
 			"core_usb_pipe_clk", "ctrl_link_clk",
-			"ctrl_link_iface_clk", "ctrl_crypto_clk",
-			"ctrl_pixel_clk", "pixel_clk_rcg", "pixel_parent";
+			"ctrl_link_iface_clk", "ctrl_pixel_clk",
+			"crypto_clk", "pixel_clk_rcg", "pixel_parent";
 
 		qcom,dp-usbpd-detection = <&pmi8998_pdphy>;
 
 		qcom,aux-cfg-settings = [00 13 04 00 0a 26 0a 03 bb 03];
+		qcom,max-pclk-frequency-khz = <576000>;
 
 		qcom,core-supply-entries {
 			#address-cells = <1>;
@@ -545,22 +546,34 @@
 	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
 	qcom,mdss-dsi-t-clk-post = <0x0d>;
 	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,display-topology = <1 0 1>,
+				<2 0 1>;
+	qcom,default-topology-index = <0>;
 };
 
 &dsi_dual_sim_vid {
 	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
 	qcom,mdss-dsi-t-clk-post = <0x0d>;
 	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,display-topology = <2 0 2>,
+				<1 0 2>;
+	qcom,default-topology-index = <0>;
 };
 
 &dsi_sim_cmd {
 	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
 	qcom,mdss-dsi-t-clk-post = <0x0d>;
 	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,display-topology = <1 0 1>,
+				<2 0 1>;
+	qcom,default-topology-index = <0>;
 };
 
 &dsi_dual_sim_cmd {
 	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
 	qcom,mdss-dsi-t-clk-post = <0x0d>;
 	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,display-topology = <2 0 2>,
+				<1 0 2>;
+	qcom,default-topology-index = <0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 2ae3832..3153e66 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -82,7 +82,7 @@
 		qcom,sde-dsc-off = <0x81000 0x81400 0x81800 0x81c00>;
 		qcom,sde-dsc-size = <0x140>;
 
-		qcom,sde-dither-off = <0x30e0 0x30e0 0x30e0 0x30e0>;
+		qcom,sde-dither-off = <0x30e0 0x30e0 0x30e0 0x30e0 0x0>;
 		qcom,sde-dither-version = <0x00010000>;
 		qcom,sde-dither-size = <0x20>;
 
@@ -207,15 +207,41 @@
 			};
 		};
 
+		smmu_sde_sec: qcom,smmu_sde_sec_cb {
+			compatible = "qcom,smmu_sde_sec";
+			iommus = <&apps_smmu 0x881 0x8>,
+			       <&apps_smmu 0xc81 0x8>;
+		};
+
 		/* data and reg bus scale settings */
 		qcom,sde-data-bus {
-			qcom,msm-bus,name = "mdss_sde";
+			qcom,msm-bus,name = "mdss_sde_mnoc";
 			qcom,msm-bus,num-cases = <3>;
 			qcom,msm-bus,num-paths = <2>;
 			qcom,msm-bus,vectors-KBps =
-				<22 512 0 0>, <23 512 0 0>,
-				<22 512 0 6400000>, <23 512 0 6400000>,
-				<22 512 0 6400000>, <23 512 0 6400000>;
+			    <22 773 0 0>, <23 773 0 0>,
+			    <22 773 0 6400000>, <23 773 0 6400000>,
+			    <22 773 0 6400000>, <23 773 0 6400000>;
+		};
+
+		qcom,sde-llcc-bus {
+			qcom,msm-bus,name = "mdss_sde_llcc";
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+			    <132 770 0 0>,
+			    <132 770 0 6400000>,
+			    <132 770 0 6400000>;
+		};
+
+		qcom,sde-ebi-bus {
+			qcom,msm-bus,name = "mdss_sde_ebi";
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+			    <129 512 0 0>,
+			    <129 512 0 6400000>,
+			    <129 512 0 6400000>;
 		};
 
 		qcom,sde-reg-bus {
@@ -252,14 +278,36 @@
 
 		/* data and reg bus scale settings */
 		qcom,sde-data-bus {
-			qcom,msm-bus,name = "disp_rsc";
+			qcom,msm-bus,name = "disp_rsc_mnoc";
 			qcom,msm-bus,active-only;
 			qcom,msm-bus,num-cases = <3>;
 			qcom,msm-bus,num-paths = <2>;
 			qcom,msm-bus,vectors-KBps =
-			    <20003 20512 0 0>, <20004 20512 0 0>,
-			    <20003 20512 0 6400000>, <20004 20512 0 6400000>,
-			    <20003 20512 0 6400000>, <20004 20512 0 6400000>;
+			    <20003 20515 0 0>, <20004 20515 0 0>,
+			    <20003 20515 0 6400000>, <20004 20515 0 6400000>,
+			    <20003 20515 0 6400000>, <20004 20515 0 6400000>;
+		};
+
+		qcom,sde-llcc-bus {
+			qcom,msm-bus,name = "disp_rsc_llcc";
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+			    <20001 20513 0 0>,
+			    <20001 20513 0 6400000>,
+			    <20001 20513 0 6400000>;
+		};
+
+		qcom,sde-ebi-bus {
+			qcom,msm-bus,name = "disp_rsc_ebi";
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+			    <20000 20512 0 0>,
+			    <20000 20512 0 6400000>,
+			    <20000 20512 0 6400000>;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index bf72741..95ee14c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -36,3 +36,8 @@
 &clock_videocc {
 	compatible = "qcom,video_cc-sdm845-v2";
 };
+
+&msm_vidc {
+	qcom,allowed-clock-rates = <100000000 200000000 330000000
+		404000000 444000000 533000000>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
index 71c521a..1c07c5e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
@@ -45,8 +45,8 @@
 			"bus_clk", "core0_clk", "core0_bus_clk",
 			"core1_clk", "core1_bus_clk";
 		qcom,clock-configs = <0x1 0x0 0x0 0x1 0x0 0x1 0x0>;
-		qcom,allowed-clock-rates = <200000000 320000000 380000000
-			444000000 533000000>;
+		qcom,allowed-clock-rates = <100000000 200000000 320000000
+			380000000 444000000 533000000>;
 
 		/* Buses */
 		bus_cnoc {
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 1eaeb59..2c0373d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -603,7 +603,7 @@
 			size = <0 0x1400000>;
 		};
 
-		sp_mem: sp_region {  /* SPSS-HLOS ION shared mem */
+		secure_sp_mem: secure_sp_region { /* SPSS-HLOS ION shared mem */
 			compatible = "shared-dma-pool";
 			alloc-ranges = <0 0x00000000 0 0xffffffff>; /* 32-bit */
 			reusable;
@@ -639,6 +639,7 @@
 
 #include "msm-gdsc-sdm845.dtsi"
 #include "sdm845-sde-pll.dtsi"
+#include "msm-rdbg.dtsi"
 #include "sdm845-sde.dtsi"
 #include "sdm845-sde-display.dtsi"
 #include "sdm845-qupv3.dtsi"
@@ -759,6 +760,8 @@
 		compatible = "qcom,spmi-pmic-arb-debug";
 		reg = <0x6b22000 0x60>, <0x7820A8 4>;
 		reg-names = "core", "fuse";
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
 		qcom,fuse-disable-bit = <12>;
 		#address-cells = <2>;
 		#size-cells = <0>;
@@ -769,6 +772,7 @@
 			reg = <0x0 SPMI_USID>;
 			#address-cells = <2>;
 			#size-cells = <0>;
+			qcom,can-sleep;
 		};
 
 		qcom,pm8998-debug@1 {
@@ -776,6 +780,7 @@
 			reg = <0x1 SPMI_USID>;
 			#address-cells = <2>;
 			#size-cells = <0>;
+			qcom,can-sleep;
 		};
 
 		qcom,pmi8998-debug@2 {
@@ -783,6 +788,7 @@
 			reg = <0x2 SPMI_USID>;
 			#address-cells = <2>;
 			#size-cells = <0>;
+			qcom,can-sleep;
 		};
 
 		qcom,pmi8998-debug@3 {
@@ -790,6 +796,7 @@
 			reg = <0x3 SPMI_USID>;
 			#address-cells = <2>;
 			#size-cells = <0>;
+			qcom,can-sleep;
 		};
 
 		qcom,pm8005-debug@4 {
@@ -797,6 +804,7 @@
 			reg = <0x4 SPMI_USID>;
 			#address-cells = <2>;
 			#size-cells = <0>;
+			qcom,can-sleep;
 		};
 
 		qcom,pm8005-debug@5 {
@@ -804,6 +812,7 @@
 			reg = <0x5 SPMI_USID>;
 			#address-cells = <2>;
 			#size-cells = <0>;
+			qcom,can-sleep;
 		};
 	};
 
@@ -960,12 +969,13 @@
 		qcom,target-dev = <&l3_cpu0>;
 		qcom,cachemiss-ev = <0x17>;
 		qcom,core-dev-table =
-			<  300000 300000 >,
-			<  748800 576000 >,
-			<  979200 652800 >,
-			< 1209600 806400 >,
-			< 1516800 883200 >,
-			< 1593600 960000 >;
+			<  300000  300000 >,
+			<  748800  576000 >,
+			<  979200  652800 >,
+			< 1209600  806400 >,
+			< 1516800  883200 >,
+			< 1593600  960000 >,
+			< 1708800 1094400 >;
 	};
 
 	devfreq_l3lat_4: qcom,cpu4-l3lat-mon {
@@ -974,11 +984,12 @@
 		qcom,target-dev = <&l3_cpu4>;
 		qcom,cachemiss-ev = <0x17>;
 		qcom,core-dev-table =
-			<  300000 300000 >,
-			< 1036800 652800 >,
-			< 1190400 806400 >,
-			< 1574400 883200 >,
-			< 1651200 960000 >;
+			<  300000  300000 >,
+			< 1036800  576000 >,
+			< 1190400  806400 >,
+			< 1574400  883200 >,
+			< 1804800  960000 >,
+			< 2092800 1094400 >;
 	};
 
 	cpu_pmu: cpu-pmu {
@@ -1808,6 +1819,15 @@
 		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
 		qcom,apps-ch-pipes = <0x780000>;
 		qcom,ea-pc = <0x270>;
+		qcom,iommu-s1-bypass;
+
+		iommu_slim_aud_ctrl_cb: qcom,iommu_slim_ctrl_cb {
+			compatible = "qcom,iommu-slim-ctrl-cb";
+			iommus = <&apps_smmu 0x1806 0x0>,
+				 <&apps_smmu 0x180d 0x0>,
+				 <&apps_smmu 0x180e 0x1>,
+				 <&apps_smmu 0x1810 0x1>;
+		};
 	};
 
 	slim_qca: slim@17240000 {
@@ -1819,6 +1839,12 @@
 		reg-names = "slimbus_physical", "slimbus_bam_physical";
 		interrupts = <0 291 0>, <0 292 0>;
 		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+		qcom,iommu-s1-bypass;
+
+		iommu_slim_qca_ctrl_cb: qcom,iommu_slim_ctrl_cb {
+			compatible = "qcom,iommu-slim-ctrl-cb";
+			iommus = <&apps_smmu 0x1813 0x0>;
+		};
 
 		/* Slimbus Slave DT for WCN3990 */
 		btfmslim_codec: wcn3990 {
@@ -2271,7 +2297,7 @@
 		qcom,rx-ring-size = <0x400>;
 	};
 
-	qmp_aop: mailbox@1799000c {
+	qmp_aop: qcom,qmp-aop@c300000 {
 		compatible = "qcom,qmp-mbox";
 		label = "aop";
 		reg = <0xc300000 0x100000>,
@@ -2279,6 +2305,7 @@
 		reg-names = "msgram", "irq-reg-base";
 		qcom,irq-mask = <0x1>;
 		interrupts = <0 389 1>;
+		priority = <0>;
 		mbox-desc-offset = <0x0>;
 		#mbox-cells = <1>;
 	};
@@ -2588,6 +2615,7 @@
 		qcom,disk-encrypt-pipe-pair = <2>;
 		qcom,support-fde;
 		qcom,no-clock-support;
+		qcom,fde-key-size;
 		qcom,msm-bus,name = "qseecom-noc";
 		qcom,msm-bus,num-cases = <4>;
 		qcom,msm-bus,num-paths = <1>;
@@ -2741,17 +2769,17 @@
 			<90 512 80000 640000>,
 			<90 585 80000 640000>,
 			<1 676 80000 80000>,
-			<143 777 0 150000000>,
+			<143 777 0 150>, /* IB defined for IPA clk in MHz */
 		/* NOMINAL */
 			<90 512 206000 960000>,
 			<90 585 206000 960000>,
 			<1 676 206000 160000>,
-			<143 777 0 300000000>,
+			<143 777 0 300>, /* IB defined for IPA clk in MHz */
 		/* TURBO */
 			<90 512 206000 3600000>,
 			<90 585 206000 3600000>,
 			<1 676 206000 300000>,
-			<143 777 0 355333333>;
+			<143 777 0 355>; /* IB defined for IPA clk in MHz */
 		qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO";
 
 		/* IPA RAM mmap */
@@ -2944,6 +2972,61 @@
 		qcom,vdd-3.3-ch0-config = <3104000 3312000>;
 	};
 
+	qmi-tmd-devices {
+		compatible = "qcom,qmi_cooling_devices";
+
+		modem {
+			qcom,instance-id = <0x0>;
+
+			modem_pa: modem_pa {
+				qcom,qmi-dev-name = "pa";
+				#cooling-cells = <2>;
+			};
+
+			modem_proc: modem_proc {
+				qcom,qmi-dev-name = "modem";
+				#cooling-cells = <2>;
+			};
+
+			modem_current: modem_current {
+				qcom,qmi-dev-name = "modem_current";
+				#cooling-cells = <2>;
+			};
+
+			modem_vdd: modem_vdd {
+				qcom,qmi-dev-name = "cpuv_restriction_cold";
+				#cooling-cells = <2>;
+			};
+		};
+
+		adsp {
+			qcom,instance-id = <0x1>;
+
+			adsp_vdd: adsp_vdd {
+				qcom,qmi-dev-name = "cpuv_restriction_cold";
+				#cooling-cells = <2>;
+			};
+		};
+
+		cdsp {
+			qcom,instance-id = <0x43>;
+
+			cdsp_vdd: cdsp_vdd {
+				qcom,qmi-dev-name = "cpuv_restriction_cold";
+				#cooling-cells = <2>;
+			};
+		};
+
+		slpi {
+			qcom,instance-id = <0x53>;
+
+			slpi_vdd: slpi_vdd {
+				qcom,qmi-dev-name = "cpuv_restriction_cold";
+				#cooling-cells = <2>;
+			};
+		};
+	};
+
 	thermal_zones: thermal-zones {
 		aoss0-usr {
 			polling-delay-passive = <0>;
@@ -3261,10 +3344,10 @@
 			};
 		};
 
-		silver-virt-max-usr {
-			polling-delay-passive = <100>;
-			polling-delay = <100>;
-			thermal-governor = "user_space";
+		silv-virt-max-step {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "step_wise";
 			trips {
 				silver-trip {
 					temperature = <120000>;
@@ -3274,10 +3357,10 @@
 			};
 		};
 
-		gold-virt-max-usr {
-			polling-delay-passive = <100>;
-			polling-delay = <100>;
-			thermal-governor = "user_space";
+		gold-virt-max-step {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "step_wise";
 			trips {
 				gold-trip {
 					temperature = <120000>;
@@ -3327,615 +3410,6 @@
 			};
 		};
 
-		aoss0-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens0 0>;
-			tracks-low;
-			trips {
-				aoss0_trip: aoss0-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&aoss0_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&aoss0_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&aoss0_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		cpu0-silver-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens0 1>;
-			tracks-low;
-			trips {
-				cpu0_trip: cpu0-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&cpu0_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&cpu0_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&cpu0_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		cpu1-silver-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens0 2>;
-			tracks-low;
-			trips {
-				cpu1_trip: cpu1-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&cpu1_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&cpu1_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&cpu1_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		cpu2-silver-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens0 3>;
-			tracks-low;
-			trips {
-				cpu2_trip: cpu2-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&cpu2_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&cpu2_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&cpu2_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		cpu3-silver-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens0 4>;
-			tracks-low;
-			trips {
-				cpu3_trip: cpu3-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&cpu3_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&cpu3_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&cpu3_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		kryo-l3-0-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens0 5>;
-			tracks-low;
-			trips {
-				l3_0_trip: l3-0-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&l3_0_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&l3_0_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&l3_0_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		kryo-l3-1-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens0 6>;
-			tracks-low;
-			trips {
-				l3_1_trip: l3-1-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&l3_1_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&l3_1_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&l3_1_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		cpu0-gold-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens0 7>;
-			tracks-low;
-			trips {
-				cpug0_trip: cpug0-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&cpug0_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&cpug0_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&cpug0_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		cpu1-gold-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens0 8>;
-			tracks-low;
-			trips {
-				cpug1_trip: cpug1-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&cpug1_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&cpug1_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&cpug1_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		cpu2-gold-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens0 9>;
-			tracks-low;
-			trips {
-				cpug2_trip: cpug2-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&cpug2_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&cpug2_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&cpug2_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		cpu3-gold-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens0 10>;
-			tracks-low;
-			trips {
-				cpug3_trip: cpug3-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&cpug3_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&cpug3_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&cpug3_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		gpu0-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens0 11>;
-			tracks-low;
-			trips {
-				gpu0_trip_l: gpu0-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&gpu0_trip_l>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&gpu0_trip_l>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&gpu0_trip_l>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		gpu1-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens0 12>;
-			tracks-low;
-			trips {
-				gpu1_trip_l: gpu1-trip_l {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&gpu1_trip_l>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&gpu1_trip_l>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&gpu1_trip_l>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		aoss1-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens1 0>;
-			tracks-low;
-			trips {
-				aoss1_trip: aoss1-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&aoss1_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&aoss1_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&aoss1_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		mdm-dsp-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens1 1>;
-			tracks-low;
-			trips {
-				dsp_trip: dsp-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&dsp_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&dsp_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&dsp_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		ddr-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens1 2>;
-			tracks-low;
-			trips {
-				ddr_trip: ddr-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&ddr_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&ddr_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&ddr_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		wlan-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens1 3>;
-			tracks-low;
-			trips {
-				wlan_trip: wlan-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&wlan_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&wlan_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&wlan_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		compute-hvx-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens1 4>;
-			tracks-low;
-			trips {
-				hvx_trip: hvx-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&hvx_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&hvx_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&hvx_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		camera-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens1 5>;
-			tracks-low;
-			trips {
-				camera_trip: camera-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&camera_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&camera_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&camera_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		mmss-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens1 6>;
-			tracks-low;
-			trips {
-				mmss_trip: mmss-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&mmss_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&mmss_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&mmss_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
-		mdm-core-lowf {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens1 7>;
-			tracks-low;
-			trips {
-				mdm_trip: mdm-trip {
-					temperature = <5000>;
-					hysteresis = <5000>;
-					type = "passive";
-				};
-			};
-			cooling-maps {
-				cpu0_vdd_cdev {
-					trip = <&mdm_trip>;
-					cooling-device = <&CPU0 4 4>;
-				};
-				cpu4_vdd_cdev {
-					trip = <&mdm_trip>;
-					cooling-device = <&CPU4 9 9>;
-				};
-				gpu_vdd_cdev {
-					trip = <&mdm_trip>;
-					cooling-device = <&msm_gpu 1 1>;
-				};
-			};
-		};
-
 		lmh-dcvs-01 {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
@@ -4071,6 +3545,57 @@
 		iommus = <&apps_smmu 0x06d6 0x0>;
 		status = "ok";
 	};
+
+	tspp: msm_tspp@0x8880000 {
+		compatible = "qcom,msm_tspp";
+		reg = <0x088a7000 0x200>, /* MSM_TSIF0_PHYS */
+		      <0x088a8000 0x200>, /* MSM_TSIF1_PHYS */
+		      <0x088a9000 0x1000>, /* MSM_TSPP_PHYS  */
+		      <0x08884000 0x23000>; /* MSM_TSPP_BAM_PHYS */
+		reg-names = "MSM_TSIF0_PHYS",
+			"MSM_TSIF1_PHYS",
+			"MSM_TSPP_PHYS",
+			"MSM_TSPP_BAM_PHYS";
+		interrupts = <0 121 0>, /* TSIF_TSPP_IRQ */
+			<0 119 0>, /* TSIF0_IRQ */
+			<0 120 0>, /* TSIF1_IRQ */
+			<0 122 0>; /* TSIF_BAM_IRQ */
+		interrupt-names = "TSIF_TSPP_IRQ",
+			"TSIF0_IRQ",
+			"TSIF1_IRQ",
+			"TSIF_BAM_IRQ";
+
+		clock-names = "iface_clk", "ref_clk";
+		clocks = <&clock_gcc GCC_TSIF_AHB_CLK>,
+			<&clock_gcc GCC_TSIF_REF_CLK>;
+
+		qcom,msm-bus,name = "tsif";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<82 512 0 0>, /* No vote */
+				<82 512 12288 24576>;
+				/* Max. bandwidth, 2xTSIF, each max of 96Mbps */
+
+		pinctrl-names = "disabled",
+			"tsif0-mode1", "tsif0-mode2",
+			"tsif1-mode1", "tsif1-mode2",
+			"dual-tsif-mode1", "dual-tsif-mode2";
+
+		pinctrl-0 = <>;				/* disabled */
+		pinctrl-1 = <&tsif0_signals_active>;	/* tsif0-mode1 */
+		pinctrl-2 = <&tsif0_signals_active
+			&tsif0_sync_active>;		/* tsif0-mode2 */
+		pinctrl-3 = <&tsif1_signals_active>;	/* tsif1-mode1 */
+		pinctrl-4 = <&tsif1_signals_active
+			&tsif1_sync_active>;		/* tsif1-mode2 */
+		pinctrl-5 = <&tsif0_signals_active
+			&tsif1_signals_active>;		/* dual-tsif-mode1 */
+		pinctrl-6 = <&tsif0_signals_active
+			&tsif0_sync_active
+			&tsif1_signals_active
+			&tsif1_sync_active>;		/* dual-tsif-mode2 */
+	};
 };
 
 &clock_cpucc {
@@ -4321,3 +3846,1202 @@
 		};
 	};
 };
+
+&thermal_zones {
+	aoss0-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 0>;
+		tracks-low;
+		trips {
+			aoss0_trip: aoss0-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&aoss0_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&aoss0_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&aoss0_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&aoss0_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&aoss0_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&aoss0_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&aoss0_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&aoss0_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&aoss0_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&aoss0_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	cpu0-silver-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 1>;
+		tracks-low;
+		trips {
+			cpu0_trip: cpu0-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	cpu1-silver-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 2>;
+		tracks-low;
+		trips {
+			cpu1_trip: cpu1-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	cpu2-silver-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 3>;
+		tracks-low;
+		trips {
+			cpu2_trip: cpu2-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	cpu3-silver-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 4>;
+		tracks-low;
+		trips {
+			cpu3_trip: cpu3-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	kryo-l3-0-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 5>;
+		tracks-low;
+		trips {
+			l3_0_trip: l3-0-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	kryo-l3-1-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 6>;
+		tracks-low;
+		trips {
+			l3_1_trip: l3-1-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	cpu0-gold-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 7>;
+		tracks-low;
+		trips {
+			cpug0_trip: cpug0-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpug0_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&cpug0_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&cpug0_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpug0_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&cpug0_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&cpug0_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpug0_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&cpug0_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&cpug0_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&cpug0_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	cpu1-gold-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 8>;
+		tracks-low;
+		trips {
+			cpug1_trip: cpug1-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpug1_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&cpug1_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&cpug1_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpug1_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&cpug1_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&cpug1_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpug1_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&cpug1_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&cpug1_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&cpug1_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	cpu2-gold-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 9>;
+		tracks-low;
+		trips {
+			cpug2_trip: cpug2-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpug2_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&cpug2_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&cpug2_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpug2_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&cpug2_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&cpug2_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpug2_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&cpug2_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&cpug2_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&cpug2_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	cpu3-gold-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 10>;
+		tracks-low;
+		trips {
+			cpug3_trip: cpug3-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpug3_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&cpug3_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&cpug3_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpug3_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&cpug3_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&cpug3_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpug3_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&cpug3_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&cpug3_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&cpug3_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	gpu0-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 11>;
+		tracks-low;
+		trips {
+			gpu0_trip_l: gpu0-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&gpu0_trip_l>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&gpu0_trip_l>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&gpu0_trip_l>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&gpu0_trip_l>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&gpu0_trip_l>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&gpu0_trip_l>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&gpu0_trip_l>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&gpu0_trip_l>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&gpu0_trip_l>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&gpu0_trip_l>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	gpu1-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 12>;
+		tracks-low;
+		trips {
+			gpu1_trip_l: gpu1-trip_l {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&gpu1_trip_l>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&gpu1_trip_l>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&gpu1_trip_l>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&gpu1_trip_l>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&gpu1_trip_l>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&gpu1_trip_l>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&gpu1_trip_l>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&gpu1_trip_l>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&gpu1_trip_l>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&gpu1_trip_l>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	aoss1-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens1 0>;
+		tracks-low;
+		trips {
+			aoss1_trip: aoss1-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&aoss1_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&aoss1_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&aoss1_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&aoss1_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&aoss1_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&aoss1_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&aoss1_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&aoss1_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&aoss1_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&aoss1_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	mdm-dsp-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens1 1>;
+		tracks-low;
+		trips {
+			dsp_trip: dsp-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&dsp_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&dsp_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&dsp_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&dsp_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&dsp_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&dsp_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&dsp_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&dsp_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&dsp_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&dsp_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	ddr-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens1 2>;
+		tracks-low;
+		trips {
+			ddr_trip: ddr-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&ddr_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&ddr_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&ddr_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&ddr_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&ddr_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&ddr_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&ddr_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&ddr_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&ddr_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&ddr_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	wlan-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens1 3>;
+		tracks-low;
+		trips {
+			wlan_trip: wlan-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&wlan_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&wlan_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&wlan_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&wlan_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&wlan_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&wlan_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&wlan_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&wlan_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&wlan_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&wlan_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	compute-hvx-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens1 4>;
+		tracks-low;
+		trips {
+			hvx_trip: hvx-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&hvx_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&hvx_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&hvx_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&hvx_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&hvx_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&hvx_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&hvx_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&hvx_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&hvx_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&hvx_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	camera-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens1 5>;
+		tracks-low;
+		trips {
+			camera_trip: camera-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	mmss-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens1 6>;
+		tracks-low;
+		trips {
+			mmss_trip: mmss-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&mmss_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&mmss_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&mmss_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&mmss_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&mmss_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&mmss_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&mmss_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&mmss_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&mmss_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&mmss_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+
+	mdm-core-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens1 7>;
+		tracks-low;
+		trips {
+			mdm_trip: mdm-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device = <&CPU0 4 4>;
+			};
+			cpu4_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device = <&CPU4 9 9>;
+			};
+			gpu_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device = <&msm_gpu 1 1>;
+			};
+			cx_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			ebi_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device = <&ebi_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+			slpi_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device = <&slpi_vdd 0 0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/smb1355.dtsi b/arch/arm64/boot/dts/qcom/smb1355.dtsi
index 33c5e97..999d87a 100644
--- a/arch/arm64/boot/dts/qcom/smb1355.dtsi
+++ b/arch/arm64/boot/dts/qcom/smb1355.dtsi
@@ -39,6 +39,11 @@
 			interrupt-parent = <&smb1355>;
 			status = "disabled";
 
+			io-channels = <&pmi8998_rradc 2>,
+				      <&pmi8998_rradc 12>;
+			io-channel-names = "charger_temp",
+					   "charger_temp_max";
+
 			qcom,chgr@1000 {
 				reg = <0x1000 0x100>;
 				interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
new file mode 100644
index 0000000..f28a9a6
--- /dev/null
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -0,0 +1,639 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_FHANDLE is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDM670=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_MSM=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_RNDIS_IPA=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_USB_USBNET=y
+CONFIG_WIL6210=m
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_CONSOLE=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_SDM670=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
+CONFIG_QPNP_SMB2=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_WCD934X_CODEC=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_CPRH_KBSS=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_RPMH=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SPECTRA_CAMERA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_QCOM_KGSL=y
+CONFIG_DRM=y
+CONFIG_DRM_SDE_EVTLOG_DEBUG=y
+CONFIG_DRM_SDE_RSC=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_SDM845=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_RING_BUFFER=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_KRYO3XX_ARM64=y
+CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE=y
+CONFIG_EDAC_QCOM_LLCC=y
+CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_QCOM_GPI_DMA_DEBUG=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_IPA_UT=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_QPNP_REVID=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_11AD=m
+CONFIG_SEEMP_CORE=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_CLOCK_QPNP_DIV=y
+CONFIG_MSM_CLK_RPMH=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_MSM_CLK_AOP_QMP=y
+CONFIG_QCOM_MDSS_PLL=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_LLCC=y
+CONFIG_QCOM_SDM670_LLCC=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_ERP=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_TRACER_PKT=y
+CONFIG_QTI_RPMH_API=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_ICNSS=y
+CONFIG_ICNSS_DEBUG=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_MSM_AVTIMER=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_MSM_QBT1000=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_QCOMCCI_HWMON=y
+CONFIG_QCOM_M4M_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_PHY_XGENE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_SENSORS_SSC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_LKDTM=y
+CONFIG_MEMTEST=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_ARM64_PTDUMP=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_XZ_DEC=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 8a5b17d..9f98841 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -39,7 +39,7 @@
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
 CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -260,7 +260,6 @@
 CONFIG_DUMMY=y
 CONFIG_TUN=y
 CONFIG_SKY2=y
-CONFIG_RNDIS_IPA=y
 CONFIG_SMSC911X=y
 CONFIG_PPP=y
 CONFIG_PPP_BSDCOMP=y
@@ -291,6 +290,7 @@
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_QCOM_GENI=y
 CONFIG_SOUNDWIRE=y
@@ -329,9 +329,11 @@
 CONFIG_MSM_BCL_PERIPHERAL_CTL=y
 CONFIG_QTI_THERMAL_LIMITS_DCVS=y
 CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_REG_COOLING_DEVICE=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
-CONFIG_WCD934X_CODEC=y
+CONFIG_WCD9XXX_CODEC_CORE=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_CPRH_KBSS=y
 CONFIG_REGULATOR_QPNP_LABIBB=y
@@ -340,6 +342,7 @@
 CONFIG_REGULATOR_STUB=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_VIDEO_ADV_DEBUG=y
@@ -350,6 +353,10 @@
 CONFIG_MSM_VIDC_GOVERNORS=y
 CONFIG_MSM_SDE_ROTATOR=y
 CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_TSPP1=y
+CONFIG_TSPP=m
 CONFIG_QCOM_KGSL=y
 CONFIG_DRM=y
 CONFIG_DRM_SDE_EVTLOG_DEBUG=y
@@ -365,10 +372,14 @@
 CONFIG_SND_USB_AUDIO=y
 CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MACHINE_SDM845=y
 CONFIG_SND_SOC_SDM845=y
 CONFIG_UHID=y
 CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
 CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
 CONFIG_HID_PLANTRONICS=y
 CONFIG_USB=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
@@ -438,6 +449,7 @@
 CONFIG_GSI=y
 CONFIG_IPA3=y
 CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
 CONFIG_IPA_UT=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
@@ -463,6 +475,7 @@
 CONFIG_ARM_SMMU=y
 CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
 CONFIG_IOMMU_TESTS=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_QCOM_LLCC=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index e70963a..cd4cbb1 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -42,7 +42,7 @@
 CONFIG_EMBEDDED=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
 CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -56,7 +56,6 @@
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_SDM845=y
-CONFIG_ARCH_SDM670=y
 CONFIG_PCI=y
 CONFIG_PCI_MSM=y
 CONFIG_SCHED_MC=y
@@ -269,7 +268,6 @@
 CONFIG_BONDING=y
 CONFIG_DUMMY=y
 CONFIG_TUN=y
-CONFIG_RNDIS_IPA=y
 CONFIG_PPP=y
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
@@ -298,6 +296,7 @@
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_QCOM_GENI=y
 CONFIG_SOUNDWIRE=y
@@ -309,7 +308,6 @@
 CONFIG_SPMI=y
 CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
 CONFIG_PINCTRL_SDM845=y
-CONFIG_PINCTRL_SDM670=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
@@ -336,9 +334,11 @@
 CONFIG_MSM_BCL_PERIPHERAL_CTL=y
 CONFIG_QTI_THERMAL_LIMITS_DCVS=y
 CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_REG_COOLING_DEVICE=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
-CONFIG_WCD934X_CODEC=y
+CONFIG_WCD9XXX_CODEC_CORE=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_CPRH_KBSS=y
 CONFIG_REGULATOR_QPNP_LABIBB=y
@@ -347,6 +347,7 @@
 CONFIG_REGULATOR_STUB=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_VIDEO_ADV_DEBUG=y
@@ -357,6 +358,10 @@
 CONFIG_MSM_VIDC_GOVERNORS=y
 CONFIG_MSM_SDE_ROTATOR=y
 CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_TSPP1=y
+CONFIG_TSPP=m
 CONFIG_QCOM_KGSL=y
 CONFIG_DRM=y
 CONFIG_DRM_SDE_EVTLOG_DEBUG=y
@@ -372,10 +377,14 @@
 CONFIG_SND_USB_AUDIO=y
 CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MACHINE_SDM845=y
 CONFIG_SND_SOC_SDM845=y
 CONFIG_UHID=y
 CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
 CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
 CONFIG_HID_PLANTRONICS=y
 CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
@@ -454,6 +463,7 @@
 CONFIG_GSI=y
 CONFIG_IPA3=y
 CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
 CONFIG_IPA_UT=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
@@ -485,7 +495,6 @@
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_SDM845_LLCC=y
-CONFIG_QCOM_SDM670_LLCC=y
 CONFIG_MSM_SERVICE_LOCATOR=y
 CONFIG_MSM_SERVICE_NOTIFIER=y
 CONFIG_MSM_BOOT_STATS=y
@@ -590,7 +599,6 @@
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_LOCKUP_DETECTOR=y
-CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_TIMEOUT=5
 CONFIG_PANIC_ON_SCHED_BUG=y
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index a2c2478..4bcfe01 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -217,10 +217,11 @@
 		/* tkr_mono.cycle_last == tkr_raw.cycle_last */
 		vdso_data->cs_cycle_last	= tk->tkr_mono.cycle_last;
 		vdso_data->raw_time_sec		= tk->raw_time.tv_sec;
-		vdso_data->raw_time_nsec	= tk->raw_time.tv_nsec;
+		vdso_data->raw_time_nsec	= (tk->raw_time.tv_nsec <<
+						   tk->tkr_raw.shift) +
+						  tk->tkr_raw.xtime_nsec;
 		vdso_data->xtime_clock_sec	= tk->xtime_sec;
 		vdso_data->xtime_clock_nsec	= tk->tkr_mono.xtime_nsec;
-		/* tkr_raw.xtime_nsec == 0 */
 		vdso_data->cs_mono_mult		= tk->tkr_mono.mult;
 		vdso_data->cs_raw_mult		= tk->tkr_raw.mult;
 		/* tkr_mono.shift == tkr_raw.shift */
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index e00b467..76320e9 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -256,7 +256,6 @@
 	seqcnt_check fail=monotonic_raw
 
 	/* All computations are done with left-shifted nsecs. */
-	lsl	x14, x14, x12
 	get_nsec_per_sec res=x9
 	lsl	x9, x9, x12
 
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 1c2a5e2..e93c949 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -139,7 +139,7 @@
 #define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 #define atomic64_inc_and_test(v)	(atomic64_inc_return((v)) == 0)
-
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&(v)->counter, old, new))
 #define atomic_xchg(v, new)		(xchg(&(v)->counter, new))
@@ -161,6 +161,39 @@
 	return c;
 }
 
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+{
+	long long c, old;
+
+	c = atomic64_read(v);
+	for (;;) {
+		if (unlikely(c == u))
+			break;
+		old = atomic64_cmpxchg(v, c, c + i);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != u;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long long c, old, dec;
+
+	c = atomic64_read(v);
+	for (;;) {
+		dec = c - 1;
+		if (unlikely(dec < 0))
+			break;
+		old = atomic64_cmpxchg((v), c, dec);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return dec;
+}
+
 #define ATOMIC_OP(op)							\
 static inline int atomic_fetch_##op(int i, atomic_t *v)			\
 {									\
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index 836f147..efa59f1 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -74,7 +74,7 @@
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(current->mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			goto success;
 	}
 
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 2728a9a..145b5ce 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -128,19 +128,19 @@
 			-DADDR_BITS=$(ADDR_BITS) \
 			-DADDR_CELLS=$(itb_addr_cells)
 
-$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
 
-$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
 
-$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX)  FORCE
 	$(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
 
-$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
 
-$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
 
 quiet_cmd_itb-image = ITB     $@
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 12c7181..c86b66b 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -804,8 +804,10 @@
 			break;
 		}
 		/* Compact branch: BNEZC || JIALC */
-		if (insn.i_format.rs)
+		if (!insn.i_format.rs) {
+			/* JIALC: set $31/ra */
 			regs->regs[31] = epc + 4;
+		}
 		regs->cp0_epc += 8;
 		break;
 #endif
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index d08ea3f..a44052c 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -92,7 +92,7 @@
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h
index 393d311..67e333a 100644
--- a/arch/mn10300/include/asm/switch_to.h
+++ b/arch/mn10300/include/asm/switch_to.h
@@ -16,7 +16,7 @@
 struct task_struct;
 struct thread_struct;
 
-#if !defined(CONFIG_LAZY_SAVE_FPU)
+#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
 struct fpu_state_struct;
 extern asmlinkage void fpu_save(struct fpu_state_struct *);
 #define switch_fpu(prev, next)						\
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 0a393a0..1d7691f 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -88,7 +88,7 @@
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	unsigned long task_size = TASK_SIZE;
 	int do_color_align, last_mmap;
 	struct vm_unmapped_area_info info;
@@ -115,9 +115,10 @@
 		else
 			addr = PAGE_ALIGN(addr);
 
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			goto found_addr;
 	}
 
@@ -141,7 +142,7 @@
 			  const unsigned long len, const unsigned long pgoff,
 			  const unsigned long flags)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	int do_color_align, last_mmap;
@@ -175,9 +176,11 @@
 			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			goto found_addr;
 	}
 
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 1ba82ea..2e2fc1e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1411,10 +1411,8 @@
 	.align	7
 do_hash_page:
 #ifdef CONFIG_PPC_STD_MMU_64
-	andis.	r0,r4,0xa410		/* weird error? */
+	andis.	r0,r4,0xa450		/* weird error? */
 	bne-	handle_page_fault	/* if not, try to insert a HPTE */
-	andis.  r0,r4,DSISR_DABRMATCH@h
-	bne-    handle_dabr_fault
 	CURRENT_THREAD_INFO(r11, r1)
 	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
 	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
@@ -1438,11 +1436,16 @@
 
 	/* Error */
 	blt-	13f
+
+	/* Reload DSISR into r4 for the DABR check below */
+	ld      r4,_DSISR(r1)
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
-11:	ld	r4,_DAR(r1)
+11:	andis.  r0,r4,DSISR_DABRMATCH@h
+	bne-    handle_dabr_fault
+	ld	r4,_DAR(r1)
 	ld	r5,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_page_fault
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index e785cc9..fe97cbe 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -511,6 +511,15 @@
 	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
 #endif
 
+	/*
+	 * jprobes use jprobe_return() which skips the normal return
+	 * path of the function, and this messes up the accounting of the
+	 * function graph tracer.
+	 *
+	 * Pause function graph tracing while performing the jprobe function.
+	 */
+	pause_graph_tracing();
+
 	return 1;
 }
 
@@ -533,6 +542,8 @@
 	 * saved regs...
 	 */
 	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	/* It's OK to start function graph tracing again */
+	unpause_graph_tracing();
 	preempt_enable_no_resched();
 	return 1;
 }
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 094deb6..5c02984 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2807,12 +2807,34 @@
 {
 	int r;
 	int srcu_idx;
+	unsigned long ebb_regs[3] = {};	/* shut up GCC */
 
 	if (!vcpu->arch.sane) {
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
 	}
 
+	/*
+	 * Don't allow entry with a suspended transaction, because
+	 * the guest entry/exit code will lose it.
+	 * If the guest has TM enabled, save away their TM-related SPRs
+	 * (they will get restored by the TM unavailable interrupt).
+	 */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+	    (current->thread.regs->msr & MSR_TM)) {
+		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+			run->fail_entry.hardware_entry_failure_reason = 0;
+			return -EINVAL;
+		}
+		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+		current->thread.regs->msr &= ~MSR_TM;
+	}
+#endif
+
 	kvmppc_core_prepare_to_enter(vcpu);
 
 	/* No need to go into the guest when all we'll do is come back out */
@@ -2834,6 +2856,13 @@
 
 	flush_all_to_thread(current);
 
+	/* Save userspace EBB register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		ebb_regs[0] = mfspr(SPRN_EBBHR);
+		ebb_regs[1] = mfspr(SPRN_EBBRR);
+		ebb_regs[2] = mfspr(SPRN_BESCR);
+	}
+
 	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
 	vcpu->arch.pgdir = current->mm->pgd;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2856,6 +2885,13 @@
 			r = kvmppc_xics_rm_complete(vcpu, 0);
 	} while (is_kvmppc_resume_guest(r));
 
+	/* Restore userspace EBB register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		mtspr(SPRN_EBBHR, ebb_regs[0]);
+		mtspr(SPRN_EBBRR, ebb_regs[1]);
+		mtspr(SPRN_BESCR, ebb_regs[2]);
+	}
+
  out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&vcpu->kvm->arch.vcpus_running);
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 35254a6..a2b2d97 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -65,7 +65,7 @@
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	/*
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 2f1e443..5bc2845 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -106,7 +106,7 @@
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -142,7 +142,7 @@
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-				(!vma || addr + len <= vma->vm_start))
+				(!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 2b27458..c4d5c9c 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -105,7 +105,7 @@
 	if ((mm->task_size - len) < addr)
 		return 0;
 	vma = find_vma(mm, addr);
-	return (!vma || (addr + len) <= vma->vm_start);
+	return (!vma || (addr + len) <= vm_start_gap(vma));
 }
 
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index d24a8a3..28ae8bd 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -100,5 +100,6 @@
 			struct pt_regs *regs_user_copy)
 {
 	regs_user->regs = task_pt_regs(current);
-	regs_user->abi  = perf_reg_abi(current);
+	regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+			 PERF_SAMPLE_REGS_ABI_NONE;
 }
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 32c46b4..b53f80f 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -130,14 +130,16 @@
 {
 	int hw_cpu = get_hard_smp_processor_id(cpu);
 
+	kvmppc_set_host_ipi(cpu, 1);
 	opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
 }
 
 static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 {
-	int hw_cpu = hard_smp_processor_id();
+	int cpu = smp_processor_id();
 
-	opal_int_set_mfrr(hw_cpu, 0xff);
+	kvmppc_set_host_ipi(cpu, 0);
+	opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
 
 	return smp_ipi_demux();
 }
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index c438168..3bc2825 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -240,12 +240,17 @@
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 .Lsie_done:
 # some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
+# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
+# Other instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
 # See also .Lcleanup_sie
-.Lrewind_pad:
-	nop	0
+.Lrewind_pad6:
+	nopr	7
+.Lrewind_pad4:
+	nopr	7
+.Lrewind_pad2:
+	nopr	7
 	.globl sie_exit
 sie_exit:
 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
@@ -258,7 +263,9 @@
 	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
 	j	sie_exit
 
-	EX_TABLE(.Lrewind_pad,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
 	EX_TABLE(sie_exit,.Lsie_fault)
 EXPORT_SYMBOL(sie64a)
 EXPORT_SYMBOL(sie_exit)
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 4aa8a7e..f5d7984 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -972,11 +972,12 @@
 	ptr = asce.origin * 4096;
 	if (asce.r) {
 		*fake = 1;
+		ptr = 0;
 		asce.dt = ASCE_TYPE_REGION1;
 	}
 	switch (asce.dt) {
 	case ASCE_TYPE_REGION1:
-		if (vaddr.rfx01 > asce.tl && !asce.r)
+		if (vaddr.rfx01 > asce.tl && !*fake)
 			return PGM_REGION_FIRST_TRANS;
 		break;
 	case ASCE_TYPE_REGION2:
@@ -1004,8 +1005,7 @@
 		union region1_table_entry rfte;
 
 		if (*fake) {
-			/* offset in 16EB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+			ptr += (unsigned long) vaddr.rfx << 53;
 			rfte.val = ptr;
 			goto shadow_r2t;
 		}
@@ -1031,8 +1031,7 @@
 		union region2_table_entry rste;
 
 		if (*fake) {
-			/* offset in 8PB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+			ptr += (unsigned long) vaddr.rsx << 42;
 			rste.val = ptr;
 			goto shadow_r3t;
 		}
@@ -1059,8 +1058,7 @@
 		union region3_table_entry rtte;
 
 		if (*fake) {
-			/* offset in 4TB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+			ptr += (unsigned long) vaddr.rtx << 31;
 			rtte.val = ptr;
 			goto shadow_sgt;
 		}
@@ -1096,8 +1094,7 @@
 		union segment_table_entry ste;
 
 		if (*fake) {
-			/* offset in 2G guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
+			ptr += (unsigned long) vaddr.sx << 20;
 			ste.val = ptr;
 			goto shadow_pgt;
 		}
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index eb9df28..812368f 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -98,7 +98,7 @@
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -136,7 +136,7 @@
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-				(!vma || addr + len <= vma->vm_start))
+				(!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 6777177..7df7d59 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -63,7 +63,7 @@
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -113,7 +113,7 @@
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index fe8b8ee..02e05e2 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -118,7 +118,7 @@
 
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -181,7 +181,7 @@
 
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 4094a51..496fa92 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -85,7 +85,7 @@
 
 void bad_trap(struct pt_regs *regs, long lvl)
 {
-	char buffer[32];
+	char buffer[36];
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "bad trap", regs,
@@ -116,7 +116,7 @@
 
 void bad_trap_tl1(struct pt_regs *regs, long lvl)
 {
-	char buffer[32];
+	char buffer[36];
 	
 	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
 		       0, lvl, SIGTRAP) == NOTIFY_STOP)
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 988acc8..58cde8d 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -116,7 +116,7 @@
 		addr = ALIGN(addr, HPAGE_SIZE);
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 77ceaa3..67508b2 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -232,7 +232,7 @@
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (current->mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index cb85222..3bdb917 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -431,11 +431,11 @@
  [ C(DTLB) ] = {
 	[ C(OP_READ) ] = {
 		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
-		[ C(RESULT_MISS)   ] = 0x608,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
+		[ C(RESULT_MISS)   ] = 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
 	},
 	[ C(OP_WRITE) ] = {
 		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
-		[ C(RESULT_MISS)   ] = 0x649,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
+		[ C(RESULT_MISS)   ] = 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
 	},
 	[ C(OP_PREFETCH) ] = {
 		[ C(RESULT_ACCESS) ] = 0x0,
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index a55ed63..1119414 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -140,7 +140,7 @@
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (end - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -183,7 +183,7 @@
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-				(!vma || addr + len <= vma->vm_start))
+				(!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 2ae8584..fe342e8 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -144,7 +144,7 @@
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 6b7ce62..aca6295 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -100,5 +100,6 @@
 	printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
 			(ulong) pfn_to_kaddr(highstart_pfn));
 
+	__vmalloc_start_set = true;
 	setup_bootmem_allocator();
 }
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index f71f88e..19707db 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -29,7 +29,8 @@
 # define PLATFORM_NR_IRQS 0
 #endif
 #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
+#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
 
 #if VARIANT_NR_IRQS == 0
 static inline void variant_init_irq(void) { }
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 4ac3d23..4416944 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -34,11 +34,6 @@
 {
 	int irq = irq_find_mapping(NULL, hwirq);
 
-	if (hwirq >= NR_IRQS) {
-		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-				__func__, hwirq);
-	}
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	/* Debugging check for stack overflow: is there less than 1KB free? */
 	{
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 83cf496..3aaaae1 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -87,7 +87,7 @@
 		/* At this point:  (!vmm || addr < vmm->vm_end). */
 		if (TASK_SIZE - len < addr)
 			return -ENOMEM;
-		if (!vmm || addr + len <= vmm->vm_start)
+		if (!vmm || addr + len <= vm_start_gap(vmm))
 			return addr;
 		addr = vmm->vm_end;
 		if (flags & MAP_SHARED)
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index dbeea2b..1fda7e2 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -24,16 +24,18 @@
 
 /* Interrupt configuration. */
 
-#define PLATFORM_NR_IRQS	10
+#define PLATFORM_NR_IRQS	0
 
 /* Default assignment of LX60 devices to external interrupts. */
 
 #ifdef CONFIG_XTENSA_MX
 #define DUART16552_INTNUM	XCHAL_EXTINT3_NUM
 #define OETH_IRQ		XCHAL_EXTINT4_NUM
+#define C67X00_IRQ		XCHAL_EXTINT8_NUM
 #else
 #define DUART16552_INTNUM	XCHAL_EXTINT0_NUM
 #define OETH_IRQ		XCHAL_EXTINT1_NUM
+#define C67X00_IRQ		XCHAL_EXTINT5_NUM
 #endif
 
 /*
@@ -63,5 +65,5 @@
 
 #define C67X00_PADDR		(XCHAL_KIO_PADDR + 0x0D0D0000)
 #define C67X00_SIZE		0x10
-#define C67X00_IRQ		5
+
 #endif /* __XTENSA_XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 779be72..42285f3 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -175,8 +175,8 @@
 		.flags = IORESOURCE_MEM,
 	},
 	[2] = { /* IRQ number */
-		.start = OETH_IRQ,
-		.end   = OETH_IRQ,
+		.start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
+		.end   = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
 		.flags = IORESOURCE_IRQ,
 	},
 };
@@ -213,8 +213,8 @@
 		.flags = IORESOURCE_MEM,
 	},
 	[1] = { /* IRQ number */
-		.start = C67X00_IRQ,
-		.end   = C67X00_IRQ,
+		.start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
+		.end   = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
 		.flags = IORESOURCE_IRQ,
 	},
 };
@@ -247,7 +247,7 @@
 static struct plat_serial8250_port serial_platform_data[] = {
 	[0] = {
 		.mapbase	= DUART16552_PADDR,
-		.irq		= DUART16552_INTNUM,
+		.irq		= XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
 		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
 				  UPF_IOREMAP,
 		.iotype		= XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32,
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 93e7c1b..5610cd5 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -300,6 +300,8 @@
 			continue;
 		bsd_start = le32_to_cpu(p->p_offset);
 		bsd_size = le32_to_cpu(p->p_size);
+		if (memcmp(flavour, "bsd\0", 4) == 0)
+			bsd_start += offset;
 		if (offset == bsd_start && size == bsd_size)
 			/* full parent partition, we have it already */
 			continue;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 23f3b95..147d2e3 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -889,13 +889,13 @@
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
 	}
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_idle(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -921,13 +921,13 @@
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
 	}
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_suspend(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -952,7 +952,8 @@
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+			dev->power.runtime_status != RPM_ACTIVE);
 
 	if (rpmflags & RPM_GET_PUT)
 		atomic_inc(&dev->power.usage_count);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4a80ee7..c42202d 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1436,34 +1436,35 @@
 static void make_response(struct xen_blkif_ring *ring, u64 id,
 			  unsigned short op, int st)
 {
-	struct blkif_response  resp;
+	struct blkif_response *resp;
 	unsigned long     flags;
 	union blkif_back_rings *blk_rings;
 	int notify;
 
-	resp.id        = id;
-	resp.operation = op;
-	resp.status    = st;
-
 	spin_lock_irqsave(&ring->blk_ring_lock, flags);
 	blk_rings = &ring->blk_rings;
 	/* Place on the response ring for the relevant domain. */
 	switch (ring->blkif->blk_protocol) {
 	case BLKIF_PROTOCOL_NATIVE:
-		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->native,
+					 blk_rings->native.rsp_prod_pvt);
 		break;
 	case BLKIF_PROTOCOL_X86_32:
-		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->x86_32,
+					 blk_rings->x86_32.rsp_prod_pvt);
 		break;
 	case BLKIF_PROTOCOL_X86_64:
-		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->x86_64,
+					 blk_rings->x86_64.rsp_prod_pvt);
 		break;
 	default:
 		BUG();
 	}
+
+	resp->id        = id;
+	resp->operation = op;
+	resp->status    = st;
+
 	blk_rings->common.rsp_prod_pvt++;
 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
 	spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index dea61f6..ecb35fe 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -75,9 +75,8 @@
 struct blkif_common_request {
 	char dummy;
 };
-struct blkif_common_response {
-	char dummy;
-};
+
+/* i386 protocol version */
 
 struct blkif_x86_32_request_rw {
 	uint8_t        nr_segments;  /* number of segments                   */
@@ -129,14 +128,6 @@
 	} u;
 } __attribute__((__packed__));
 
-/* i386 protocol version */
-#pragma pack(push, 4)
-struct blkif_x86_32_response {
-	uint64_t        id;              /* copied from request */
-	uint8_t         operation;       /* copied from request */
-	int16_t         status;          /* BLKIF_RSP_???       */
-};
-#pragma pack(pop)
 /* x86_64 protocol version */
 
 struct blkif_x86_64_request_rw {
@@ -193,18 +184,12 @@
 	} u;
 } __attribute__((__packed__));
 
-struct blkif_x86_64_response {
-	uint64_t       __attribute__((__aligned__(8))) id;
-	uint8_t         operation;       /* copied from request */
-	int16_t         status;          /* BLKIF_RSP_???       */
-};
-
 DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
-		  struct blkif_common_response);
+		  struct blkif_response);
 DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
-		  struct blkif_x86_32_response);
+		  struct blkif_response __packed);
 DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
-		  struct blkif_x86_64_response);
+		  struct blkif_response);
 
 union blkif_back_rings {
 	struct blkif_back_ring        native;
@@ -281,6 +266,7 @@
 
 	wait_queue_head_t	wq;
 	atomic_t		inflight;
+	bool			active;
 	/* One thread per blkif ring. */
 	struct task_struct	*xenblkd;
 	unsigned int		waiting_reqs;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3cc6d1d..9b69fe4 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -159,7 +159,7 @@
 		init_waitqueue_head(&ring->shutdown_wq);
 		ring->blkif = blkif;
 		ring->st_print = jiffies;
-		xen_blkif_get(blkif);
+		ring->active = true;
 	}
 
 	return 0;
@@ -249,6 +249,9 @@
 		struct xen_blkif_ring *ring = &blkif->rings[r];
 		unsigned int i = 0;
 
+		if (!ring->active)
+			continue;
+
 		if (ring->xenblkd) {
 			kthread_stop(ring->xenblkd);
 			wake_up(&ring->shutdown_wq);
@@ -296,7 +299,7 @@
 		BUG_ON(ring->free_pages_num != 0);
 		BUG_ON(ring->persistent_gnt_c != 0);
 		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
-		xen_blkif_put(blkif);
+		ring->active = false;
 	}
 	blkif->nr_ring_pages = 0;
 	/*
diff --git a/drivers/bluetooth/btfm_slim.c b/drivers/bluetooth/btfm_slim.c
index dc9bb0b..f50bf6f 100644
--- a/drivers/bluetooth/btfm_slim.c
+++ b/drivers/bluetooth/btfm_slim.c
@@ -127,7 +127,7 @@
 	if (!btfmslim || !ch)
 		return -EINVAL;
 
-	BTFMSLIM_DBG("port:%d", ch->port);
+	BTFMSLIM_DBG("port: %d ch: %d", ch->port, ch->ch);
 
 	/* Define the channel with below parameters */
 	prop.prot = SLIM_AUTO_ISO;
diff --git a/drivers/bluetooth/btfm_slim.h b/drivers/bluetooth/btfm_slim.h
index 00d46a5..161be78 100644
--- a/drivers/bluetooth/btfm_slim.h
+++ b/drivers/bluetooth/btfm_slim.h
@@ -13,7 +13,7 @@
 #define BTFM_SLIM_H
 #include <linux/slimbus/slimbus.h>
 
-#define BTFMSLIM_DBG(fmt, arg...)  pr_debug("%s: " fmt "\n", __func__, ## arg)
+#define BTFMSLIM_DBG(fmt, arg...)  pr_debug(fmt "\n", ## arg)
 #define BTFMSLIM_INFO(fmt, arg...) pr_info("%s: " fmt "\n", __func__, ## arg)
 #define BTFMSLIM_ERR(fmt, arg...)  pr_err("%s: " fmt "\n", __func__, ## arg)
 
diff --git a/drivers/bluetooth/btfm_slim_codec.c b/drivers/bluetooth/btfm_slim_codec.c
index 86760cd..73a789c 100644
--- a/drivers/bluetooth/btfm_slim_codec.c
+++ b/drivers/bluetooth/btfm_slim_codec.c
@@ -54,8 +54,8 @@
 	int ret;
 	struct btfmslim *btfmslim = dai->dev->platform_data;
 
-	BTFMSLIM_DBG("substream = %s  stream = %d",
-		 substream->name, substream->stream);
+	BTFMSLIM_DBG("substream = %s  stream = %d dai name = %s",
+		 substream->name, substream->stream, dai->name);
 	ret = btfm_slim_hw_init(btfmslim);
 	return ret;
 }
@@ -65,8 +65,8 @@
 {
 	struct btfmslim *btfmslim = dai->dev->platform_data;
 
-	BTFMSLIM_DBG("substream = %s  stream = %d",
-		 substream->name, substream->stream);
+	BTFMSLIM_DBG("substream = %s  stream = %d dai name = %s",
+		 substream->name, substream->stream, dai->name);
 	btfm_slim_hw_deinit(btfmslim);
 }
 
@@ -74,7 +74,7 @@
 			    struct snd_pcm_hw_params *params,
 			    struct snd_soc_dai *dai)
 {
-	BTFMSLIM_DBG("dai_name = %s DAI-ID %x rate %d num_ch %d",
+	BTFMSLIM_DBG("dai name = %s DAI-ID %x rate %d num_ch %d",
 		dai->name, dai->id, params_rate(params),
 		params_channels(params));
 
@@ -89,7 +89,7 @@
 	struct btfmslim_ch *ch;
 	uint8_t rxport, grp = false, nchan = 1;
 
-	BTFMSLIM_DBG("dai->name:%s, dai->id: %d, dai->rate: %d", dai->name,
+	BTFMSLIM_DBG("dai name: %s, dai->id: %d, dai->rate: %d", dai->name,
 		dai->id, dai->rate);
 
 	switch (dai->id) {
@@ -137,7 +137,7 @@
 	struct btfmslim_ch *ch;
 	uint8_t rxport, grp = false, nchan = 1;
 
-	BTFMSLIM_DBG("dai->name:%s, dai->id: %d, dai->rate: %d", dai->name,
+	BTFMSLIM_DBG("dai name: %s, dai->id: %d, dai->rate: %d", dai->name,
 		dai->id, dai->rate);
 
 	switch (dai->id) {
@@ -387,7 +387,7 @@
 static struct snd_soc_codec_driver btfmslim_codec = {
 	.probe	= btfm_slim_codec_probe,
 	.remove	= btfm_slim_codec_remove,
-	.read		= btfm_slim_codec_read,
+	.read	= btfm_slim_codec_read,
 	.write	= btfm_slim_codec_write,
 };
 
diff --git a/drivers/bluetooth/btfm_slim_wcn3990.c b/drivers/bluetooth/btfm_slim_wcn3990.c
index c2d5b7b..72e28da 100644
--- a/drivers/bluetooth/btfm_slim_wcn3990.c
+++ b/drivers/bluetooth/btfm_slim_wcn3990.c
@@ -76,7 +76,7 @@
 	uint8_t reg_val = 0;
 	uint16_t reg;
 
-	BTFMSLIM_DBG("enable(%d)", enable);
+	BTFMSLIM_DBG("port(%d) enable(%d)", port_num, enable);
 	if (rxport) {
 		/* Port enable */
 		reg = CHRK_SB_PGD_PORT_RX_CFGN(port_num - 0x10);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3e1367a..49fb8e5 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -604,5 +604,12 @@
           applications DSP processor. Say M if you want to enable this
           module.
 
+config MSM_RDBG
+	tristate "QTI Remote debug driver"
+	help
+	  Implements a shared memory based transport mechanism that allows
+	  for a debugger running on a host PC to communicate with a remote
+	  stub running on peripheral subsystems such as the ADSP, MODEM etc.
+
 endmenu
 
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index b73165a..19c3c98 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -65,3 +65,4 @@
 ifdef CONFIG_COMPAT
 obj-$(CONFIG_MSM_ADSPRPC)       += adsprpc_compat.o
 endif
+obj-$(CONFIG_MSM_RDBG)		+= rdbg.o
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 9102df7..ce86c27 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -58,7 +58,8 @@
 #define BALIGN		128
 #define NUM_CHANNELS	4	/* adsp, mdsp, slpi, cdsp*/
 #define NUM_SESSIONS	9	/*8 compute, 1 cpz*/
-#define M_FDLIST	16
+#define M_FDLIST	(16)
+#define M_CRCLIST	(64)
 
 #define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
 
@@ -172,6 +173,7 @@
 	struct overlap *overs;
 	struct overlap **overps;
 	struct smq_msg msg;
+	uint32_t *crc;
 };
 
 struct fastrpc_ctx_lst {
@@ -681,7 +683,7 @@
 
 
 static int context_restore_interrupted(struct fastrpc_file *fl,
-				       struct fastrpc_ioctl_invoke_attrs *inv,
+				       struct fastrpc_ioctl_invoke_crc *inv,
 				       struct smq_invoke_ctx **po)
 {
 	int err = 0;
@@ -788,7 +790,7 @@
 static void context_free(struct smq_invoke_ctx *ctx);
 
 static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
-			 struct fastrpc_ioctl_invoke_attrs *invokefd,
+			 struct fastrpc_ioctl_invoke_crc *invokefd,
 			 struct smq_invoke_ctx **po)
 {
 	int err = 0, bufs, size = 0;
@@ -834,7 +836,7 @@
 		if (err)
 			goto bail;
 	}
-
+	ctx->crc = (uint32_t *)invokefd->crc;
 	ctx->sc = invoke->sc;
 	if (bufs) {
 		VERIFY(err, 0 == context_build_overlap(ctx));
@@ -993,6 +995,7 @@
 	int err = 0;
 	int mflags = 0;
 	uint64_t *fdlist;
+	uint32_t *crclist;
 
 	/* calculate size of the metadata */
 	rpra = 0;
@@ -1018,7 +1021,9 @@
 			goto bail;
 		ipage += 1;
 	}
-	metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST);
+	metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
+				 (sizeof(uint32_t) * M_CRCLIST);
+
 	/* calculate len requreed for copying */
 	for (oix = 0; oix < inbufs + outbufs; ++oix) {
 		int i = ctx->overps[oix]->raix;
@@ -1049,6 +1054,9 @@
 		if (err)
 			goto bail;
 	}
+	if (ctx->buf->virt && metalen <= copylen)
+		memset(ctx->buf->virt, 0, metalen);
+
 	/* copy metadata */
 	rpra = ctx->buf->virt;
 	ctx->rpra = rpra;
@@ -1112,6 +1120,8 @@
 	fdlist = (uint64_t *)&pages[bufs + handles];
 	for (i = 0; i < M_FDLIST; i++)
 		fdlist[i] = 0;
+	crclist = (uint32_t *)&fdlist[M_FDLIST];
+	memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
 
 	/* copy non ion buffers */
 	PERF(ctx->fl->profile, ctx->fl->perf.copy,
@@ -1191,6 +1201,8 @@
 	struct smq_phy_page *pages;
 	struct fastrpc_mmap *mmap;
 	uint64_t *fdlist;
+	uint32_t *crclist = NULL;
+
 	remote_arg64_t *rpra = ctx->rpra;
 	int i, inbufs, outbufs, handles;
 	int err = 0;
@@ -1201,6 +1213,8 @@
 	list = smq_invoke_buf_start(ctx->rpra, sc);
 	pages = smq_phy_page_start(sc, list);
 	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
+	crclist = (uint32_t *)(fdlist + M_FDLIST);
+
 	for (i = inbufs; i < inbufs + outbufs; ++i) {
 		if (!ctx->maps[i]) {
 			K_COPY_TO_USER(err, kernel,
@@ -1223,6 +1237,10 @@
 				fastrpc_mmap_free(mmap);
 		}
 	}
+	if (ctx->crc && crclist && rpra)
+		K_COPY_TO_USER(err, kernel, (void __user *)ctx->crc,
+			crclist, M_CRCLIST*sizeof(uint32_t));
+
  bail:
 	return err;
 }
@@ -1345,7 +1363,7 @@
 
 static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 				   uint32_t kernel,
-				   struct fastrpc_ioctl_invoke_attrs *inv)
+				   struct fastrpc_ioctl_invoke_crc *inv)
 {
 	struct smq_invoke_ctx *ctx = 0;
 	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
@@ -1436,7 +1454,7 @@
 				struct fastrpc_ioctl_init_attrs *uproc)
 {
 	int err = 0;
-	struct fastrpc_ioctl_invoke_attrs ioctl;
+	struct fastrpc_ioctl_invoke_crc ioctl;
 	struct fastrpc_ioctl_init *init = &uproc->init;
 	struct smq_phy_page pages[1];
 	struct fastrpc_mmap *file = 0, *mem = 0;
@@ -1455,6 +1473,7 @@
 		ioctl.inv.pra = ra;
 		ioctl.fds = 0;
 		ioctl.attrs = 0;
+		ioctl.crc = NULL;
 		fl->pd = 0;
 		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
 			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
@@ -1524,6 +1543,7 @@
 		ioctl.inv.pra = ra;
 		ioctl.fds = fds;
 		ioctl.attrs = 0;
+		ioctl.crc = NULL;
 		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
 			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
 		if (err)
@@ -1542,7 +1562,7 @@
 static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
 {
 	int err = 0;
-	struct fastrpc_ioctl_invoke_attrs ioctl;
+	struct fastrpc_ioctl_invoke_crc ioctl;
 	remote_arg_t ra[1];
 	int tgid = 0;
 
@@ -1560,6 +1580,7 @@
 	ioctl.inv.pra = ra;
 	ioctl.fds = 0;
 	ioctl.attrs = 0;
+	ioctl.crc = NULL;
 	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
 		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
 bail:
@@ -1569,7 +1590,7 @@
 static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
 			       struct fastrpc_mmap *map)
 {
-	struct fastrpc_ioctl_invoke_attrs ioctl;
+	struct fastrpc_ioctl_invoke_crc ioctl;
 	struct smq_phy_page page;
 	int num = 1;
 	remote_arg_t ra[3];
@@ -1606,6 +1627,7 @@
 	ioctl.inv.pra = ra;
 	ioctl.fds = 0;
 	ioctl.attrs = 0;
+	ioctl.crc = NULL;
 	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
 		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
 	map->raddr = (uintptr_t)routargs.vaddrout;
@@ -1616,7 +1638,7 @@
 static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
 				 struct fastrpc_mmap *map)
 {
-	struct fastrpc_ioctl_invoke_attrs ioctl;
+	struct fastrpc_ioctl_invoke_crc ioctl;
 	remote_arg_t ra[1];
 	int err = 0;
 	struct {
@@ -1639,6 +1661,7 @@
 	ioctl.inv.pra = ra;
 	ioctl.fds = 0;
 	ioctl.attrs = 0;
+	ioctl.crc = NULL;
 	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
 		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
 	return err;
@@ -2194,7 +2217,7 @@
 				 unsigned long ioctl_param)
 {
 	union {
-		struct fastrpc_ioctl_invoke_attrs inv;
+		struct fastrpc_ioctl_invoke_crc inv;
 		struct fastrpc_ioctl_mmap mmap;
 		struct fastrpc_ioctl_munmap munmap;
 		struct fastrpc_ioctl_init_attrs init;
@@ -2207,10 +2230,12 @@
 
 	p.inv.fds = 0;
 	p.inv.attrs = 0;
+	p.inv.crc = NULL;
 
 	switch (ioctl_num) {
 	case FASTRPC_IOCTL_INVOKE:
 		size = sizeof(struct fastrpc_ioctl_invoke);
+		/* fall through */
 	case FASTRPC_IOCTL_INVOKE_FD:
 		if (!size)
 			size = sizeof(struct fastrpc_ioctl_invoke_fd);
@@ -2218,6 +2243,10 @@
 	case FASTRPC_IOCTL_INVOKE_ATTRS:
 		if (!size)
 			size = sizeof(struct fastrpc_ioctl_invoke_attrs);
+		/* fall through */
+	case FASTRPC_IOCTL_INVOKE_CRC:
+		if (!size)
+			size = sizeof(struct fastrpc_ioctl_invoke_crc);
 		VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
 		if (err)
 			goto bail;
diff --git a/drivers/char/adsprpc_compat.c b/drivers/char/adsprpc_compat.c
index 8e72b4d..078b4d9 100644
--- a/drivers/char/adsprpc_compat.c
+++ b/drivers/char/adsprpc_compat.c
@@ -36,6 +36,8 @@
 		_IOWR('R', 9, struct compat_fastrpc_ioctl_perf)
 #define COMPAT_FASTRPC_IOCTL_INIT_ATTRS \
 		_IOWR('R', 10, struct compat_fastrpc_ioctl_init_attrs)
+#define COMPAT_FASTRPC_IOCTL_INVOKE_CRC \
+		_IOWR('R', 11, struct compat_fastrpc_ioctl_invoke_crc)
 
 struct compat_remote_buf {
 	compat_uptr_t pv;	/* buffer pointer */
@@ -64,6 +66,13 @@
 	compat_uptr_t attrs;	/* attribute list */
 };
 
+struct compat_fastrpc_ioctl_invoke_crc {
+	struct compat_fastrpc_ioctl_invoke inv;
+	compat_uptr_t fds;	/* fd list */
+	compat_uptr_t attrs;	/* attribute list */
+	compat_uptr_t crc;	/* crc list */
+};
+
 struct compat_fastrpc_ioctl_mmap {
 	compat_int_t fd;	/* ion fd */
 	compat_uint_t flags;	/* flags for dsp to map with */
@@ -100,14 +109,14 @@
 };
 
 static int compat_get_fastrpc_ioctl_invoke(
-			struct compat_fastrpc_ioctl_invoke_attrs __user *inv32,
-			struct fastrpc_ioctl_invoke_attrs __user **inva,
+			struct compat_fastrpc_ioctl_invoke_crc __user *inv32,
+			struct fastrpc_ioctl_invoke_crc __user **inva,
 			unsigned int cmd)
 {
 	compat_uint_t u, sc;
 	compat_ssize_t s;
 	compat_uptr_t p;
-	struct fastrpc_ioctl_invoke_attrs *inv;
+	struct fastrpc_ioctl_invoke_crc *inv;
 	union compat_remote_arg *pra32;
 	union remote_arg *pra;
 	int err, len, j;
@@ -146,10 +155,16 @@
 		err |= put_user(p, (compat_uptr_t *)&inv->fds);
 	}
 	err |= put_user(NULL, &inv->attrs);
-	if (cmd == COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS) {
+	if ((cmd == COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS) ||
+		(cmd == COMPAT_FASTRPC_IOCTL_INVOKE_CRC)) {
 		err |= get_user(p, &inv32->attrs);
 		err |= put_user(p, (compat_uptr_t *)&inv->attrs);
 	}
+	err |= put_user(NULL, (compat_uptr_t __user **)&inv->crc);
+	if (cmd == COMPAT_FASTRPC_IOCTL_INVOKE_CRC) {
+		err |= get_user(p, &inv32->crc);
+		err |= put_user(p, (compat_uptr_t __user *)&inv->crc);
+	}
 
 	*inva = inv;
 	return err;
@@ -273,9 +288,10 @@
 	case COMPAT_FASTRPC_IOCTL_INVOKE:
 	case COMPAT_FASTRPC_IOCTL_INVOKE_FD:
 	case COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS:
+	case COMPAT_FASTRPC_IOCTL_INVOKE_CRC:
 	{
-		struct compat_fastrpc_ioctl_invoke_attrs __user *inv32;
-		struct fastrpc_ioctl_invoke_attrs __user *inv;
+		struct compat_fastrpc_ioctl_invoke_crc __user *inv32;
+		struct fastrpc_ioctl_invoke_crc __user *inv;
 
 		inv32 = compat_ptr(arg);
 		VERIFY(err, 0 == compat_get_fastrpc_ioctl_invoke(inv32,
@@ -283,7 +299,7 @@
 		if (err)
 			return err;
 		return filp->f_op->unlocked_ioctl(filp,
-				FASTRPC_IOCTL_INVOKE_ATTRS, (unsigned long)inv);
+				FASTRPC_IOCTL_INVOKE_CRC, (unsigned long)inv);
 	}
 	case COMPAT_FASTRPC_IOCTL_MMAP:
 	{
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index 7175b9e..fb7afa3 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -27,6 +27,7 @@
 #define FASTRPC_IOCTL_GETINFO	_IOWR('R', 8, uint32_t)
 #define FASTRPC_IOCTL_GETPERF	_IOWR('R', 9, struct fastrpc_ioctl_perf)
 #define FASTRPC_IOCTL_INIT_ATTRS _IOWR('R', 10, struct fastrpc_ioctl_init_attrs)
+#define FASTRPC_IOCTL_INVOKE_CRC _IOWR('R', 11, struct fastrpc_ioctl_invoke_crc)
 
 #define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp"
 #define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp"
@@ -156,6 +157,13 @@
 	unsigned int *attrs;	/* attribute list */
 };
 
+struct fastrpc_ioctl_invoke_crc {
+	struct fastrpc_ioctl_invoke inv;
+	int *fds;		/* fd list */
+	unsigned int *attrs;	/* attribute list */
+	unsigned int *crc;	/* crc list */
+};
+
 struct fastrpc_ioctl_init {
 	uint32_t flags;		/* one of FASTRPC_INIT_* macros */
 	uintptr_t __user file;	/* pointer to elf file */
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 5a8ef04..119f5ac 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -848,7 +848,7 @@
 			 __func__, fwd_info->peripheral, fwd_info->type);
 		return 0;
 	}
-
+	mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
 	fwd_info->ch_open = 1;
 	diagfwd_buffers_init(fwd_info);
 	diagfwd_write_buffers_init(fwd_info);
@@ -866,7 +866,7 @@
 		if (fwd_info->p_ops && fwd_info->p_ops->open)
 			fwd_info->p_ops->open(fwd_info->ctxt);
 	}
-
+	mutex_unlock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
 	return 0;
 }
 
@@ -877,6 +877,7 @@
 	if (!fwd_info)
 		return -EIO;
 
+	mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
 	fwd_info->ch_open = 0;
 	if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close)
 		fwd_info->c_ops->close(fwd_info);
@@ -892,7 +893,7 @@
 	}
 	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered closed\n",
 		 fwd_info->peripheral, fwd_info->type);
-
+	mutex_unlock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
 	return 0;
 }
 
diff --git a/drivers/char/rdbg.c b/drivers/char/rdbg.c
new file mode 100644
index 0000000..92d9399
--- /dev/null
+++ b/drivers/char/rdbg.c
@@ -0,0 +1,1167 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <soc/qcom/smem.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+
+#define SMP2P_NUM_PROCS				8
+#define MAX_RETRIES				20
+
+#define SM_VERSION				1
+#define SM_BLOCKSIZE				128
+
+#define SMQ_MAGIC_INIT				0xFF00FF00
+#define SMQ_MAGIC_PRODUCER			(SMQ_MAGIC_INIT | 0x1)
+#define SMQ_MAGIC_CONSUMER			(SMQ_MAGIC_INIT | 0x2)
+
+enum SMQ_STATUS {
+	SMQ_SUCCESS    =  0,
+	SMQ_ENOMEMORY  = -1,
+	SMQ_EBADPARM   = -2,
+	SMQ_UNDERFLOW  = -3,
+	SMQ_OVERFLOW   = -4
+};
+
+enum smq_type {
+	PRODUCER = 1,
+	CONSUMER = 2,
+	INVALID  = 3
+};
+
+struct smq_block_map {
+	uint32_t index_read;
+	uint32_t num_blocks;
+	uint8_t *map;
+};
+
+struct smq_node {
+	uint16_t index_block;
+	uint16_t num_blocks;
+} __attribute__ ((__packed__));
+
+struct smq_hdr {
+	uint8_t producer_version;
+	uint8_t consumer_version;
+} __attribute__ ((__packed__));
+
+struct smq_out_state {
+	uint32_t init;
+	uint32_t index_check_queue_for_reset;
+	uint32_t index_sent_write;
+	uint32_t index_free_read;
+} __attribute__ ((__packed__));
+
+struct smq_out {
+	struct smq_out_state s;
+	struct smq_node sent[1];
+};
+
+struct smq_in_state {
+	uint32_t init;
+	uint32_t index_check_queue_for_reset_ack;
+	uint32_t index_sent_read;
+	uint32_t index_free_write;
+} __attribute__ ((__packed__));
+
+struct smq_in {
+	struct smq_in_state s;
+	struct smq_node free[1];
+};
+
+struct smq {
+	struct smq_hdr *hdr;
+	struct smq_out *out;
+	struct smq_in *in;
+	uint8_t *blocks;
+	uint32_t num_blocks;
+	struct mutex *lock;
+	uint32_t initialized;
+	struct smq_block_map block_map;
+	enum smq_type type;
+};
+
+struct gpio_info {
+	int gpio_base_id;
+	int irq_base_id;
+};
+
+struct rdbg_data {
+	struct device *device;
+	struct completion work;
+	struct gpio_info in;
+	struct gpio_info out;
+	bool   device_initialized;
+	int    gpio_out_offset;
+	bool   device_opened;
+	void   *smem_addr;
+	size_t smem_size;
+	struct smq    producer_smrb;
+	struct smq    consumer_smrb;
+	struct mutex  write_mutex;
+};
+
+struct rdbg_device {
+	struct cdev cdev;
+	struct class *class;
+	dev_t dev_no;
+	int num_devices;
+	struct rdbg_data *rdbg_data;
+};
+
+static struct rdbg_device g_rdbg_instance = {
+	{ {0} },
+	NULL,
+	0,
+	SMP2P_NUM_PROCS,
+	NULL
+};
+
+struct processor_specific_info {
+	char *name;
+	unsigned int smem_buffer_addr;
+	size_t smem_buffer_size;
+};
+
+static struct processor_specific_info proc_info[SMP2P_NUM_PROCS] = {
+		{0},	/*APPS*/
+		{"rdbg_modem", 0, 0},	/*MODEM*/
+		{"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024},	/*ADSP*/
+		{0},	/*SMP2P_RESERVED_PROC_1*/
+		{"rdbg_wcnss", 0, 0},		/*WCNSS*/
+		{0},	/*SMP2P_RESERVED_PROC_2*/
+		{0},	/*SMP2P_POWER_PROC*/
+		{0}		/*SMP2P_REMOTE_MOCK_PROC*/
+};
+
+static int smq_blockmap_get(struct smq_block_map *block_map,
+	uint32_t *block_index, uint32_t n)
+{
+	uint32_t start;
+	uint32_t mark = 0;
+	uint32_t found = 0;
+	uint32_t i = 0;
+
+	start = block_map->index_read;
+
+	if (n == 1) {
+		do {
+			if (!block_map->map[block_map->index_read]) {
+				*block_index = block_map->index_read;
+				block_map->map[block_map->index_read] = 1;
+				block_map->index_read++;
+				block_map->index_read %= block_map->num_blocks;
+				return SMQ_SUCCESS;
+			}
+			block_map->index_read++;
+		} while (start != (block_map->index_read %=
+			block_map->num_blocks));
+	} else {
+		mark = block_map->num_blocks;
+
+		do {
+			if (!block_map->map[block_map->index_read]) {
+				if (mark > block_map->index_read) {
+					mark = block_map->index_read;
+					start = block_map->index_read;
+					found = 0;
+				}
+
+				found++;
+				if (found == n) {
+					*block_index = mark;
+					for (i = 0; i < n; i++)
+						block_map->map[mark + i] =
+							(uint8_t)(n - i);
+					block_map->index_read += block_map->map
+						[block_map->index_read] - 1;
+					return SMQ_SUCCESS;
+				}
+			} else {
+				found = 0;
+				block_map->index_read += block_map->map
+					[block_map->index_read] - 1;
+				mark = block_map->num_blocks;
+			}
+			block_map->index_read++;
+		} while (start != (block_map->index_read %=
+			block_map->num_blocks));
+	}
+
+	return SMQ_ENOMEMORY;
+}
+
+static void smq_blockmap_put(struct smq_block_map *block_map, uint32_t i)
+{
+	uint32_t num_blocks = block_map->map[i];
+
+	while (num_blocks--) {
+		block_map->map[i] = 0;
+		i++;
+	}
+}
+
+static int smq_blockmap_reset(struct smq_block_map *block_map)
+{
+	if (!block_map->map)
+		return SMQ_ENOMEMORY;
+	memset(block_map->map, 0, block_map->num_blocks + 1);
+	block_map->index_read = 0;
+
+	return SMQ_SUCCESS;
+}
+
+static int smq_blockmap_ctor(struct smq_block_map *block_map,
+	uint32_t num_blocks)
+{
+	if (num_blocks <= 1)
+		return SMQ_ENOMEMORY;
+
+	block_map->map = kcalloc(num_blocks, sizeof(uint8_t), GFP_KERNEL);
+	if (!block_map->map)
+		return SMQ_ENOMEMORY;
+
+	block_map->num_blocks = num_blocks - 1;
+	smq_blockmap_reset(block_map);
+
+	return SMQ_SUCCESS;
+}
+
+static void smq_blockmap_dtor(struct smq_block_map *block_map)
+{
+	kfree(block_map->map);
+	block_map->map = NULL;
+}
+
+static int smq_free(struct smq *smq, void *data)
+{
+	struct smq_node node;
+	uint32_t index_block;
+	int err = SMQ_SUCCESS;
+
+	if (smq->lock)
+		mutex_lock(smq->lock);
+
+	if ((smq->hdr->producer_version != SM_VERSION) &&
+		(smq->out->s.init != SMQ_MAGIC_PRODUCER)) {
+		err = SMQ_UNDERFLOW;
+		goto bail;
+	}
+
+	index_block = ((uint8_t *)data - smq->blocks) / SM_BLOCKSIZE;
+	if (index_block >= smq->num_blocks) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	node.index_block = (uint16_t)index_block;
+	node.num_blocks = 0;
+	*((struct smq_node *)(smq->in->free + smq->in->
+		s.index_free_write)) = node;
+
+	smq->in->s.index_free_write = (smq->in->s.index_free_write + 1)
+		% smq->num_blocks;
+
+bail:
+	if (smq->lock)
+		mutex_unlock(smq->lock);
+	return err;
+}
+
+static int smq_receive(struct smq *smq, void **pp, int *pnsize, int *pbmore)
+{
+	struct smq_node *node;
+	int err = SMQ_SUCCESS;
+	int more = 0;
+
+	if ((smq->hdr->producer_version != SM_VERSION) &&
+		(smq->out->s.init != SMQ_MAGIC_PRODUCER))
+		return SMQ_UNDERFLOW;
+
+	if (smq->in->s.index_sent_read == smq->out->s.index_sent_write) {
+		err = SMQ_UNDERFLOW;
+		goto bail;
+	}
+
+	node = (struct smq_node *)(smq->out->sent + smq->in->s.index_sent_read);
+	if (node->index_block >= smq->num_blocks) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	smq->in->s.index_sent_read = (smq->in->s.index_sent_read + 1)
+		% smq->num_blocks;
+
+	*pp = smq->blocks + (node->index_block * SM_BLOCKSIZE);
+	*pnsize = SM_BLOCKSIZE * node->num_blocks;
+
+	/*
+	 * Ensure that the reads and writes are updated in the memory
+	 * when they are done and not cached. Also, ensure that the reads
+	 * and writes are not reordered as they are shared between two cores.
+	 */
+	rmb();
+	if (smq->in->s.index_sent_read != smq->out->s.index_sent_write)
+		more = 1;
+
+bail:
+	*pbmore = more;
+	return err;
+}
+
+static int smq_alloc_send(struct smq *smq, const uint8_t *pcb, int nsize)
+{
+	void *pv = 0;
+	int num_blocks;
+	uint32_t index_block = 0;
+	int err = SMQ_SUCCESS;
+	struct smq_node *node = NULL;
+
+	mutex_lock(smq->lock);
+
+	if ((smq->in->s.init == SMQ_MAGIC_CONSUMER) &&
+	 (smq->hdr->consumer_version == SM_VERSION)) {
+		if (smq->out->s.index_check_queue_for_reset ==
+			smq->in->s.index_check_queue_for_reset_ack) {
+			while (smq->out->s.index_free_read !=
+				smq->in->s.index_free_write) {
+				node = (struct smq_node *)(
+					smq->in->free +
+					smq->out->s.index_free_read);
+				if (node->index_block >= smq->num_blocks) {
+					err = SMQ_EBADPARM;
+					goto bail;
+				}
+
+				smq->out->s.index_free_read =
+					(smq->out->s.index_free_read + 1)
+						% smq->num_blocks;
+
+				smq_blockmap_put(&smq->block_map,
+					node->index_block);
+				/*
+				 * Ensure that the reads and writes are
+				 * updated in the memory when they are done
+				 * and not cached. Also, ensure that the reads
+				 * and writes are not reordered as they are
+				 * shared between two cores.
+				 */
+				rmb();
+			}
+		}
+	}
+
+	num_blocks = ALIGN(nsize, SM_BLOCKSIZE)/SM_BLOCKSIZE;
+	err = smq_blockmap_get(&smq->block_map, &index_block, num_blocks);
+	if (err != SMQ_SUCCESS)
+		goto bail;
+
+	pv = smq->blocks + (SM_BLOCKSIZE * index_block);
+
+	err = copy_from_user((void *)pv, (void *)pcb, nsize);
+	if (err != 0)
+		goto bail;
+
+	((struct smq_node *)(smq->out->sent +
+		smq->out->s.index_sent_write))->index_block
+			= (uint16_t)index_block;
+	((struct smq_node *)(smq->out->sent +
+		smq->out->s.index_sent_write))->num_blocks
+			= (uint16_t)num_blocks;
+
+	smq->out->s.index_sent_write = (smq->out->s.index_sent_write + 1)
+		% smq->num_blocks;
+
+bail:
+	if (err != SMQ_SUCCESS) {
+		if (pv)
+			smq_blockmap_put(&smq->block_map, index_block);
+	}
+	mutex_unlock(smq->lock);
+	return err;
+}
+
+static int smq_reset_producer_queue_internal(struct smq *smq,
+	uint32_t reset_num)
+{
+	int retval = 0;
+	uint32_t i;
+
+	if (smq->type != PRODUCER)
+		goto bail;
+
+	mutex_lock(smq->lock);
+	if (smq->out->s.index_check_queue_for_reset != reset_num) {
+		smq->out->s.index_check_queue_for_reset = reset_num;
+		for (i = 0; i < smq->num_blocks; i++)
+			(smq->out->sent + i)->index_block = 0xFFFF;
+
+		smq_blockmap_reset(&smq->block_map);
+		smq->out->s.index_sent_write = 0;
+		smq->out->s.index_free_read = 0;
+		retval = 1;
+	}
+	mutex_unlock(smq->lock);
+
+bail:
+	return retval;
+}
+
+static int smq_check_queue_reset(struct smq *p_cons, struct smq *p_prod)
+{
+	int retval = 0;
+	uint32_t reset_num, i;
+
+	if ((p_cons->type != CONSUMER) ||
+		(p_cons->out->s.init != SMQ_MAGIC_PRODUCER) ||
+		(p_cons->hdr->producer_version != SM_VERSION))
+		goto bail;
+
+	reset_num = p_cons->out->s.index_check_queue_for_reset;
+	if (p_cons->in->s.index_check_queue_for_reset_ack != reset_num) {
+		p_cons->in->s.index_check_queue_for_reset_ack = reset_num;
+		for (i = 0; i < p_cons->num_blocks; i++)
+			(p_cons->in->free + i)->index_block = 0xFFFF;
+
+		p_cons->in->s.index_sent_read = 0;
+		p_cons->in->s.index_free_write = 0;
+
+		retval = smq_reset_producer_queue_internal(p_prod, reset_num);
+	}
+
+bail:
+	return retval;
+}
+
+static int check_subsystem_debug_enabled(void *base_addr, int size)
+{
+	int num_blocks;
+	uint8_t *pb_orig;
+	uint8_t *pb;
+	struct smq smq;
+	int err = 0;
+
+	pb = pb_orig = (uint8_t *)base_addr;
+	pb += sizeof(struct smq_hdr);
+	pb = PTR_ALIGN(pb, 8);
+	size -= pb - (uint8_t *)pb_orig;
+	num_blocks = (int)((size - sizeof(struct smq_out_state) -
+		sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
+		sizeof(struct smq_node) * 2));
+	if (num_blocks <= 0) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	pb += num_blocks * SM_BLOCKSIZE;
+	smq.out = (struct smq_out *)pb;
+	pb += sizeof(struct smq_out_state) + (num_blocks *
+		sizeof(struct smq_node));
+	smq.in = (struct smq_in *)pb;
+
+	if (smq.in->s.init != SMQ_MAGIC_CONSUMER) {
+		pr_err("%s, smq in consumer not initialized", __func__);
+		err = -ECOMM;
+	}
+
+bail:
+	return err;
+}
+
+static void smq_dtor(struct smq *smq)
+{
+	if (smq->initialized == SMQ_MAGIC_INIT) {
+		switch (smq->type) {
+		case PRODUCER:
+			smq->out->s.init = 0;
+			smq_blockmap_dtor(&smq->block_map);
+			break;
+		case CONSUMER:
+			smq->in->s.init = 0;
+			break;
+		default:
+		case INVALID:
+			break;
+		}
+
+		smq->initialized = 0;
+	}
+}
+
+/*
+ * The shared memory is used as a circular ring buffer in each direction.
+ * Thus we have a bi-directional shared memory channel between the AP
+ * and a subsystem. We call this SMQ. Each memory channel contains a header,
+ * data and a control mechanism that is used to synchronize read and write
+ * of data between the AP and the remote subsystem.
+ *
+ * Overall SMQ memory view:
+ *
+ *    +------------------------------------------------+
+ *    | SMEM buffer                                    |
+ *    |-----------------------+------------------------|
+ *    |Producer: LA           | Producer: Remote       |
+ *    |Consumer: Remote       |           subsystem    |
+ *    |          subsystem    | Consumer: LA           |
+ *    |                       |                        |
+ *    |               Producer|                Consumer|
+ *    +-----------------------+------------------------+
+ *    |                       |
+ *    |                       |
+ *    |                       +--------------------------------------+
+ *    |                                                              |
+ *    |                                                              |
+ *    v                                                              v
+ *    +--------------------------------------------------------------+
+ *    |   Header  |       Data      |            Control             |
+ *    +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+ *    |           | b | b | b |     | S  |n |n |     | S |n |n |     |
+ *    |  Producer | l | l | l |     | M  |o |o |     | M |o |o |     |
+ *    |    Ver    | o | o | o |     | Q  |d |d |     | Q |d |d |     |
+ *    |-----------| c | c | c | ... |    |e |e | ... |   |e |e | ... |
+ *    |           | k | k | k |     | O  |  |  |     | I |  |  |     |
+ *    |  Consumer |   |   |   |     | u  |0 |1 |     | n |0 |1 |     |
+ *    |    Ver    | 0 | 1 | 2 |     | t  |  |  |     |   |  |  |     |
+ *    +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+ *                                       |           |
+ *                                       +           |
+ *                                                   |
+ *                          +------------------------+
+ *                          |
+ *                          v
+ *                        +----+----+----+----+
+ *                        | SMQ Nodes         |
+ *                        |----|----|----|----|
+ *                 Node # |  0 |  1 |  2 | ...|
+ *                        |----|----|----|----|
+ * Starting Block Index # |  0 |  3 |  8 | ...|
+ *                        |----|----|----|----|
+ *            # of blocks |  3 |  5 |  1 | ...|
+ *                        +----+----+----+----+
+ *
+ * Header: Contains version numbers for software compatibility to ensure
+ * that both producers and consumers on the AP and subsystems know how to
+ * read from and write to the queue.
+ * Both the producer and consumer versions are 1.
+ *     +---------+-------------------+
+ *     | Size    | Field             |
+ *     +---------+-------------------+
+ *     | 1 byte  | Producer Version  |
+ *     +---------+-------------------+
+ *     | 1 byte  | Consumer Version  |
+ *     +---------+-------------------+
+ *
+ * Data: The data portion contains multiple blocks [0..N] of a fixed size.
+ * The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1.
+ * Payload sent from the debug agent app is split (if necessary) and placed
+ * in these blocks. The first data block is placed at the next 8 byte aligned
+ * address after the header.
+ *
+ * The number of blocks for a given SMEM allocation is derived as follows:
+ *   Number of Blocks = (Total Size - Alignment - Header Size - SMQOut State
+ *		Size - SMQIn State Size) / (SM_BLOCKSIZE + 2 * SMQ Node Size)
+ *
+ * The producer maintains a private block map over these blocks to
+ * determine which blocks in the queue are in use and which are free.
+ *
+ * Control:
+ * The control portion contains a list of nodes [0..N], where N is the number
+ * of available data blocks. Each node identifies the data
+ * block indexes that contain a particular debug message to be transferred,
+ * and the number of blocks it took to hold the contents of the message.
+ *
+ * Each node has the following structure:
+ *     +---------+----------------------+
+ *     | Size    | Field                |
+ *     +---------+----------------------+
+ *     | 2 bytes | Starting Block Index |
+ *     +---------+----------------------+
+ *     | 2 bytes | Number of Blocks     |
+ *     +---------+----------------------+
+ *
+ * The producer and the consumer update different parts of the control channel
+ * (SMQOut / SMQIn) respectively. Each of these control data structures contains
+ * information about the last node that was written / read, and the actual nodes
+ * that were written/read.
+ *
+ * SMQOut Structure (R/W by producer, R by consumer):
+ *     +---------+-------------------+
+ *     | Size    | Field             |
+ *     +---------+-------------------+
+ *     | 4 bytes | Magic Init Number |
+ *     +---------+-------------------+
+ *     | 4 bytes | Reset             |
+ *     +---------+-------------------+
+ *     | 4 bytes | Last Sent Index   |
+ *     +---------+-------------------+
+ *     | 4 bytes | Index Free Read   |
+ *     +---------+-------------------+
+ *
+ * SMQIn Structure (R/W by consumer, R by producer):
+ *     +---------+-------------------+
+ *     | Size    | Field             |
+ *     +---------+-------------------+
+ *     | 4 bytes | Magic Init Number |
+ *     +---------+-------------------+
+ *     | 4 bytes | Reset ACK         |
+ *     +---------+-------------------+
+ *     | 4 bytes | Last Read Index   |
+ *     +---------+-------------------+
+ *     | 4 bytes | Index Free Write  |
+ *     +---------+-------------------+
+ *
+ * Magic Init Number:
+ * Both SMQ Out and SMQ In initialize this field with a predefined magic
+ * number to make sure that both the consumer and the producer have fully
+ * initialized and have valid data in the shared memory control area.
+ *	Producer Magic #: 0xFF00FF01
+ *	Consumer Magic #: 0xFF00FF02
+ */
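+/*
+ * Worked sizing example (illustrative only, assuming the 16 KB ADSP SMEM
+ * buffer from proc_info above and an 8-byte aligned base): initialize_smq()
+ * splits the buffer into two 8192-byte halves, one per direction. The 2-byte
+ * header is padded up to 8 bytes, leaving 8184 bytes, so smq_ctor() below
+ * derives:
+ *
+ *   num_blocks = (8184 - 16 - 16) / (128 + 2 * 4) = 8152 / 136 = 59
+ *
+ * i.e. 59 data blocks (7552 bytes of payload) plus 59 sent nodes and 59 free
+ * nodes per direction. Exact figures depend on the actual SMEM allocation.
+ */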
+static int smq_ctor(struct smq *smq, void *base_addr, int size,
+	enum smq_type type, struct mutex *lock_ptr)
+{
+	int num_blocks;
+	uint8_t *pb_orig;
+	uint8_t *pb;
+	uint32_t i;
+	int err;
+
+	if (smq->initialized == SMQ_MAGIC_INIT) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	if (!base_addr || !size) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	if (type == PRODUCER)
+		smq->lock = lock_ptr;
+
+	pb_orig = (uint8_t *)base_addr;
+	smq->hdr = (struct smq_hdr *)pb_orig;
+	pb = pb_orig;
+	pb += sizeof(struct smq_hdr);
+	pb = PTR_ALIGN(pb, 8);
+	size -= pb - (uint8_t *)pb_orig;
+	num_blocks = (int)((size - sizeof(struct smq_out_state) -
+		sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
+		sizeof(struct smq_node) * 2));
+	if (num_blocks <= 0) {
+		err = SMQ_ENOMEMORY;
+		goto bail;
+	}
+
+	smq->blocks = pb;
+	smq->num_blocks = num_blocks;
+	pb += num_blocks * SM_BLOCKSIZE;
+	smq->out = (struct smq_out *)pb;
+	pb += sizeof(struct smq_out_state) + (num_blocks *
+		sizeof(struct smq_node));
+	smq->in = (struct smq_in *)pb;
+	smq->type = type;
+	if (type == PRODUCER) {
+		smq->hdr->producer_version = SM_VERSION;
+		for (i = 0; i < smq->num_blocks; i++)
+			(smq->out->sent + i)->index_block = 0xFFFF;
+
+		err = smq_blockmap_ctor(&smq->block_map, smq->num_blocks);
+		if (err != SMQ_SUCCESS)
+			goto bail;
+
+		smq->out->s.index_sent_write = 0;
+		smq->out->s.index_free_read = 0;
+		if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
+			smq->out->s.index_check_queue_for_reset += 1;
+		} else {
+			smq->out->s.index_check_queue_for_reset = 1;
+			smq->out->s.init = SMQ_MAGIC_PRODUCER;
+		}
+	} else {
+		smq->hdr->consumer_version = SM_VERSION;
+		for (i = 0; i < smq->num_blocks; i++)
+			(smq->in->free + i)->index_block = 0xFFFF;
+
+		smq->in->s.index_sent_read = 0;
+		smq->in->s.index_free_write = 0;
+		if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
+			smq->in->s.index_check_queue_for_reset_ack =
+				smq->out->s.index_check_queue_for_reset;
+		} else {
+			smq->in->s.index_check_queue_for_reset_ack = 0;
+		}
+
+		smq->in->s.init = SMQ_MAGIC_CONSUMER;
+	}
+	smq->initialized = SMQ_MAGIC_INIT;
+	err = SMQ_SUCCESS;
+
+bail:
+	return err;
+}
+
+static void send_interrupt_to_subsystem(struct rdbg_data *rdbgdata)
+{
+	int offset = rdbgdata->gpio_out_offset;
+	int val = 1 ^ gpio_get_value(rdbgdata->out.gpio_base_id + offset);
+
+	gpio_set_value(rdbgdata->out.gpio_base_id + offset, val);
+	rdbgdata->gpio_out_offset = (offset + 1) % 32;
+
+	dev_dbg(rdbgdata->device, "%s: sent interrupt %d to subsystem",
+		__func__, val);
+}
+
+static irqreturn_t on_interrupt_from(int irq, void *ptr)
+{
+	struct rdbg_data *rdbgdata = (struct rdbg_data *) ptr;
+
+	dev_dbg(rdbgdata->device, "%s: Received interrupt %d from subsystem",
+		__func__, irq);
+
+	complete(&(rdbgdata->work));
+	return IRQ_HANDLED;
+}
+
+static int initialize_smq(struct rdbg_data *rdbgdata)
+{
+	int err = 0;
+	unsigned char *smem_consumer_buffer = rdbgdata->smem_addr;
+
+	smem_consumer_buffer += (rdbgdata->smem_size/2);
+
+	if (smq_ctor(&(rdbgdata->producer_smrb), (void *)(rdbgdata->smem_addr),
+		((rdbgdata->smem_size)/2), PRODUCER, &rdbgdata->write_mutex)) {
+		dev_err(rdbgdata->device, "%s: smq producer allocation failed",
+			__func__);
+		err = -ENOMEM;
+		goto bail;
+	}
+
+	if (smq_ctor(&(rdbgdata->consumer_smrb), (void *)smem_consumer_buffer,
+		((rdbgdata->smem_size)/2), CONSUMER, NULL)) {
+		dev_err(rdbgdata->device, "%s: smq consumer allocation failed",
+			__func__);
+		err = -ENOMEM;
+	}
+
+bail:
+	return err;
+
+}
+
+static int rdbg_open(struct inode *inode, struct file *filp)
+{
+	int device_id = -1;
+	struct rdbg_device *device = &g_rdbg_instance;
+	struct rdbg_data *rdbgdata = NULL;
+	int err = 0;
+
+	if (!inode || !device->rdbg_data) {
+		pr_err("Memory not allocated yet");
+		err = -ENODEV;
+		goto bail;
+	}
+
+	device_id = MINOR(inode->i_rdev);
+	rdbgdata = &device->rdbg_data[device_id];
+
+	if (rdbgdata->device_opened) {
+		dev_err(rdbgdata->device, "%s: Device already opened",
+			__func__);
+		err = -EEXIST;
+		goto bail;
+	}
+
+	rdbgdata->smem_size = proc_info[device_id].smem_buffer_size;
+	if (!rdbgdata->smem_size) {
+		dev_err(rdbgdata->device, "%s: smem not initialized", __func__);
+		err = -ENOMEM;
+		goto bail;
+	}
+
+	rdbgdata->smem_addr = smem_find(proc_info[device_id].smem_buffer_addr,
+		rdbgdata->smem_size, 0, SMEM_ANY_HOST_FLAG);
+	if (!rdbgdata->smem_addr) {
+		dev_err(rdbgdata->device, "%s: Could not allocate smem memory",
+			__func__);
+		err = -ENOMEM;
+		goto bail;
+	}
+	dev_dbg(rdbgdata->device, "%s: SMEM address=0x%lx smem_size=%d",
+		__func__, (unsigned long)rdbgdata->smem_addr,
+		(unsigned int)rdbgdata->smem_size);
+
+	if (check_subsystem_debug_enabled(rdbgdata->smem_addr,
+		rdbgdata->smem_size/2)) {
+		dev_err(rdbgdata->device, "%s: Subsystem %s is not debug enabled",
+			__func__, proc_info[device_id].name);
+		err = -ECOMM;
+		goto bail;
+	}
+
+	init_completion(&rdbgdata->work);
+
+	err = request_irq(rdbgdata->in.irq_base_id, on_interrupt_from,
+			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+			proc_info[device_id].name,
+			(void *)&device->rdbg_data[device_id]);
+	if (err) {
+		dev_err(rdbgdata->device,
+			"%s: Failed to register interrupt. Err=%d, irqid=%d.",
+			__func__, err, rdbgdata->in.irq_base_id);
+		goto irq_bail;
+	}
+
+	err = enable_irq_wake(rdbgdata->in.irq_base_id);
+	if (err < 0) {
+		dev_dbg(rdbgdata->device, "enable_irq_wake() failed with err=%d",
+			err);
+		err = 0;
+	}
+
+	mutex_init(&rdbgdata->write_mutex);
+
+	err = initialize_smq(rdbgdata);
+	if (err) {
+		dev_err(rdbgdata->device, "Error initializing smq. Err=%d",
+			err);
+		goto smq_bail;
+	}
+
+	rdbgdata->device_opened = 1;
+
+	filp->private_data = (void *)rdbgdata;
+
+	return 0;
+
+smq_bail:
+	smq_dtor(&(rdbgdata->producer_smrb));
+	smq_dtor(&(rdbgdata->consumer_smrb));
+	mutex_destroy(&rdbgdata->write_mutex);
+irq_bail:
+	free_irq(rdbgdata->in.irq_base_id, (void *)
+		&device->rdbg_data[device_id]);
+bail:
+	return err;
+}
+
+static int rdbg_release(struct inode *inode, struct file *filp)
+{
+	int device_id = -1;
+	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+	struct rdbg_data *rdbgdata = NULL;
+	int err = 0;
+
+	if (!inode || !rdbgdevice->rdbg_data) {
+		pr_err("Memory not allocated yet");
+		err = -ENODEV;
+		goto bail;
+	}
+
+	device_id = MINOR(inode->i_rdev);
+	rdbgdata = &rdbgdevice->rdbg_data[device_id];
+
+	if (rdbgdata->device_opened == 1) {
+		dev_dbg(rdbgdata->device, "%s: Destroying %s.", __func__,
+			proc_info[device_id].name);
+		rdbgdata->device_opened = 0;
+		complete(&(rdbgdata->work));
+		free_irq(rdbgdata->in.irq_base_id, (void *)
+			&rdbgdevice->rdbg_data[device_id]);
+		if (rdbgdevice->rdbg_data[device_id].producer_smrb.initialized)
+			smq_dtor(&(rdbgdevice->rdbg_data[device_id].
+				producer_smrb));
+		if (rdbgdevice->rdbg_data[device_id].consumer_smrb.initialized)
+			smq_dtor(&(rdbgdevice->rdbg_data[device_id].
+				consumer_smrb));
+		mutex_destroy(&rdbgdata->write_mutex);
+	}
+
+	filp->private_data = NULL;
+
+bail:
+	return err;
+}
+
+static ssize_t rdbg_read(struct file *filp, char __user *buf, size_t size,
+	loff_t *offset)
+{
+	int err = 0;
+	struct rdbg_data *rdbgdata = filp->private_data;
+	void *p_sent_buffer = NULL;
+	int nsize = 0;
+	int more = 0;
+
+	if (!rdbgdata) {
+		pr_err("Invalid argument");
+		err = -EINVAL;
+		goto bail;
+	}
+
+	dev_dbg(rdbgdata->device, "%s: In receive", __func__);
+	err = wait_for_completion_interruptible(&(rdbgdata->work));
+	if (err) {
+		dev_err(rdbgdata->device, "%s: Error in wait", __func__);
+		goto bail;
+	}
+
+	smq_check_queue_reset(&(rdbgdata->consumer_smrb),
+		&(rdbgdata->producer_smrb));
+	if (smq_receive(&(rdbgdata->consumer_smrb), &p_sent_buffer,
+			&nsize, &more) != SMQ_SUCCESS) {
+		dev_err(rdbgdata->device, "%s: Error in smq_receive(). Err code = %d",
+			__func__, err);
+		err = -ENODATA;
+		goto bail;
+	}
+
+	size = ((size < nsize) ? size : nsize);
+	err = copy_to_user(buf, p_sent_buffer, size);
+	if (err != 0) {
+		dev_err(rdbgdata->device, "%s: Error in copy_to_user(). Err code = %d",
+			__func__, err);
+		err = -ENODATA;
+		goto bail;
+	}
+
+	smq_free(&(rdbgdata->consumer_smrb), p_sent_buffer);
+	err = size;
+	dev_dbg(rdbgdata->device, "%s: Read data to buffer with address 0x%lx",
+		__func__, (unsigned long) buf);
+
+bail:
+	return err;
+}
+
+static ssize_t rdbg_write(struct file *filp, const char __user *buf,
+	size_t size, loff_t *offset)
+{
+	int err = 0;
+	int num_retries = 0;
+	struct rdbg_data *rdbgdata = filp->private_data;
+
+	if (!rdbgdata) {
+		pr_err("Invalid argument");
+		err = -EINVAL;
+		goto bail;
+	}
+
+	do {
+		err = smq_alloc_send(&(rdbgdata->producer_smrb), buf, size);
+		dev_dbg(rdbgdata->device, "%s, smq_alloc_send returned %d.",
+			__func__, err);
+	} while (err != 0 && num_retries++ < MAX_RETRIES);
+
+	if (err != 0) {
+		err = -ECOMM;
+		goto bail;
+	}
+
+	send_interrupt_to_subsystem(rdbgdata);
+
+	err = size;
+
+bail:
+	return err;
+}
+
+
+static const struct file_operations rdbg_fops = {
+	.open = rdbg_open,
+	.read =  rdbg_read,
+	.write =  rdbg_write,
+	.release = rdbg_release,
+};
+
+static int register_smp2p(char *node_name, struct gpio_info *gpio_info_ptr)
+{
+	struct device_node *node = NULL;
+	int cnt = 0;
+	int id = 0;
+
+	node = of_find_compatible_node(NULL, NULL, node_name);
+	if (node) {
+		cnt = of_gpio_count(node);
+		if (cnt && gpio_info_ptr) {
+			id = of_get_gpio(node, 0);
+			gpio_info_ptr->gpio_base_id = id;
+			gpio_info_ptr->irq_base_id = gpio_to_irq(id);
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static int __init rdbg_init(void)
+{
+	int err = 0;
+	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+	int minor = 0;
+	int major = 0;
+	int minor_nodes_created = 0;
+
+	char *rdbg_compatible_string = "qcom,smp2pgpio_client_rdbg_";
+	int max_len = strlen(rdbg_compatible_string) + strlen("xx_out");
+
+	char *node_name = kcalloc(max_len, sizeof(char), GFP_KERNEL);
+
+	if (!node_name) {
+		err = -ENOMEM;
+		goto bail;
+	}
+
+	if (rdbgdevice->num_devices < 1 ||
+		rdbgdevice->num_devices > SMP2P_NUM_PROCS) {
+		pr_err("rdbg: invalid num_devices");
+		err = -EDOM;
+		goto name_bail;
+	}
+
+	rdbgdevice->rdbg_data = kcalloc(rdbgdevice->num_devices,
+		sizeof(struct rdbg_data), GFP_KERNEL);
+	if (!rdbgdevice->rdbg_data) {
+		err = -ENOMEM;
+		goto name_bail;
+	}
+
+	err = alloc_chrdev_region(&rdbgdevice->dev_no, 0,
+		rdbgdevice->num_devices, "rdbgctl");
+	if (err) {
+		pr_err("Error in alloc_chrdev_region.");
+		goto data_bail;
+	}
+	major = MAJOR(rdbgdevice->dev_no);
+
+	cdev_init(&rdbgdevice->cdev, &rdbg_fops);
+	rdbgdevice->cdev.owner = THIS_MODULE;
+	err = cdev_add(&rdbgdevice->cdev, MKDEV(major, 0),
+		rdbgdevice->num_devices);
+	if (err) {
+		pr_err("Error in cdev_add");
+		goto chrdev_bail;
+	}
+
+	rdbgdevice->class = class_create(THIS_MODULE, "rdbg");
+	if (IS_ERR(rdbgdevice->class)) {
+		err = PTR_ERR(rdbgdevice->class);
+		pr_err("Error in class_create");
+		goto cdev_bail;
+	}
+
+	for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
+		if (!proc_info[minor].name)
+			continue;
+
+		if (snprintf(node_name, max_len, "%s%d_in",
+			rdbg_compatible_string, minor) <= 0) {
+			pr_err("Error in snprintf");
+			err = -ENOMEM;
+			goto device_bail;
+		}
+
+		if (register_smp2p(node_name,
+			&rdbgdevice->rdbg_data[minor].in)) {
+			pr_debug("No incoming device tree entry found for %s",
+				proc_info[minor].name);
+			continue;
+		}
+
+		if (snprintf(node_name, max_len, "%s%d_out",
+			rdbg_compatible_string, minor) <= 0) {
+			pr_err("Error in snprintf");
+			err = -ENOMEM;
+			goto device_bail;
+		}
+
+		if (register_smp2p(node_name,
+			&rdbgdevice->rdbg_data[minor].out)) {
+			pr_err("No outgoing device tree entry found for %s",
+				proc_info[minor].name);
+			err = -EINVAL;
+			goto device_bail;
+		}
+
+		rdbgdevice->rdbg_data[minor].device = device_create(
+			rdbgdevice->class, NULL, MKDEV(major, minor),
+			NULL, "%s", proc_info[minor].name);
+		if (IS_ERR(rdbgdevice->rdbg_data[minor].device)) {
+			err = PTR_ERR(rdbgdevice->rdbg_data[minor].device);
+			pr_err("Error in device_create");
+			goto device_bail;
+		}
+		rdbgdevice->rdbg_data[minor].device_initialized = 1;
+		minor_nodes_created++;
+		dev_dbg(rdbgdevice->rdbg_data[minor].device,
+			"%s: created /dev/%s c %d %d", __func__,
+			proc_info[minor].name, major, minor);
+	}
+
+	if (!minor_nodes_created) {
+		pr_err("No device tree entries found");
+		err = -EINVAL;
+		goto class_bail;
+	}
+
+	goto name_bail;
+
+device_bail:
+	for (--minor; minor >= 0; minor--) {
+		if (rdbgdevice->rdbg_data[minor].device_initialized)
+			device_destroy(rdbgdevice->class,
+				MKDEV(MAJOR(rdbgdevice->dev_no), minor));
+	}
+class_bail:
+	class_destroy(rdbgdevice->class);
+cdev_bail:
+	cdev_del(&rdbgdevice->cdev);
+chrdev_bail:
+	unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices);
+data_bail:
+	kfree(rdbgdevice->rdbg_data);
+name_bail:
+	kfree(node_name);
+bail:
+	return err;
+}
+
+static void __exit rdbg_exit(void)
+{
+	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+	int minor;
+
+	for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
+		if (rdbgdevice->rdbg_data[minor].device_initialized) {
+			device_destroy(rdbgdevice->class,
+				MKDEV(MAJOR(rdbgdevice->dev_no), minor));
+		}
+	}
+	class_destroy(rdbgdevice->class);
+	cdev_del(&rdbgdevice->cdev);
+	unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices);
+	kfree(rdbgdevice->rdbg_data);
+}
+
+module_init(rdbg_init);
+module_exit(rdbg_exit);
+
+MODULE_DESCRIPTION("rdbg module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 946025a..84eca4f 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -295,6 +295,8 @@
 	}
 
 	kfree(ibmvtpm);
+	/* For tpm_ibmvtpm_get_desired_dma */
+	dev_set_drvdata(&vdev->dev, NULL);
 
 	return 0;
 }
@@ -309,13 +311,16 @@
 static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
 {
 	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
-	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+	struct ibmvtpm_dev *ibmvtpm;
 
-	/* ibmvtpm initializes at probe time, so the data we are
-	* asking for may not be set yet. Estimate that 4K required
-	* for TCE-mapped buffer in addition to CRQ.
-	*/
-	if (!ibmvtpm)
+	/*
+	 * ibmvtpm initializes at probe time, so the data we are
+	 * asking for may not be set yet. Estimate that 4K required
+	 * for TCE-mapped buffer in addition to CRQ.
+	 */
+	if (chip)
+		ibmvtpm = dev_get_drvdata(&chip->dev);
+	else
 		return CRQ_RES_BUF_SIZE + PAGE_SIZE;
 
 	return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 3d6754e..bb7c862 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -258,6 +258,9 @@
 {
 	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
 		return abs(rate - now) < abs(rate - best);
+	else if (flags & CLK_DIVIDER_ROUND_KHZ)
+		return (DIV_ROUND_CLOSEST(abs(rate - now), 1000)
+			< DIV_ROUND_CLOSEST(abs(rate - best), 1000));
 
 	return now <= rate && now > best;
 }
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 86e148d..1984d4a 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -2059,7 +2059,7 @@
 {
 	return platform_driver_register(&cam_cc_sdm845_driver);
 }
-core_initcall(cam_cc_sdm845_init);
+subsys_initcall(cam_cc_sdm845_init);
 
 static void __exit cam_cc_sdm845_exit(void)
 {
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 4d3b427..e7d3ee4 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -815,7 +815,7 @@
 		return -EINVAL;
 
 	return divider_round_rate(hw, rate, prate, pll->post_div_table,
-					pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+					pll->width, CLK_DIVIDER_ROUND_KHZ);
 }
 
 static int clk_generic_pll_postdiv_set_rate(struct clk_hw *hw,
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 7aef887..f12f03d 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -573,55 +573,11 @@
 }
 
 const struct clk_ops clk_ops_cpu_osm = {
-	.enable = clk_osm_enable,
 	.round_rate = clk_osm_round_rate,
 	.list_rate = clk_osm_list_rate,
 	.debug_init = clk_debug_measure_add,
 };
 
-static struct clk_ops clk_ops_core;
-
-static int cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
-				    unsigned long parent_rate)
-{
-	struct clk_osm *cpuclk = to_clk_osm(hw);
-	struct clk_hw *p_hw = clk_hw_get_parent(hw);
-	struct clk_osm *parent = to_clk_osm(p_hw);
-	int index = 0;
-	unsigned long r_rate;
-
-	if (!cpuclk || !parent)
-		return -EINVAL;
-
-	r_rate = clk_osm_round_rate(p_hw, rate, NULL);
-
-	if (rate != r_rate) {
-		pr_err("invalid requested rate=%ld\n", rate);
-		return -EINVAL;
-	}
-
-	/* Convert rate to table index */
-	index = clk_osm_search_table(parent->osm_table,
-				     parent->num_entries, r_rate);
-	if (index < 0) {
-		pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate);
-		return -EINVAL;
-	}
-	pr_debug("rate: %lu --> index %d\n", rate, index);
-	/*
-	 * Choose index and send request to OSM hardware.
-	 * TODO: Program INACTIVE_OS_REQUEST if needed.
-	 */
-	clk_osm_write_reg(parent, index,
-			DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num),
-			OSM_BASE);
-
-	/* Make sure the write goes through before proceeding */
-	clk_osm_mb(parent, OSM_BASE);
-
-	return 0;
-}
-
 static int l3_clk_set_rate(struct clk_hw *hw, unsigned long rate,
 				    unsigned long parent_rate)
 {
@@ -657,38 +613,6 @@
 	return 0;
 }
 
-static long cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
-					unsigned long *parent_rate)
-{
-	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
-
-	if (!parent_hw)
-		return -EINVAL;
-
-	return clk_hw_round_rate(parent_hw, rate);
-}
-
-static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
-					unsigned long parent_rate)
-{
-	struct clk_osm *cpuclk = to_clk_osm(hw);
-	struct clk_hw *p_hw = clk_hw_get_parent(hw);
-	struct clk_osm *parent = to_clk_osm(p_hw);
-	int index = 0;
-
-	if (!cpuclk || !parent)
-		return -EINVAL;
-
-	index = clk_osm_read_reg(parent,
-			DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num));
-
-	pr_debug("%s: Index %d, freq %ld\n", __func__, index,
-				parent->osm_table[index].frequency);
-
-	/* Convert index to frequency */
-	return parent->osm_table[index].frequency;
-}
-
 static unsigned long l3_clk_recalc_rate(struct clk_hw *hw,
 					unsigned long parent_rate)
 {
@@ -759,7 +683,7 @@
 		.name = "cpu0_pwrcl_clk",
 		.parent_names = (const char *[]){ "pwrcl_clk" },
 		.num_parents = 1,
-		.ops = &clk_ops_core,
+		.ops = &clk_dummy_ops,
 	},
 };
 
@@ -771,8 +695,7 @@
 		.name = "cpu1_pwrcl_clk",
 		.parent_names = (const char *[]){ "pwrcl_clk" },
 		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_ops_core,
+		.ops = &clk_dummy_ops,
 	},
 };
 
@@ -784,8 +707,7 @@
 		.name = "cpu2_pwrcl_clk",
 		.parent_names = (const char *[]){ "pwrcl_clk" },
 		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_ops_core,
+		.ops = &clk_dummy_ops,
 	},
 };
 
@@ -797,8 +719,7 @@
 		.name = "cpu3_pwrcl_clk",
 		.parent_names = (const char *[]){ "pwrcl_clk" },
 		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_ops_core,
+		.ops = &clk_dummy_ops,
 	},
 };
 
@@ -816,7 +737,7 @@
 		.name = "cpu4_perfcl_clk",
 		.parent_names = (const char *[]){ "perfcl_clk" },
 		.num_parents = 1,
-		.ops = &clk_ops_core,
+		.ops = &clk_dummy_ops,
 	},
 };
 
@@ -828,8 +749,7 @@
 		.name = "cpu5_perfcl_clk",
 		.parent_names = (const char *[]){ "perfcl_clk" },
 		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_ops_core,
+		.ops = &clk_dummy_ops,
 	},
 };
 
@@ -841,8 +761,7 @@
 		.name = "cpu6_perfcl_clk",
 		.parent_names = (const char *[]){ "perfcl_clk" },
 		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_ops_core,
+		.ops = &clk_dummy_ops,
 	},
 };
 
@@ -854,8 +773,7 @@
 		.name = "cpu7_perfcl_clk",
 		.parent_names = (const char *[]){ "perfcl_clk" },
 		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_ops_core,
+		.ops = &clk_dummy_ops,
 	},
 };
 
@@ -3348,11 +3266,6 @@
 		clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL, OSM_BASE);
 	}
 
-	clk_ops_core = clk_dummy_ops;
-	clk_ops_core.set_rate = cpu_clk_set_rate;
-	clk_ops_core.round_rate = cpu_clk_round_rate;
-	clk_ops_core.recalc_rate = cpu_clk_recalc_rate;
-
 	rc = clk_osm_acd_init(&l3_clk);
 	if (rc) {
 		pr_err("failed to initialize ACD for L3, rc=%d\n", rc);
@@ -3455,7 +3368,7 @@
 {
 	return platform_driver_register(&clk_cpu_osm_driver);
 }
-arch_initcall(clk_cpu_osm_init);
+subsys_initcall(clk_cpu_osm_init);
 
 static void __exit clk_cpu_osm_exit(void)
 {
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 89bae2e..e1cda90 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -451,7 +451,7 @@
 {
 	return platform_driver_register(&clk_rpmh_driver);
 }
-core_initcall(clk_rpmh_init);
+subsys_initcall(clk_rpmh_init);
 
 static void __exit clk_rpmh_exit(void)
 {
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index d6ecf12..53bfe77 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -1130,7 +1130,7 @@
 {
 	return platform_driver_register(&disp_cc_sdm845_driver);
 }
-core_initcall(disp_cc_sdm845_init);
+subsys_initcall(disp_cc_sdm845_init);
 
 static void __exit disp_cc_sdm845_exit(void)
 {
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index cd47e14..25f9d62 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -3856,7 +3856,7 @@
 {
 	return platform_driver_register(&gcc_sdm845_driver);
 }
-core_initcall(gcc_sdm845_init);
+subsys_initcall(gcc_sdm845_init);
 
 static void __exit gcc_sdm845_exit(void)
 {
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index 8442890..b2f6a3c 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -37,7 +37,6 @@
 #include "vdd-level-sdm845.h"
 
 #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-#define F_SLEW(f, s, h, m, n, sf) { (f), (s), (2 * (h) - 1), (m), (n), (sf) }
 
 static int vdd_gx_corner[] = {
 	RPMH_REGULATOR_LEVEL_OFF,		/* VDD_GX_NONE */
@@ -67,6 +66,7 @@
 	P_GPU_CC_PLL1_OUT_EVEN,
 	P_GPU_CC_PLL1_OUT_MAIN,
 	P_GPU_CC_PLL1_OUT_ODD,
+	P_CRC_DIV,
 };
 
 static const struct parent_map gpu_cc_parent_map_0[] = {
@@ -107,8 +107,28 @@
 	"core_bi_pll_test_se",
 };
 
+static const struct parent_map gpu_cc_parent_map_2[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CRC_DIV,  1 },
+	{ P_GPU_CC_PLL0_OUT_ODD, 2 },
+	{ P_GPU_CC_PLL1_OUT_EVEN, 3 },
+	{ P_GPU_CC_PLL1_OUT_ODD, 4 },
+	{ P_GPLL0_OUT_MAIN, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_2[] = {
+	"bi_tcxo",
+	"crc_div",
+	"gpu_cc_pll0_out_odd",
+	"gpu_cc_pll1_out_even",
+	"gpu_cc_pll1_out_odd",
+	"gcc_gpu_gpll0_clk_src",
+	"core_bi_pll_test_se",
+};
+
 static struct pll_vco fabia_vco[] = {
-	{ 250000000, 2000000000, 0 },
+	{ 249600000, 2000000000, 0 },
 	{ 125000000, 1000000000, 1 },
 };
 
@@ -186,12 +206,27 @@
 	},
 };
 
+static struct clk_fixed_factor crc_div = {
+	.mult = 1,
+	.div = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "crc_div",
+		.parent_names = (const char *[]){ "gpu_cc_pll0_out_even" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = {
-	F_SLEW(147000000, P_GPU_CC_PLL0_OUT_EVEN,  1, 0, 0,  294000000),
-	F_SLEW(210000000, P_GPU_CC_PLL0_OUT_EVEN,  1, 0, 0,  420000000),
-	F_SLEW(338000000, P_GPU_CC_PLL0_OUT_EVEN,  1, 0, 0,  676000000),
-	F_SLEW(425000000, P_GPU_CC_PLL0_OUT_EVEN,  1, 0, 0,  850000000),
-	F_SLEW(600000000, P_GPU_CC_PLL0_OUT_EVEN,  1, 0, 0, 1200000000),
+	F(147000000, P_CRC_DIV,  1, 0, 0),
+	F(210000000, P_CRC_DIV,  1, 0, 0),
+	F(280000000, P_CRC_DIV,  1, 0, 0),
+	F(338000000, P_CRC_DIV,  1, 0, 0),
+	F(425000000, P_CRC_DIV,  1, 0, 0),
+	F(487000000, P_CRC_DIV,  1, 0, 0),
+	F(548000000, P_CRC_DIV,  1, 0, 0),
+	F(600000000, P_CRC_DIV,  1, 0, 0),
 	{ }
 };
 
@@ -199,12 +234,12 @@
 	.cmd_rcgr = 0x101c,
 	.mnd_width = 0,
 	.hid_width = 5,
-	.parent_map = gpu_cc_parent_map_1,
+	.parent_map = gpu_cc_parent_map_2,
 	.freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
 	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gpu_cc_gx_gfx3d_clk_src",
-		.parent_names = gpu_cc_parent_names_1,
+		.parent_names = gpu_cc_parent_names_2,
 		.num_parents = 7,
 		.flags = CLK_SET_RATE_PARENT,
 		.ops =  &clk_rcg2_ops,
@@ -532,16 +567,23 @@
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res == NULL) {
-		dev_err(&pdev->dev, "Failed to get resources for clock_gfxcc.\n");
+		dev_err(&pdev->dev, "Failed to get resources for clock_gfxcc\n");
 		return -EINVAL;
 	}
 
 	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
 	if (IS_ERR(base)) {
-		dev_err(&pdev->dev, "Failed to ioremap the GFX CC base.\n");
+		dev_err(&pdev->dev, "Failed to ioremap the GFX CC base\n");
 		return PTR_ERR(base);
 	}
 
+	/* Register clock fixed factor for CRC divide. */
+	ret = devm_clk_hw_register(&pdev->dev, &crc_div.hw);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register hardware clock\n");
+		return ret;
+	}
+
 	regmap = devm_regmap_init_mmio(&pdev->dev, base,
 				gpu_cc_gfx_sdm845_desc.config);
 	if (IS_ERR(regmap)) {
@@ -577,7 +619,7 @@
 		return ret;
 	}
 
-	dev_info(&pdev->dev, "Registered GFX CC clocks.\n");
+	dev_info(&pdev->dev, "Registered GFX CC clocks\n");
 
 	return ret;
 }
@@ -594,7 +636,7 @@
 {
 	return platform_driver_register(&gpu_cc_gfx_sdm845_driver);
 }
-arch_initcall(gpu_cc_gfx_sdm845_init);
+subsys_initcall(gpu_cc_gfx_sdm845_init);
 
 static void __exit gpu_cc_gfx_sdm845_exit(void)
 {
@@ -626,7 +668,7 @@
 		return ret;
 	}
 
-	dev_info(&pdev->dev, "Registered GPU CC clocks.\n");
+	dev_info(&pdev->dev, "Registered GPU CC clocks\n");
 
 	return ret;
 }
@@ -643,7 +685,7 @@
 {
 	return platform_driver_register(&gpu_cc_sdm845_driver);
 }
-core_initcall(gpu_cc_sdm845_init);
+subsys_initcall(gpu_cc_sdm845_init);
 
 static void __exit gpu_cc_sdm845_exit(void)
 {
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index 362ea0b..ba4e591 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -402,7 +402,7 @@
 {
 	return platform_driver_register(&video_cc_sdm845_driver);
 }
-core_initcall(video_cc_sdm845_init);
+subsys_initcall(video_cc_sdm845_init);
 
 static void __exit video_cc_sdm845_exit(void)
 {
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 8ca07fe..0cca360 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -556,7 +556,7 @@
 				 0x12c, 0, 4, 24, 3, BIT(31),
 				 CLK_SET_RATE_PARENT);
 static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents,
-				 0x12c, 0, 4, 24, 3, BIT(31),
+				 0x130, 0, 4, 24, 3, BIT(31),
 				 CLK_SET_RATE_PARENT);
 
 static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1",
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index e8e16a5..0fe2518 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -169,8 +169,8 @@
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	/* cannot be lower than 11 otherwise freq will not fall */
-	if (ret != 1 || input < 11 || input > 100 ||
+	/* cannot be lower than 1 otherwise freq will not fall */
+	if (ret != 1 || input < 1 || input > 100 ||
 			input >= dbs_data->up_threshold)
 		return -EINVAL;
 
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index ed239c4..39e0484 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -21,7 +21,6 @@
 #include <linux/moduleparam.h>
 #include "lpm-levels.h"
 
-bool use_psci;
 enum lpm_type {
 	IDLE = 0,
 	SUSPEND,
@@ -306,6 +305,7 @@
 	struct lpm_level_avail *level_list = NULL;
 	char cpu_name[20] = {0};
 	int ret = 0;
+	struct list_head *pos;
 
 	cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) *
 			cpumask_weight(&p->child_cpus), GFP_KERNEL);
@@ -313,38 +313,45 @@
 		return -ENOMEM;
 
 	cpu_idx = 0;
-	for_each_cpu(cpu, &p->child_cpus) {
-		snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
-		cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name, parent);
-		if (!cpu_kobj[cpu_idx]) {
-			ret = -ENOMEM;
-			goto release_kobj;
-		}
+	list_for_each(pos, &p->cpu) {
+		struct lpm_cpu *lpm_cpu = list_entry(pos, struct lpm_cpu, list);
 
-		level_list = devm_kzalloc(&lpm_pdev->dev,
-				p->cpu->nlevels * sizeof(*level_list),
-				GFP_KERNEL);
-		if (!level_list) {
-			ret = -ENOMEM;
-			goto release_kobj;
-		}
-
-		/*
-		 * Skip enable/disable for WFI. cpuidle expects WFI to be
-		 * available at all times.
-		 */
-		for (i = 1; i < p->cpu->nlevels; i++) {
-
-			level_list[i].latency_us = p->levels[i].pwr.latency_us;
-			ret = create_lvl_avail_nodes(p->cpu->levels[i].name,
-					cpu_kobj[cpu_idx], &level_list[i],
-					(void *)p->cpu, cpu, true);
-			if (ret)
+		for_each_cpu(cpu, &lpm_cpu->related_cpus) {
+			snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
+			cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name,
+					parent);
+			if (!cpu_kobj[cpu_idx]) {
+				ret = -ENOMEM;
 				goto release_kobj;
-		}
+			}
 
-		cpu_level_available[cpu] = level_list;
-		cpu_idx++;
+			level_list = devm_kzalloc(&lpm_pdev->dev,
+					lpm_cpu->nlevels * sizeof(*level_list),
+					GFP_KERNEL);
+			if (!level_list) {
+				ret = -ENOMEM;
+				goto release_kobj;
+			}
+
+			/*
+			 * Skip enable/disable for WFI. cpuidle expects WFI to
+			 * be available at all times.
+			 */
+			for (i = 1; i < lpm_cpu->nlevels; i++) {
+				level_list[i].latency_us =
+					p->levels[i].pwr.latency_us;
+				ret = create_lvl_avail_nodes(
+						lpm_cpu->levels[i].name,
+						cpu_kobj[cpu_idx],
+						&level_list[i],
+						(void *)lpm_cpu, cpu, true);
+				if (ret)
+					goto release_kobj;
+			}
+
+			cpu_level_available[cpu] = level_list;
+			cpu_idx++;
+		}
 	}
 
 	return ret;
@@ -385,7 +392,7 @@
 			return ret;
 	}
 
-	if (p->cpu) {
+	if (!list_empty(&p->cpu)) {
 		ret = create_cpu_lvl_nodes(p, cluster_kobj);
 		if (ret)
 			return ret;
@@ -431,30 +438,27 @@
 		return ret;
 	}
 
-	if (use_psci) {
-		key = "qcom,psci-mode-shift";
-		ret = of_property_read_u32(node, key,
-				&c->psci_mode_shift);
-		if (ret) {
-			pr_err("%s(): Failed to read param: %s\n",
-							__func__, key);
-			return ret;
-		}
+	key = "qcom,psci-mode-shift";
+	ret = of_property_read_u32(node, key,
+			&c->psci_mode_shift);
+	if (ret) {
+		pr_err("%s(): Failed to read param: %s\n",
+				__func__, key);
+		return ret;
+	}
 
-		key = "qcom,psci-mode-mask";
-		ret = of_property_read_u32(node, key,
-				&c->psci_mode_mask);
-		if (ret) {
-			pr_err("%s(): Failed to read param: %s\n",
-							__func__, key);
-			return ret;
-		}
+	key = "qcom,psci-mode-mask";
+	ret = of_property_read_u32(node, key,
+			&c->psci_mode_mask);
+	if (ret) {
+		pr_err("%s(): Failed to read param: %s\n",
+				__func__, key);
+		return ret;
+	}
 
-		/* Set ndevice to 1 as default */
-		c->ndevices = 1;
+	/* Set ndevice to 1 as default */
+	c->ndevices = 1;
 
-	} else
-		pr_warn("Target supports PSCI only\n");
 	return 0;
 }
 
@@ -503,22 +507,14 @@
 	if (ret)
 		goto failed;
 
-	if (use_psci) {
-		char *k = "qcom,psci-mode";
+	key = "qcom,psci-mode";
 
-		ret = of_property_read_u32(node, k, &level->psci_id);
-		if (ret)
-			goto failed;
-
-		level->is_reset = of_property_read_bool(node, "qcom,is-reset");
-	} else
-		pr_warn("Build supports PSCI targets only");
-
-	key = "label";
-	ret = of_property_read_string(node, key, &level->level_name);
+	ret = of_property_read_u32(node, key, &level->psci_id);
 	if (ret)
 		goto failed;
 
+	level->is_reset = of_property_read_bool(node, "qcom,is-reset");
+
 	if (cluster->nlevels != cluster->default_level) {
 		key = "min child idx";
 		ret = of_property_read_u32(node, "qcom,min-child-idx",
@@ -531,10 +527,6 @@
 	}
 
 	level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm");
-	level->disable_dynamic_routing = of_property_read_bool(node,
-					"qcom,disable-dynamic-int-routing");
-	level->last_core_only = of_property_read_bool(node,
-					"qcom,last-core-only");
 
 	key = "parse_power_params";
 	ret = parse_power_params(node, &level->pwr);
@@ -569,20 +561,16 @@
 		return ret;
 	}
 
-	if (use_psci) {
-		key = "qcom,psci-cpu-mode";
+	key = "qcom,psci-cpu-mode";
+	ret = of_property_read_u32(n, key, &l->psci_id);
+	if (ret) {
+		pr_err("Failed reading %s on device %s\n", key,
+				n->name);
+		return ret;
+	}
+	key = "qcom,hyp-psci";
 
-		ret = of_property_read_u32(n, key, &l->psci_id);
-		if (ret) {
-			pr_err("Failed reading %s on device %s\n", key,
-					n->name);
-			return ret;
-		}
-		key = "qcom,hyp-psci";
-
-		l->hyp_psci = of_property_read_bool(n, key);
-	} else
-		pr_warn("Build supports PSCI targets only");
+	l->hyp_psci = of_property_read_bool(n, key);
 	return 0;
 
 }
@@ -639,51 +627,26 @@
 				next_pwr->time_overhead_us : residency;
 }
 
-static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
+static int parse_cpu(struct device_node *node, struct lpm_cpu *cpu)
 {
+
 	struct device_node *n;
-	int ret = -ENOMEM;
-	int i, j;
-	char *key;
-
-	c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL);
-	if (!c->cpu)
-		return ret;
-
-	c->cpu->parent = c;
-	if (use_psci) {
-
-		key = "qcom,psci-mode-shift";
-
-		ret = of_property_read_u32(node, key, &c->cpu->psci_mode_shift);
-		if (ret) {
-			pr_err("Failed reading %s on device %s\n", key,
-					node->name);
-			return ret;
-		}
-		key = "qcom,psci-mode-mask";
-
-		ret = of_property_read_u32(node, key, &c->cpu->psci_mode_mask);
-		if (ret) {
-			pr_err("Failed reading %s on device %s\n", key,
-					node->name);
-			return ret;
-		}
-	}
+	int ret, i, j;
+	const char *key;
 	for_each_child_of_node(node, n) {
-		struct lpm_cpu_level *l = &c->cpu->levels[c->cpu->nlevels];
+		struct lpm_cpu_level *l = &cpu->levels[cpu->nlevels];
 
-		c->cpu->nlevels++;
+		cpu->nlevels++;
 
 		ret = parse_cpu_mode(n, l);
 		if (ret < 0) {
 			pr_info("Failed %s\n", l->name);
-			goto failed;
+			return ret;
 		}
 
 		ret = parse_power_params(n, &l->pwr);
 		if (ret)
-			goto failed;
+			return ret;
 
 		key = "qcom,use-broadcast-timer";
 		l->use_bc_timer = of_property_read_bool(n, key);
@@ -698,32 +661,83 @@
 		if (ret == -EINVAL)
 			l->reset_level = LPM_RESET_LVL_NONE;
 		else if (ret)
-			goto failed;
+			return ret;
 	}
-	for (i = 0; i < c->cpu->nlevels; i++) {
-		for (j = 0; j < c->cpu->nlevels; j++) {
+	for (i = 0; i < cpu->nlevels; i++) {
+		for (j = 0; j < cpu->nlevels; j++) {
 			if (i >= j) {
-				c->cpu->levels[i].pwr.residencies[j] = 0;
+				cpu->levels[i].pwr.residencies[j] = 0;
 				continue;
 			}
 
-			c->cpu->levels[i].pwr.residencies[j] =
-				calculate_residency(&c->cpu->levels[i].pwr,
-					&c->cpu->levels[j].pwr);
+			cpu->levels[i].pwr.residencies[j] =
+				calculate_residency(&cpu->levels[i].pwr,
+						&cpu->levels[j].pwr);
 
 			pr_err("%s: idx %d %u\n", __func__, j,
-					c->cpu->levels[i].pwr.residencies[j]);
+					cpu->levels[i].pwr.residencies[j]);
 		}
 	}
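+	/*
+	 * Allocate per-cpu tables of min/max residencies, one entry per CPU
+	 * low power level, and seed them with the optimum residency values.
+	 */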
+	for_each_cpu(i, &cpu->related_cpus) {
+		per_cpu(max_residency, i) = devm_kzalloc(&lpm_pdev->dev,
+				sizeof(uint32_t) * cpu->nlevels,
+				GFP_KERNEL);
+		if (!per_cpu(max_residency, i))
+			return -ENOMEM;
+		per_cpu(min_residency, i) = devm_kzalloc(
+				&lpm_pdev->dev,
+				sizeof(uint32_t) * cpu->nlevels,
+				GFP_KERNEL);
+		if (!per_cpu(min_residency, i))
+			return -ENOMEM;
+		set_optimum_cpu_residency(cpu, i, true);
+	}
 
 	return 0;
-failed:
-	for (i = 0; i < c->cpu->nlevels; i++) {
-		kfree(c->cpu->levels[i].name);
-		c->cpu->levels[i].name = NULL;
+}
+
+static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
+{
+	int ret = -ENOMEM, i;
+	char *key;
+	struct lpm_cpu *cpu;
+
+	cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu), GFP_KERNEL);
+	if (!cpu)
+		return ret;
+
+	if (get_cpumask_for_node(node, &cpu->related_cpus))
+		return -EINVAL;
+
+	cpu->parent = c;
+
+	key = "qcom,psci-mode-shift";
+	ret = of_property_read_u32(node, key, &cpu->psci_mode_shift);
+	if (ret) {
+		pr_err("Failed reading %s on device %s\n", key,
+				node->name);
+		return ret;
 	}
-	kfree(c->cpu);
-	c->cpu = NULL;
+	key = "qcom,psci-mode-mask";
+
+	ret = of_property_read_u32(node, key, &cpu->psci_mode_mask);
+	if (ret) {
+		pr_err("Failed reading %s on device %s\n", key,
+				node->name);
+		return ret;
+	}
+
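+	/* Parse the per-cpu idle levels and their power parameters */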
+	if (parse_cpu(node, cpu))
+		goto failed;
+	cpumask_or(&c->child_cpus, &c->child_cpus, &cpu->related_cpus);
+	list_add(&cpu->list, &c->cpu);
+	return 0;
+failed:
+	for (i = 0; i < cpu->nlevels; i++) {
+		kfree(cpu->levels[i].name);
+		cpu->levels[i].name = NULL;
+	}
+	kfree(cpu);
 	pr_err("%s(): Failed with error code:%d\n", __func__, ret);
 	return ret;
 }
@@ -731,6 +745,7 @@
 void free_cluster_node(struct lpm_cluster *cluster)
 {
 	struct list_head *list;
+	struct lpm_cpu *cpu, *n;
 	int i;
 
 	list_for_each(list, &cluster->child) {
@@ -741,22 +756,21 @@
 		free_cluster_node(n);
 	};
 
-	if (cluster->cpu) {
-		for (i = 0; i < cluster->cpu->nlevels; i++) {
-			kfree(cluster->cpu->levels[i].name);
-			cluster->cpu->levels[i].name = NULL;
+	list_for_each_entry_safe(cpu, n, &cluster->cpu, list) {
+		for (i = 0; i < cpu->nlevels; i++) {
+			kfree(cpu->levels[i].name);
+			cpu->levels[i].name = NULL;
+		}
+		list_del(&cpu->list);
 	}
 	for (i = 0; i < cluster->nlevels; i++) {
 		kfree(cluster->levels[i].mode);
 		cluster->levels[i].mode = NULL;
 	}
-	kfree(cluster->cpu);
 	kfree(cluster->name);
-	kfree(cluster->lpm_dev);
-	cluster->cpu = NULL;
 	cluster->name = NULL;
-	cluster->lpm_dev = NULL;
 	cluster->ndevices = 0;
 }
 
@@ -785,6 +799,7 @@
 		goto failed_parse_params;
 
 	INIT_LIST_HEAD(&c->child);
+	INIT_LIST_HEAD(&c->cpu);
 	c->parent = parent;
 	spin_lock_init(&c->sync_lock);
 	c->min_child_level = NR_LPM_LEVELS;
@@ -795,7 +810,6 @@
 			continue;
 		key = "qcom,pm-cluster-level";
 		if (!of_node_cmp(n->name, key)) {
-			WARN_ON(!use_psci && c->no_saw_devices);
 			if (parse_cluster_level(n, c))
 				goto failed_parse_cluster;
 			continue;
@@ -805,7 +819,6 @@
 		if (!of_node_cmp(n->name, key)) {
 			struct lpm_cluster *child;
 
-			WARN_ON(!use_psci && c->no_saw_devices);
 			child = parse_cluster(n, c);
 			if (!child)
 				goto failed_parse_cluster;
@@ -819,34 +832,11 @@
 
 		key = "qcom,pm-cpu";
 		if (!of_node_cmp(n->name, key)) {
-			/*
-			 * Parse the the cpu node only if a pm-cpu node
-			 * is available, though the mask is defined @ the
-			 * cluster level
-			 */
-			if (get_cpumask_for_node(node, &c->child_cpus))
-				goto failed_parse_cluster;
-
 			if (parse_cpu_levels(n, c))
 				goto failed_parse_cluster;
 
 			c->aff_level = 1;
 
-			for_each_cpu(i, &c->child_cpus) {
-				per_cpu(max_residency, i) = devm_kzalloc(
-					&lpm_pdev->dev,
-					sizeof(uint32_t) * c->cpu->nlevels,
-					GFP_KERNEL);
-				if (!per_cpu(max_residency, i))
-					return ERR_PTR(-ENOMEM);
-				per_cpu(min_residency, i) = devm_kzalloc(
-					&lpm_pdev->dev,
-					sizeof(uint32_t) * c->cpu->nlevels,
-					GFP_KERNEL);
-				if (!per_cpu(min_residency, i))
-					return ERR_PTR(-ENOMEM);
-				set_optimum_cpu_residency(c->cpu, i, true);
-			}
 		}
 	}
 
@@ -883,8 +873,6 @@
 {
 	struct device_node *top = NULL;
 
-	use_psci = of_property_read_bool(pdev->dev.of_node, "qcom,use-psci");
-
 	top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
 	if (!top) {
 		pr_err("Failed to find root node\n");
@@ -898,6 +886,7 @@
 void cluster_dt_walkthrough(struct lpm_cluster *cluster)
 {
 	struct list_head *list;
+	struct lpm_cpu *cpu;
 	int i, j;
 	static int id;
 	char str[10] = {0};
@@ -918,12 +907,12 @@
 					&cluster->name[j], &l->mode[i]);
 	}
 
-	if (cluster->cpu) {
+	list_for_each_entry(cpu, &cluster->cpu, list) {
 		pr_info("%d\n", __LINE__);
-		for (j = 0; j < cluster->cpu->nlevels; j++)
+		for (j = 0; j < cpu->nlevels; j++)
 			pr_info("%s\tCPU mode: %s id:%d\n", str,
-					cluster->cpu->levels[j].name,
-					cluster->cpu->levels[j].mode);
+					cpu->levels[j].name,
+					cpu->levels[j].mode);
 	}
 
 	id++;
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 8b59bee..7536aa9b 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -53,10 +53,8 @@
 #include <trace/events/trace_msm_low_power.h>
 
 #define SCLK_HZ (32768)
-#define SCM_HANDOFF_LOCK_ID "S:7"
 #define PSCI_POWER_STATE(reset) (reset << 30)
 #define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
-static remote_spinlock_t scm_handoff_lock;
 
 enum {
 	MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
@@ -106,7 +104,7 @@
 
 static DEFINE_PER_CPU(struct lpm_history, hist);
 
-static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster);
+static DEFINE_PER_CPU(struct lpm_cpu*, cpu_lpm);
 static bool suspend_in_progress;
 static struct hrtimer lpm_hrtimer;
 static struct hrtimer histtimer;
@@ -135,10 +133,16 @@
 static bool sleep_disabled;
 module_param_named(sleep_disabled, sleep_disabled, bool, 0664);
 
+/**
+ * msm_cpuidle_get_deep_idle_latency - Get deep idle latency value
+ *
+ * Returns an s32 latency value
+ */
 s32 msm_cpuidle_get_deep_idle_latency(void)
 {
 	return 10;
 }
+EXPORT_SYMBOL(msm_cpuidle_get_deep_idle_latency);
 
 void lpm_suspend_wake_time(uint64_t wakeup_time)
 {
@@ -209,7 +213,7 @@
 	struct power_params *pwr_params;
 	struct lpm_cpu *cpu;
 	struct lpm_cluster *n;
-	uint32_t latency = 0;
+	uint32_t lat = 0;
 	int i;
 
 	list_for_each(list, child) {
@@ -218,19 +222,21 @@
 			if (strcmp(lat_level->level_name, n->cluster_name))
 				continue;
 		}
-		cpu = n->cpu;
-		for (i = 0; i < cpu->nlevels; i++) {
-			level = &cpu->levels[i];
-			pwr_params = &level->pwr;
-			if (lat_level->reset_level == level->reset_level) {
-				if ((latency > pwr_params->latency_us)
-							|| (!latency))
-					latency = pwr_params->latency_us;
-				break;
+		list_for_each_entry(cpu, &n->cpu, list) {
+			for (i = 0; i < cpu->nlevels; i++) {
+				level = &cpu->levels[i];
+				pwr_params = &level->pwr;
+				if (lat_level->reset_level
+						== level->reset_level) {
+					if ((lat > pwr_params->latency_us)
+							|| (!lat))
+						lat = pwr_params->latency_us;
+					break;
+				}
 			}
 		}
 	}
-	return latency;
+	return lat;
 }
 
 static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
@@ -239,9 +245,9 @@
 	struct lpm_cluster *n;
 
 	if ((cluster->aff_level == affinity_level)
-		|| ((cluster->cpu) && (affinity_level == 0)))
+		|| ((!list_empty(&cluster->cpu)) && (affinity_level == 0)))
 		return cluster;
-	else if (!cluster->cpu) {
+	else if (list_empty(&cluster->cpu)) {
 		n =  list_entry(cluster->child.next, typeof(*n), list);
 		return cluster_aff_match(n, affinity_level);
 	} else
@@ -316,7 +322,7 @@
 
 static int lpm_dying_cpu(unsigned int cpu)
 {
-	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
 
 	cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
 	return 0;
@@ -324,7 +330,7 @@
 
 static int lpm_starting_cpu(unsigned int cpu)
 {
-	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
 
 	cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
 	return 0;
@@ -378,7 +384,7 @@
 static void clusttimer_cancel(void)
 {
 	int cpu = raw_smp_processor_id();
-	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
 
 	hrtimer_try_to_cancel(&cluster->histtimer);
 
@@ -414,22 +420,6 @@
 	hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
 }
 
-static int set_device_mode(struct lpm_cluster *cluster, int ndevice,
-		struct lpm_cluster_level *level)
-{
-	struct low_power_ops *ops;
-
-	if (use_psci)
-		return 0;
-
-	ops = &cluster->lpm_dev[ndevice];
-	if (ops && ops->set_mode)
-		return ops->set_mode(ops, level->mode[ndevice],
-				level->notify_rpm);
-	else
-		return -EINVAL;
-}
-
 static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
 		struct lpm_cpu *cpu, int *idx_restrict,
 		uint32_t *idx_restrict_time)
@@ -953,10 +943,6 @@
 		if (!lpm_cluster_mode_allow(cluster, i, from_idle))
 			continue;
 
-		if (level->last_core_only &&
-			cpumask_weight(cpu_online_mask) > 1)
-			continue;
-
 		if (!cpumask_equal(&cluster->num_children_in_sync,
 					&level->num_cpu_votes))
 			continue;
@@ -1001,7 +987,6 @@
 		bool from_idle, int predicted)
 {
 	struct lpm_cluster_level *level = &cluster->levels[idx];
-	int ret, i;
 
 	if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
 			|| is_IPI_pending(&cluster->num_children_in_sync)) {
@@ -1022,30 +1007,16 @@
 						ktime_to_us(ktime_get()));
 	}
 
-	for (i = 0; i < cluster->ndevices; i++) {
-		ret = set_device_mode(cluster, i, level);
-		if (ret)
-			goto failed_set_mode;
-	}
 	if (level->notify_rpm) {
-		struct cpumask nextcpu, *cpumask;
 		uint64_t us;
 		uint32_t pred_us;
 
-		us = get_cluster_sleep_time(cluster, &nextcpu,
-						from_idle, &pred_us);
-		cpumask = level->disable_dynamic_routing ? NULL : &nextcpu;
-
-		if (ret) {
-			pr_info("Failed msm_rpm_enter_sleep() rc = %d\n", ret);
-			goto failed_set_mode;
-		}
-
+		us = get_cluster_sleep_time(cluster, NULL, from_idle,
+				&pred_us);
 		us = us + 1;
 		clear_predict_history();
 		clear_cl_predict_history();
 
-		do_div(us, USEC_PER_SEC/SCLK_HZ);
 		system_sleep_enter(us);
 	}
 	/* Notify cluster enter event after successfully config completion */
@@ -1062,17 +1033,6 @@
 	}
 
 	return 0;
-failed_set_mode:
-
-	for (i = 0; i < cluster->ndevices; i++) {
-		int rc = 0;
-
-		level = &cluster->levels[cluster->default_level];
-		// rc = set_device_mode(cluster, i, level);
-		WARN_ON(rc);
-	}
-
-	return ret;
 }
 
 static void cluster_prepare(struct lpm_cluster *cluster,
@@ -1152,7 +1112,7 @@
 {
 	struct lpm_cluster_level *level;
 	bool first_cpu;
-	int last_level, i, ret;
+	int last_level, i;
 
 	if (!cluster)
 		return;
@@ -1202,13 +1162,8 @@
 	last_level = cluster->last_level;
 	cluster->last_level = cluster->default_level;
 
-	for (i = 0; i < cluster->ndevices; i++) {
+	for (i = 0; i < cluster->ndevices; i++)
 		level = &cluster->levels[cluster->default_level];
-		ret = set_device_mode(cluster, i, level);
-
-		WARN_ON(ret);
-
-	}
 
 	cluster_notify(cluster, &cluster->levels[last_level], false);
 
@@ -1221,12 +1176,11 @@
 	spin_unlock(&cluster->sync_lock);
 }
 
-static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index,
+static inline void cpu_prepare(struct lpm_cpu *cpu, int cpu_index,
 				bool from_idle)
 {
-	struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
-	bool jtag_save_restore =
-			cluster->cpu->levels[cpu_index].jtag_save_restore;
+	struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];
+	bool jtag_save_restore = cpu->levels[cpu_index].jtag_save_restore;
 
 	/* Use broadcast timer for aggregating sleep mode within a cluster.
 	 * A broadcast timer could be used in the following scenarios
@@ -1254,12 +1208,11 @@
 		msm_jtag_save_state();
 }
 
-static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index,
+static inline void cpu_unprepare(struct lpm_cpu *cpu, int cpu_index,
 				bool from_idle)
 {
-	struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
-	bool jtag_save_restore =
-			cluster->cpu->levels[cpu_index].jtag_save_restore;
+	struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];
+	bool jtag_save_restore = cpu->levels[cpu_index].jtag_save_restore;
 
 	if (from_idle && cpu_level->use_bc_timer)
 		tick_broadcast_exit();
@@ -1305,13 +1258,12 @@
 	return state_id;
 }
 
-#if !defined(CONFIG_CPU_V7)
-bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
+static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
 {
 	int affinity_level = 0;
-	int state_id = get_cluster_id(cluster, &affinity_level);
+	int state_id = get_cluster_id(cpu->parent, &affinity_level);
 	int power_state =
-		PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
+		PSCI_POWER_STATE(cpu->levels[idx].is_reset);
 	bool success = false;
 	/*
 	 * idx = 0 is the default LPM state
@@ -1325,7 +1277,7 @@
 
 	affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
 	state_id |= (power_state | affinity_level
-			| cluster->cpu->levels[idx].psci_id);
+			| cpu->levels[idx].psci_id);
 
 	update_debug_pc_event(CPU_ENTER, state_id,
 			0xdeaffeed, 0xdeaffeed, true);
@@ -1336,52 +1288,17 @@
 			success, 0xdeaffeed, true);
 	return success;
 }
-#elif defined(CONFIG_ARM_PSCI)
-bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
-{
-	int affinity_level = 0;
-	int state_id = get_cluster_id(cluster, &affinity_level);
-	int power_state =
-		PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
-	bool success = false;
-
-	if (!idx) {
-		stop_critical_timings();
-		wfi();
-		start_critical_timings();
-		return 1;
-	}
-
-	affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
-	state_id |= (power_state | affinity_level
-			| cluster->cpu->levels[idx].psci_id);
-
-	update_debug_pc_event(CPU_ENTER, state_id,
-			0xdeaffeed, 0xdeaffeed, true);
-	stop_critical_timings();
-	success = !arm_cpuidle_suspend(state_id);
-	start_critical_timings();
-	update_debug_pc_event(CPU_EXIT, state_id,
-			success, 0xdeaffeed, true);
-}
-#else
-bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
-{
-	WARN_ONCE(true, "PSCI cpu_suspend ops not supported\n");
-	return false;
-}
-#endif
 
 static int lpm_cpuidle_select(struct cpuidle_driver *drv,
 		struct cpuidle_device *dev)
 {
-	struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
+	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
 	int idx;
 
-	if (!cluster)
+	if (!cpu)
 		return 0;
 
-	idx = cpu_power_select(dev, cluster->cpu);
+	idx = cpu_power_select(dev, cpu);
 
 	if (idx < 0)
 		return 0;
@@ -1425,18 +1342,18 @@
 static int lpm_cpuidle_enter(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int idx)
 {
-	struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
+	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
 	bool success = true;
 	const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
 	int64_t start_time = ktime_to_ns(ktime_get()), end_time;
 	struct power_params *pwr_params;
 
-	pwr_params = &cluster->cpu->levels[idx].pwr;
+	pwr_params = &cpu->levels[idx].pwr;
-
-	pwr_params = &cluster->cpu->levels[idx].pwr;
 
-	cpu_prepare(cluster, idx, true);
-	cluster_prepare(cluster, cpumask, idx, true, ktime_to_ns(ktime_get()));
+	cpu_prepare(cpu, idx, true);
+	cluster_prepare(cpu->parent, cpumask, idx, true, start_time);
 
 	trace_cpu_idle_enter(idx);
 	lpm_stats_cpu_enter(idx, start_time);
@@ -1444,15 +1361,14 @@
 	if (need_resched() || (idx < 0))
 		goto exit;
 
-	WARN_ON(!use_psci);
-	success = psci_enter_sleep(cluster, idx, true);
+	success = psci_enter_sleep(cpu, idx, true);
 
 exit:
 	end_time = ktime_to_ns(ktime_get());
 	lpm_stats_cpu_exit(idx, end_time, success);
 
-	cluster_unprepare(cluster, cpumask, idx, true, end_time);
-	cpu_unprepare(cluster, idx, true);
+	cluster_unprepare(cpu->parent, cpumask, idx, true, end_time);
+	cpu_unprepare(cpu, idx, true);
 	sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0);
 	end_time = ktime_to_ns(ktime_get()) - start_time;
 	do_div(end_time, 1000);
@@ -1522,8 +1438,9 @@
 	int i = 0, ret = 0;
 	unsigned int cpu;
 	struct lpm_cluster *p = NULL;
+	struct lpm_cpu *lpm_cpu;
 
-	if (!cl->cpu) {
+	if (list_empty(&cl->cpu)) {
 		struct lpm_cluster *n;
 
 		list_for_each_entry(n, &cl->child, list) {
@@ -1534,51 +1451,56 @@
 		return ret;
 	}
 
-	cl->drv = kcalloc(1, sizeof(*cl->drv), GFP_KERNEL);
-	if (!cl->drv)
-		return -ENOMEM;
+	list_for_each_entry(lpm_cpu, &cl->cpu, list) {
+		lpm_cpu->drv = kcalloc(1, sizeof(*lpm_cpu->drv), GFP_KERNEL);
+		if (!lpm_cpu->drv)
+			return -ENOMEM;
 
-	cl->drv->name = "msm_idle";
+		lpm_cpu->drv->name = "msm_idle";
 
-	for (i = 0; i < cl->cpu->nlevels; i++) {
-		struct cpuidle_state *st = &cl->drv->states[i];
-		struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i];
+		for (i = 0; i < lpm_cpu->nlevels; i++) {
+			struct cpuidle_state *st = &lpm_cpu->drv->states[i];
+			struct lpm_cpu_level *cpu_level = &lpm_cpu->levels[i];
 
-		snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
-		snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name);
-		st->flags = 0;
-		st->exit_latency = cpu_level->pwr.latency_us;
-		st->power_usage = cpu_level->pwr.ss_power;
-		st->target_residency = 0;
-		st->enter = lpm_cpuidle_enter;
-	}
-
-	cl->drv->state_count = cl->cpu->nlevels;
-	cl->drv->safe_state_index = 0;
-	for_each_cpu(cpu, &cl->child_cpus)
-		per_cpu(cpu_cluster, cpu) = cl;
-
-	for_each_possible_cpu(cpu) {
-		if (cpu_online(cpu))
-			continue;
-		p = per_cpu(cpu_cluster, cpu);
-		while (p) {
-			int j;
-
-			spin_lock(&p->sync_lock);
-			cpumask_set_cpu(cpu, &p->num_children_in_sync);
-			for (j = 0; j < p->nlevels; j++)
-				cpumask_copy(&p->levels[j].num_cpu_votes,
-						&p->num_children_in_sync);
-			spin_unlock(&p->sync_lock);
-			p = p->parent;
+			snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
+			snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name);
+			st->flags = 0;
+			st->exit_latency = cpu_level->pwr.latency_us;
+			st->power_usage = cpu_level->pwr.ss_power;
+			st->target_residency = 0;
+			st->enter = lpm_cpuidle_enter;
 		}
-	}
-	ret = cpuidle_register_cpu(cl->drv, &cl->child_cpus);
 
-	if (ret) {
-		kfree(cl->drv);
-		return -ENOMEM;
+		lpm_cpu->drv->state_count = lpm_cpu->nlevels;
+		lpm_cpu->drv->safe_state_index = 0;
+		for_each_cpu(cpu, &lpm_cpu->related_cpus)
+			per_cpu(cpu_lpm, cpu) = lpm_cpu;
+
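+		/*
+		 * CPUs that are offline at registration time cannot vote for
+		 * themselves, so mark them as in sync at every parent cluster
+		 * level up the hierarchy.
+		 */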
+		for_each_possible_cpu(cpu) {
+			if (cpu_online(cpu))
+				continue;
+			if (per_cpu(cpu_lpm, cpu))
+				p = per_cpu(cpu_lpm, cpu)->parent;
+			while (p) {
+				int j;
+
+				spin_lock(&p->sync_lock);
+				cpumask_set_cpu(cpu, &p->num_children_in_sync);
+				for (j = 0; j < p->nlevels; j++)
+					cpumask_copy(
+						&p->levels[j].num_cpu_votes,
+						&p->num_children_in_sync);
+				spin_unlock(&p->sync_lock);
+				p = p->parent;
+			}
+		}
+		ret = cpuidle_register_cpu(lpm_cpu->drv,
+					&lpm_cpu->related_cpus);
+
+		if (ret) {
+			kfree(lpm_cpu->drv);
+			return -ENOMEM;
+		}
 	}
 	return 0;
 }
@@ -1608,7 +1530,7 @@
 		level_name[i] = cpu->levels[i].name;
 
 	lpm_stats_config_level("cpu", level_name, cpu->nlevels,
-			parent->stats, &parent->child_cpus);
+			parent->stats, &cpu->related_cpus);
 
 	kfree(level_name);
 }
@@ -1617,8 +1539,9 @@
 		struct lpm_cluster *parent)
 {
 	const char **level_name;
-	int i;
 	struct lpm_cluster *child;
+	struct lpm_cpu *cpu;
+	int i;
 
 	if (!cl)
 		return;
@@ -1636,10 +1559,12 @@
 
 	kfree(level_name);
 
-	if (cl->cpu) {
-		register_cpu_lpm_stats(cl->cpu, cl);
-		return;
+	list_for_each_entry(cpu, &cl->cpu, list) {
+		register_cpu_lpm_stats(cpu, cl);
 	}
+	if (!list_empty(&cl->cpu))
+		return;
 
 	list_for_each_entry(child, &cl->child, list)
 		register_cluster_lpm_stats(child, cl);
@@ -1662,8 +1587,8 @@
 static int lpm_suspend_enter(suspend_state_t state)
 {
 	int cpu = raw_smp_processor_id();
-	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
-	struct lpm_cpu *lpm_cpu = cluster->cpu;
+	struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, cpu);
+	struct lpm_cluster *cluster = lpm_cpu->parent;
 	const struct cpumask *cpumask = get_cpu_mask(cpu);
 	int idx;
 
@@ -1676,7 +1601,7 @@
 		pr_err("Failed suspend\n");
 		return 0;
 	}
-	cpu_prepare(cluster, idx, false);
+	cpu_prepare(lpm_cpu, idx, false);
 	cluster_prepare(cluster, cpumask, idx, false, 0);
 	if (idx > 0)
 		update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
@@ -1689,15 +1614,14 @@
 	 * LPMs(XO and Vmin).
 	 */
 
-	WARN_ON(!use_psci);
-	psci_enter_sleep(cluster, idx, true);
+	psci_enter_sleep(lpm_cpu, idx, true);
 
 	if (idx > 0)
 		update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed,
 					false);
 
 	cluster_unprepare(cluster, cpumask, idx, false, 0);
-	cpu_unprepare(cluster, idx, false);
+	cpu_unprepare(lpm_cpu, idx, false);
 	return 0;
 }
 
@@ -1737,14 +1661,6 @@
 	hrtimer_init(&histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	cluster_timer_init(lpm_root_node);
 
-	ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID);
-	if (ret) {
-		pr_err("%s: Failed initializing scm_handoff_lock (%d)\n",
-			__func__, ret);
-		put_online_cpus();
-		return ret;
-	}
-
 	size = num_dbg_elements * sizeof(struct lpm_debug);
 	lpm_debug = dma_alloc_coherent(&pdev->dev, size,
 			&lpm_debug_phys, GFP_KERNEL);
@@ -1813,54 +1729,3 @@
 	return rc;
 }
 late_initcall(lpm_levels_module_init);
-
-enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu)
-{
-	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
-	enum msm_pm_l2_scm_flag retflag = MSM_SCM_L2_ON;
-
-	/*
-	 * No need to acquire the lock if probe isn't completed yet
-	 * In the event of the hotplug happening before lpm probe, we want to
-	 * flush the cache to make sure that L2 is flushed. In particular, this
-	 * could cause incoherencies for a cluster architecture. This wouldn't
-	 * affect the idle case as the idle driver wouldn't be registered
-	 * before the probe function
-	 */
-	if (!cluster)
-		return MSM_SCM_L2_OFF;
-
-	/*
-	 * Assumes L2 only. What/How parameters gets passed into TZ will
-	 * determine how this function reports this info back in msm-pm.c
-	 */
-	spin_lock(&cluster->sync_lock);
-
-	if (!cluster->lpm_dev) {
-		retflag = MSM_SCM_L2_OFF;
-		goto unlock_and_return;
-	}
-
-	if (!cpumask_equal(&cluster->num_children_in_sync,
-						&cluster->child_cpus))
-		goto unlock_and_return;
-
-	if (cluster->lpm_dev)
-		retflag = cluster->lpm_dev->tz_flag;
-	/*
-	 * The scm_handoff_lock will be release by the secure monitor.
-	 * It is used to serialize power-collapses from this point on,
-	 * so that both Linux and the secure context have a consistent
-	 * view regarding the number of running cpus (cpu_count).
-	 *
-	 * It must be acquired before releasing the cluster lock.
-	 */
-unlock_and_return:
-	update_debug_pc_event(PRE_PC_CB, retflag, 0xdeadbeef, 0xdeadbeef,
-			0xdeadbeef);
-	trace_pre_pc_cb(retflag);
-	remote_spin_lock_rlock_id(&scm_handoff_lock,
-				  REMOTE_SPINLOCK_TID_START + cpu);
-	spin_unlock(&cluster->sync_lock);
-	return retflag;
-}
diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h
index 3d35ae9..c9f272e 100644
--- a/drivers/cpuidle/lpm-levels.h
+++ b/drivers/cpuidle/lpm-levels.h
@@ -17,8 +17,6 @@
 #define MAXSAMPLES 5
 #define CLUST_SMPL_INVLD_TIME 40000
 
-extern bool use_psci;
-
 struct lpm_lookup_table {
 	uint32_t modes;
 	const char *mode_name;
@@ -47,10 +45,13 @@
 };
 
 struct lpm_cpu {
+	struct list_head list;
+	struct cpumask related_cpus;
 	struct lpm_cpu_level levels[NR_LPM_LEVELS];
 	int nlevels;
 	unsigned int psci_mode_shift;
 	unsigned int psci_mode_mask;
+	struct cpuidle_driver *drv;
 	struct lpm_cluster *parent;
 };
 
@@ -74,21 +75,13 @@
 	struct cpumask num_cpu_votes;
 	struct power_params pwr;
 	bool notify_rpm;
-	bool disable_dynamic_routing;
 	bool sync_level;
-	bool last_core_only;
 	struct lpm_level_avail available;
 	unsigned int psci_id;
 	bool is_reset;
 	int reset_level;
 };
 
-struct low_power_ops {
-	struct msm_spm_device *spm;
-	int (*set_mode)(struct low_power_ops *ops, int mode, bool notify_rpm);
-	enum msm_pm_l2_scm_flag tz_flag;
-};
-
 struct cluster_history {
 	uint32_t resi[MAXSAMPLES];
 	int mode[MAXSAMPLES];
@@ -108,16 +101,13 @@
 	const char *cluster_name;
 	const char **name;
 	unsigned long aff_level; /* Affinity level of the node */
-	struct low_power_ops *lpm_dev;
 	int ndevices;
 	struct lpm_cluster_level levels[NR_LPM_LEVELS];
 	int nlevels;
-	enum msm_pm_l2_scm_flag l2_flag;
 	int min_child_level;
 	int default_level;
 	int last_level;
-	struct lpm_cpu *cpu;
-	struct cpuidle_driver *drv;
+	struct list_head cpu;
 	spinlock_t sync_lock;
 	struct cpumask child_cpus;
 	struct cpumask num_children_in_sync;
@@ -125,14 +115,10 @@
 	struct lpm_stats *stats;
 	unsigned int psci_mode_shift;
 	unsigned int psci_mode_mask;
-	bool no_saw_devices;
 	struct cluster_history history;
 	struct hrtimer histtimer;
 };
 
-int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
-int set_system_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
-int set_l3_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
 void lpm_suspend_wake_time(uint64_t wakeup_time);
 
 struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev);
diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c
index e8bfff2..3c50c4e 100644
--- a/drivers/devfreq/governor_msm_adreno_tz.c
+++ b/drivers/devfreq/governor_msm_adreno_tz.c
@@ -236,7 +236,7 @@
 {
 	unsigned int tz_ca_data[2];
 	struct scm_desc desc = {0};
-	unsigned int *tz_buf;
+	u8 *tz_buf;
 	int ret;
 
 	/* Set data for TZ */
@@ -281,7 +281,7 @@
 			scm_is_call_available(SCM_SVC_DCVS, TZ_UPDATE_ID_64) &&
 			scm_is_call_available(SCM_SVC_DCVS, TZ_RESET_ID_64)) {
 		struct scm_desc desc = {0};
-		unsigned int *tz_buf;
+		u8 *tz_buf;
 
 		if (!is_scm_armv8()) {
 			ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID_64,
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index e18dc59..6204cc3 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -251,8 +251,11 @@
 	 */
 
 	/* have we filled in period_length yet? */
-	if (*total_len + control_block->length < period_len)
+	if (*total_len + control_block->length < period_len) {
+		/* update number of bytes in this period so far */
+		*total_len += control_block->length;
 		return;
+	}
 
 	/* calculate the length that remains to reach period_length */
 	control_block->length = period_len - *total_len;
diff --git a/drivers/edac/kryo3xx_arm64_edac.c b/drivers/edac/kryo3xx_arm64_edac.c
index 4ac880b..f5bb3ed 100644
--- a/drivers/edac/kryo3xx_arm64_edac.c
+++ b/drivers/edac/kryo3xx_arm64_edac.c
@@ -62,7 +62,7 @@
 
 static inline void set_errxmisc_overflow(void)
 {
-	u64 val = 0x7F7F00000000;
+	u64 val = 0x7F7F00000000ULL;
 
 	asm volatile("msr s3_0_c5_c5_0, %0" : : "r" (val));
 }
@@ -118,8 +118,9 @@
 #define DATA_BUF_ERR		0x2
 #define CACHE_DATA_ERR		0x6
 #define CACHE_TAG_DIRTY_ERR	0x7
-#define TLB_PARITY_ERR		0x8
-#define BUS_ERROR		0x18
+#define TLB_PARITY_ERR_DATA	0x8
+#define TLB_PARITY_ERR_TAG	0x9
+#define BUS_ERROR		0x12
 
 struct erp_drvdata {
 	struct edac_device_ctl_info *edev_ctl;
@@ -217,10 +218,13 @@
 		edac_printk(KERN_CRIT, EDAC_CPU, "ECC Error from cache tag or dirty RAM\n");
 		break;
 
-	case TLB_PARITY_ERR:
+	case TLB_PARITY_ERR_DATA:
 		edac_printk(KERN_CRIT, EDAC_CPU, "Parity error on TLB RAM\n");
 		break;
 
+	case TLB_PARITY_ERR_TAG:
+		edac_printk(KERN_CRIT, EDAC_CPU, "Parity error on TLB DATA\n");
+		break;
+
 	case BUS_ERROR:
 		edac_printk(KERN_CRIT, EDAC_CPU, "Bus Error\n");
 		break;
@@ -283,6 +287,16 @@
 	spin_unlock_irqrestore(&local_handler_lock, flags);
 }
 
+static bool l3_is_bus_error(u64 errxstatus)
+{
+	if (KRYO3XX_ERRXSTATUS_SERR(errxstatus) == BUS_ERROR) {
+		edac_printk(KERN_CRIT, EDAC_CPU, "Bus Error\n");
+		return true;
+	}
+
+	return false;
+}
+
 static void kryo3xx_check_l3_scu_error(struct edac_device_ctl_info *edev_ctl)
 {
 	u64 errxstatus = 0;
@@ -296,6 +310,11 @@
 
 	if (KRYO3XX_ERRXSTATUS_VALID(errxstatus) &&
 		KRYO3XX_ERRXMISC_LVL(errxmisc) == L3) {
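+		/*
+		 * Bus errors are reported on their own (optionally causing a
+		 * panic) and skip the regular UE/CE handling below.
+		 */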
+		if (l3_is_bus_error(errxstatus)) {
+			if (edev_ctl->panic_on_ue)
+				panic("Causing panic due to Bus Error\n");
+			return;
+		}
 		if (KRYO3XX_ERRXSTATUS_UE(errxstatus)) {
 			edac_printk(KERN_CRIT, EDAC_CPU, "Detected L3 uncorrectable error\n");
 			dump_err_reg(KRYO3XX_L3_UE, L3, errxstatus, errxmisc,
diff --git a/drivers/firmware/qcom/tz_log.c b/drivers/firmware/qcom/tz_log.c
index 1b51d08..471476c 100644
--- a/drivers/firmware/qcom/tz_log.c
+++ b/drivers/firmware/qcom/tz_log.c
@@ -477,10 +477,10 @@
 
 static int _disp_tz_interrupt_stats(void)
 {
-	int i, j, int_info_size;
+	int i, j;
 	int len = 0;
 	int *num_int;
-	unsigned char *ptr;
+	void *ptr;
 	struct tzdbg_int_t *tzdbg_ptr;
 	struct tzdbg_int_t_tz40 *tzdbg_ptr_tz40;
 
@@ -488,14 +488,12 @@
 			(tzdbg.diag_buf->int_info_off - sizeof(uint32_t)));
 	ptr = ((unsigned char *)tzdbg.diag_buf +
 					tzdbg.diag_buf->int_info_off);
-	int_info_size = ((tzdbg.diag_buf->ring_off -
-				tzdbg.diag_buf->int_info_off)/(*num_int));
 
 	pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
 
 	if (tzdbg.tz_version < QSEE_VERSION_TZ_4_X) {
+		tzdbg_ptr = ptr;
 		for (i = 0; i < (*num_int); i++) {
-			tzdbg_ptr = (struct tzdbg_int_t *)ptr;
 			len += snprintf(tzdbg.disp_buf + len,
 				(debug_rw_buf_size - 1) - len,
 				"     Interrupt Number          : 0x%x\n"
@@ -519,11 +517,11 @@
 								__func__);
 				break;
 			}
-			ptr += int_info_size;
+			tzdbg_ptr++;
 		}
 	} else {
+		tzdbg_ptr_tz40 = ptr;
 		for (i = 0; i < (*num_int); i++) {
-			tzdbg_ptr_tz40 = (struct tzdbg_int_t_tz40 *)ptr;
 			len += snprintf(tzdbg.disp_buf + len,
 				(debug_rw_buf_size - 1) - len,
 				"     Interrupt Number          : 0x%x\n"
@@ -547,7 +545,7 @@
 								__func__);
 				break;
 			}
-			ptr += int_info_size;
+			tzdbg_ptr_tz40++;
 		}
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 8e6bf54..f8fdbd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -693,6 +693,10 @@
 			DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
 				 adev->clock.default_dispclk / 100);
 			adev->clock.default_dispclk = 60000;
+		} else if (adev->clock.default_dispclk <= 60000) {
+			DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
+				 adev->clock.default_dispclk / 100);
+			adev->clock.default_dispclk = 62500;
 		}
 		adev->clock.dp_extclk =
 			le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index f7d236f..57fbde1 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -164,7 +164,7 @@
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
-	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+	ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
 
 	memset(&args, 0, sizeof(args));
 
@@ -177,7 +177,7 @@
 void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
 {
 	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
-	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+	ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
 
 	memset(&args, 0, sizeof(args));
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 42448c7..db9b79a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1290,8 +1290,11 @@
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 904dabd..36d5128 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1257,8 +1257,11 @@
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 6d02bdb..75689a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1054,8 +1054,11 @@
 	fixed20_12 a, b, c;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 		priority_a_cnt = 0;
 		priority_b_cnt = 0;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index b1fb601..ba2321e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1211,8 +1211,11 @@
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 908011d..7abda94 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -113,6 +113,7 @@
 	struct ttm_bo_kmap_obj cache_kmap;
 	int next_cursor;
 	bool support_wide_screen;
+	bool DisableP2A;
 
 	enum ast_tx_chip tx_chip_type;
 	u8 dp501_maxclk;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f75c642..533e762 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -124,6 +124,12 @@
 	} else
 		*need_post = false;
 
+	/* Check P2A Access */
+	ast->DisableP2A = true;
+	data = ast_read32(ast, 0xf004);
+	if (data != 0xFFFFFFFF)
+		ast->DisableP2A = false;
+
 	/* Check if we support wide screen */
 	switch (ast->chip) {
 	case AST1180:
@@ -140,15 +146,17 @@
 			ast->support_wide_screen = true;
 		else {
 			ast->support_wide_screen = false;
-			/* Read SCU7c (silicon revision register) */
-			ast_write32(ast, 0xf004, 0x1e6e0000);
-			ast_write32(ast, 0xf000, 0x1);
-			data = ast_read32(ast, 0x1207c);
-			data &= 0x300;
-			if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
-				ast->support_wide_screen = true;
-			if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
-				ast->support_wide_screen = true;
+			if (ast->DisableP2A == false) {
+				/* Read SCU7c (silicon revision register) */
+				ast_write32(ast, 0xf004, 0x1e6e0000);
+				ast_write32(ast, 0xf000, 0x1);
+				data = ast_read32(ast, 0x1207c);
+				data &= 0x300;
+				if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+					ast->support_wide_screen = true;
+				if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+					ast->support_wide_screen = true;
+			}
 		}
 		break;
 	}
@@ -216,80 +224,81 @@
 	uint32_t data, data2;
 	uint32_t denum, num, div, ref_pll;
 
-	ast_write32(ast, 0xf004, 0x1e6e0000);
-	ast_write32(ast, 0xf000, 0x1);
-
-
-	ast_write32(ast, 0x10000, 0xfc600309);
-
-	do {
-		if (pci_channel_offline(dev->pdev))
-			return -EIO;
-	} while (ast_read32(ast, 0x10000) != 0x01);
-	data = ast_read32(ast, 0x10004);
-
-	if (data & 0x40)
+	if (ast->DisableP2A)
+	{
 		ast->dram_bus_width = 16;
+		ast->dram_type = AST_DRAM_1Gx16;
+		ast->mclk = 396;
+	}
 	else
-		ast->dram_bus_width = 32;
+	{
+		ast_write32(ast, 0xf004, 0x1e6e0000);
+		ast_write32(ast, 0xf000, 0x1);
+		data = ast_read32(ast, 0x10004);
 
-	if (ast->chip == AST2300 || ast->chip == AST2400) {
-		switch (data & 0x03) {
-		case 0:
-			ast->dram_type = AST_DRAM_512Mx16;
-			break;
-		default:
-		case 1:
-			ast->dram_type = AST_DRAM_1Gx16;
+		if (data & 0x40)
+			ast->dram_bus_width = 16;
+		else
+			ast->dram_bus_width = 32;
+
+		if (ast->chip == AST2300 || ast->chip == AST2400) {
+			switch (data & 0x03) {
+			case 0:
+				ast->dram_type = AST_DRAM_512Mx16;
+				break;
+			default:
+			case 1:
+				ast->dram_type = AST_DRAM_1Gx16;
+				break;
+			case 2:
+				ast->dram_type = AST_DRAM_2Gx16;
+				break;
+			case 3:
+				ast->dram_type = AST_DRAM_4Gx16;
+				break;
+			}
+		} else {
+			switch (data & 0x0c) {
+			case 0:
+			case 4:
+				ast->dram_type = AST_DRAM_512Mx16;
+				break;
+			case 8:
+				if (data & 0x40)
+					ast->dram_type = AST_DRAM_1Gx16;
+				else
+					ast->dram_type = AST_DRAM_512Mx32;
+				break;
+			case 0xc:
+				ast->dram_type = AST_DRAM_1Gx32;
+				break;
+			}
+		}
+
+		data = ast_read32(ast, 0x10120);
+		data2 = ast_read32(ast, 0x10170);
+		if (data2 & 0x2000)
+			ref_pll = 14318;
+		else
+			ref_pll = 12000;
+
+		denum = data & 0x1f;
+		num = (data & 0x3fe0) >> 5;
+		data = (data & 0xc000) >> 14;
+		switch (data) {
+		case 3:
+			div = 0x4;
 			break;
 		case 2:
-			ast->dram_type = AST_DRAM_2Gx16;
+		case 1:
+			div = 0x2;
 			break;
-		case 3:
-			ast->dram_type = AST_DRAM_4Gx16;
+		default:
+			div = 0x1;
 			break;
 		}
-	} else {
-		switch (data & 0x0c) {
-		case 0:
-		case 4:
-			ast->dram_type = AST_DRAM_512Mx16;
-			break;
-		case 8:
-			if (data & 0x40)
-				ast->dram_type = AST_DRAM_1Gx16;
-			else
-				ast->dram_type = AST_DRAM_512Mx32;
-			break;
-		case 0xc:
-			ast->dram_type = AST_DRAM_1Gx32;
-			break;
-		}
+		ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
 	}
-
-	data = ast_read32(ast, 0x10120);
-	data2 = ast_read32(ast, 0x10170);
-	if (data2 & 0x2000)
-		ref_pll = 14318;
-	else
-		ref_pll = 12000;
-
-	denum = data & 0x1f;
-	num = (data & 0x3fe0) >> 5;
-	data = (data & 0xc000) >> 14;
-	switch (data) {
-	case 3:
-		div = 0x4;
-		break;
-	case 2:
-	case 1:
-		div = 0x2;
-		break;
-	default:
-		div = 0x1;
-		break;
-	}
-	ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 30672a3d..270e8fb 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -375,12 +375,20 @@
 	ast_enable_mmio(dev);
 	ast_set_def_ext_reg(dev);
 
-	if (ast->chip == AST2300 || ast->chip == AST2400)
-		ast_init_dram_2300(dev);
-	else
-		ast_init_dram_reg(dev);
+	if (ast->DisableP2A == false)
+	{
+		if (ast->chip == AST2300 || ast->chip == AST2400)
+			ast_init_dram_2300(dev);
+		else
+			ast_init_dram_reg(dev);
 
-	ast_init_3rdtx(dev);
+		ast_init_3rdtx(dev);
+	}
+	else
+	{
+		if (ast->tx_chip_type != AST_TX_NONE)
+			ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);	/* Enable DVO */
+	}
 }
 
 /* AST 2300 DRAM settings */
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 2db7fb5..0e934a9 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -225,6 +225,7 @@
 
 	INIT_LIST_HEAD(&connector->probed_modes);
 	INIT_LIST_HEAD(&connector->modes);
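+	/* Serializes connector registration and unregistration */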
+	mutex_init(&connector->mutex);
 	connector->edid_blob_ptr = NULL;
 	connector->status = connector_status_unknown;
 
@@ -359,6 +360,8 @@
 		connector->funcs->atomic_destroy_state(connector,
 						       connector->state);
 
+	mutex_destroy(&connector->mutex);
+
 	memset(connector, 0, sizeof(*connector));
 }
 EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@
  */
 int drm_connector_register(struct drm_connector *connector)
 {
-	int ret;
+	int ret = 0;
 
-	if (connector->registered)
+	if (!connector->dev->registered)
 		return 0;
 
+	mutex_lock(&connector->mutex);
+	if (connector->registered)
+		goto unlock;
+
 	ret = drm_sysfs_connector_add(connector);
 	if (ret)
-		return ret;
+		goto unlock;
 
 	ret = drm_debugfs_connector_add(connector);
 	if (ret) {
@@ -397,12 +404,14 @@
 	drm_mode_object_register(connector->dev, &connector->base);
 
 	connector->registered = true;
-	return 0;
+	goto unlock;
 
 err_debugfs:
 	drm_debugfs_connector_remove(connector);
 err_sysfs:
 	drm_sysfs_connector_remove(connector);
+unlock:
+	mutex_unlock(&connector->mutex);
 	return ret;
 }
 EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@
  */
 void drm_connector_unregister(struct drm_connector *connector)
 {
-	if (!connector->registered)
+	mutex_lock(&connector->mutex);
+	if (!connector->registered) {
+		mutex_unlock(&connector->mutex);
 		return;
+	}
 
 	if (connector->funcs->early_unregister)
 		connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@
 	drm_debugfs_connector_remove(connector);
 
 	connector->registered = false;
+	mutex_unlock(&connector->mutex);
 }
 EXPORT_SYMBOL(drm_connector_unregister);
 
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 0f2fa90..362b8cd 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -710,6 +710,8 @@
 	if (ret)
 		goto err_minors;
 
+	dev->registered = true;
+
 	if (dev->driver->load) {
 		ret = dev->driver->load(dev, flags);
 		if (ret)
@@ -749,6 +751,8 @@
 
 	drm_lastclose(dev);
 
+	dev->registered = false;
+
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_modeset_unregister_all(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ca6efb6..7513e76 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1199,6 +1199,15 @@
 		goto out_free_priv;
 
 	pci_set_drvdata(pdev, &dev_priv->drm);
+	/*
+	 * Disable the system suspend direct complete optimization, which can
+	 * leave the device suspended skipping the driver's suspend handlers
+	 * if the device was already runtime suspended. This is needed due to
+	 * the difference in our runtime and system suspend sequence and
+	 * because the HDA driver may require us to enable the audio power
+	 * domain during system suspend.
+	 */
+	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
 
 	ret = i915_driver_init_early(dev_priv, ent);
 	if (ret < 0)
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index c0cb297..2cfe96d3 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -36,10 +36,6 @@
 #define VGT_VERSION_MAJOR 1
 #define VGT_VERSION_MINOR 0
 
-#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
-#define INTEL_VGT_IF_VERSION \
-	INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
-
 /*
  * notifications from guest to vgpu device model
  */
@@ -55,8 +51,8 @@
 
 struct vgt_if {
 	u64 magic;		/* VGT_MAGIC */
-	uint16_t version_major;
-	uint16_t version_minor;
+	u16 version_major;
+	u16 version_minor;
 	u32 vgt_id;		/* ID of vGT instance */
 	u32 rsv1[12];		/* pad to offset 0x40 */
 	/*
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index dae340c..125adcc 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -60,8 +60,8 @@
  */
 void i915_check_vgpu(struct drm_i915_private *dev_priv)
 {
-	uint64_t magic;
-	uint32_t version;
+	u64 magic;
+	u16 version_major;
 
 	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
@@ -69,10 +69,8 @@
 	if (magic != VGT_MAGIC)
 		return;
 
-	version = INTEL_VGT_IF_VERSION_ENCODE(
-		__raw_i915_read16(dev_priv, vgtif_reg(version_major)),
-		__raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
-	if (version != INTEL_VGT_IF_VERSION) {
+	version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
+	if (version_major < VGT_VERSION_MAJOR) {
 		DRM_INFO("VGT interface version mismatch!\n");
 		return;
 	}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5dc6082..f8efd20 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2253,6 +2253,9 @@
 	intel_fill_fb_ggtt_view(&view, fb, rotation);
 	vma = i915_gem_object_to_ggtt(obj, &view);
 
+	if (WARN_ON_ONCE(!vma))
+		return;
+
 	i915_vma_unpin_fence(vma);
 	i915_gem_object_unpin_from_display_plane(vma);
 }
@@ -13764,6 +13767,15 @@
 	 * type. For DP ports it behaves like most other platforms, but on HDMI
 	 * there's an extra 1 line difference. So we need to add two instead of
 	 * one to the value.
+	 *
+	 * On VLV/CHV DSI the scanline counter would appear to increment
+	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
+	 * that means we can't tell whether we're in vblank or not while
+	 * we're on that particular line. We must still set scanline_offset
+	 * to 1 so that the vblank timestamps come out correct when we query
+	 * the scanline counter from within the vblank interrupt handler.
+	 * However if queried just before the start of vblank we'll get an
+	 * answer that's slightly in the future.
 	 */
 	if (IS_GEN2(dev)) {
 		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2c6d59d..49de476 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4114,11 +4114,19 @@
 	struct drm_crtc_state *cstate;
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct skl_wm_values *results = &intel_state->wm_results;
+	struct drm_device *dev = state->dev;
 	struct skl_pipe_wm *pipe_wm;
 	bool changed = false;
 	int ret, i;
 
 	/*
+	 * When we distrust bios wm we always need to recompute to set the
+	 * expected DDB allocations for each CRTC.
+	 */
+	if (to_i915(dev)->wm.distrust_bios_wm)
+		changed = true;
+
+	/*
 	 * If this transaction isn't actually touching any CRTC's, don't
 	 * bother with watermark calculation.  Note that if we pass this
 	 * test, we're guaranteed to hold at least one CRTC state mutex,
@@ -4128,6 +4136,7 @@
 	 */
 	for_each_crtc_in_state(state, crtc, cstate, i)
 		changed = true;
+
 	if (!changed)
 		return 0;
 
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index dbed12c..64f4e2e 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -81,10 +81,13 @@
  */
 void intel_pipe_update_start(struct intel_crtc *crtc)
 {
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
 	long timeout = msecs_to_jiffies_timeout(1);
 	int scanline, min, max, vblank_start;
 	wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+	bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+		intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
 	DEFINE_WAIT(wait);
 
 	vblank_start = adjusted_mode->crtc_vblank_start;
@@ -136,6 +139,24 @@
 
 	drm_crtc_vblank_put(&crtc->base);
 
+	/*
+	 * On VLV/CHV DSI the scanline counter would appear to
+	 * increment approx. 1/3 of a scanline before start of vblank.
+	 * The registers still get latched at start of vblank however.
+	 * This means we must not write any registers on the first
+	 * line of vblank (since not the whole line is actually in
+	 * vblank). And unfortunately we can't use the interrupt to
+	 * wait here since it will fire too soon. We could use the
+	 * frame start interrupt instead since it will fire after the
+	 * critical scanline, but that would require more changes
+	 * in the interrupt code. So for now we'll just do the nasty
+	 * thing and poll for the bad scanline to pass us by.
+	 *
+	 * FIXME figure out if BXT+ DSI suffers from this as well
+	 */
+	while (need_vlv_dsi_wa && scanline == vblank_start)
+		scanline = intel_get_crtc_scanline(crtc);
+
 	crtc->debug.scanline_start = scanline;
 	crtc->debug.start_vbl_time = ktime_get();
 	crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 0e8c4d9..e097780 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1061,7 +1061,7 @@
 	}
 
 	err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
-	if (err) {
+	if (err < 0) {
 		dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
 			err);
 		return err;
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 9ded825..1ac5c6c 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -166,6 +166,7 @@
 	msm_gem_prime.o \
 	msm_gem_submit.o \
 	msm_gem_shrinker.o \
+	msm_gem_vma.o \
 	msm_gpu.o \
 	msm_iommu.o \
 	msm_smmu.o \
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index fd266ed..156abf0 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -583,7 +583,7 @@
 #endif
 	}
 
-	if (!gpu->mmu) {
+	if (!gpu->aspace) {
 		/* TODO we think it is possible to configure the GPU to
 		 * restrict access to VRAM carveout.  But the required
 		 * registers are unknown.  For now just bail out and
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index d0d3c7b..2dc9412 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -672,7 +672,7 @@
 #endif
 	}
 
-	if (!gpu->mmu) {
+	if (!gpu->aspace) {
 		/* TODO we think it is possible to configure the GPU to
 		 * restrict access to VRAM carveout.  But the required
 		 * registers are unknown.  For now just bail out and
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f386f46..961d47f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -64,7 +64,7 @@
 
 	DBG("%s", gpu->name);
 
-	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+	ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
 	if (ret) {
 		gpu->rb_iova = 0;
 		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -381,7 +381,7 @@
 		return ret;
 	}
 
-	mmu = gpu->mmu;
+	mmu = gpu->aspace->mmu;
 	if (mmu) {
 		ret = mmu->funcs->attach(mmu, iommu_ports,
 				ARRAY_SIZE(iommu_ports));
@@ -406,7 +406,7 @@
 		return -ENOMEM;
 	}
 
-	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
+	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
 			&adreno_gpu->memptrs_iova);
 	if (ret) {
 		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
@@ -423,8 +423,7 @@
 			msm_gem_put_vaddr(gpu->memptrs_bo);
 
 		if (gpu->memptrs_iova)
-			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
-
+			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.aspace);
 		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
 	}
 	release_firmware(gpu->pm4);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index a3c6f58..5a33fdb 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -261,6 +261,7 @@
 static int dp_display_process_hpd_high(struct dp_display_private *dp)
 {
 	int rc = 0;
+	u32 max_pclk_from_edid = 0;
 
 	rc = dp->panel->read_dpcd(dp->panel);
 	if (rc)
@@ -269,6 +270,11 @@
 	sde_get_edid(dp->dp_display.connector, &dp->aux->drm_aux->ddc,
 		(void **)&dp->panel->edid_ctrl);
 
+	max_pclk_from_edid = dp->panel->get_max_pclk(dp->panel);
+
+	dp->dp_display.max_pclk_khz = min(max_pclk_from_edid,
+		dp->parser->max_pclk_khz);
+
 	dp->dp_display.is_connected = true;
 	drm_helper_hpd_irq_event(dp->dp_display.connector->dev);
 
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 877287a..3caa277 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -29,6 +29,7 @@
 	struct dp_bridge *bridge;
 	struct drm_connector *connector;
 	bool is_connected;
+	u32 max_pclk_khz;
 
 	int (*enable)(struct dp_display *dp_display);
 	int (*post_enable)(struct dp_display *dp_display);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 78c04c4..91aafdd 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -450,5 +450,17 @@
 		struct drm_display_mode *mode,
 		void *display)
 {
-	return MODE_OK;
+	struct dp_display *dp_disp;
+
+	if (!mode || !display) {
+		pr_err("invalid params\n");
+		return MODE_ERROR;
+	}
+
+	dp_disp = display;
+
+	if (mode->clock > dp_disp->max_pclk_khz)
+		return MODE_BAD;
+	else
+		return MODE_OK;
 }
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index fed1dbb..4496e9a 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -72,6 +72,34 @@
 	return rc;
 }
 
+static u32 dp_panel_get_max_pclk(struct dp_panel *dp_panel)
+{
+	struct dp_panel_private *panel;
+	struct drm_dp_link *dp_link;
+	u32 bpc, bpp, max_data_rate_khz, max_pclk_rate_khz;
+	const u8 num_components = 3;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		return 0;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	dp_link = &dp_panel->dp_link;
+
+	bpc = sde_get_sink_bpc(dp_panel->edid_ctrl);
+	bpp = bpc * num_components;
+
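+	/*
+	 * Peak pixel clock the link can sustain: aggregate link data rate
+	 * divided by bits per pixel.
+	 */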
+	max_data_rate_khz = (dp_link->num_lanes * dp_link->rate * 8);
+	max_pclk_rate_khz = max_data_rate_khz / bpp;
+
+	pr_debug("bpp=%d, max_lane_cnt=%d\n", bpp, dp_link->num_lanes);
+	pr_debug("max_data_rate=%dKHz, max_pclk_rate=%dKHz\n",
+		max_data_rate_khz, max_pclk_rate_khz);
+
+	return max_pclk_rate_khz;
+}
+
 static int dp_panel_timing_cfg(struct dp_panel *dp_panel)
 {
 	int rc = 0;
@@ -276,6 +304,7 @@
 	dp_panel->timing_cfg = dp_panel_timing_cfg;
 	dp_panel->read_dpcd = dp_panel_read_dpcd;
 	dp_panel->get_link_rate = dp_panel_get_link_rate;
+	dp_panel->get_max_pclk = dp_panel_get_max_pclk;
 
 	return dp_panel;
 error:
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 5852c70..b63c51f 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -46,6 +46,7 @@
 	struct dp_panel_info pinfo;
 
 	u32 vic;
+	u32 max_pclk_khz;
 
 	int (*sde_edid_register)(struct dp_panel *dp_panel);
 	void (*sde_edid_deregister)(struct dp_panel *dp_panel);
@@ -53,6 +54,7 @@
 	int (*timing_cfg)(struct dp_panel *dp_panel);
 	int (*read_dpcd)(struct dp_panel *dp_panel);
 	u32 (*get_link_rate)(struct dp_panel *dp_panel);
+	u32 (*get_max_pclk)(struct dp_panel *dp_panel);
 };
 
 struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 4788f3b..b2aef9c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -9,7 +9,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
  */
 
 #define pr_fmt(fmt)	"dsi-ctrl:[%s] " fmt, __func__
@@ -25,6 +24,7 @@
 #include "msm_drv.h"
 #include "msm_kms.h"
 #include "msm_gpu.h"
+#include "msm_mmu.h"
 #include "dsi_ctrl.h"
 #include "dsi_ctrl_hw.h"
 #include "dsi_clk.h"
@@ -253,6 +253,16 @@
 	return 0;
 }
 
+static inline struct msm_gem_address_space*
+dsi_ctrl_get_aspace(struct dsi_ctrl *dsi_ctrl,
+		int domain)
+{
+	if (!dsi_ctrl || !dsi_ctrl->drm_dev)
+		return NULL;
+
+	return msm_gem_smmu_address_space_get(dsi_ctrl->drm_dev, domain);
+}
+
 static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl,
 				enum dsi_ctrl_driver_ops op,
 				u32 op_state)
@@ -884,7 +894,7 @@
 			  const struct mipi_dsi_msg *msg,
 			  u32 flags)
 {
-	int rc = 0;
+	int rc = 0, ret = 0;
 	struct mipi_dsi_packet packet;
 	struct dsi_ctrl_cmd_dma_fifo_info cmd;
 	struct dsi_ctrl_cmd_dma_info cmd_mem;
@@ -948,42 +958,59 @@
 	hw_flags |= (flags & DSI_CTRL_CMD_DEFER_TRIGGER) ?
 			DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER : 0;
 
-	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER))
-		reinit_completion(&dsi_ctrl->int_info.cmd_dma_done);
-
-	if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
-		dsi_ctrl->hw.ops.kickoff_command(&dsi_ctrl->hw,
-						&cmd_mem,
-						hw_flags);
-	} else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
-		dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw,
-						      &cmd,
-						      hw_flags);
+	if (flags & DSI_CTRL_CMD_DEFER_TRIGGER) {
+		if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
+			dsi_ctrl->hw.ops.kickoff_command(&dsi_ctrl->hw,
+							&cmd_mem,
+							hw_flags);
+		} else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
+			dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw,
+							      &cmd,
+							      hw_flags);
+		}
 	}
 
 	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
-		u32 retry = 10;
-		u32 status = 0;
-		u64 error = 0;
-		u32 mask = (DSI_CMD_MODE_DMA_DONE);
+		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
+					DSI_SINT_CMD_MODE_DMA_DONE, NULL);
+		reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
 
-		while ((status == 0) && (retry > 0)) {
-			udelay(1000);
-			status = dsi_ctrl->hw.ops.get_interrupt_status(
-								&dsi_ctrl->hw);
-			error = dsi_ctrl->hw.ops.get_error_status(
-								&dsi_ctrl->hw);
-			status &= mask;
-			retry--;
-			dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
-								status);
-			dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
-							    error);
+		if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
+			dsi_ctrl->hw.ops.kickoff_command(&dsi_ctrl->hw,
+						      &cmd_mem,
+						      hw_flags);
+		} else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
+			dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw,
+							      &cmd,
+							      hw_flags);
 		}
-		pr_debug("INT STATUS = %x, retry = %d\n", status, retry);
-		if (retry == 0)
-			pr_err("[DSI_%d]Command transfer failed\n",
-			       dsi_ctrl->cell_index);
+
+		ret = wait_for_completion_timeout(
+				&dsi_ctrl->irq_info.cmd_dma_done,
+				msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
+
+		if (ret == 0) {
+			u32 mask = DSI_CMD_MODE_DMA_DONE;
+			u32 status = dsi_ctrl->hw.ops.get_interrupt_status(
+							&dsi_ctrl->hw);
+
+			if (status & mask) {
+				status |= (DSI_CMD_MODE_DMA_DONE |
+						DSI_BTA_DONE);
+				dsi_ctrl->hw.ops.clear_interrupt_status(
+								&dsi_ctrl->hw,
+								status);
+				dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+						DSI_SINT_CMD_MODE_DMA_DONE);
+				complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
+				pr_warn("dma_tx done but irq not triggered\n");
+			} else {
+				rc = -ETIMEDOUT;
+				dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+						DSI_SINT_CMD_MODE_DMA_DONE);
+				pr_err("[DSI_%d]Command transfer failed\n",
+						dsi_ctrl->cell_index);
+			}
+		}
 
 		dsi_ctrl->hw.ops.reset_cmd_fifo(&dsi_ctrl->hw);
 	}
@@ -1152,19 +1179,19 @@
 	return rc;
 }
 
-int dsi_ctrl_intr_deinit(struct dsi_ctrl *dsi_ctrl)
-{
-	struct dsi_ctrl_interrupts *ints = &dsi_ctrl->int_info;
-
-	devm_free_irq(&dsi_ctrl->pdev->dev, ints->irq, dsi_ctrl);
-
-	return 0;
-}
-
 static int dsi_ctrl_buffer_deinit(struct dsi_ctrl *dsi_ctrl)
 {
+	struct msm_gem_address_space *aspace = NULL;
+
 	if (dsi_ctrl->tx_cmd_buf) {
-		msm_gem_put_iova(dsi_ctrl->tx_cmd_buf, 0);
+		aspace = dsi_ctrl_get_aspace(dsi_ctrl,
+				MSM_SMMU_DOMAIN_UNSECURE);
+		if (!aspace) {
+			pr_err("failed to get address space\n");
+			return -ENOMEM;
+		}
+
+		msm_gem_put_iova(dsi_ctrl->tx_cmd_buf, aspace);
 
 		msm_gem_free_object(dsi_ctrl->tx_cmd_buf);
 		dsi_ctrl->tx_cmd_buf = NULL;
@@ -1177,6 +1204,13 @@
 {
 	int rc = 0;
 	u32 iova = 0;
+	struct msm_gem_address_space *aspace = NULL;
+
+	aspace = dsi_ctrl_get_aspace(dsi_ctrl, MSM_SMMU_DOMAIN_UNSECURE);
+	if (!aspace) {
+		pr_err("failed to get address space\n");
+		return -ENOMEM;
+	}
 
 	dsi_ctrl->tx_cmd_buf = msm_gem_new(dsi_ctrl->drm_dev,
 					   SZ_4K,
@@ -1191,7 +1225,7 @@
 
 	dsi_ctrl->cmd_buffer_size = SZ_4K;
 
-	rc = msm_gem_get_iova(dsi_ctrl->tx_cmd_buf, 0, &iova);
+	rc = msm_gem_get_iova(dsi_ctrl->tx_cmd_buf, aspace, &iova);
 	if (rc) {
 		pr_err("failed to get iova, rc=%d\n", rc);
 		(void)dsi_ctrl_buffer_deinit(dsi_ctrl);
@@ -1259,6 +1293,10 @@
 
 	dsi_ctrl->cell_index = index;
 	dsi_ctrl->version = version;
+	dsi_ctrl->irq_info.irq_num = -1;
+	dsi_ctrl->irq_info.irq_stat_mask = 0x0;
+
+	spin_lock_init(&dsi_ctrl->irq_info.irq_lock);
 
 	dsi_ctrl->name = of_get_property(pdev->dev.of_node, "label", NULL);
 	if (!dsi_ctrl->name)
@@ -1677,6 +1715,236 @@
 	return 0;
 }
 
+static void dsi_ctrl_handle_error_status(struct dsi_ctrl *dsi_ctrl,
+				unsigned long error)
+{
+	pr_err("%s: %lu\n", __func__, error);
+
+	/* DTLN PHY error */
+	if (error & 0x3000e00)
+		if (dsi_ctrl->hw.ops.clear_error_status)
+			dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+					0x3000e00);
+
+	/* DSI FIFO OVERFLOW error */
+	if (error & 0xf0000) {
+		if (dsi_ctrl->hw.ops.clear_error_status)
+			dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+					0xf0000);
+	}
+
+	/* DSI FIFO UNDERFLOW error */
+	if (error & 0xf00000) {
+		if (dsi_ctrl->hw.ops.clear_error_status)
+			dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+					0xf00000);
+	}
+
+	/* DSI PLL UNLOCK error */
+	if (error & BIT(8))
+		if (dsi_ctrl->hw.ops.clear_error_status)
+			dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+					BIT(8));
+}
+
+/**
+ * dsi_ctrl_isr - interrupt service routine for DSI CTRL component
+ * @irq: Incoming IRQ number
+ * @ptr: Pointer to user data structure (struct dsi_ctrl)
+ * Returns: IRQ_HANDLED if no further action required
+ */
+static irqreturn_t dsi_ctrl_isr(int irq, void *ptr)
+{
+	struct dsi_ctrl *dsi_ctrl;
+	struct dsi_event_cb_info cb_info;
+	unsigned long flags;
+	uint32_t cell_index, status, i;
+	uint64_t errors;
+
+	if (!ptr)
+		return IRQ_NONE;
+	dsi_ctrl = ptr;
+
+	/* clear status interrupts */
+	if (dsi_ctrl->hw.ops.get_interrupt_status)
+		status = dsi_ctrl->hw.ops.get_interrupt_status(&dsi_ctrl->hw);
+	else
+		status = 0x0;
+
+	if (dsi_ctrl->hw.ops.clear_interrupt_status)
+		dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw, status);
+
+	spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
+	cell_index = dsi_ctrl->cell_index;
+	spin_unlock_irqrestore(&dsi_ctrl->irq_info.irq_lock, flags);
+
+	/* clear error interrupts */
+	if (dsi_ctrl->hw.ops.get_error_status)
+		errors = dsi_ctrl->hw.ops.get_error_status(&dsi_ctrl->hw);
+	else
+		errors = 0x0;
+
+	if (errors) {
+		/* handle DSI error recovery */
+		dsi_ctrl_handle_error_status(dsi_ctrl, errors);
+		if (dsi_ctrl->hw.ops.clear_error_status)
+			dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+							errors);
+	}
+
+	if (status & DSI_CMD_MODE_DMA_DONE) {
+		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+					DSI_SINT_CMD_MODE_DMA_DONE);
+		complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
+	}
+
+	if (status & DSI_CMD_FRAME_DONE) {
+		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+					DSI_SINT_CMD_FRAME_DONE);
+		complete_all(&dsi_ctrl->irq_info.cmd_frame_done);
+	}
+
+	if (status & DSI_VIDEO_MODE_FRAME_DONE) {
+		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+					DSI_SINT_VIDEO_MODE_FRAME_DONE);
+		complete_all(&dsi_ctrl->irq_info.vid_frame_done);
+	}
+
+	if (status & DSI_BTA_DONE) {
+		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+					DSI_SINT_BTA_DONE);
+		complete_all(&dsi_ctrl->irq_info.bta_done);
+	}
+
+	for (i = 0; status && i < DSI_STATUS_INTERRUPT_COUNT; ++i) {
+		if (status & 0x1) {
+			spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
+			cb_info = dsi_ctrl->irq_info.irq_stat_cb[i];
+			spin_unlock_irqrestore(
+					&dsi_ctrl->irq_info.irq_lock, flags);
+
+			if (cb_info.event_cb)
+				(void)cb_info.event_cb(cb_info.event_usr_ptr,
+						cb_info.event_idx,
+						cell_index, irq, 0, 0, 0);
+		}
+		status >>= 1;
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * dsi_ctrl_setup_isr - register ISR handler
+ * @dsi_ctrl: Pointer to associated dsi_ctrl structure
+ * Returns: Zero on success
+ */
+static int dsi_ctrl_setup_isr(struct dsi_ctrl *dsi_ctrl)
+{
+	int irq_num, rc;
+
+	if (!dsi_ctrl)
+		return -EINVAL;
+	if (dsi_ctrl->irq_info.irq_num != -1)
+		return 0;
+
+	init_completion(&dsi_ctrl->irq_info.cmd_dma_done);
+	init_completion(&dsi_ctrl->irq_info.vid_frame_done);
+	init_completion(&dsi_ctrl->irq_info.cmd_frame_done);
+	init_completion(&dsi_ctrl->irq_info.bta_done);
+
+	irq_num = platform_get_irq(dsi_ctrl->pdev, 0);
+	if (irq_num < 0) {
+		pr_err("[DSI_%d] Failed to get IRQ number, %d\n",
+				dsi_ctrl->cell_index, irq_num);
+		rc = irq_num;
+	} else {
+		rc = devm_request_threaded_irq(&dsi_ctrl->pdev->dev, irq_num,
+				dsi_ctrl_isr, NULL, 0, "dsi_ctrl", dsi_ctrl);
+		if (rc) {
+			pr_err("[DSI_%d] Failed to request IRQ, %d\n",
+					dsi_ctrl->cell_index, rc);
+		} else {
+			dsi_ctrl->irq_info.irq_num = irq_num;
+			disable_irq_nosync(irq_num);
+
+			pr_info("[DSI_%d] IRQ %d registered\n",
+					dsi_ctrl->cell_index, irq_num);
+		}
+	}
+	return rc;
+}
+
+/**
+ * _dsi_ctrl_destroy_isr - unregister ISR handler
+ * @dsi_ctrl: Pointer to associated dsi_ctrl structure
+ */
+static void _dsi_ctrl_destroy_isr(struct dsi_ctrl *dsi_ctrl)
+{
+	if (!dsi_ctrl || !dsi_ctrl->pdev || dsi_ctrl->irq_info.irq_num < 0)
+		return;
+
+	if (dsi_ctrl->irq_info.irq_num != -1) {
+		devm_free_irq(&dsi_ctrl->pdev->dev,
+				dsi_ctrl->irq_info.irq_num, dsi_ctrl);
+		dsi_ctrl->irq_info.irq_num = -1;
+	}
+}
+
+void dsi_ctrl_enable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
+		uint32_t intr_idx, struct dsi_event_cb_info *event_info)
+{
+	unsigned long flags;
+
+	if (!dsi_ctrl || dsi_ctrl->irq_info.irq_num == -1 ||
+			intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
+		return;
+
+	spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
+
+	if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx] == 0) {
+		/* enable irq on first request */
+		if (dsi_ctrl->irq_info.irq_stat_mask == 0)
+			enable_irq(dsi_ctrl->irq_info.irq_num);
+
+		/* update hardware mask */
+		dsi_ctrl->irq_info.irq_stat_mask |= BIT(intr_idx);
+		dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw,
+				dsi_ctrl->irq_info.irq_stat_mask);
+	}
+	++(dsi_ctrl->irq_info.irq_stat_refcount[intr_idx]);
+
+	if (event_info)
+		dsi_ctrl->irq_info.irq_stat_cb[intr_idx] = *event_info;
+
+	spin_unlock_irqrestore(&dsi_ctrl->irq_info.irq_lock, flags);
+}
+
+void dsi_ctrl_disable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
+		uint32_t intr_idx)
+{
+	unsigned long flags;
+
+	if (!dsi_ctrl || dsi_ctrl->irq_info.irq_num == -1 ||
+			intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
+		return;
+
+	spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
+
+	if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx])
+		if (--(dsi_ctrl->irq_info.irq_stat_refcount[intr_idx]) == 0) {
+			dsi_ctrl->irq_info.irq_stat_mask &= ~BIT(intr_idx);
+			dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw,
+					dsi_ctrl->irq_info.irq_stat_mask);
+
+			/* don't need irq if no lines are enabled */
+			if (dsi_ctrl->irq_info.irq_stat_mask == 0)
+				disable_irq_nosync(dsi_ctrl->irq_info.irq_num);
+		}
+
+	spin_unlock_irqrestore(&dsi_ctrl->irq_info.irq_lock, flags);
+}
+
 /**
  * dsi_ctrl_host_init() - Initialize DSI host hardware.
  * @dsi_ctrl:        DSI controller handle.
@@ -1729,7 +1997,7 @@
 					  &dsi_ctrl->host_config.video_timing);
 	}
 
-
+	dsi_ctrl_setup_isr(dsi_ctrl);
 
 	dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
 	dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0x0);
@@ -1777,6 +2045,8 @@
 
 	mutex_lock(&dsi_ctrl->ctrl_lock);
 
+	_dsi_ctrl_destroy_isr(dsi_ctrl);
+
 	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x0);
 	if (rc) {
 		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
@@ -1933,7 +2203,7 @@
  */
 int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
 {
-	int rc = 0;
+	int rc = 0, ret = 0;
 	u32 status = 0;
 	u32 mask = (DSI_CMD_MODE_DMA_DONE);
 
@@ -1944,27 +2214,43 @@
 
 	mutex_lock(&dsi_ctrl->ctrl_lock);
 
-	reinit_completion(&dsi_ctrl->int_info.cmd_dma_done);
-
-	dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
+	if (!(flags & DSI_CTRL_CMD_BROADCAST_MASTER))
+		dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
 
 	if ((flags & DSI_CTRL_CMD_BROADCAST) &&
-	    (flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
-		u32 retry = 10;
+		(flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
+		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
+					DSI_SINT_CMD_MODE_DMA_DONE, NULL);
+		reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
 
-		while ((status == 0) && (retry > 0)) {
-			udelay(1000);
+		/* trigger command */
+		dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
+
+		ret = wait_for_completion_timeout(
+				&dsi_ctrl->irq_info.cmd_dma_done,
+				msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
+
+		if (ret == 0) {
 			status = dsi_ctrl->hw.ops.get_interrupt_status(
 								&dsi_ctrl->hw);
-			status &= mask;
-			retry--;
-			dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
+			if (status & mask) {
+				status |= (DSI_CMD_MODE_DMA_DONE |
+						DSI_BTA_DONE);
+				dsi_ctrl->hw.ops.clear_interrupt_status(
+								&dsi_ctrl->hw,
 								status);
+				dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+						DSI_SINT_CMD_MODE_DMA_DONE);
+				complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
+				pr_warn("dma_tx done but irq not triggered\n");
+			} else {
+				rc = -ETIMEDOUT;
+				dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+						DSI_SINT_CMD_MODE_DMA_DONE);
+				pr_err("[DSI_%d]Command transfer failed\n",
+						dsi_ctrl->cell_index);
+			}
 		}
-		pr_debug("INT STATUS = %x, retry = %d\n", status, retry);
-		if (retry == 0)
-			pr_err("[DSI_%d]Command transfer failed\n",
-			       dsi_ctrl->cell_index);
 	}
 
 	mutex_unlock(&dsi_ctrl->ctrl_lock);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index 7f36fde..ec535ce11 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -138,33 +138,26 @@
 
 /**
  * struct dsi_ctrl_interrupts - define interrupt information
- * @irq:                   IRQ id for the DSI controller.
- * @intr_lock:             Spinlock to protect access to interrupt registers.
- * @interrupt_status:      Status interrupts which need to be serviced.
- * @error_status:          Error interurpts which need to be serviced.
- * @interrupts_enabled:    Status interrupts which are enabled.
- * @errors_enabled:        Error interrupts which are enabled.
+ * @irq_lock:            Spinlock for ISR handler.
+ * @irq_num:             Linux interrupt number associated with device.
+ * @irq_stat_mask:       Hardware mask of currently enabled interrupts.
+ * @irq_stat_refcount:   Number of times each interrupt has been requested.
+ * @irq_stat_cb:         Status IRQ callback definitions.
  * @cmd_dma_done:          Completion signal for DSI_CMD_MODE_DMA_DONE interrupt
  * @vid_frame_done:        Completion signal for DSI_VIDEO_MODE_FRAME_DONE int.
  * @cmd_frame_done:        Completion signal for DSI_CMD_FRAME_DONE interrupt.
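+ * @bta_done:              Completion signal for DSI_BTA_DONE interrupt.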
- * @interrupt_done_work:   Work item for servicing status interrupts.
- * @error_status_work:     Work item for servicing error interrupts.
  */
 struct dsi_ctrl_interrupts {
-	u32 irq;
-	spinlock_t intr_lock; /* protects access to interrupt registers */
-	u32 interrupt_status;
-	u64 error_status;
-
-	u32 interrupts_enabled;
-	u64 errors_enabled;
+	spinlock_t irq_lock;
+	int irq_num;
+	uint32_t irq_stat_mask;
+	int irq_stat_refcount[DSI_STATUS_INTERRUPT_COUNT];
+	struct dsi_event_cb_info irq_stat_cb[DSI_STATUS_INTERRUPT_COUNT];
 
 	struct completion cmd_dma_done;
 	struct completion vid_frame_done;
 	struct completion cmd_frame_done;
-
-	struct work_struct interrupt_done_work;
-	struct work_struct error_status_work;
+	struct completion bta_done;
 };
 
 /**
@@ -180,7 +173,7 @@
  * @hw:                  DSI controller hardware object.
  * @current_state:       Current driver and hardware state.
  * @clk_cb:		 Callback for DSI clock control.
- * @int_info:            Interrupt information.
+ * @irq_info:            Interrupt information.
  * @clk_info:            Clock information.
  * @clk_freq:            DSi Link clock frequency information.
  * @pwr_info:            Power information.
@@ -212,7 +205,8 @@
 	struct dsi_ctrl_state_info current_state;
 	struct clk_ctrl_cb clk_cb;
 
-	struct dsi_ctrl_interrupts int_info;
+	struct dsi_ctrl_interrupts irq_info;
+
 	/* Clock and power states */
 	struct dsi_ctrl_clk_info clk_info;
 	struct link_clk_freq clk_freq;
@@ -560,6 +554,23 @@
 			      struct dsi_clk_link_set *source_clks);
 
 /**
+ * dsi_ctrl_enable_status_interrupt() - enable status interrupts
+ * @dsi_ctrl:        DSI controller handle.
+ * @intr_idx:        Index of interrupt to enable.
+ * @event_info:      Pointer to event callback definition
+ */
+void dsi_ctrl_enable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
+		uint32_t intr_idx, struct dsi_event_cb_info *event_info);
+
+/**
+ * dsi_ctrl_disable_status_interrupt() - disable status interrupts
+ * @dsi_ctrl:        DSI controller handle.
+ * @intr_idx:        Index of interrupt to disable.
+ */
+void dsi_ctrl_disable_status_interrupt(
+		struct dsi_ctrl *dsi_ctrl, uint32_t intr_idx);
+
+/**
  * dsi_ctrl_drv_register() - register platform driver for dsi controller
  */
 void dsi_ctrl_drv_register(void);
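
A minimal usage sketch of the refcounted status-interrupt helpers declared above; the BTA wait is an assumed example and simply mirrors the DMA-done pattern used by dsi_ctrl_cmd_transfer() in this patch (the ISR disables the interrupt and completes bta_done on success, so the caller only drops it on timeout):

	dsi_ctrl_enable_status_interrupt(dsi_ctrl, DSI_SINT_BTA_DONE, NULL);
	reinit_completion(&dsi_ctrl->irq_info.bta_done);

	/* trigger the BTA in hardware here */

	if (!wait_for_completion_timeout(&dsi_ctrl->irq_info.bta_done,
			msecs_to_jiffies(DSI_CTRL_TX_TO_MS)))
		dsi_ctrl_disable_status_interrupt(dsi_ctrl, DSI_SINT_BTA_DONE);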
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index bb72807..74be279 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -9,7 +9,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
  */
 
 #ifndef _DSI_CTRL_HW_H_
@@ -84,6 +83,36 @@
 };
 
 /**
+ * enum dsi_status_int_index - index of interrupts generated by DSI controller
+ * @DSI_SINT_CMD_MODE_DMA_DONE:        Command mode DMA packets are sent out.
+ * @DSI_SINT_CMD_STREAM0_FRAME_DONE:   A frame of cmd mode stream0 is sent out.
+ * @DSI_SINT_CMD_STREAM1_FRAME_DONE:   A frame of cmd mode stream1 is sent out.
+ * @DSI_SINT_CMD_STREAM2_FRAME_DONE:   A frame of cmd mode stream2 is sent out.
+ * @DSI_SINT_VIDEO_MODE_FRAME_DONE:    A frame of video mode stream is sent out.
+ * @DSI_SINT_BTA_DONE:                 A BTA is completed.
+ * @DSI_SINT_CMD_FRAME_DONE:           A frame of selected cmd mode stream is
+ *                                     sent out by MDP.
+ * @DSI_SINT_DYN_REFRESH_DONE:         The dynamic refresh operation completed.
+ * @DSI_SINT_DESKEW_DONE:              The deskew calibration operation is done.
+ * @DSI_SINT_DYN_BLANK_DMA_DONE:       The dynamic blanking DMA operation has
+ *                                     completed.
+ */
+enum dsi_status_int_index {
+	DSI_SINT_CMD_MODE_DMA_DONE = 0,
+	DSI_SINT_CMD_STREAM0_FRAME_DONE = 1,
+	DSI_SINT_CMD_STREAM1_FRAME_DONE = 2,
+	DSI_SINT_CMD_STREAM2_FRAME_DONE = 3,
+	DSI_SINT_VIDEO_MODE_FRAME_DONE = 4,
+	DSI_SINT_BTA_DONE = 5,
+	DSI_SINT_CMD_FRAME_DONE = 6,
+	DSI_SINT_DYN_REFRESH_DONE = 7,
+	DSI_SINT_DESKEW_DONE = 8,
+	DSI_SINT_DYN_BLANK_DMA_DONE = 9,
+
+	DSI_STATUS_INTERRUPT_COUNT
+};
+
+/**
  * enum dsi_status_int_type - status interrupts generated by DSI controller
  * @DSI_CMD_MODE_DMA_DONE:        Command mode DMA packets are sent out.
  * @DSI_CMD_STREAM0_FRAME_DONE:   A frame of command mode stream0 is sent out.
@@ -99,16 +128,89 @@
  *                                completed.
  */
 enum dsi_status_int_type {
-	DSI_CMD_MODE_DMA_DONE = BIT(0),
-	DSI_CMD_STREAM0_FRAME_DONE = BIT(1),
-	DSI_CMD_STREAM1_FRAME_DONE = BIT(2),
-	DSI_CMD_STREAM2_FRAME_DONE = BIT(3),
-	DSI_VIDEO_MODE_FRAME_DONE = BIT(4),
-	DSI_BTA_DONE = BIT(5),
-	DSI_CMD_FRAME_DONE = BIT(6),
-	DSI_DYN_REFRESH_DONE = BIT(7),
-	DSI_DESKEW_DONE = BIT(8),
-	DSI_DYN_BLANK_DMA_DONE = BIT(9)
+	DSI_CMD_MODE_DMA_DONE = BIT(DSI_SINT_CMD_MODE_DMA_DONE),
+	DSI_CMD_STREAM0_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM0_FRAME_DONE),
+	DSI_CMD_STREAM1_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM1_FRAME_DONE),
+	DSI_CMD_STREAM2_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM2_FRAME_DONE),
+	DSI_VIDEO_MODE_FRAME_DONE = BIT(DSI_SINT_VIDEO_MODE_FRAME_DONE),
+	DSI_BTA_DONE = BIT(DSI_SINT_BTA_DONE),
+	DSI_CMD_FRAME_DONE = BIT(DSI_SINT_CMD_FRAME_DONE),
+	DSI_DYN_REFRESH_DONE = BIT(DSI_SINT_DYN_REFRESH_DONE),
+	DSI_DESKEW_DONE = BIT(DSI_SINT_DESKEW_DONE),
+	DSI_DYN_BLANK_DMA_DONE = BIT(DSI_SINT_DYN_BLANK_DMA_DONE)
+};
+
+/**
+ * enum dsi_error_int_index - index of error interrupts from DSI controller
+ * @DSI_EINT_RDBK_SINGLE_ECC_ERR:        Single bit ECC error in read packet.
+ * @DSI_EINT_RDBK_MULTI_ECC_ERR:         Multi bit ECC error in read packet.
+ * @DSI_EINT_RDBK_CRC_ERR:               CRC error in read packet.
+ * @DSI_EINT_RDBK_INCOMPLETE_PKT:        Incomplete read packet.
+ * @DSI_EINT_PERIPH_ERROR_PKT:           Error packet returned from peripheral.
+ * @DSI_EINT_LP_RX_TIMEOUT:              Low power reverse transmission timeout.
+ * @DSI_EINT_HS_TX_TIMEOUT:              High speed fwd transmission timeout.
+ * @DSI_EINT_BTA_TIMEOUT:                BTA timeout.
+ * @DSI_EINT_PLL_UNLOCK:                 PLL has unlocked.
+ * @DSI_EINT_DLN0_ESC_ENTRY_ERR:         Incorrect LP Rx escape entry.
+ * @DSI_EINT_DLN0_ESC_SYNC_ERR:          LP Rx data is not byte aligned.
+ * @DSI_EINT_DLN0_LP_CONTROL_ERR:        Incorrect LP Rx state sequence.
+ * @DSI_EINT_PENDING_HS_TX_TIMEOUT:      Pending High-speed transfer timeout.
+ * @DSI_EINT_INTERLEAVE_OP_CONTENTION:   Interleave operation contention.
+ * @DSI_EINT_CMD_DMA_FIFO_UNDERFLOW:     Command mode DMA FIFO underflow.
+ * @DSI_EINT_CMD_MDP_FIFO_UNDERFLOW:     Command MDP FIFO underflow (failed to
+ *                                       receive one complete line from MDP).
+ * @DSI_EINT_DLN0_HS_FIFO_OVERFLOW:      High speed FIFO data lane 0 overflows.
+ * @DSI_EINT_DLN1_HS_FIFO_OVERFLOW:      High speed FIFO data lane 1 overflows.
+ * @DSI_EINT_DLN2_HS_FIFO_OVERFLOW:      High speed FIFO data lane 2 overflows.
+ * @DSI_EINT_DLN3_HS_FIFO_OVERFLOW:      High speed FIFO data lane 3 overflows.
+ * @DSI_EINT_DLN0_HS_FIFO_UNDERFLOW:     High speed FIFO data lane 0 underflows.
+ * @DSI_EINT_DLN1_HS_FIFO_UNDERFLOW:     High speed FIFO data lane 1 underflows.
+ * @DSI_EINT_DLN2_HS_FIFO_UNDERFLOW:     High speed FIFO data lane 2 underflows.
+ * @DSI_EINT_DLN3_HS_FIFO_UNDERFLOW:     High speed FIFO data lane 3 underflows.
+ * @DSI_EINT_DLN0_LP0_CONTENTION:        PHY level contention while lane 0 low.
+ * @DSI_EINT_DLN1_LP0_CONTENTION:        PHY level contention while lane 1 low.
+ * @DSI_EINT_DLN2_LP0_CONTENTION:        PHY level contention while lane 2 low.
+ * @DSI_EINT_DLN3_LP0_CONTENTION:        PHY level contention while lane 3 low.
+ * @DSI_EINT_DLN0_LP1_CONTENTION:        PHY level contention while lane 0 high.
+ * @DSI_EINT_DLN1_LP1_CONTENTION:        PHY level contention while lane 1 high.
+ * @DSI_EINT_DLN2_LP1_CONTENTION:        PHY level contention while lane 2 high.
+ * @DSI_EINT_DLN3_LP1_CONTENTION:        PHY level contention while lane 3 high.
+ */
+enum dsi_error_int_index {
+	DSI_EINT_RDBK_SINGLE_ECC_ERR = 0,
+	DSI_EINT_RDBK_MULTI_ECC_ERR = 1,
+	DSI_EINT_RDBK_CRC_ERR = 2,
+	DSI_EINT_RDBK_INCOMPLETE_PKT = 3,
+	DSI_EINT_PERIPH_ERROR_PKT = 4,
+	DSI_EINT_LP_RX_TIMEOUT = 5,
+	DSI_EINT_HS_TX_TIMEOUT = 6,
+	DSI_EINT_BTA_TIMEOUT = 7,
+	DSI_EINT_PLL_UNLOCK = 8,
+	DSI_EINT_DLN0_ESC_ENTRY_ERR = 9,
+	DSI_EINT_DLN0_ESC_SYNC_ERR = 10,
+	DSI_EINT_DLN0_LP_CONTROL_ERR = 11,
+	DSI_EINT_PENDING_HS_TX_TIMEOUT = 12,
+	DSI_EINT_INTERLEAVE_OP_CONTENTION = 13,
+	DSI_EINT_CMD_DMA_FIFO_UNDERFLOW = 14,
+	DSI_EINT_CMD_MDP_FIFO_UNDERFLOW = 15,
+	DSI_EINT_DLN0_HS_FIFO_OVERFLOW = 16,
+	DSI_EINT_DLN1_HS_FIFO_OVERFLOW = 17,
+	DSI_EINT_DLN2_HS_FIFO_OVERFLOW = 18,
+	DSI_EINT_DLN3_HS_FIFO_OVERFLOW = 19,
+	DSI_EINT_DLN0_HS_FIFO_UNDERFLOW = 20,
+	DSI_EINT_DLN1_HS_FIFO_UNDERFLOW = 21,
+	DSI_EINT_DLN2_HS_FIFO_UNDERFLOW = 22,
+	DSI_EINT_DLN3_HS_FIFO_UNDERFLOW = 23,
+	DSI_EINT_DLN0_LP0_CONTENTION = 24,
+	DSI_EINT_DLN1_LP0_CONTENTION = 25,
+	DSI_EINT_DLN2_LP0_CONTENTION = 26,
+	DSI_EINT_DLN3_LP0_CONTENTION = 27,
+	DSI_EINT_DLN0_LP1_CONTENTION = 28,
+	DSI_EINT_DLN1_LP1_CONTENTION = 29,
+	DSI_EINT_DLN2_LP1_CONTENTION = 30,
+	DSI_EINT_DLN3_LP1_CONTENTION = 31,
+
+	DSI_ERROR_INTERRUPT_COUNT
 };
 
 /**
@@ -148,38 +250,38 @@
  * @DSI_DLN3_LP1_CONTENTION:        PHY level contention while lane 3 is high.
  */
 enum dsi_error_int_type {
-	DSI_RDBK_SINGLE_ECC_ERR = BIT(0),
-	DSI_RDBK_MULTI_ECC_ERR = BIT(1),
-	DSI_RDBK_CRC_ERR = BIT(2),
-	DSI_RDBK_INCOMPLETE_PKT = BIT(3),
-	DSI_PERIPH_ERROR_PKT = BIT(4),
-	DSI_LP_RX_TIMEOUT = BIT(5),
-	DSI_HS_TX_TIMEOUT = BIT(6),
-	DSI_BTA_TIMEOUT = BIT(7),
-	DSI_PLL_UNLOCK = BIT(8),
-	DSI_DLN0_ESC_ENTRY_ERR = BIT(9),
-	DSI_DLN0_ESC_SYNC_ERR = BIT(10),
-	DSI_DLN0_LP_CONTROL_ERR = BIT(11),
-	DSI_PENDING_HS_TX_TIMEOUT = BIT(12),
-	DSI_INTERLEAVE_OP_CONTENTION = BIT(13),
-	DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(14),
-	DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(15),
-	DSI_DLN0_HS_FIFO_OVERFLOW = BIT(16),
-	DSI_DLN1_HS_FIFO_OVERFLOW = BIT(17),
-	DSI_DLN2_HS_FIFO_OVERFLOW = BIT(18),
-	DSI_DLN3_HS_FIFO_OVERFLOW = BIT(19),
-	DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(20),
-	DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(21),
-	DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(22),
-	DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(23),
-	DSI_DLN0_LP0_CONTENTION = BIT(24),
-	DSI_DLN1_LP0_CONTENTION = BIT(25),
-	DSI_DLN2_LP0_CONTENTION = BIT(26),
-	DSI_DLN3_LP0_CONTENTION = BIT(27),
-	DSI_DLN0_LP1_CONTENTION = BIT(28),
-	DSI_DLN1_LP1_CONTENTION = BIT(29),
-	DSI_DLN2_LP1_CONTENTION = BIT(30),
-	DSI_DLN3_LP1_CONTENTION = BIT(31),
+	DSI_RDBK_SINGLE_ECC_ERR = BIT(DSI_EINT_RDBK_SINGLE_ECC_ERR),
+	DSI_RDBK_MULTI_ECC_ERR = BIT(DSI_EINT_RDBK_MULTI_ECC_ERR),
+	DSI_RDBK_CRC_ERR = BIT(DSI_EINT_RDBK_CRC_ERR),
+	DSI_RDBK_INCOMPLETE_PKT = BIT(DSI_EINT_RDBK_INCOMPLETE_PKT),
+	DSI_PERIPH_ERROR_PKT = BIT(DSI_EINT_PERIPH_ERROR_PKT),
+	DSI_LP_RX_TIMEOUT = BIT(DSI_EINT_LP_RX_TIMEOUT),
+	DSI_HS_TX_TIMEOUT = BIT(DSI_EINT_HS_TX_TIMEOUT),
+	DSI_BTA_TIMEOUT = BIT(DSI_EINT_BTA_TIMEOUT),
+	DSI_PLL_UNLOCK = BIT(DSI_EINT_PLL_UNLOCK),
+	DSI_DLN0_ESC_ENTRY_ERR = BIT(DSI_EINT_DLN0_ESC_ENTRY_ERR),
+	DSI_DLN0_ESC_SYNC_ERR = BIT(DSI_EINT_DLN0_ESC_SYNC_ERR),
+	DSI_DLN0_LP_CONTROL_ERR = BIT(DSI_EINT_DLN0_LP_CONTROL_ERR),
+	DSI_PENDING_HS_TX_TIMEOUT = BIT(DSI_EINT_PENDING_HS_TX_TIMEOUT),
+	DSI_INTERLEAVE_OP_CONTENTION = BIT(DSI_EINT_INTERLEAVE_OP_CONTENTION),
+	DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(DSI_EINT_CMD_DMA_FIFO_UNDERFLOW),
+	DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(DSI_EINT_CMD_MDP_FIFO_UNDERFLOW),
+	DSI_DLN0_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN0_HS_FIFO_OVERFLOW),
+	DSI_DLN1_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN1_HS_FIFO_OVERFLOW),
+	DSI_DLN2_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN2_HS_FIFO_OVERFLOW),
+	DSI_DLN3_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN3_HS_FIFO_OVERFLOW),
+	DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN0_HS_FIFO_UNDERFLOW),
+	DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN1_HS_FIFO_UNDERFLOW),
+	DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN2_HS_FIFO_UNDERFLOW),
+	DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN3_HS_FIFO_UNDERFLOW),
+	DSI_DLN0_LP0_CONTENTION = BIT(DSI_EINT_DLN0_LP0_CONTENTION),
+	DSI_DLN1_LP0_CONTENTION = BIT(DSI_EINT_DLN1_LP0_CONTENTION),
+	DSI_DLN2_LP0_CONTENTION = BIT(DSI_EINT_DLN2_LP0_CONTENTION),
+	DSI_DLN3_LP0_CONTENTION = BIT(DSI_EINT_DLN3_LP0_CONTENTION),
+	DSI_DLN0_LP1_CONTENTION = BIT(DSI_EINT_DLN0_LP1_CONTENTION),
+	DSI_DLN1_LP1_CONTENTION = BIT(DSI_EINT_DLN1_LP1_CONTENTION),
+	DSI_DLN2_LP1_CONTENTION = BIT(DSI_EINT_DLN2_LP1_CONTENTION),
+	DSI_DLN3_LP1_CONTENTION = BIT(DSI_EINT_DLN3_LP1_CONTENTION),
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index a024c43..0af6f25 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -706,6 +706,8 @@
 {
 	u32 reg = 0;
 
+	reg = DSI_R32(ctrl, DSI_INT_CTRL);
+
 	if (ints & DSI_CMD_MODE_DMA_DONE)
 		reg |= BIT(0);
 	if (ints & DSI_CMD_FRAME_DONE)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
index 77da9b4..1e6727b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -9,7 +9,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
  */
 
 #ifndef _DSI_DEFS_H_
@@ -447,5 +446,14 @@
 			r1->h == r2->h;
 }
 
+struct dsi_event_cb_info {
+	uint32_t event_idx;
+	void *event_usr_ptr;
+
+	int (*event_cb)(void *event_usr_ptr,
+		uint32_t event_idx, uint32_t instance_idx,
+		uint32_t data0, uint32_t data1,
+		uint32_t data2, uint32_t data3);
+};
 
 #endif /* _DSI_DEFS_H_ */
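
The dsi_event_cb_info callback added above is invoked from the DSI controller ISR with the controller's cell index and IRQ number; a hypothetical callback (example_event_cb is not part of this patch) showing the expected shape:

	static int example_event_cb(void *event_usr_ptr,
			uint32_t event_idx, uint32_t instance_idx,
			uint32_t data0, uint32_t data1,
			uint32_t data2, uint32_t data3)
	{
		/* instance_idx is the controller cell index, data0 the IRQ */
		pr_debug("event %u from DSI ctrl %u\n", event_idx, instance_idx);
		return 0;
	}

sde_connector_trigger_event, wired up in dsi_drm.c later in this series, is the real consumer of this prototype.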
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 3dd4950..6f6c559 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -19,6 +19,8 @@
 #include <linux/err.h>
 
 #include "msm_drv.h"
+#include "sde_connector.h"
+#include "msm_mmu.h"
 #include "dsi_display.h"
 #include "dsi_panel.h"
 #include "dsi_ctrl.h"
@@ -109,6 +111,23 @@
 
 	return rc;
 }
+
+enum dsi_pixel_format dsi_display_get_dst_format(void *display)
+{
+	enum dsi_pixel_format format = DSI_PIXEL_FORMAT_MAX;
+	struct dsi_display *dsi_display = (struct dsi_display *)display;
+
+	if (!dsi_display || !dsi_display->panel) {
+		pr_err("Invalid params(s) dsi_display %pK, panel %pK\n",
+			dsi_display,
+			((dsi_display) ? dsi_display->panel : NULL));
+		return format;
+	}
+
+	format = dsi_display->panel->host_config.dst_format;
+	return format;
+}
+
 static ssize_t debugfs_dump_info_read(struct file *file,
 				      char __user *buff,
 				      size_t count,
@@ -499,7 +518,45 @@
 	return 0;
 }
 
+void dsi_display_enable_event(struct dsi_display *display,
+		uint32_t event_idx, struct dsi_event_cb_info *event_info,
+		bool enable)
+{
+	uint32_t irq_status_idx = DSI_STATUS_INTERRUPT_COUNT;
+	int i;
 
+	if (!display) {
+		pr_err("invalid display\n");
+		return;
+	}
+
+	if (event_info)
+		event_info->event_idx = event_idx;
+
+	switch (event_idx) {
+	case SDE_CONN_EVENT_VID_DONE:
+		irq_status_idx = DSI_SINT_VIDEO_MODE_FRAME_DONE;
+		break;
+	case SDE_CONN_EVENT_CMD_DONE:
+		irq_status_idx = DSI_SINT_CMD_FRAME_DONE;
+		break;
+	default:
+		/* nothing to do */
+		pr_debug("[%s] unhandled event %d\n", display->name, event_idx);
+		return;
+	}
+
+	if (enable) {
+		for (i = 0; i < display->ctrl_count; i++)
+			dsi_ctrl_enable_status_interrupt(
+					display->ctrl[i].ctrl, irq_status_idx,
+					event_info);
+	} else {
+		for (i = 0; i < display->ctrl_count; i++)
+			dsi_ctrl_disable_status_interrupt(
+					display->ctrl[i].ctrl, irq_status_idx);
+	}
+}
 
 static int dsi_display_ctrl_power_on(struct dsi_display *display)
 {
@@ -1215,8 +1272,7 @@
 			goto error;
 		}
 
-		rc = dsi_ctrl_cmd_tx_trigger(ctrl->ctrl,
-			DSI_CTRL_CMD_BROADCAST);
+		rc = dsi_ctrl_cmd_tx_trigger(ctrl->ctrl, flags);
 		if (rc) {
 			pr_err("[%s] cmd trigger failed, rc=%d\n",
 			       display->name, rc);
@@ -1224,9 +1280,7 @@
 		}
 	}
 
-	rc = dsi_ctrl_cmd_tx_trigger(m_ctrl->ctrl,
-				(DSI_CTRL_CMD_BROADCAST_MASTER |
-				 DSI_CTRL_CMD_BROADCAST));
+	rc = dsi_ctrl_cmd_tx_trigger(m_ctrl->ctrl, m_flags);
 	if (rc) {
 		pr_err("[%s] cmd trigger failed for master, rc=%d\n",
 		       display->name, rc);
@@ -1285,6 +1339,7 @@
 {
 	struct dsi_display *display = to_dsi_display(host);
 	struct dsi_display_ctrl *display_ctrl;
+	struct msm_gem_address_space *aspace = NULL;
 	int rc = 0, cnt = 0;
 
 	if (!host || !msg) {
@@ -1327,7 +1382,16 @@
 			pr_err("value of display->tx_cmd_buf is NULL");
 			goto error_disable_cmd_engine;
 		}
-		rc = msm_gem_get_iova(display->tx_cmd_buf, 0,
+
+		aspace = msm_gem_smmu_address_space_get(display->drm_dev,
+				MSM_SMMU_DOMAIN_UNSECURE);
+		if (!aspace) {
+			pr_err("failed to get aspace\n");
+			rc = -EINVAL;
+			goto free_gem;
+		}
+
+		rc = msm_gem_get_iova(display->tx_cmd_buf, aspace,
 					&(display->cmd_buffer_iova));
 		if (rc) {
 			pr_err("failed to get the iova rc %d\n", rc);
@@ -1383,7 +1447,7 @@
 	}
 	return rc;
 put_iova:
-	msm_gem_put_iova(display->tx_cmd_buf, 0);
+	msm_gem_put_iova(display->tx_cmd_buf, aspace);
 free_gem:
 	msm_gem_free_object(display->tx_cmd_buf);
 error:
@@ -2780,6 +2844,9 @@
 				(void)_dsi_display_dev_deinit(main_display);
 				component_del(&main_display->pdev->dev,
 					      &dsi_display_comp_ops);
+				mutex_lock(&dsi_display_list_lock);
+				list_del(&main_display->list);
+				mutex_unlock(&dsi_display_list_lock);
 				comp_add_success = false;
 				default_active_node = NULL;
 				pr_debug("removed the existing comp ops\n");
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index 9aa3113..b382e4a 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -444,6 +444,17 @@
 int dsi_display_clock_gate(struct dsi_display *display, bool enable);
 int dsi_dispaly_static_frame(struct dsi_display *display, bool enable);
 
+/**
+ * dsi_display_enable_event() - enable interrupt based connector event
+ * @display:            Handle to display.
+ * @event_idx:          Event index.
+ * @event_info:         Event callback definition.
+ * @enable:             Whether to enable/disable the event interrupt.
+ */
+void dsi_display_enable_event(struct dsi_display *display,
+		uint32_t event_idx, struct dsi_event_cb_info *event_info,
+		bool enable);
+
 int dsi_display_set_backlight(void *display, u32 bl_lvl);
 
 /**
@@ -469,5 +480,12 @@
  */
 int dsi_display_pre_kickoff(struct dsi_display *display,
 		struct msm_display_kickoff_params *params);
+/**
+ * dsi_display_get_dst_format() - get dst_format from DSI display
+ * @display:         Handle to display
+ *
+ * Return: enum dsi_pixel_format type
+ */
+enum dsi_pixel_format dsi_display_get_dst_format(void *display);
 
 #endif /* _DSI_DISPLAY_H_ */
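
A short sketch of arming the new event path from a connector, following dsi_conn_enable_event() in dsi_drm.c below; sde_connector_trigger_event and SDE_CONN_EVENT_VID_DONE are referenced elsewhere in this series and are assumed available to the caller:

	struct dsi_event_cb_info info;

	memset(&info, 0, sizeof(info));
	info.event_cb = sde_connector_trigger_event;
	info.event_usr_ptr = connector;

	/* route video-mode frame-done interrupts to the connector */
	dsi_display_enable_event(display, SDE_CONN_EVENT_VID_DONE, &info, true);

	/* and later, to disarm: */
	dsi_display_enable_event(display, SDE_CONN_EVENT_VID_DONE, NULL, false);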
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 4e09cfb..b499bd6 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -546,6 +546,19 @@
 	return dsi_display_pre_kickoff(display, params);
 }
 
+void dsi_conn_enable_event(struct drm_connector *connector,
+		uint32_t event_idx, bool enable, void *display)
+{
+	struct dsi_event_cb_info event_info;
+
+	memset(&event_info, 0, sizeof(event_info));
+
+	event_info.event_cb = sde_connector_trigger_event;
+	event_info.event_usr_ptr = connector;
+
+	dsi_display_enable_event(display, event_idx, &event_info, enable);
+}
+
 struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
 				       struct drm_device *dev,
 				       struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
index 68520a8..45feec9 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
@@ -85,6 +85,16 @@
 		struct drm_display_mode *mode,
 		void *display);
 
+/**
+ * dsi_conn_enable_event - callback to notify DSI driver of event registration
+ * @connector: Pointer to drm connector structure
+ * @event_idx: Connector event index
+ * @enable: Whether or not the event is enabled
+ * @display: Pointer to private display handle
+ */
+void dsi_conn_enable_event(struct drm_connector *connector,
+		uint32_t event_idx, bool enable, void *display);
+
 struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
 		struct drm_device *dev,
 		struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 9527daf..75e98dc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -133,7 +133,7 @@
 		container_of(work, struct mdp4_crtc, unref_cursor_work);
 	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
 
-	msm_gem_put_iova(val, mdp4_kms->id);
+	msm_gem_put_iova(val, mdp4_kms->aspace);
 	drm_gem_object_unreference_unlocked(val);
 }
 
@@ -378,7 +378,8 @@
 		if (next_bo) {
 			/* take a obj ref + iova ref when we start scanning out: */
 			drm_gem_object_reference(next_bo);
-			msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
+			msm_gem_get_iova_locked(next_bo, mdp4_kms->aspace,
+				&iova);
 
 			/* enable cursor: */
 			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -435,7 +436,7 @@
 	}
 
 	if (cursor_bo) {
-		ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
+		ret = msm_gem_get_iova(cursor_bo, mdp4_kms->aspace, &iova);
 		if (ret)
 			goto fail;
 	} else {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 571a91e..acee5da 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -17,6 +17,7 @@
 
 
 #include "msm_drv.h"
+#include "msm_gem.h"
 #include "msm_mmu.h"
 #include "mdp4_kms.h"
 
@@ -151,12 +152,28 @@
 	}
 }
 
 static const char * const iommu_ports[] = {
 	"mdp_port0_cb0", "mdp_port1_cb0",
 };
+
+static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+	struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
+	unsigned int i;
+	struct msm_gem_address_space *aspace = mdp4_kms->aspace;
+
+	for (i = 0; i < priv->num_crtcs; i++)
+		mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
+
+	if (aspace) {
+		aspace->mmu->funcs->detach(aspace->mmu,
+				iommu_ports, ARRAY_SIZE(iommu_ports));
+		msm_gem_address_space_destroy(aspace);
+	}
+}
 
 static void mdp4_destroy(struct msm_kms *kms)
 {
 	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
 	struct device *dev = mdp4_kms->dev->dev;
+	struct msm_gem_address_space *aspace = mdp4_kms->aspace;
 	struct msm_mmu *mmu = mdp4_kms->mmu;
@@ -167,12 +184,18 @@
 	}
 
 	if (mdp4_kms->blank_cursor_iova)
-		msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+		msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace);
 	drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
 
 	if (mdp4_kms->rpm_enabled)
 		pm_runtime_disable(dev);
 
+	if (aspace) {
+		aspace->mmu->funcs->detach(aspace->mmu,
+				iommu_ports, ARRAY_SIZE(iommu_ports));
+		msm_gem_address_space_destroy(aspace);
+	}
+
 	kfree(mdp4_kms);
 }
 
@@ -440,8 +463,8 @@
 	struct mdp4_platform_config *config = mdp4_get_config(pdev);
 	struct mdp4_kms *mdp4_kms;
 	struct msm_kms *kms = NULL;
-	struct msm_mmu *mmu;
 	int irq, ret;
+	struct msm_gem_address_space *aspace;
 
 	mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
 	if (!mdp4_kms) {
@@ -531,12 +554,23 @@
 	mdelay(16);
 
 	if (config->iommu) {
-		mmu = msm_iommu_new(&pdev->dev, config->iommu);
+		struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, config->iommu);
+
 		if (IS_ERR(mmu)) {
 			ret = PTR_ERR(mmu);
 			goto fail;
 		}
-		ret = mmu->funcs->attach(mmu, iommu_ports,
+
+		aspace = msm_gem_address_space_create(&pdev->dev,
+				mmu, "mdp4", 0x1000, 0xffffffff);
+		if (IS_ERR(aspace)) {
+			ret = PTR_ERR(aspace);
+			goto fail;
+		}
+
+		mdp4_kms->aspace = aspace;
+
+		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
 				ARRAY_SIZE(iommu_ports));
 		if (ret)
 			goto fail;
@@ -545,14 +579,7 @@
 	} else {
 		dev_info(dev->dev, "no iommu, fallback to phys "
 				"contig buffers for scanout\n");
-		mmu = NULL;
-	}
-
-	mdp4_kms->id = msm_register_mmu(dev, mmu);
-	if (mdp4_kms->id < 0) {
-		ret = mdp4_kms->id;
-		dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
-		goto fail;
+		aspace = NULL;
 	}
 
 	ret = modeset_init(mdp4_kms);
@@ -571,7 +598,7 @@
 		goto fail;
 	}
 
-	ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+	ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace,
 			&mdp4_kms->blank_cursor_iova);
 	if (ret) {
 		dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
@@ -599,5 +626,13 @@
 	config.max_clk = 266667000;
 	config.iommu = iommu_domain_alloc(&platform_bus_type);
 
+#else
+	if (cpu_is_apq8064())
+		config.max_clk = 266667000;
+	else
+		config.max_clk = 200000000;
+
+	config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
+#endif
 	return &config;
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index ddfabde..f9dcadf 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -33,8 +33,6 @@
 	int rev;
 
 	/* mapper-id used to request GEM buffer mapped for scanout: */
-	int id;
-
 	void __iomem *mmio;
 
 	struct regulator *vdd;
@@ -43,7 +41,7 @@
 	struct clk *pclk;
 	struct clk *lut_clk;
 	struct clk *axi_clk;
-	struct msm_mmu *mmu;
+	struct msm_gem_address_space *aspace;
 
 	struct mdp_irq error_handler;
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 3903dbc..934992e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -109,7 +109,7 @@
 		return 0;
 
 	DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
-	return msm_framebuffer_prepare(fb, mdp4_kms->id);
+	return msm_framebuffer_prepare(fb, mdp4_kms->aspace);
 }
 
 static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
@@ -123,7 +123,7 @@
 		return;
 
 	DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
-	msm_framebuffer_cleanup(fb, mdp4_kms->id);
+	msm_framebuffer_cleanup(fb, mdp4_kms->aspace);
 }
 
 
@@ -172,13 +172,13 @@
 			MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
 
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
-			msm_framebuffer_iova(fb, mdp4_kms->id, 0));
+			msm_framebuffer_iova(fb, mdp4_kms->aspace, 0));
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
-			msm_framebuffer_iova(fb, mdp4_kms->id, 1));
+			msm_framebuffer_iova(fb, mdp4_kms->aspace, 1));
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
-			msm_framebuffer_iova(fb, mdp4_kms->id, 2));
+			msm_framebuffer_iova(fb, mdp4_kms->aspace, 2));
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
-			msm_framebuffer_iova(fb, mdp4_kms->id, 3));
+			msm_framebuffer_iova(fb, mdp4_kms->aspace, 3));
 
 	plane->fb = fb;
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index c205c36..15e7da2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -171,7 +171,7 @@
 		container_of(work, struct mdp5_crtc, unref_cursor_work);
 	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
 
-	msm_gem_put_iova(val, mdp5_kms->id);
+	msm_gem_put_iova(val, mdp5_kms->aspace);
 	drm_gem_object_unreference_unlocked(val);
 }
 
@@ -525,7 +525,7 @@
 	if (!cursor_bo)
 		return -ENOENT;
 
-	ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
+	ret = msm_gem_get_iova(cursor_bo, mdp5_kms->aspace, &cursor_addr);
 	if (ret)
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 4f204ff..d97e4ef 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -19,6 +19,7 @@
 #include <linux/of_irq.h>
 
 #include "msm_drv.h"
+#include "msm_gem.h"
 #include "msm_mmu.h"
 #include "mdp5_kms.h"
 
@@ -117,11 +118,12 @@
 static void mdp5_kms_destroy(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-	struct msm_mmu *mmu = mdp5_kms->mmu;
+	struct msm_gem_address_space *aspace = mdp5_kms->aspace;
 
-	if (mmu) {
-		mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
-		mmu->funcs->destroy(mmu);
+	if (aspace) {
+		aspace->mmu->funcs->detach(aspace->mmu,
+				iommu_ports, ARRAY_SIZE(iommu_ports));
+		msm_gem_address_space_destroy(aspace);
 	}
 }
 
@@ -564,8 +566,8 @@
 	struct mdp5_kms *mdp5_kms;
 	struct mdp5_cfg *config;
 	struct msm_kms *kms;
-	struct msm_mmu *mmu;
 	int irq, i, ret;
+	struct msm_gem_address_space *aspace;
 
 	/* priv->kms would have been populated by the MDP5 driver */
 	kms = priv->kms;
@@ -606,7 +608,8 @@
 	mdelay(16);
 
 	if (config->platform.iommu) {
-		mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
+		struct msm_mmu *mmu = msm_iommu_new(&pdev->dev,
+				config->platform.iommu);
 		if (IS_ERR(mmu)) {
 			ret = PTR_ERR(mmu);
 			dev_err(&pdev->dev, "failed to init iommu: %d\n", ret);
@@ -614,7 +617,16 @@
 			goto fail;
 		}
 
-		ret = mmu->funcs->attach(mmu, iommu_ports,
+		aspace = msm_gem_smmu_address_space_create(&pdev->dev,
+				mmu, "mdp5");
+		if (IS_ERR(aspace)) {
+			ret = PTR_ERR(aspace);
+			goto fail;
+		}
+
+		mdp5_kms->aspace = aspace;
+
+		ret = mmu->funcs->attach(aspace->mmu, iommu_ports,
 				ARRAY_SIZE(iommu_ports));
 		if (ret) {
 			dev_err(&pdev->dev, "failed to attach iommu: %d\n",
@@ -625,15 +637,7 @@
 	} else {
 		dev_info(&pdev->dev,
 			 "no iommu, fallback to phys contig buffers for scanout\n");
-		mmu = NULL;
-	}
-	mdp5_kms->mmu = mmu;
-
-	mdp5_kms->id = msm_register_mmu(dev, mmu);
-	if (mdp5_kms->id < 0) {
-		ret = mdp5_kms->id;
-		dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
-		goto fail;
+		aspace = NULL;
 	}
 
 	ret = modeset_init(mdp5_kms);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 0373892..f21e912 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -38,8 +38,7 @@
 
 
 	/* mapper-id used to request GEM buffer mapped for scanout: */
-	int id;
-	struct msm_mmu *mmu;
+	struct msm_gem_address_space *aspace;
 
 	struct mdp5_smp *smp;
 	struct mdp5_ctl_manager *ctlm;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 5e67e8b..88e5d06 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -261,7 +261,7 @@
 		return 0;
 
 	DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
-	return msm_framebuffer_prepare(fb, mdp5_kms->id);
+	return msm_framebuffer_prepare(fb, mdp5_kms->aspace);
 }
 
 static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
@@ -275,7 +275,7 @@
 		return;
 
 	DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
-	msm_framebuffer_cleanup(fb, mdp5_kms->id);
+	msm_framebuffer_cleanup(fb, mdp5_kms->aspace);
 }
 
 static int mdp5_plane_atomic_check(struct drm_plane *plane,
@@ -398,13 +398,13 @@
 			MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
 
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
-			msm_framebuffer_iova(fb, mdp5_kms->id, 0));
+			msm_framebuffer_iova(fb, mdp5_kms->aspace, 0));
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
-			msm_framebuffer_iova(fb, mdp5_kms->id, 1));
+			msm_framebuffer_iova(fb, mdp5_kms->aspace, 1));
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
-			msm_framebuffer_iova(fb, mdp5_kms->id, 2));
+			msm_framebuffer_iova(fb, mdp5_kms->aspace, 2));
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
-			msm_framebuffer_iova(fb, mdp5_kms->id, 3));
+			msm_framebuffer_iova(fb, mdp5_kms->aspace, 3));
 
 	plane->fb = fb;
 }
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index f7d5d02..810d0d6 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -126,10 +126,21 @@
 int msm_atomic_check(struct drm_device *dev,
 			    struct drm_atomic_state *state)
 {
+	struct msm_drm_private *priv;
+
+	if (!dev)
+		return -EINVAL;
+
 	if (msm_is_suspend_blocked(dev)) {
 		DRM_DEBUG("rejecting commit during suspend\n");
 		return -EBUSY;
 	}
+
+	priv = dev->dev_private;
+	if (priv && priv->kms && priv->kms->funcs &&
+			priv->kms->funcs->atomic_check)
+		return priv->kms->funcs->atomic_check(priv->kms, state);
+
 	return drm_atomic_helper_check(dev, state);
 }
 
@@ -140,42 +151,6 @@
 	.atomic_commit = msm_atomic_commit,
 };
 
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	int idx = priv->num_mmus++;
-
-	if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
-		return -EINVAL;
-
-	priv->mmus[idx] = mmu;
-
-	return idx;
-}
-
-void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	int idx;
-
-	if (priv->num_mmus <= 0) {
-		dev_err(dev->dev, "invalid num mmus %d\n", priv->num_mmus);
-		return;
-	}
-
-	idx = priv->num_mmus - 1;
-
-	/* only support reverse-order deallocation */
-	if (priv->mmus[idx] != mmu) {
-		dev_err(dev->dev, "unexpected mmu at idx %d\n", idx);
-		return;
-	}
-
-	--priv->num_mmus;
-	priv->mmus[idx] = 0;
-}
-
-
 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
 static bool reglog = false;
 MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -1384,7 +1359,7 @@
 	return ret;
 }
 
-void msm_mode_object_event_nofity(struct drm_mode_object *obj,
+void msm_mode_object_event_notify(struct drm_mode_object *obj,
 		struct drm_device *dev, struct drm_event *event, u8 *payload)
 {
 	struct msm_drm_private *priv = NULL;
@@ -1944,6 +1919,30 @@
 	return ret;
 }
 
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_get(struct drm_device *dev,
+		unsigned int domain)
+{
+	struct msm_drm_private *priv = NULL;
+	struct msm_kms *kms;
+	const struct msm_kms_funcs *funcs;
+
+	if ((!dev) || (!dev->dev_private))
+		return NULL;
+
+	priv = dev->dev_private;
+	kms = priv->kms;
+	if (!kms)
+		return NULL;
+
+	funcs = kms->funcs;
+
+	if ((!funcs) || (!funcs->get_address_space))
+		return NULL;
+
+	return funcs->get_address_space(priv->kms, domain);
+}
+
 /*
  * We don't know what's the best binding to link the gpu with the drm device.
  * Fow now, we just hunt for all the possible gpus that we support, and add them
@@ -2055,6 +2054,7 @@
 static int __init msm_drm_register(void)
 {
 	DBG("init");
+	msm_smmu_driver_init();
 	msm_dsi_register();
 	msm_edp_register();
 	msm_hdmi_register();
@@ -2070,6 +2070,7 @@
 	adreno_unregister();
 	msm_edp_unregister();
 	msm_dsi_unregister();
+	msm_smmu_driver_cleanup();
 }
 
 module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 86fec8b..7edd534 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -61,6 +61,8 @@
 struct msm_gem_submit;
 struct msm_fence_context;
 struct msm_fence_cb;
+struct msm_gem_address_space;
+struct msm_gem_vma;
 
 #define NUM_DOMAINS    4    /* one for KMS, then one per gpu core (?) */
 #define MAX_CRTCS      8
@@ -117,6 +119,7 @@
 	PLANE_PROP_ROTATION,
 	PLANE_PROP_BLEND_OP,
 	PLANE_PROP_SRC_CONFIG,
+	PLANE_PROP_FB_TRANSLATION_MODE,
 
 	/* total # of properties */
 	PLANE_PROP_COUNT
@@ -143,6 +146,7 @@
 	CRTC_PROP_ROT_PREFILL_BW,
 	CRTC_PROP_ROT_CLK,
 	CRTC_PROP_ROI_V1,
+	CRTC_PROP_SECURITY_LEVEL,
 
 	/* total # of properties */
 	CRTC_PROP_COUNT
@@ -152,6 +156,7 @@
 	/* blob properties, always put these first */
 	CONNECTOR_PROP_SDE_INFO,
 	CONNECTOR_PROP_HDR_INFO,
+	CONNECTOR_PROP_PP_DITHER,
 
 	/* # of blob properties */
 	CONNECTOR_PROP_BLOBCOUNT,
@@ -528,9 +533,13 @@
 	uint32_t pending_crtcs;
 	wait_queue_head_t pending_crtcs_event;
 
-	/* registered MMUs: */
-	unsigned int num_mmus;
-	struct msm_mmu *mmus[NUM_DOMAINS];
+	/* Registered address spaces. Currently this is fixed per # of
+	 * iommu's, i.e. one for display block and one for gpu block.
+	 * Eventually, to do per-process gpu pagetables, we'll want one
+	 * of these per-process.
+	 */
+	unsigned int num_aspaces;
+	struct msm_gem_address_space *aspace[NUM_DOMAINS];
 
 	unsigned int num_planes;
 	struct drm_plane *planes[MAX_PLANES];
@@ -635,10 +644,29 @@
 int msm_atomic_commit(struct drm_device *dev,
 		struct drm_atomic_state *state, bool nonblock);
 
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-
 void msm_gem_submit_free(struct msm_gem_submit *submit);
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		void *priv);
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		void *priv, unsigned int flags);
+void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace);
+
+/* For GPU and legacy display */
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+		const char *name);
+
+/* For SDE display */
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+		const char *name);
+
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_get(struct drm_device *dev,
+		unsigned int domain);
+
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file);
 
@@ -650,13 +678,16 @@
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
-		uint32_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint32_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint32_t *iova);
+uint32_t msm_gem_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace);
 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
 void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+void msm_gem_put_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 		struct drm_mode_create_dumb *args);
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -693,9 +724,12 @@
 		struct dma_buf *dmabuf, struct sg_table *sgt);
 
 void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable);
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace, int plane);
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
 struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
@@ -732,7 +766,7 @@
  * @event: event that needs to be notified.
  * @payload: payload for the event.
  */
-void msm_mode_object_event_nofity(struct drm_mode_object *obj,
+void msm_mode_object_event_notify(struct drm_mode_object *obj,
 		struct drm_device *dev, struct drm_event *event, u8 *payload);
 #ifdef CONFIG_DRM_MSM_DSI
 void __init msm_dsi_register(void);
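
With the mapper-id removed, callers hold a struct msm_gem_address_space and pass it to the GEM helpers; a minimal sketch of the new pattern (drm_dev and bo stand in for the caller's device and buffer object), following dsi_ctrl_buffer_alloc() in this series:

	struct msm_gem_address_space *aspace;
	uint32_t iova;
	int ret;

	aspace = msm_gem_smmu_address_space_get(drm_dev,
			MSM_SMMU_DOMAIN_UNSECURE);
	if (!aspace)
		return -ENOMEM;

	ret = msm_gem_get_iova(bo, aspace, &iova);
	if (ret)
		return ret;

	/* ... program the hardware with iova ... */

	msm_gem_put_iova(bo, aspace);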
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 918427a..0a9f12d 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -148,14 +148,15 @@
  * should be fine, since only the scanout (mdpN) side of things needs
  * this, the gpu doesn't care about fb's.
  */
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace)
 {
 	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
 	int ret, i, n = drm_format_num_planes(fb->pixel_format);
 	uint32_t iova;
 
 	for (i = 0; i < n; i++) {
-		ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
+		ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
 		DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
 		if (ret)
 			return ret;
@@ -167,7 +168,8 @@
 	return 0;
 }
 
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace)
 {
 	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
 	int i, n = drm_format_num_planes(fb->pixel_format);
@@ -176,15 +178,16 @@
 		msm_framebuffer_kunmap(fb);
 
 	for (i = 0; i < n; i++)
-		msm_gem_put_iova(msm_fb->planes[i], id);
+		msm_gem_put_iova(msm_fb->planes[i], aspace);
 }
 
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace, int plane)
 {
 	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
 	if (!msm_fb->planes[plane])
 		return 0;
-	return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane];
+	return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
 }
 
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index acd7af5..a7d06d1 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -26,6 +26,11 @@
 #include "msm_gpu.h"
 #include "msm_mmu.h"
 
+static void *get_dmabuf_ptr(struct drm_gem_object *obj)
+{
+	return (obj && obj->import_attach) ? obj->import_attach->dmabuf : NULL;
+}
+
 static dma_addr_t physaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -285,33 +290,63 @@
 	return offset;
 }
 
+static void obj_remove_domain(struct msm_gem_vma *domain)
+{
+	if (domain) {
+		list_del(&domain->list);
+		kfree(domain);
+	}
+}
+
 static void
 put_iova(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct msm_drm_private *priv = obj->dev->dev_private;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	int id;
+	struct msm_gem_vma *domain, *tmp;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
-		struct msm_mmu *mmu = priv->mmus[id];
-		if (mmu && msm_obj->domain[id].iova) {
-			uint32_t offset = msm_obj->domain[id].iova;
-
-			if (obj->import_attach && mmu->funcs->unmap_dma_buf)
-				mmu->funcs->unmap_dma_buf(mmu, msm_obj->sgt,
-						obj->import_attach->dmabuf,
-						DMA_BIDIRECTIONAL);
-			else
-				mmu->funcs->unmap(mmu, offset, msm_obj->sgt,
-						obj->size);
-			msm_obj->domain[id].iova = 0;
+	list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
+		if (iommu_present(&platform_bus_type)) {
+			msm_gem_unmap_vma(domain->aspace, domain,
+				msm_obj->sgt, get_dmabuf_ptr(obj));
 		}
+
+		obj_remove_domain(domain);
 	}
 }
 
+static struct msm_gem_vma *obj_add_domain(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+
+	if (!domain)
+		return ERR_PTR(-ENOMEM);
+
+	domain->aspace = aspace;
+
+	list_add_tail(&domain->list, &msm_obj->domains);
+
+	return domain;
+}
+
+static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *domain;
+
+	list_for_each_entry(domain, &msm_obj->domains, list) {
+		if (domain->aspace == aspace)
+			return domain;
+	}
+
+	return NULL;
+}
+
 /* should be called under struct_mutex.. although it can be called
  * from atomic context without struct_mutex to acquire an extra
  * iova ref if you know one is already held.
@@ -319,65 +354,65 @@
  * That means when I do eventually need to add support for unpinning
  * the refcnt counter needs to be atomic_t.
  */
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
-		uint32_t *iova)
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint32_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct page **pages;
+	struct msm_gem_vma *domain;
 	int ret = 0;
 
-	if (!msm_obj->domain[id].iova) {
-		struct msm_drm_private *priv = obj->dev->dev_private;
-		struct page **pages = get_pages(obj);
+	if (!iommu_present(&platform_bus_type)) {
+		pages = get_pages(obj);
 
 		if (IS_ERR(pages))
 			return PTR_ERR(pages);
 
-		if (iommu_present(&platform_bus_type)) {
-			struct msm_mmu *mmu = priv->mmus[id];
-
-			if (WARN_ON(!mmu))
-				return -EINVAL;
-
-			if (obj->import_attach && mmu->funcs->map_dma_buf) {
-				ret = mmu->funcs->map_dma_buf(mmu, msm_obj->sgt,
-						obj->import_attach->dmabuf,
-						DMA_BIDIRECTIONAL,
-						msm_obj->flags);
-				if (ret) {
-					DRM_ERROR("Unable to map dma buf\n");
-					return ret;
-				}
-			}
-			msm_obj->domain[id].iova =
-				sg_dma_address(msm_obj->sgt->sgl);
-		} else {
-			WARN_ONCE(1, "physical address being used\n");
-			msm_obj->domain[id].iova = physaddr(obj);
-		}
+		*iova = physaddr(obj);
+		return 0;
 	}
 
-	if (!ret)
-		*iova = msm_obj->domain[id].iova;
+	domain = obj_get_domain(obj, aspace);
+
+	if (!domain) {
+		domain = obj_add_domain(obj, aspace);
+		if (IS_ERR(domain))
+			return PTR_ERR(domain);
+
+		pages = get_pages(obj);
+		if (IS_ERR(pages)) {
+			obj_remove_domain(domain);
+			return PTR_ERR(pages);
+		}
+
+		ret = msm_gem_map_vma(aspace, domain, msm_obj->sgt,
+			get_dmabuf_ptr(obj),
+			msm_obj->flags);
+	}
+
+	if (!ret && domain)
+		*iova = domain->iova;
+	else
+		obj_remove_domain(domain);
 
 	return ret;
 }
 
 /* get iova, taking a reference.  Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint32_t *iova)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *domain;
 	int ret;
 
-	/* this is safe right now because we don't unmap until the
-	 * bo is deleted:
-	 */
-	if (msm_obj->domain[id].iova) {
-		*iova = msm_obj->domain[id].iova;
+	domain = obj_get_domain(obj, aspace);
+	if (domain) {
+		*iova = domain->iova;
 		return 0;
 	}
 
 	mutex_lock(&obj->dev->struct_mutex);
-	ret = msm_gem_get_iova_locked(obj, id, iova);
+	ret = msm_gem_get_iova_locked(obj, aspace, iova);
 	mutex_unlock(&obj->dev->struct_mutex);
 	return ret;
 }
@@ -385,14 +420,18 @@
 /* get iova without taking a reference, used in places where you have
  * already done a 'msm_gem_get_iova()'.
  */
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint32_t msm_gem_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	WARN_ON(!msm_obj->domain[id].iova);
-	return msm_obj->domain[id].iova;
+	struct msm_gem_vma *domain = obj_get_domain(obj, aspace);
+
+	WARN_ON(!domain);
+
+	return domain ? domain->iova : 0;
 }
 
-void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+void msm_gem_put_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
 {
 	// XXX TODO ..
 	// NOTE: probably don't need a _locked() version.. we wouldn't
@@ -644,6 +683,7 @@
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *domain;
 	struct reservation_object *robj = msm_obj->resv;
 	struct reservation_object_list *fobj;
 	struct fence *fence;
@@ -666,6 +706,7 @@
 	}
 
 	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
+
 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
 			obj->name, obj->refcount.refcount.counter,
 			off, msm_obj->vaddr, obj->size, madv);
@@ -685,6 +726,12 @@
 	if (fence)
 		describe_fence(fence, "Exclusive", m);
 	rcu_read_unlock();
+
+	/* FIXME: we need to print the address space here too */
+	list_for_each_entry(domain, &msm_obj->domains, list)
+		seq_printf(m, " %08llx", domain->iova);
+
+	seq_puts(m, "\n");
 }
 
 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -721,7 +768,8 @@
 
 	if (obj->import_attach) {
 		if (msm_obj->vaddr)
-			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+			dma_buf_vunmap(obj->import_attach->dmabuf,
+				msm_obj->vaddr);
 
 		/* Don't drop the pages for imported dmabuf, as they are not
 		 * ours, just free the array we allocated:
@@ -776,7 +824,6 @@
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
-	unsigned sz;
 	bool use_vram = false;
 
 	switch (flags & MSM_BO_CACHE_MASK) {
@@ -798,16 +845,17 @@
 	if (WARN_ON(use_vram && !priv->vram.size))
 		return -EINVAL;
 
-	sz = sizeof(*msm_obj);
-	if (use_vram)
-		sz += sizeof(struct drm_mm_node);
-
-	msm_obj = kzalloc(sz, GFP_KERNEL);
+	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
 	if (!msm_obj)
 		return -ENOMEM;
 
-	if (use_vram)
-		msm_obj->vram_node = (void *)&msm_obj[1];
+	if (use_vram) {
+		struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base,
+				NULL);
+
+		if (!IS_ERR(domain))
+			msm_obj->vram_node = &domain->node;
+	}
 
 	msm_obj->flags = flags;
 	msm_obj->madv = MSM_MADV_WILLNEED;
@@ -820,6 +868,8 @@
 	}
 
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
+	INIT_LIST_HEAD(&msm_obj->domains);
+
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
 	*obj = &msm_obj->base;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 19c7726..9d41a00 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -25,6 +25,30 @@
 #define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
 #define MSM_BO_KEEPATTRS     0x20000000     /* keep h/w bus attributes */
 
+struct msm_gem_aspace_ops {
+	int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
+		struct sg_table *sgt, void *priv, unsigned int flags);
+
+	void (*unmap)(struct msm_gem_address_space *, struct msm_gem_vma *,
+		struct sg_table *sgt, void *priv);
+
+	void (*destroy)(struct msm_gem_address_space *);
+};
+
+struct msm_gem_address_space {
+	const char *name;
+	struct msm_mmu *mmu;
+	const struct msm_gem_aspace_ops *ops;
+};
+
+struct msm_gem_vma {
+	/* Node used by the GPU address space, but not the SDE address space */
+	struct drm_mm_node node;
+	struct msm_gem_address_space *aspace;
+	uint64_t iova;
+	struct list_head list;
+};
+
 struct msm_gem_object {
 	struct drm_gem_object base;
 
@@ -62,9 +86,7 @@
 	struct sg_table *sgt;
 	void *vaddr;
 
-	struct {
-		dma_addr_t iova;
-	} domain[NUM_DOMAINS];
+	struct list_head domains;
 
 	/* normally (resv == &_resv) except for imported bo's */
 	struct reservation_object *resv;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b6a0f37..8d727fe 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -157,7 +157,7 @@
 	struct msm_gem_object *msm_obj = submit->bos[i].obj;
 
 	if (submit->bos[i].flags & BO_PINNED)
-		msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+		msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
 
 	if (submit->bos[i].flags & BO_LOCKED)
 		ww_mutex_unlock(&msm_obj->resv->lock);
@@ -245,7 +245,7 @@
 
 		/* if locking succeeded, pin bo: */
 		ret = msm_gem_get_iova_locked(&msm_obj->base,
-				submit->gpu->id, &iova);
+				submit->gpu->aspace, &iova);
 
 		if (ret)
 			break;
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
new file mode 100644
index 0000000..8e56871
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+/* SDE address space operations */
+static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		void *priv)
+{
+	struct dma_buf *buf = priv;
+
+	if (buf)
+		aspace->mmu->funcs->unmap_dma_buf(aspace->mmu,
+			sgt, buf, DMA_BIDIRECTIONAL);
+	else
+		aspace->mmu->funcs->unmap_sg(aspace->mmu, sgt,
+			DMA_BIDIRECTIONAL);
+
+	vma->iova = 0;
+}
+
+
+static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		void *priv, unsigned int flags)
+{
+	struct dma_buf *buf = priv;
+	int ret;
+
+	if (buf)
+		ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt, buf,
+			DMA_BIDIRECTIONAL, flags);
+	else
+		ret = aspace->mmu->funcs->map_sg(aspace->mmu, sgt,
+			DMA_BIDIRECTIONAL);
+
+	if (!ret)
+		vma->iova = sg_dma_address(sgt->sgl);
+
+	return ret;
+}
+
+static void smmu_aspace_destroy(struct msm_gem_address_space *aspace)
+{
+	aspace->mmu->funcs->destroy(aspace->mmu);
+}
+
+
+static const struct msm_gem_aspace_ops smmu_aspace_ops = {
+	.map = smmu_aspace_map_vma,
+	.unmap = smmu_aspace_unmap_vma,
+	.destroy = smmu_aspace_destroy
+};
+
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+		const char *name)
+{
+	struct msm_gem_address_space *aspace;
+
+	if (!mmu)
+		return ERR_PTR(-EINVAL);
+
+	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+	if (!aspace)
+		return ERR_PTR(-ENOMEM);
+
+	aspace->name = name;
+	aspace->mmu = mmu;
+	aspace->ops = &smmu_aspace_ops;
+
+	return aspace;
+}
+
+/* GPU address space operations */
+struct msm_iommu_aspace {
+	struct msm_gem_address_space base;
+	struct drm_mm mm;
+};
+
+#define to_iommu_aspace(aspace) \
+	((struct msm_iommu_aspace *) \
+	 container_of(aspace, struct msm_iommu_aspace, base))
+
+static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
+{
+	if (!vma->iova)
+		return;
+
+	if (aspace->mmu)
+		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt);
+
+	drm_mm_remove_node(&vma->node);
+
+	vma->iova = 0;
+}
+
+static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		void *priv, unsigned int flags)
+{
+	struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
+	size_t size = 0;
+	struct scatterlist *sg;
+	int ret = 0, i;
+
+	if (WARN_ON(drm_mm_node_allocated(&vma->node)))
+		return 0;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i)
+		size += sg->length + sg->offset;
+
+	ret = drm_mm_insert_node(&local->mm, &vma->node, size >> PAGE_SHIFT,
+			0, DRM_MM_SEARCH_DEFAULT);
+	if (ret)
+		return ret;
+
+	vma->iova = vma->node.start << PAGE_SHIFT;
+
+	if (aspace->mmu)
+		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova,
+			sgt, IOMMU_READ | IOMMU_WRITE);
+
+	return ret;
+}
+
+static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
+{
+	struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
+
+	drm_mm_takedown(&local->mm);
+	aspace->mmu->funcs->destroy(aspace->mmu);
+}
+
+static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
+	.map = iommu_aspace_map_vma,
+	.unmap = iommu_aspace_unmap_vma,
+	.destroy = iommu_aspace_destroy,
+};
+
+static struct msm_gem_address_space *
+msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
+		uint64_t start, uint64_t end)
+{
+	struct msm_iommu_aspace *local;
+
+	if (!mmu)
+		return ERR_PTR(-EINVAL);
+
+	local = kzalloc(sizeof(*local), GFP_KERNEL);
+	if (!local)
+		return ERR_PTR(-ENOMEM);
+
+	drm_mm_init(&local->mm, (start >> PAGE_SHIFT),
+		(end >> PAGE_SHIFT) - 1);
+
+	local->base.name = name;
+	local->base.mmu = mmu;
+	local->base.ops = &msm_iommu_aspace_ops;
+
+	return &local->base;
+}
+
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		void *priv, unsigned int flags)
+{
+	if (aspace && aspace->ops->map)
+		return aspace->ops->map(aspace, vma, sgt, priv, flags);
+
+	return -EINVAL;
+}
+
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
+{
+	if (aspace && aspace->ops->unmap)
+		aspace->ops->unmap(aspace, vma, sgt, priv);
+}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+		const char *name)
+{
+	struct msm_mmu *mmu = msm_iommu_new(dev, domain);
+
+	if (IS_ERR(mmu))
+		return (struct msm_gem_address_space *) mmu;
+
+	return msm_gem_address_space_new(mmu, name,
+		domain->geometry.aperture_start,
+		domain->geometry.aperture_end);
+}
+
+void
+msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
+{
+	if (aspace && aspace->ops->destroy)
+		aspace->ops->destroy(aspace);
+
+	kfree(aspace);
+}
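The new msm_gem_vma.c above replaces the old fixed per-id MMU slots with address space objects that dispatch map/unmap/destroy through a per-backend ops table. A minimal usage sketch of the resulting API, assuming a caller that already holds an iommu_domain and a GEM object (error handling trimmed; only interfaces introduced by this patch are used):

	struct msm_gem_address_space *aspace;
	uint32_t iova;
	int ret;

	aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
	if (IS_ERR(aspace))
		return PTR_ERR(aspace);

	/* was msm_gem_get_iova(obj, id, &iova) before this change */
	ret = msm_gem_get_iova(obj, aspace, &iova);
	if (!ret) {
		/* ... program the hardware with iova ... */
		msm_gem_put_iova(obj, aspace);
	}

	msm_gem_address_space_destroy(aspace);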
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5bb0983..49d9e10 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -457,7 +457,7 @@
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
 		/* move to inactive: */
 		msm_gem_move_to_inactive(&msm_obj->base);
-		msm_gem_put_iova(&msm_obj->base, gpu->id);
+		msm_gem_put_iova(&msm_obj->base, gpu->aspace);
 		drm_gem_object_unreference(&msm_obj->base);
 	}
 
@@ -494,6 +494,8 @@
 
 	mutex_lock(&dev->struct_mutex);
 	retire_submits(gpu);
+
+	retire_submits(gpu);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (!msm_gpu_active(gpu))
@@ -538,8 +540,7 @@
 		/* submit takes a reference to the bo and iova until retired: */
 		drm_gem_object_reference(&msm_obj->base);
 		msm_gem_get_iova_locked(&msm_obj->base,
-				submit->gpu->id, &iova);
-
+				submit->gpu->aspace, &iova);
 		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
 			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
 		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
@@ -656,12 +657,17 @@
 	 */
 	iommu = iommu_domain_alloc(&platform_bus_type);
 	if (iommu) {
+		/* TODO 32b vs 64b address space.. */
+		iommu->geometry.aperture_start = 0x1000;
+		iommu->geometry.aperture_end = 0xffffffff;
+
 		dev_info(drm->dev, "%s: using IOMMU\n", name);
-		gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
-		if (IS_ERR(gpu->mmu)) {
-			ret = PTR_ERR(gpu->mmu);
+		gpu->aspace = msm_gem_address_space_create(&pdev->dev,
+				iommu, "gpu");
+		if (IS_ERR(gpu->aspace)) {
+			ret = PTR_ERR(gpu->aspace);
 			dev_err(drm->dev, "failed to init iommu: %d\n", ret);
-			gpu->mmu = NULL;
+			gpu->aspace = NULL;
 			iommu_domain_free(iommu);
 			goto fail;
 		}
@@ -669,8 +675,6 @@
 	} else {
 		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
 	}
-	gpu->id = msm_register_mmu(drm, gpu->mmu);
-
 
 	/* Create ringbuffer: */
 	mutex_lock(&drm->struct_mutex);
@@ -701,13 +705,13 @@
 
 	if (gpu->rb) {
 		if (gpu->rb_iova)
-			msm_gem_put_iova(gpu->rb->bo, gpu->id);
+			msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
 		msm_ringbuffer_destroy(gpu->rb);
 	}
 
-	if (gpu->mmu)
-		gpu->mmu->funcs->destroy(gpu->mmu);
-
 	if (gpu->fctx)
 		msm_fence_context_free(gpu->fctx);
+
+	if (gpu->aspace)
+		msm_gem_address_space_destroy(gpu->aspace);
 }
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index d61d98a..13ecd72 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -98,8 +98,7 @@
 	void __iomem *mmio;
 	int irq;
 
-	struct msm_mmu *mmu;
-	int id;
+	struct msm_gem_address_space *aspace;
 
 	/* Power Control: */
 	struct regulator *gpu_reg, *gpu_cx;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 3a294d0..bc9877c 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -46,7 +46,7 @@
 }
 
 static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
-		struct sg_table *sgt, unsigned len, int prot)
+		struct sg_table *sgt, int prot)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 	struct iommu_domain *domain = iommu->domain;
@@ -85,7 +85,7 @@
 }
 
 static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
-		struct sg_table *sgt, unsigned len)
+		struct sg_table *sgt)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 	struct iommu_domain *domain = iommu->domain;
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index d8ac407..eb10d6b 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -75,6 +75,9 @@
 			const struct msm_format *msm_fmt,
 			const struct drm_mode_fb_cmd2 *cmd,
 			struct drm_gem_object **bos);
+	/* perform complete atomic check of given atomic state */
+	int (*atomic_check)(struct msm_kms *kms,
+				    struct drm_atomic_state *state);
 	/* misc: */
 	long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
 			struct drm_encoder *encoder);
@@ -90,6 +93,10 @@
 			struct drm_mode_object *obj, u32 event, bool en);
 	/* cleanup: */
 	void (*destroy)(struct msm_kms *kms);
+	/* get address space */
+	struct msm_gem_address_space *(*get_address_space)(
+			struct msm_kms *kms,
+			unsigned int domain);
 };
 
 struct msm_kms {
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index fbf7e7b..5af26e2 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -21,7 +21,6 @@
 #include <linux/iommu.h>
 
 struct msm_mmu;
-struct msm_gpu;
 
 enum msm_mmu_domain_type {
 	MSM_SMMU_DOMAIN_UNSECURE,
@@ -35,9 +34,8 @@
 	int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
 	void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
 	int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
-			unsigned int len, int prot);
-	int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
-			unsigned int len);
+			int prot);
+	int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt);
 	int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
 			enum dma_data_direction dir);
 	void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
@@ -62,8 +60,11 @@
 }
 
 struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
 struct msm_mmu *msm_smmu_new(struct device *dev,
 	enum msm_mmu_domain_type domain);
 
+/* SDE smmu driver initialize and cleanup functions */
+int __init msm_smmu_driver_init(void);
+void __exit msm_smmu_driver_cleanup(void);
+
 #endif /* __MSM_MMU_H__ */
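The map/unmap callbacks above lose their explicit length argument because the scatter-gather table already describes the full extent of the mapping. A backend that still needs the total size can derive it from the sgt, for example (sketch only, mirroring the loop in iommu_aspace_map_vma from msm_gem_vma.c earlier in this patch):

	static size_t sgt_total_size(struct sg_table *sgt)
	{
		struct scatterlist *sg;
		size_t size = 0;
		int i;

		/* sum every segment, including its offset within the page */
		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			size += sg->length + sg->offset;

		return size;
	}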
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index 26d2953..7d7f74a 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -109,7 +109,7 @@
 }
 
 static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
-		struct sg_table *sgt, unsigned int len, int prot)
+		struct sg_table *sgt, int prot)
 {
 	struct msm_smmu *smmu = to_msm_smmu(mmu);
 	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
@@ -177,7 +177,7 @@
 }
 
 static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
-		struct sg_table *sgt, unsigned int len)
+		struct sg_table *sgt)
 {
 	struct msm_smmu *smmu = to_msm_smmu(mmu);
 	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
@@ -296,8 +296,8 @@
 	},
 	[MSM_SMMU_DOMAIN_SECURE] = {
 		.label = "mdp_s",
-		.va_start = 0,
-		.va_size = SZ_4G,
+		.va_start = SZ_128K,
+		.va_size = SZ_4G - SZ_128K,
 		.secure = true,
 	},
 	[MSM_SMMU_DOMAIN_NRT_UNSECURE] = {
@@ -308,20 +308,20 @@
 	},
 	[MSM_SMMU_DOMAIN_NRT_SECURE] = {
 		.label = "rot_s",
-		.va_start = 0,
-		.va_size = SZ_4G,
+		.va_start = SZ_128K,
+		.va_size = SZ_4G - SZ_128K,
 		.secure = true,
 	},
 };
 
 static const struct of_device_id msm_smmu_dt_match[] = {
-	{ .compatible = "qcom,smmu-mdp-unsec",
+	{ .compatible = "qcom,smmu_sde_unsec",
 		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_UNSECURE] },
-	{ .compatible = "qcom,smmu-mdp-sec",
+	{ .compatible = "qcom,smmu_sde_sec",
 		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_SECURE] },
-	{ .compatible = "qcom,smmu-rot-unsec",
+	{ .compatible = "qcom,smmu_sde_nrt_unsec",
 		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_UNSECURE] },
-	{ .compatible = "qcom,smmu-rot-sec",
+	{ .compatible = "qcom,smmu_sde_nrt_sec",
 		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_SECURE] },
 	{}
 };
@@ -545,7 +545,7 @@
 	},
 };
 
-static int __init msm_smmu_driver_init(void)
+int __init msm_smmu_driver_init(void)
 {
 	int ret;
 
@@ -555,13 +555,11 @@
 
 	return ret;
 }
-module_init(msm_smmu_driver_init);
 
-static void __exit msm_smmu_driver_cleanup(void)
+void __exit msm_smmu_driver_cleanup(void)
 {
 	platform_driver_unregister(&msm_smmu_driver);
 }
-module_exit(msm_smmu_driver_cleanup);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("MSM SMMU driver");
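With module_init()/module_exit() dropped above, SMMU driver registration is now expected to be driven explicitly through msm_smmu_driver_init() and msm_smmu_driver_cleanup(), which msm_mmu.h now declares. A sketch of the intended call site, assuming the top-level msm_drm module init performs the registration (the actual wiring is elsewhere in this change and not shown here):

	static int __init msm_drm_register(void)
	{
		int ret;

		/* bring up the SDE SMMU platform driver before the rest of msm */
		ret = msm_smmu_driver_init();
		if (ret)
			return ret;

		/* ... register the remaining msm platform drivers ... */
		return 0;
	}

	static void __exit msm_drm_unregister(void)
	{
		/* ... unregister the other drivers first ... */
		msm_smmu_driver_cleanup();
	}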
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index b410302..9c13991 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -526,7 +526,7 @@
 }
 
 static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
-				   struct sde_crtc *sde_crtc, u32 last_feature)
+				   struct sde_crtc *sde_crtc)
 {
 	struct sde_hw_cp_cfg hw_cfg;
 	struct sde_hw_mixer *hw_lm;
@@ -541,16 +541,13 @@
 	hw_cfg.num_of_mixers = sde_crtc->num_mixers;
 	hw_cfg.displayh = sde_crtc->base.mode.hdisplay;
 	hw_cfg.displayv = sde_crtc->base.mode.vdisplay;
+	hw_cfg.last_feature = 0;
 
 	for (i = 0; i < num_mixers && !ret; i++) {
 		hw_lm = sde_crtc->mixers[i].hw_lm;
 		hw_dspp = sde_crtc->mixers[i].hw_dspp;
 		hw_cfg.ctl = sde_crtc->mixers[i].hw_ctl;
 		hw_cfg.mixer_info = hw_lm;
-		if (i == num_mixers - 1)
-			hw_cfg.last_feature = last_feature;
-		else
-			hw_cfg.last_feature = 0;
 		switch (prop_node->feature) {
 		case SDE_CP_CRTC_DSPP_VLUT:
 			if (!hw_dspp || !hw_dspp->ops.setup_vlut) {
@@ -724,7 +721,6 @@
 	struct sde_hw_ctl *ctl;
 	uint32_t flush_mask = 0;
 	u32 num_mixers = 0, i = 0;
-	u32 num_of_features;
 
 	if (!crtc || !crtc->dev) {
 		DRM_ERROR("invalid crtc %pK dev %pK\n", crtc,
@@ -757,15 +753,9 @@
 		set_dspp_flush = true;
 	}
 
-	num_of_features = 0;
-	list_for_each_entry(prop_node, &sde_crtc->dirty_list, dirty_list)
-		num_of_features++;
-
 	list_for_each_entry_safe(prop_node, n, &sde_crtc->dirty_list,
 				dirty_list) {
-		num_of_features--;
-		sde_cp_crtc_setfeature(prop_node, sde_crtc,
-				(num_of_features == 0));
+		sde_cp_crtc_setfeature(prop_node, sde_crtc);
 		/* Set the flush flag to true */
 		if (prop_node->is_dspp_feature)
 			set_dspp_flush = true;
@@ -773,16 +763,10 @@
 			set_lm_flush = true;
 	}
 
-	num_of_features = 0;
-	list_for_each_entry(prop_node, &sde_crtc->ad_dirty, dirty_list)
-		num_of_features++;
-
 	list_for_each_entry_safe(prop_node, n, &sde_crtc->ad_dirty,
 				dirty_list) {
-		num_of_features--;
 		set_dspp_flush = true;
-		sde_cp_crtc_setfeature(prop_node, sde_crtc,
-				(num_of_features == 0));
+		sde_cp_crtc_setfeature(prop_node, sde_crtc);
 	}
 
 	for (i = 0; i < num_mixers; i++) {
@@ -1357,7 +1341,7 @@
 	hw_dspp->ops.ad_read_intr_resp(hw_dspp, AD4_BACKLIGHT, &bl);
 	event.length = sizeof(u32);
 	event.type = DRM_EVENT_AD_BACKLIGHT;
-	msm_mode_object_event_nofity(&crtc_drm->base, crtc_drm->dev,
+	msm_mode_object_event_notify(&crtc_drm->base, crtc_drm->dev,
 			&event, (u8 *)&bl);
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 6551257..6c9d496 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -29,6 +29,9 @@
 
 #define SDE_ERROR_CONN(c, fmt, ...) SDE_ERROR("conn%d " fmt,\
 		(c) ? (c)->base.base.id : -1, ##__VA_ARGS__)
+static u32 dither_matrix[DITHER_MATRIX_SZ] = {
+	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
+};
 
 static const struct drm_prop_enum_list e_topology_name[] = {
 	{SDE_RM_TOPOLOGY_NONE,	"sde_none"},
@@ -83,7 +86,7 @@
 	if (c_conn->ops.set_backlight) {
 		event.type = DRM_EVENT_SYS_BACKLIGHT;
 		event.length = sizeof(u32);
-		msm_mode_object_event_nofity(&c_conn->base.base,
+		msm_mode_object_event_notify(&c_conn->base.base,
 				c_conn->base.dev, &event, (u8 *)&brightness);
 		c_conn->ops.set_backlight(c_conn->display, bl_lvl);
 	}
@@ -217,6 +220,129 @@
 	(void)sde_connector_register_event(connector, event_idx, 0, 0);
 }
 
+static int _sde_connector_get_default_dither_cfg_v1(
+		struct sde_connector *c_conn, void *cfg)
+{
+	struct drm_msm_dither *dither_cfg = (struct drm_msm_dither *)cfg;
+	enum dsi_pixel_format dst_format = DSI_PIXEL_FORMAT_MAX;
+
+	if (!c_conn || !cfg) {
+		SDE_ERROR("invalid argument(s), c_conn %pK, cfg %pK\n",
+				c_conn, cfg);
+		return -EINVAL;
+	}
+
+	if (!c_conn->ops.get_dst_format) {
+		SDE_ERROR("get_dst_format is invalid\n");
+		return -EINVAL;
+	}
+
+	dst_format = c_conn->ops.get_dst_format(c_conn->display);
+	switch (dst_format) {
+	case DSI_PIXEL_FORMAT_RGB888:
+		dither_cfg->c0_bitdepth = 8;
+		dither_cfg->c1_bitdepth = 8;
+		dither_cfg->c2_bitdepth = 8;
+		dither_cfg->c3_bitdepth = 8;
+		break;
+	case DSI_PIXEL_FORMAT_RGB666:
+	case DSI_PIXEL_FORMAT_RGB666_LOOSE:
+		dither_cfg->c0_bitdepth = 6;
+		dither_cfg->c1_bitdepth = 6;
+		dither_cfg->c2_bitdepth = 6;
+		dither_cfg->c3_bitdepth = 6;
+		break;
+	default:
+		SDE_DEBUG("no default dither config for dst_format %d\n",
+			dst_format);
+		return -ENODATA;
+	}
+
+	memcpy(&dither_cfg->matrix, dither_matrix,
+			sizeof(u32) * DITHER_MATRIX_SZ);
+	dither_cfg->temporal_en = 0;
+	return 0;
+}
+
+static void _sde_connector_install_dither_property(struct drm_device *dev,
+		struct sde_kms *sde_kms, struct sde_connector *c_conn)
+{
+	char prop_name[DRM_PROP_NAME_LEN];
+	struct sde_mdss_cfg *catalog = NULL;
+	struct drm_property_blob *blob_ptr;
+	void *cfg;
+	int ret = 0;
+	u32 version = 0, len = 0;
+	bool default_dither_needed = false;
+
+	if (!dev || !sde_kms || !c_conn) {
+		SDE_ERROR("invalid argument(s), dev %pK, sde_kms %pK, c_conn %pK\n",
+				dev, sde_kms, c_conn);
+		return;
+	}
+
+	catalog = sde_kms->catalog;
+	version = SDE_COLOR_PROCESS_MAJOR(
+			catalog->pingpong[0].sblk->dither.version);
+	snprintf(prop_name, ARRAY_SIZE(prop_name), "%s%d",
+			"SDE_PP_DITHER_V", version);
+	switch (version) {
+	case 1:
+		msm_property_install_blob(&c_conn->property_info, prop_name,
+			DRM_MODE_PROP_BLOB,
+			CONNECTOR_PROP_PP_DITHER);
+		len = sizeof(struct drm_msm_dither);
+		cfg = kzalloc(len, GFP_KERNEL);
+		if (!cfg)
+			return;
+
+		ret = _sde_connector_get_default_dither_cfg_v1(c_conn, cfg);
+		if (!ret)
+			default_dither_needed = true;
+		break;
+	default:
+		SDE_ERROR("unsupported dither version %d\n", version);
+		return;
+	}
+
+	if (default_dither_needed) {
+		blob_ptr = drm_property_create_blob(dev, len, cfg);
+		if (IS_ERR_OR_NULL(blob_ptr))
+			goto exit;
+		c_conn->blob_dither = blob_ptr;
+	}
+exit:
+	kfree(cfg);
+}
+
+int sde_connector_get_dither_cfg(struct drm_connector *conn,
+			struct drm_connector_state *state, void **cfg,
+			size_t *len)
+{
+	struct sde_connector *c_conn = NULL;
+	struct sde_connector_state *c_state = NULL;
+	size_t dither_sz = 0;
+
+	if (!conn || !state || !(*cfg))
+		return -EINVAL;
+
+	c_conn = to_sde_connector(conn);
+	c_state = to_sde_connector_state(state);
+
+	/* try to get user config data first */
+	*cfg = msm_property_get_blob(&c_conn->property_info,
+					c_state->property_blobs,
+					&dither_sz,
+					CONNECTOR_PROP_PP_DITHER);
+	/* if user config data doesn't exist, use default dither blob */
+	if (*cfg == NULL && c_conn->blob_dither) {
+		*cfg = &c_conn->blob_dither->data;
+		dither_sz = c_conn->blob_dither->length;
+	}
+	*len = dither_sz;
+	return 0;
+}
+
 int sde_connector_get_info(struct drm_connector *connector,
 		struct msm_display_info *info)
 {
@@ -305,6 +431,8 @@
 		drm_property_unreference_blob(c_conn->blob_caps);
 	if (c_conn->blob_hdr)
 		drm_property_unreference_blob(c_conn->blob_hdr);
+	if (c_conn->blob_dither)
+		drm_property_unreference_blob(c_conn->blob_dither);
 	msm_property_destroy(&c_conn->property_info);
 
 	drm_connector_unregister(connector);
@@ -327,8 +455,7 @@
 		return;
 	}
 
-	msm_framebuffer_cleanup(c_state->out_fb,
-			c_state->mmu_id);
+	msm_framebuffer_cleanup(c_state->out_fb, c_state->aspace);
 	drm_framebuffer_unreference(c_state->out_fb);
 	c_state->out_fb = NULL;
 
@@ -369,7 +496,8 @@
 	} else {
 		/* destroy value helper */
 		msm_property_destroy_state(&c_conn->property_info, c_state,
-				c_state->property_values, 0);
+				c_state->property_values,
+				c_state->property_blobs);
 	}
 }
 
@@ -398,7 +526,7 @@
 
 	/* reset value helper, zero out state structure and reset properties */
 	msm_property_reset_state(&c_conn->property_info, c_state,
-			c_state->property_values, 0);
+			c_state->property_values, c_state->property_blobs);
 
 	c_state->base.connector = connector;
 	connector->state = &c_state->base;
@@ -426,13 +554,14 @@
 
 	/* duplicate value helper */
 	msm_property_duplicate_state(&c_conn->property_info,
-			c_oldstate, c_state, c_state->property_values, 0);
+			c_oldstate, c_state, c_state->property_values,
+			c_state->property_blobs);
 
 	/* additional handling for drm framebuffer objects */
 	if (c_state->out_fb) {
 		drm_framebuffer_reference(c_state->out_fb);
 		rc = msm_framebuffer_prepare(c_state->out_fb,
-				c_state->mmu_id);
+				c_state->aspace);
 		if (rc)
 			SDE_ERROR("failed to prepare fb, %d\n", rc);
 	}
@@ -546,7 +675,7 @@
 			return rc;
 
 		c_state->rois.roi[i] = roi_v1.roi[i];
-		SDE_DEBUG_CONN(c_conn, "roi%d: roi 0x%x 0x%x 0x%x 0x%x\n", i,
+		SDE_DEBUG_CONN(c_conn, "roi%d: roi (%d,%d) (%d,%d)\n", i,
 				c_state->rois.roi[i].x1,
 				c_state->rois.roi[i].y1,
 				c_state->rois.roi[i].x2,
@@ -626,7 +755,8 @@
 
 	/* generic property handling */
 	rc = msm_property_atomic_set(&c_conn->property_info,
-			c_state->property_values, 0, property, val);
+			c_state->property_values, c_state->property_blobs,
+			property, val);
 	if (rc)
 		goto end;
 
@@ -652,14 +782,14 @@
 					c_conn->fb_kmap);
 
 			if (c_state->out_fb->flags & DRM_MODE_FB_SECURE)
-				c_state->mmu_id =
-				c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE];
+				c_state->aspace =
+				c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE];
 			else
-				c_state->mmu_id =
-				c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+				c_state->aspace =
+				c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
 
 			rc = msm_framebuffer_prepare(c_state->out_fb,
-					c_state->mmu_id);
+					c_state->aspace);
 			if (rc)
 				SDE_ERROR("prep fb failed, %d\n", rc);
 		}
@@ -733,7 +863,8 @@
 	else
 		/* get cached property value */
 		rc = msm_property_atomic_get(&c_conn->property_info,
-				c_state->property_values, 0, property, val);
+				c_state->property_values,
+				c_state->property_blobs, property, val);
 
 	/* allow for custom override */
 	if (c_conn->ops.get_property)
@@ -1010,18 +1141,17 @@
 	c_conn->lp_mode = 0;
 	c_conn->last_panel_power_mode = SDE_MODE_DPMS_ON;
 
-	/* cache mmu_id's for later */
 	sde_kms = to_sde_kms(priv->kms);
 	if (sde_kms->vbif[VBIF_NRT]) {
-		c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
-			sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
-		c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
-			sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+		c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+			sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+		c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+			sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
 	} else {
-		c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
-			sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
-		c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
-			sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+		c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+			sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+		c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+			sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
 	}
 
 	if (ops)
@@ -1116,6 +1246,8 @@
 				&c_conn->property_info, "sde_drm_roi_v1", 0x0,
 				0, ~0, 0, CONNECTOR_PROP_ROI_V1);
 	}
+	/* install PP_DITHER properties */
+	_sde_connector_install_dither_property(dev, sde_kms, c_conn);
 
 	msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE",
 			0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE);
@@ -1156,6 +1288,9 @@
 		drm_property_unreference_blob(c_conn->blob_caps);
 	if (c_conn->blob_hdr)
 		drm_property_unreference_blob(c_conn->blob_hdr);
+	if (c_conn->blob_dither)
+		drm_property_unreference_blob(c_conn->blob_dither);
+
 	msm_property_destroy(&c_conn->property_info);
 error_cleanup_fence:
 	mutex_destroy(&c_conn->lock);
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 497d0db..8e46a11 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -13,6 +13,7 @@
 #ifndef _SDE_CONNECTOR_H_
 #define _SDE_CONNECTOR_H_
 
+#include <uapi/drm/msm_drm_pp.h>
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_panel.h>
@@ -183,6 +184,13 @@
 	 */
 	int (*set_power)(struct drm_connector *connector,
 			int power_mode, void *display);
+
+	/**
+	 * get_dst_format - get dst_format from display
+	 * @display: Pointer to private display handle
+	 * Returns: dst_format of display
+	 */
+	enum dsi_pixel_format (*get_dst_format)(void *display);
 };
 
 /**
@@ -227,6 +235,7 @@
  * @property_data: Array of private data for generic property handling
  * @blob_caps: Pointer to blob structure for 'capabilities' property
  * @blob_hdr: Pointer to blob structure for 'hdr_properties' property
+ * @blob_dither: Pointer to blob structure for default dither config
  * @fb_kmap: true if kernel mapping of framebuffer is requested
  * @event_table: Array of registered events
  * @event_lock: Lock object for event_table
@@ -240,7 +249,7 @@
 	struct drm_panel *panel;
 	void *display;
 
-	int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+	struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
 
 	char name[SDE_CONNECTOR_NAME_SIZE];
 
@@ -255,6 +264,7 @@
 	struct msm_property_data property_data[CONNECTOR_PROP_COUNT];
 	struct drm_property_blob *blob_caps;
 	struct drm_property_blob *blob_hdr;
+	struct drm_property_blob *blob_dither;
 
 	bool fb_kmap;
 	struct sde_connector_evt event_table[SDE_CONN_EVENT_COUNT];
@@ -304,17 +314,19 @@
  * struct sde_connector_state - private connector status structure
  * @base: Base drm connector structure
  * @out_fb: Pointer to output frame buffer, if applicable
- * @mmu_id: MMU ID for accessing frame buffer objects, if applicable
+ * @aspace: Address space for accessing frame buffer objects, if applicable
  * @property_values: Local cache of current connector property values
  * @rois: Regions of interest structure for mapping CRTC to Connector output
+ * @property_blobs: blob properties
  */
 struct sde_connector_state {
 	struct drm_connector_state base;
 	struct drm_framebuffer *out_fb;
-	int mmu_id;
+	struct msm_gem_address_space *aspace;
 	uint64_t property_values[CONNECTOR_PROP_COUNT];
 
 	struct msm_roi_list rois;
+	struct drm_property_blob *property_blobs[CONNECTOR_PROP_BLOBCOUNT];
 };
 
 /**
@@ -497,5 +509,15 @@
 	return (c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL);
 }
 
-#endif /* _SDE_CONNECTOR_H_ */
+/**
+ * sde_connector_get_dither_cfg - get dither property data
+ * @conn: Pointer to drm_connector struct
+ * @state: Pointer to drm_connector_state struct
+ * @cfg: Pointer to pointer to dither cfg
+ * @len: length of the dither data
+ * Returns: Zero on success
+ */
+int sde_connector_get_dither_cfg(struct drm_connector *conn,
+		struct drm_connector_state *state, void **cfg, size_t *len);
 
+#endif /* _SDE_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index e942e2d..3f76ba4 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -1515,7 +1515,6 @@
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
 	struct sde_kms *sde_kms;
-	struct drm_encoder *encoder;
 	unsigned long flags;
 
 	if (!work) {
@@ -1569,26 +1568,8 @@
 		}
 
 		if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE ||
-			    (fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR)) {
-			bool signal_fence = true;
-
-			drm_for_each_encoder(encoder, crtc->dev) {
-				if (encoder->crtc != crtc)
-					continue;
-
-				signal_fence &=
-					sde_encoder_is_cmd_mode(encoder);
-			}
-
-			/* signal release fence only for cmd mode panels here */
-			if (signal_fence) {
-				sde_fence_signal(&sde_crtc->output_fence, 0);
-				SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event,
-							SDE_EVTLOG_FUNC_CASE4);
-			}
-
+			    (fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR))
 			complete_all(&sde_crtc->frame_done_comp);
-		}
 
 		if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE)
 			sde_core_perf_crtc_update(crtc, 0, false);
@@ -1653,9 +1634,7 @@
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
-	struct drm_encoder *encoder;
 	int i;
-	bool signal_fence = true;
 
 	if (!crtc || !crtc->state) {
 		SDE_ERROR("invalid crtc\n");
@@ -1666,16 +1645,8 @@
 	cstate = to_sde_crtc_state(crtc->state);
 	SDE_EVT32_VERBOSE(DRMID(crtc));
 
-	drm_for_each_encoder(encoder, crtc->dev) {
-		if (encoder->crtc != crtc)
-			continue;
-
-		signal_fence &= !sde_encoder_is_cmd_mode(encoder);
-	}
-
-	/* signal release fence for non-cmd mode panels */
-	if (signal_fence)
-		sde_fence_signal(&sde_crtc->output_fence, 0);
+	/* signal release fence */
+	sde_fence_signal(&sde_crtc->output_fence, 0);
 
 	/* signal retire fence */
 	for (i = 0; i < cstate->num_connectors; ++i)
@@ -2330,7 +2301,7 @@
 		_sde_crtc_vblank_enable_nolock(sde_crtc, !enable);
 
 	sde_crtc->suspend = enable;
-	msm_mode_object_event_nofity(&crtc->base, crtc->dev, &event,
+	msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
 			(u8 *)&power_on);
 	mutex_unlock(&sde_crtc->crtc_lock);
 }
@@ -2368,6 +2339,8 @@
 
 	_sde_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp);
 
+	cstate->idle_pc = sde_crtc->idle_pc;
+
 	return &cstate->base;
 }
 
@@ -2468,6 +2441,24 @@
 			sde_encoder_virt_restore(encoder);
 		}
 
+	} else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
+		/*
+		 * Serialize h/w idle state update with crtc atomic check.
+		 * Grab the modeset lock to ensure that there is no on-going
+		 * atomic check, then increment the idle_pc counter. The next
+		 * atomic check will detect a new idle_pc since the counter
+		 * has advanced between the old_state and new_state, and
+		 * therefore properly reprogram all relevant drm objects'
+		 * hardware.
+		 */
+		drm_modeset_lock_crtc(crtc, NULL);
+
+		sde_crtc->idle_pc++;
+
+		SDE_DEBUG("crtc%d idle_pc:%d\n", crtc->base.id,
+				sde_crtc->idle_pc);
+		SDE_EVT32(DRMID(crtc), sde_crtc->idle_pc);
+
 	} else if (event_type == SDE_POWER_EVENT_POST_DISABLE) {
 		struct drm_plane *plane;
 
@@ -2477,6 +2468,9 @@
 		 */
 		drm_atomic_crtc_for_each_plane(plane, crtc)
 			sde_plane_set_revalidate(plane, true);
+
+		drm_modeset_unlock_crtc(crtc);
+		sde_cp_crtc_suspend(crtc);
 	}
 
 	mutex_unlock(&sde_crtc->crtc_lock);
@@ -2605,7 +2599,8 @@
 
 	sde_crtc->power_event = sde_power_handle_register_event(
 		&priv->phandle,
-		SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE,
+		SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE |
+		SDE_POWER_EVENT_PRE_DISABLE,
 		sde_crtc_handle_power_event, crtc, sde_crtc->name);
 }
 
@@ -2645,7 +2640,7 @@
 	for (i = curr_cnt; i < cnt; i++) {
 		pstate = pstates[i].drm_pstate;
 		POPULATE_RECT(&dst_rect, pstate->crtc_x, pstate->crtc_y,
-				pstate->crtc_w, pstate->crtc_h, true);
+				pstate->crtc_w, pstate->crtc_h, false);
 		sde_kms_rect_intersect(&dst_rect, excl_rect, &intersect);
 
 		if (intersect.w == excl_rect->w && intersect.h == excl_rect->h
@@ -2816,8 +2811,10 @@
 			sde_plane_clear_multirect(pipe_staged[i]);
 
 			if (is_sde_plane_virtual(pipe_staged[i]->plane)) {
-				SDE_ERROR("invalid use of virtual plane: %d\n",
+				SDE_ERROR(
+					"r1 only virt plane:%d not supported\n",
 					pipe_staged[i]->plane->base.id);
+				rc = -EINVAL;
 				goto end;
 			}
 		}
@@ -3012,6 +3009,10 @@
 	struct drm_device *dev;
 	struct sde_kms_info *info;
 	struct sde_kms *sde_kms;
+	static const struct drm_prop_enum_list e_secure_level[] = {
+		{SDE_DRM_SEC_NON_SEC, "sec_and_non_sec"},
+		{SDE_DRM_SEC_ONLY, "sec_only"},
+	};
 
 	SDE_DEBUG("\n");
 
@@ -3090,6 +3091,11 @@
 	msm_property_install_volatile_range(&sde_crtc->property_info,
 		"sde_drm_roi_v1", 0x0, 0, ~0, 0, CRTC_PROP_ROI_V1);
 
+	msm_property_install_enum(&sde_crtc->property_info, "security_level",
+			0x0, 0, e_secure_level,
+			ARRAY_SIZE(e_secure_level),
+			CRTC_PROP_SECURITY_LEVEL);
+
 	sde_kms_info_reset(info);
 
 	if (catalog->has_dim_layer) {
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 0d72ff1..f021477 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -125,6 +125,7 @@
  * @vblank_cb_time  : ktime at vblank count reset
  * @vblank_refcount : reference count for vblank enable request
  * @suspend         : whether or not a suspend operation is in progress
+ * @idle_pc       : count of current idle power collapse request
  * @feature_list  : list of color processing features supported on a crtc
  * @active_list   : list of color processing features are active
  * @dirty_list    : list of color processing features are dirty
@@ -173,6 +174,7 @@
 	ktime_t vblank_cb_time;
 	atomic_t vblank_refcount;
 	bool suspend;
+	u32 idle_pc;
 
 	struct list_head feature_list;
 	struct list_head active_list;
@@ -278,6 +280,7 @@
  * @sbuf_cfg: stream buffer configuration
  * @sbuf_prefill_line: number of line for inline rotator prefetch
  * @sbuf_flush_mask: flush mask for inline rotator
+ * @idle_pc: count of idle power collapse request when state is duplicated
  */
 struct sde_crtc_state {
 	struct drm_crtc_state base;
@@ -307,6 +310,8 @@
 	u32 sbuf_prefill_line;
 	u32 sbuf_flush_mask;
 
+	u32 idle_pc;
+
 	struct sde_crtc_respool rp;
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 09882cd..56e1151 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -1847,6 +1847,12 @@
 	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
 	unsigned int i;
 
+	if (!sde_enc->frame_busy_mask[0]) {
+		/* suppress frame_done without waiter, likely autorefresh */
+		SDE_EVT32(DRMID(drm_enc), event, ready_phys->intf_idx);
+		return;
+	}
+
 	/* One of the physical encoders has become idle */
 	for (i = 0; i < sde_enc->num_phys_encs; i++)
 		if (sde_enc->phys_encs[i] == ready_phys) {
@@ -2263,6 +2269,27 @@
 	}
 }
 
+static void _sde_encoder_setup_dither(struct sde_encoder_phys *phys)
+{
+	void *dither_cfg;
+	int ret = 0;
+	size_t len = 0;
+	enum sde_rm_topology_name topology;
+
+	if (!phys || !phys->connector || !phys->hw_pp ||
+			!phys->hw_pp->ops.setup_dither)
+		return;
+	topology = sde_connector_get_topology_name(phys->connector);
+	if ((topology == SDE_RM_TOPOLOGY_PPSPLIT) &&
+			(phys->split_role == ENC_ROLE_SLAVE))
+		return;
+
+	ret = sde_connector_get_dither_cfg(phys->connector,
+				phys->connector->state, &dither_cfg, &len);
+	if (!ret)
+		phys->hw_pp->ops.setup_dither(phys->hw_pp, dither_cfg, len);
+}
+
 void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 		struct sde_encoder_kickoff_params *params)
 {
@@ -2289,6 +2316,7 @@
 				phys->ops.prepare_for_kickoff(phys, params);
 			if (phys->enable_state == SDE_ENC_ERR_NEEDS_HW_RESET)
 				needs_hw_reset = true;
+			_sde_encoder_setup_dither(phys);
 		}
 	}
 
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 6e6960a..4b12651 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -172,6 +172,8 @@
  * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
  * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
  * @INTR_IDX_RDPTR:    Readpointer done interrupt for cmd mode panel
+ * @INTR_IDX_AUTOREFRESH_DONE:  Autorefresh done for cmd mode panel meaning
+ *                              autorefresh has triggered a double buffer flip
  */
 enum sde_intr_idx {
 	INTR_IDX_VSYNC,
@@ -179,6 +181,7 @@
 	INTR_IDX_UNDERRUN,
 	INTR_IDX_CTL_START,
 	INTR_IDX_RDPTR,
+	INTR_IDX_AUTOREFRESH_DONE,
 	INTR_IDX_MAX,
 };
 
@@ -284,6 +287,18 @@
 };
 
 /**
+ * struct sde_encoder_phys_cmd_autorefresh - autorefresh state tracking
+ * @cfg: current active autorefresh configuration
+ * @kickoff_cnt: atomic count tracking autorefresh done irq kickoffs pending
+ * @kickoff_wq:	wait queue for waiting on autorefresh done irq
+ */
+struct sde_encoder_phys_cmd_autorefresh {
+	struct sde_hw_autorefresh cfg;
+	atomic_t kickoff_cnt;
+	wait_queue_head_t kickoff_wq;
+};
+
+/**
  * struct sde_encoder_phys_cmd - sub-class of sde_encoder_phys to handle command
  *	mode specific operations
  * @base:	Baseclass physical encoder structure
@@ -292,12 +307,14 @@
  * @serialize_wait4pp:	serialize wait4pp feature waits for pp_done interrupt
  *			after ctl_start instead of before next frame kickoff
  * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
+ * @autorefresh: autorefresh feature state
  */
 struct sde_encoder_phys_cmd {
 	struct sde_encoder_phys base;
 	int stream_sel;
 	bool serialize_wait4pp;
 	int pp_timeout_report_cnt;
+	struct sde_encoder_phys_cmd_autorefresh autorefresh;
 };
 
 /**
@@ -316,7 +333,7 @@
  * @wb_fmt:		Writeback pixel format
  * @frame_count:	Counter of completed writeback operations
  * @kickoff_count:	Counter of issued writeback operations
- * @mmu_id:		mmu identifier for non-secure/secure domain
+ * @aspace:		address space identifier for non-secure/secure domain
  * @wb_dev:		Pointer to writeback device
  * @start_time:		Start time of writeback latest request
  * @end_time:		End time of writeback latest request
@@ -338,7 +355,7 @@
 	const struct sde_format *wb_fmt;
 	u32 frame_count;
 	u32 kickoff_count;
-	int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+	struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
 	struct sde_wb_device *wb_dev;
 	ktime_t start_time;
 	ktime_t end_time;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 447fdcc..5400fa7 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -44,6 +44,16 @@
 #define DEFAULT_TEARCHECK_SYNC_THRESH_START	4
 #define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE	4
 
+#define SDE_ENC_WR_PTR_START_TIMEOUT_US 20000
+
+static inline int _sde_encoder_phys_cmd_get_idle_timeout(
+		struct sde_encoder_phys_cmd *cmd_enc)
+{
+	return cmd_enc->autorefresh.cfg.frame_count ?
+			cmd_enc->autorefresh.cfg.frame_count *
+			KICKOFF_TIMEOUT_MS : KICKOFF_TIMEOUT_MS;
+}
+
 static inline bool sde_encoder_phys_cmd_is_master(
 		struct sde_encoder_phys *phys_enc)
 {
@@ -60,6 +70,52 @@
 	return true;
 }
 
+static uint64_t _sde_encoder_phys_cmd_get_autorefresh_property(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct drm_connector *conn = phys_enc->connector;
+
+	if (!conn || !conn->state)
+		return 0;
+
+	return sde_connector_get_property(conn->state,
+				CONNECTOR_PROP_AUTOREFRESH);
+}
+
+static void _sde_encoder_phys_cmd_config_autorefresh(
+		struct sde_encoder_phys *phys_enc,
+		u32 new_frame_count)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_hw_pingpong *hw_pp = phys_enc->hw_pp;
+	struct drm_connector *conn = phys_enc->connector;
+	struct sde_hw_autorefresh *cfg_cur, cfg_nxt;
+
+	if (!conn || !conn->state || !hw_pp)
+		return;
+
+	cfg_cur = &cmd_enc->autorefresh.cfg;
+
+	/* autorefresh property value should be validated already */
+	memset(&cfg_nxt, 0, sizeof(cfg_nxt));
+	cfg_nxt.frame_count = new_frame_count;
+	cfg_nxt.enable = (cfg_nxt.frame_count != 0);
+
+	SDE_DEBUG_CMDENC(cmd_enc, "autorefresh state %d->%d framecount %d\n",
+			cfg_cur->enable, cfg_nxt.enable, cfg_nxt.frame_count);
+	SDE_EVT32(DRMID(phys_enc->parent), hw_pp->idx, cfg_cur->enable,
+			cfg_nxt.enable, cfg_nxt.frame_count);
+
+	/* only proceed on state changes */
+	if (cfg_nxt.enable == cfg_cur->enable)
+		return;
+
+	memcpy(cfg_cur, &cfg_nxt, sizeof(*cfg_cur));
+	if (hw_pp->ops.setup_autorefresh)
+		hw_pp->ops.setup_autorefresh(hw_pp, cfg_cur);
+}
+
 static void _sde_encoder_phys_cmd_update_flush_mask(
 		struct sde_encoder_phys *phys_enc)
 {
@@ -124,6 +180,29 @@
 	wake_up_all(&phys_enc->pending_kickoff_wq);
 }
 
+static void sde_encoder_phys_cmd_autorefresh_done_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys *phys_enc = arg;
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	unsigned long lock_flags;
+	int new_cnt;
+
+	if (!cmd_enc)
+		return;
+
+	phys_enc = &cmd_enc->base;
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	new_cnt = atomic_add_unless(&cmd_enc->autorefresh.kickoff_cnt, -1, 0);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0, new_cnt);
+
+	/* Signal any waiting atomic commit thread */
+	wake_up_all(&cmd_enc->autorefresh.kickoff_wq);
+}
+
 static void sde_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
 {
 	struct sde_encoder_phys *phys_enc = arg;
@@ -190,6 +269,10 @@
 	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
 	irq->hw_idx = phys_enc->intf_idx;
 	irq->irq_idx = -EINVAL;
+
+	irq = &phys_enc->irq[INTR_IDX_AUTOREFRESH_DONE];
+	irq->hw_idx = phys_enc->hw_pp->idx;
+	irq->irq_idx = -EINVAL;
 }
 
 static void sde_encoder_phys_cmd_mode_set(
@@ -302,6 +385,74 @@
 			phys_enc->split_role == ENC_ROLE_SLAVE;
 }
 
+static int _sde_encoder_phys_cmd_poll_write_pointer_started(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_hw_pingpong *hw_pp = phys_enc->hw_pp;
+	struct sde_hw_pp_vsync_info info;
+	u32 timeout_us = SDE_ENC_WR_PTR_START_TIMEOUT_US;
+	int ret;
+
+	if (!hw_pp || !hw_pp->ops.get_vsync_info ||
+			!hw_pp->ops.poll_timeout_wr_ptr)
+		return 0;
+
+	ret = hw_pp->ops.get_vsync_info(hw_pp, &info);
+	if (ret)
+		return ret;
+
+	SDE_DEBUG_CMDENC(cmd_enc,
+			"pp:%d rd_ptr %d wr_ptr %d\n",
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			info.rd_ptr_line_count,
+			info.wr_ptr_line_count);
+	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
+			info.wr_ptr_line_count);
+
+	ret = hw_pp->ops.poll_timeout_wr_ptr(hw_pp, timeout_us);
+	if (ret) {
+		SDE_EVT32(DRMID(phys_enc->parent),
+				phys_enc->hw_pp->idx - PINGPONG_0,
+				timeout_us,
+				ret);
+		SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl",
+				"dsi1_phy", "vbif_rt", "dbg_bus",
+				"vbif_dbg_bus", "panic");
+	}
+
+	return ret;
+}
+
+static bool _sde_encoder_phys_cmd_is_ongoing_pptx(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_hw_pingpong *hw_pp;
+	struct sde_hw_pp_vsync_info info;
+
+	if (!phys_enc)
+		return false;
+
+	hw_pp = phys_enc->hw_pp;
+	if (!hw_pp || !hw_pp->ops.get_vsync_info)
+		return false;
+
+	hw_pp->ops.get_vsync_info(hw_pp, &info);
+
+	SDE_EVT32(DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			atomic_read(&phys_enc->pending_kickoff_cnt),
+			info.wr_ptr_line_count,
+			phys_enc->cached_mode.vdisplay);
+
+	if (info.wr_ptr_line_count > 0 && info.wr_ptr_line_count <
+			phys_enc->cached_mode.vdisplay)
+		return true;
+
+	return false;
+}
+
 static int _sde_encoder_phys_cmd_wait_for_idle(
 		struct sde_encoder_phys *phys_enc)
 {
@@ -333,12 +484,49 @@
 	return ret;
 }
 
+static int _sde_encoder_phys_cmd_wait_for_autorefresh_done(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_encoder_wait_info wait_info;
+	int ret = 0;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	/* only master deals with autorefresh */
+	if (!sde_encoder_phys_cmd_is_master(phys_enc))
+		return 0;
+
+	wait_info.wq = &cmd_enc->autorefresh.kickoff_wq;
+	wait_info.atomic_cnt = &cmd_enc->autorefresh.kickoff_cnt;
+	wait_info.timeout_ms = _sde_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+
+	/* wait for autorefresh kickoff to start */
+	ret = sde_encoder_helper_wait_for_irq(phys_enc,
+			INTR_IDX_AUTOREFRESH_DONE, &wait_info);
+
+	/* double check that kickoff has started by reading write ptr reg */
+	if (!ret)
+		ret = _sde_encoder_phys_cmd_poll_write_pointer_started(
+			phys_enc);
+	else
+		sde_encoder_helper_report_irq_timeout(phys_enc,
+				INTR_IDX_AUTOREFRESH_DONE);
+
+	return ret;
+}
+
 static int sde_encoder_phys_cmd_control_vblank_irq(
 		struct sde_encoder_phys *phys_enc,
 		bool enable)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 		to_sde_encoder_phys_cmd(phys_enc);
+	unsigned long lock_flags;
 	int ret = 0;
 
 	if (!phys_enc) {
@@ -354,6 +542,8 @@
 			__builtin_return_address(0),
 			enable, atomic_read(&phys_enc->vblank_refcount));
 
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+
 	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
 			enable, atomic_read(&phys_enc->vblank_refcount));
 
@@ -363,6 +553,8 @@
 		ret = sde_encoder_helper_unregister_irq(phys_enc,
 				INTR_IDX_RDPTR);
 
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
 end:
 	if (ret)
 		SDE_ERROR_CMDENC(cmd_enc,
@@ -387,14 +579,20 @@
 		sde_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
 		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
 
-		if (sde_encoder_phys_cmd_is_master(phys_enc))
+		if (sde_encoder_phys_cmd_is_master(phys_enc)) {
 			sde_encoder_helper_register_irq(phys_enc,
 					INTR_IDX_CTL_START);
-	} else {
+			sde_encoder_helper_register_irq(phys_enc,
+					INTR_IDX_AUTOREFRESH_DONE);
+		}
 
-		if (sde_encoder_phys_cmd_is_master(phys_enc))
+	} else {
+		if (sde_encoder_phys_cmd_is_master(phys_enc)) {
 			sde_encoder_helper_unregister_irq(phys_enc,
 					INTR_IDX_CTL_START);
+			sde_encoder_helper_unregister_irq(phys_enc,
+					INTR_IDX_AUTOREFRESH_DONE);
+		}
 
 		sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
 		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
@@ -445,7 +643,9 @@
 	}
 
 	tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
-	tc_cfg.hw_vsync_mode = 1;
+
+	/* enable external TE after kickoff to avoid premature autorefresh */
+	tc_cfg.hw_vsync_mode = 0;
 
 	/*
 	 * By setting sync_cfg_height to near max register value, we essentially
@@ -561,6 +761,41 @@
 	phys_enc->enable_state = SDE_ENC_ENABLED;
 }
 
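+/* read back the pp autorefresh config to check if hw autorefresh is enabled */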
+static bool _sde_encoder_phys_cmd_is_autorefresh_enabled(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_hw_pingpong *hw_pp;
+	struct sde_hw_autorefresh cfg;
+	int ret;
+
+	if (!phys_enc || !phys_enc->hw_pp)
+		return 0;
+
+	if (!sde_encoder_phys_cmd_is_master(phys_enc))
+		return 0;
+
+	hw_pp = phys_enc->hw_pp;
+	if (!hw_pp->ops.get_autorefresh)
+		return 0;
+
+	ret = hw_pp->ops.get_autorefresh(hw_pp, &cfg);
+	if (ret)
+		return 0;
+
+	return cfg.enable;
+}
+
+static void _sde_encoder_phys_cmd_connect_te(
+		struct sde_encoder_phys *phys_enc, bool enable)
+{
+	if (!phys_enc || !phys_enc->hw_pp ||
+			!phys_enc->hw_pp->ops.connect_external_te)
+		return;
+
+	SDE_EVT32(DRMID(phys_enc->parent), enable);
+	phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
+}
+
 static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
@@ -638,7 +873,10 @@
 		return;
 	}
 	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
-	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0);
+
+	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
+			atomic_read(&phys_enc->pending_kickoff_cnt),
+			atomic_read(&cmd_enc->autorefresh.kickoff_cnt));
 
 	/*
 	 * Mark kickoff request as outstanding. If there are more than one,
@@ -652,6 +890,10 @@
 				phys_enc->hw_pp->idx - PINGPONG_0);
 		SDE_ERROR("failed wait_for_idle: %d\n", ret);
 	}
+
+	SDE_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			atomic_read(&phys_enc->pending_kickoff_cnt));
 }
 
 static int _sde_encoder_phys_cmd_wait_for_ctl_start(
@@ -722,6 +964,10 @@
 	if (sde_encoder_phys_cmd_is_master(phys_enc))
 		rc = _sde_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
 
+	if (!rc && sde_encoder_phys_cmd_is_master(phys_enc) &&
+			cmd_enc->autorefresh.cfg.enable)
+		rc = _sde_encoder_phys_cmd_wait_for_autorefresh_done(phys_enc);
+
 	/* required for both controllers */
 	if (!rc && cmd_enc->serialize_wait4pp)
 		sde_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
@@ -765,6 +1011,108 @@
 static void sde_encoder_phys_cmd_prepare_commit(
 		struct sde_encoder_phys *phys_enc)
 {
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc)
+		return;
+
+	if (sde_encoder_phys_cmd_is_master(phys_enc)) {
+		unsigned long lock_flags;
+
+
+		SDE_EVT32(DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
+				cmd_enc->autorefresh.cfg.enable);
+
+		if (!_sde_encoder_phys_cmd_is_autorefresh_enabled(phys_enc))
+			return;
+
+		/*
+		 * Autorefresh must be disabled carefully:
+		 *  - Must disable while there is no ongoing transmission
+		 *  - Receiving a TE will trigger the next Autorefresh TX
+		 *  - Only safe to disable Autorefresh between PPDone and TE
+		 *  - However, that is a small time window
+		 *  - Disabling External TE gives large safe window, assuming
+		 *    internally generated TE is set to a large counter value
+		 *
+		 * If Autorefresh is active:
+		 * 1. Disable external TE
+		 *   - TE will run on an SDE counter set to large value (~200ms)
+		 *
+		 * 2. Check for ongoing TX
+		 *   - If ongoing TX, set pending_kickoff_cnt if not set already
+		 *   - We don't want to wait for a ppdone that will never
+		 *     arrive, so verify ongoing TX
+		 *
+		 * 3. Wait for TX to Complete
+		 *  - Wait for PPDone pending count to reach 0
+		 *
+		 * 4. Leave Autorefresh Disabled
+		 *   - Assume disable of Autorefresh since it is now safe
+		 *   - Can now safely Disable Encoder, do debug printing, etc.
+		 *     without worrying that Autorefresh will kickoff
+		 */
+
+		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+
+		/* disable external TE to prevent next autorefresh */
+		_sde_encoder_phys_cmd_connect_te(phys_enc, false);
+
+		/* verify that we disabled TE during outstanding TX */
+		if (_sde_encoder_phys_cmd_is_ongoing_pptx(phys_enc))
+			atomic_add_unless(&phys_enc->pending_kickoff_cnt, 1, 1);
+
+		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+		/* wait for ppdone if necessary due to catching ongoing TX */
+		if (_sde_encoder_phys_cmd_wait_for_idle(phys_enc))
+			SDE_ERROR_CMDENC(cmd_enc,
+					"pp:%d kickoff timed out\n",
+					phys_enc->hw_pp->idx - PINGPONG_0);
+
+		/*
+		 * not strictly necessary for kickoff, but simplifies disable
+		 * callflow since our disable is split across multiple phys_encs
+		 */
+		_sde_encoder_phys_cmd_config_autorefresh(phys_enc, 0);
+
+		SDE_DEBUG_CMDENC(cmd_enc, "disabled autorefresh & ext TE\n");
+
+	}
+}
+
+static void sde_encoder_phys_cmd_handle_post_kickoff(
+		struct sde_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return;
+
+	/*
+	 * re-enable external TE, either for the first time after enabling
+	 * or if disabled for Autorefresh
+	 */
+	_sde_encoder_phys_cmd_connect_te(phys_enc, true);
+}
+
+static void sde_encoder_phys_cmd_trigger_start(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	u32 frame_cnt;
+
+	if (!phys_enc)
+		return;
+
+	/* we don't issue CTL_START when using autorefresh */
+	frame_cnt = _sde_encoder_phys_cmd_get_autorefresh_property(phys_enc);
+	if (frame_cnt) {
+		_sde_encoder_phys_cmd_config_autorefresh(phys_enc, frame_cnt);
+		atomic_inc(&cmd_enc->autorefresh.kickoff_cnt);
+	} else {
+		sde_encoder_helper_trigger_start(phys_enc);
+	}
 }
 
 static void sde_encoder_phys_cmd_init_ops(
@@ -782,7 +1130,8 @@
 	ops->wait_for_commit_done = sde_encoder_phys_cmd_wait_for_commit_done;
 	ops->prepare_for_kickoff = sde_encoder_phys_cmd_prepare_for_kickoff;
 	ops->wait_for_tx_complete = sde_encoder_phys_cmd_wait_for_tx_complete;
-	ops->trigger_start = sde_encoder_helper_trigger_start;
+	ops->handle_post_kickoff = sde_encoder_phys_cmd_handle_post_kickoff;
+	ops->trigger_start = sde_encoder_phys_cmd_trigger_start;
 	ops->needs_single_flush = sde_encoder_phys_cmd_needs_single_flush;
 	ops->hw_reset = sde_encoder_helper_hw_reset;
 	ops->irq_control = sde_encoder_phys_cmd_irq_control;
@@ -860,10 +1209,18 @@
 	irq->intr_idx = INTR_IDX_UNDERRUN;
 	irq->cb.func = sde_encoder_phys_cmd_underrun_irq;
 
+	irq = &phys_enc->irq[INTR_IDX_AUTOREFRESH_DONE];
+	irq->name = "autorefresh_done";
+	irq->intr_type = SDE_IRQ_TYPE_PING_PONG_AUTO_REF;
+	irq->intr_idx = INTR_IDX_AUTOREFRESH_DONE;
+	irq->cb.func = sde_encoder_phys_cmd_autorefresh_done_irq;
+
 	atomic_set(&phys_enc->vblank_refcount, 0);
 	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
 	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
 	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+	atomic_set(&cmd_enc->autorefresh.kickoff_cnt, 0);
+	init_waitqueue_head(&cmd_enc->autorefresh.kickoff_wq);
 
 	SDE_DEBUG_CMDENC(cmd_enc, "created\n");
 
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 007738a6..5b3ac1f 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -476,6 +476,7 @@
 {
 	int ret = 0;
 	struct sde_encoder_phys_vid *vid_enc;
+	unsigned long lock_flags;
 
 	if (!phys_enc) {
 		SDE_ERROR("invalid encoder\n");
@@ -492,6 +493,8 @@
 			__builtin_return_address(0),
 			enable, atomic_read(&phys_enc->vblank_refcount));
 
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+
 	SDE_EVT32(DRMID(phys_enc->parent), enable,
 			atomic_read(&phys_enc->vblank_refcount));
 
@@ -501,6 +504,8 @@
 		ret = sde_encoder_helper_unregister_irq(phys_enc,
 				INTR_IDX_VSYNC);
 
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
 	if (ret)
 		SDE_ERROR_VIDENC(vid_enc,
 				"control vblank irq error %d, enable %d\n",
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 54c1397..875d99d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -250,7 +250,8 @@
 	struct sde_hw_wb_cfg *wb_cfg;
 	struct sde_hw_wb_cdp_cfg *cdp_cfg;
 	const struct msm_format *format;
-	int ret, mmu_id;
+	int ret;
+	struct msm_gem_address_space *aspace;
 
 	if (!phys_enc || !phys_enc->sde_kms || !phys_enc->sde_kms->catalog) {
 		SDE_ERROR("invalid encoder\n");
@@ -264,9 +265,9 @@
 
 	wb_cfg->intf_mode = phys_enc->intf_mode;
 	wb_cfg->is_secure = (fb->flags & DRM_MODE_FB_SECURE) ? true : false;
-	mmu_id = (wb_cfg->is_secure) ?
-			wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] :
-			wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+	aspace = (wb_cfg->is_secure) ?
+			wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] :
+			wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
 
 	SDE_DEBUG("[fb_secure:%d]\n", wb_cfg->is_secure);
 
@@ -288,7 +289,7 @@
 	wb_cfg->roi = *wb_roi;
 
 	if (hw_wb->caps->features & BIT(SDE_WB_XY_ROI_OFFSET)) {
-		ret = sde_format_populate_layout(mmu_id, fb, &wb_cfg->dest);
+		ret = sde_format_populate_layout(aspace, fb, &wb_cfg->dest);
 		if (ret) {
 			SDE_DEBUG("failed to populate layout %d\n", ret);
 			return;
@@ -297,7 +298,7 @@
 		wb_cfg->dest.height = fb->height;
 		wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
 	} else {
-		ret = sde_format_populate_layout_with_roi(mmu_id, fb, wb_roi,
+		ret = sde_format_populate_layout_with_roi(aspace, fb, wb_roi,
 			&wb_cfg->dest);
 		if (ret) {
 			/* this error should be detected during atomic_check */
@@ -914,12 +915,19 @@
 	struct drm_mode_fb_cmd2 mode_cmd;
 	uint32_t size;
 	int nplanes, i, ret;
+	struct msm_gem_address_space *aspace;
 
 	if (!wb_enc || !wb_enc->base.parent || !wb_enc->base.sde_kms) {
 		SDE_ERROR("invalid params\n");
 		return -EINVAL;
 	}
 
+	aspace = wb_enc->base.sde_kms->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
+	if (!aspace) {
+		SDE_ERROR("invalid address space\n");
+		return -EINVAL;
+	}
+
 	dev = wb_enc->base.sde_kms->dev;
 	if (!dev) {
 		SDE_ERROR("invalid dev\n");
@@ -974,8 +982,7 @@
 	}
 
 	/* prepare the backing buffer now so that it's available later */
-	ret = msm_framebuffer_prepare(fb,
-			wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE]);
+	ret = msm_framebuffer_prepare(fb, aspace);
 	if (!ret)
 		wb_enc->fb_disable = fb;
 	return ret;
@@ -1234,15 +1241,15 @@
 	phys_enc = &wb_enc->base;
 
 	if (p->sde_kms->vbif[VBIF_NRT]) {
-		wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
-			p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
-		wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
-			p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+		wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+			p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+		wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+			p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
 	} else {
-		wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
-			p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
-		wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
-			p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+		wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+			p->sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+		wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+			p->sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
 	}
 
 	hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index c3477b5..04c9e79 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -818,7 +818,7 @@
 }
 
 static int _sde_format_populate_addrs_ubwc(
-		int mmu_id,
+		struct msm_gem_address_space *aspace,
 		struct drm_framebuffer *fb,
 		struct sde_hw_fmt_layout *layout)
 {
@@ -830,7 +830,7 @@
 		return -EINVAL;
 	}
 
-	base_addr = msm_framebuffer_iova(fb, mmu_id, 0);
+	base_addr = msm_framebuffer_iova(fb, aspace, 0);
 	if (!base_addr) {
 		DRM_ERROR("failed to retrieve base addr\n");
 		return -EFAULT;
@@ -909,7 +909,7 @@
 }
 
 static int _sde_format_populate_addrs_linear(
-		int mmu_id,
+		struct msm_gem_address_space *aspace,
 		struct drm_framebuffer *fb,
 		struct sde_hw_fmt_layout *layout)
 {
@@ -926,7 +926,7 @@
 
 	/* Populate addresses for simple formats here */
 	for (i = 0; i < layout->num_planes; ++i) {
-		layout->plane_addr[i] = msm_framebuffer_iova(fb, mmu_id, i);
+		layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
 		if (!layout->plane_addr[i]) {
 			DRM_ERROR("failed to retrieve base addr\n");
 			return -EFAULT;
@@ -937,7 +937,7 @@
 }
 
 int sde_format_populate_layout(
-		int mmu_id,
+		struct msm_gem_address_space *aspace,
 		struct drm_framebuffer *fb,
 		struct sde_hw_fmt_layout *layout)
 {
@@ -969,9 +969,9 @@
 	/* Populate the addresses given the fb */
 	if (SDE_FORMAT_IS_UBWC(layout->format) ||
 			SDE_FORMAT_IS_TILE(layout->format))
-		ret = _sde_format_populate_addrs_ubwc(mmu_id, fb, layout);
+		ret = _sde_format_populate_addrs_ubwc(aspace, fb, layout);
 	else
-		ret = _sde_format_populate_addrs_linear(mmu_id, fb, layout);
+		ret = _sde_format_populate_addrs_linear(aspace, fb, layout);
 
 	/* check if anything changed */
 	if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
@@ -1013,14 +1013,14 @@
 }
 
 int sde_format_populate_layout_with_roi(
-		int mmu_id,
+		struct msm_gem_address_space *aspace,
 		struct drm_framebuffer *fb,
 		struct sde_rect *roi,
 		struct sde_hw_fmt_layout *layout)
 {
 	int ret;
 
-	ret = sde_format_populate_layout(mmu_id, fb, layout);
+	ret = sde_format_populate_layout(aspace, fb, layout);
 	if (ret || !roi)
 		return ret;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h
index 40aab22..2333a72 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.h
+++ b/drivers/gpu/drm/msm/sde/sde_formats.h
@@ -14,6 +14,7 @@
 #define _SDE_FORMATS_H
 
 #include <drm/drm_fourcc.h>
+#include "msm_gem.h"
 #include "sde_hw_mdss.h"
 
 /**
@@ -103,7 +104,7 @@
 /**
  * sde_format_populate_layout - populate the given format layout based on
  *                     mmu, fb, and format found in the fb
- * @mmu_id:            mmu id handle
+ * @aspace:            address space pointer
  * @fb:                framebuffer pointer
  * @fmtl:              format layout structure to populate
  *
@@ -111,14 +112,14 @@
  *         are the same as before or 0 if new addresses were populated
  */
 int sde_format_populate_layout(
-		int mmu_id,
+		struct msm_gem_address_space *aspace,
 		struct drm_framebuffer *fb,
 		struct sde_hw_fmt_layout *fmtl);
 
 /**
  * sde_format_populate_layout_with_roi - populate the given format layout
  *                     based on mmu, fb, roi, and format found in the fb
- * @mmu_id:            mmu id handle
+ * @aspace:            address space pointer
  * @fb:                framebuffer pointer
  * @roi:               region of interest (optional)
  * @fmtl:              format layout structure to populate
@@ -126,7 +127,7 @@
  * Return: error code on failure, 0 on success
  */
 int sde_format_populate_layout_with_roi(
-		int mmu_id,
+		struct msm_gem_address_space *aspace,
 		struct drm_framebuffer *fb,
 		struct sde_rect *roi,
 		struct sde_hw_fmt_layout *fmtl);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
index b02cc06..35fc2b5 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
@@ -202,7 +202,7 @@
 	} else {
 		info[dspp->idx].state = ad4_state_run;
 		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-				0);
+				0x100);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 1cbbe1e..1612640 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -217,6 +217,9 @@
 	TE2_OFF,
 	TE2_LEN,
 	PP_SLAVE,
+	DITHER_OFF,
+	DITHER_LEN,
+	DITHER_VER,
 	PP_PROP_MAX,
 };
 
@@ -494,6 +497,9 @@
 	{TE2_OFF, "qcom,sde-te2-off", false, PROP_TYPE_U32_ARRAY},
 	{TE2_LEN, "qcom,sde-te2-size", false, PROP_TYPE_U32},
 	{PP_SLAVE, "qcom,sde-pp-slave", false, PROP_TYPE_U32_ARRAY},
+	{DITHER_OFF, "qcom,sde-dither-off", false, PROP_TYPE_U32_ARRAY},
+	{DITHER_LEN, "qcom,sde-dither-size", false, PROP_TYPE_U32},
+	{DITHER_VER, "qcom,sde-dither-version", false, PROP_TYPE_U32},
 };
 
 static struct sde_prop_type dsc_prop[] = {
@@ -2334,6 +2340,18 @@
 					pp->id - PINGPONG_0);
 			set_bit(SDE_PINGPONG_DSC, &pp->features);
 		}
+
+		sblk->dither.base = PROP_VALUE_ACCESS(prop_value, DITHER_OFF,
+							i);
+		if (sblk->dither.base) {
+			sblk->dither.id = SDE_PINGPONG_DITHER;
+			snprintf(sblk->dither.name, SDE_HW_BLK_NAME_LEN,
+					"dither_%u", pp->id);
+			set_bit(SDE_PINGPONG_DITHER, &pp->features);
+		}
+		sblk->dither.len = PROP_VALUE_ACCESS(prop_value, DITHER_LEN, 0);
+		sblk->dither.version = PROP_VALUE_ACCESS(prop_value, DITHER_VER,
+								0);
 	}
 
 end:
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 74fa8f9..29698bc 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -194,6 +194,7 @@
  * @SDE_PINGPONG_SPLIT      PP block supports split fifo
  * @SDE_PINGPONG_SLAVE      PP block is a suitable slave for split fifo
  * @SDE_PINGPONG_DSC,       Display stream compression blocks
+ * @SDE_PINGPONG_DITHER,    Dither blocks
  * @SDE_PINGPONG_MAX
  */
 enum {
@@ -202,6 +203,7 @@
 	SDE_PINGPONG_SPLIT,
 	SDE_PINGPONG_SLAVE,
 	SDE_PINGPONG_DSC,
+	SDE_PINGPONG_DITHER,
 	SDE_PINGPONG_MAX
 };
 
@@ -457,6 +459,7 @@
 	struct sde_pp_blk te;
 	struct sde_pp_blk te2;
 	struct sde_pp_blk dsc;
+	struct sde_pp_blk dither;
 };
 
 struct sde_wb_sub_blocks {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
index ab2c473..4191367 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
@@ -70,8 +70,6 @@
 #define DSPP_OP_PA_FOL_EN	BIT(6)
 #define DSPP_OP_PA_SKY_EN	BIT(7)
 
-#define REG_MASK(n) ((BIT(n)) - 1)
-
 #define PA_VIG_DISABLE_REQUIRED(x) \
 			!((x) & (VIG_OP_PA_SKIN_EN | VIG_OP_PA_SKY_EN | \
 			VIG_OP_PA_FOL_EN | VIG_OP_PA_HUE_EN | \
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index ba55086..0b3432b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -15,6 +15,7 @@
 #include "sde_hw_ctl.h"
 #include "sde_dbg.h"
 #include "sde_kms.h"
+#include "sde_reg_dma.h"
 
 #define   CTL_LAYER(lm)                 \
 	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
@@ -111,6 +112,11 @@
 
 static inline void sde_hw_ctl_trigger_flush(struct sde_hw_ctl *ctx)
 {
+	struct sde_hw_reg_dma_ops *ops = sde_reg_dma_get_ops();
+
+	if (ops && ops->last_command)
+		ops->last_command(ctx, DMA_CTL_QUEUE0);
+
 	SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
index 37b74df..e88f40f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
@@ -40,6 +40,11 @@
 #define PP_DCE_DATA_IN_SWAP             0x0ac
 #define PP_DCE_DATA_OUT_SWAP            0x0c8
 
+#define DITHER_DEPTH_MAP_INDEX 9
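+/* map a component bit depth (bpc) to the 2-bit depth code used by the hw */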
+static u32 dither_depth_map[DITHER_DEPTH_MAP_INDEX] = {
+	0, 0, 0, 0, 0, 1, 2, 3, 3
+};
+
 static struct sde_pingpong_cfg *_pingpong_offset(enum sde_pingpong pp,
 		struct sde_mdss_cfg *m,
 		void __iomem *addr,
@@ -167,6 +172,57 @@
 	return 0;
 }
 
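+/* program the v1 pingpong dither block from a drm_msm_dither payload */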
+static int sde_hw_pp_setup_dither_v1(struct sde_hw_pingpong *pp,
+					void *cfg, size_t len)
+{
+	struct sde_hw_blk_reg_map *c;
+	struct drm_msm_dither *dither = (struct drm_msm_dither *)cfg;
+	u32 base = 0, offset = 0, data = 0, i = 0;
+
+	if (!pp)
+		return -EINVAL;
+
+	c = &pp->hw;
+	base = pp->caps->sblk->dither.base;
+	if (!dither) {
+		/* dither property disable case */
+		SDE_REG_WRITE(c, base, 0);
+		return 0;
+	}
+
+	if (len != sizeof(struct drm_msm_dither)) {
+		DRM_ERROR("input len %zu, expected len %zu\n", len,
+			sizeof(struct drm_msm_dither));
+		return -EINVAL;
+	}
+
+	if (dither->c0_bitdepth >= DITHER_DEPTH_MAP_INDEX ||
+		dither->c1_bitdepth >= DITHER_DEPTH_MAP_INDEX ||
+		dither->c2_bitdepth >= DITHER_DEPTH_MAP_INDEX ||
+		dither->c3_bitdepth >= DITHER_DEPTH_MAP_INDEX)
+		return -EINVAL;
+
+	offset += 4;
+	data = dither_depth_map[dither->c0_bitdepth] & REG_MASK(2);
+	data |= (dither_depth_map[dither->c1_bitdepth] & REG_MASK(2)) << 2;
+	data |= (dither_depth_map[dither->c2_bitdepth] & REG_MASK(2)) << 4;
+	data |= (dither_depth_map[dither->c3_bitdepth] & REG_MASK(2)) << 6;
+	data |= (dither->temporal_en) ? (1 << 8) : 0;
+	SDE_REG_WRITE(c, base + offset, data);
+
+	for (i = 0; i < DITHER_MATRIX_SZ - 3; i += 4) {
+		offset += 4;
+		data = (dither->matrix[i] & REG_MASK(4)) |
+			((dither->matrix[i + 1] & REG_MASK(4)) << 4) |
+			((dither->matrix[i + 2] & REG_MASK(4)) << 8) |
+			((dither->matrix[i + 3] & REG_MASK(4)) << 12);
+		SDE_REG_WRITE(c, base + offset, data);
+	}
+	SDE_REG_WRITE(c, base, 1);
+
+	return 0;
+}
+
 static int sde_hw_pp_enable_te(struct sde_hw_pingpong *pp, bool enable)
 {
 	struct sde_hw_blk_reg_map *c = &pp->hw;
@@ -218,8 +274,10 @@
 }
 
 static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops,
-		unsigned long cap)
+	const struct sde_pingpong_cfg *hw_cap)
 {
+	u32 version = 0;
+
 	ops->setup_tearcheck = sde_hw_pp_setup_te_config;
 	ops->enable_tearcheck = sde_hw_pp_enable_te;
 	ops->connect_external_te = sde_hw_pp_connect_external_te;
@@ -230,6 +288,16 @@
 	ops->disable_dsc = sde_hw_pp_dsc_disable;
 	ops->get_autorefresh = sde_hw_pp_get_autorefresh_config;
 	ops->poll_timeout_wr_ptr = sde_hw_pp_poll_timeout_wr_ptr;
+
+	version = SDE_COLOR_PROCESS_MAJOR(hw_cap->sblk->dither.version);
+	switch (version) {
+	case 1:
+		ops->setup_dither = sde_hw_pp_setup_dither_v1;
+		break;
+	default:
+		ops->setup_dither = NULL;
+		break;
+	}
 };
 
 static struct sde_hw_blk_ops sde_hw_ops = {
@@ -257,7 +325,7 @@
 
 	c->idx = idx;
 	c->caps = cfg;
-	_setup_pingpong_ops(&c->ops, c->caps->features);
+	_setup_pingpong_ops(&c->ops, c->caps);
 
 	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_PINGPONG, idx, &sde_hw_ops);
 	if (rc) {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
index 6dbf4aa..f0a2054 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
@@ -17,6 +17,7 @@
 #include "sde_hw_mdss.h"
 #include "sde_hw_util.h"
 #include "sde_hw_blk.h"
+#include <uapi/drm/msm_drm_pp.h>
 
 struct sde_hw_pingpong;
 
@@ -62,6 +63,7 @@
  *  @setup_dsc : program DSC block with encoding details
  *  @enable_dsc : enables DSC encoder
  *  @disable_dsc : disables DSC encoder
+ *  @setup_dither : function to program the dither hw block
  */
 struct sde_hw_pingpong_ops {
 	/**
@@ -123,6 +125,11 @@
 	 * Disables DSC encoder
 	 */
 	void (*disable_dsc)(struct sde_hw_pingpong *pp);
+
+	/**
+	 * Program the dither hw block
+	 */
+	int (*setup_dither)(struct sde_hw_pingpong *pp, void *cfg, size_t len);
 };
 
 struct sde_hw_pingpong {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
index 0a5346e..4140a12 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
@@ -13,6 +13,7 @@
 #include "sde_hw_ctl.h"
 #include "sde_hw_reg_dma_v1.h"
 #include "msm_drv.h"
+#include "msm_mmu.h"
 
 #define GUARD_BYTES (BIT(8) - 1)
 #define ALIGNED_OFFSET (U32_MAX & ~(GUARD_BYTES))
@@ -49,6 +50,7 @@
 			(cfg)->dma_buf->index)
 
 #define REG_DMA_DECODE_SEL 0x180AC060
+#define REG_DMA_LAST_CMD 0x180AC004
 #define SINGLE_REG_WRITE_OPCODE (BIT(28))
 #define REL_ADDR_OPCODE (BIT(27))
 #define HW_INDEX_REG_WRITE_OPCODE (BIT(28) | BIT(29))
@@ -58,6 +60,7 @@
 #define WRAP_MIN_SIZE 2
 #define WRAP_MAX_SIZE (BIT(4) - 1)
 #define MAX_DWORDS_SZ (BIT(14) - 1)
+#define REG_DMA_HEADERS_BUFFER_SZ (sizeof(u32) * 128)
 
 typedef int (*reg_dma_internal_ops) (struct sde_reg_dma_setup_ops_cfg *cfg);
 
@@ -93,17 +96,20 @@
 static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
+static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
+static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf);
 static int check_support_v1(enum sde_reg_dma_features feature,
 		enum sde_reg_dma_blk blk, bool *is_supported);
 static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg);
 static int reset_v1(struct sde_hw_ctl *ctl);
+static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q);
 static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size);
 static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *lut_buf);
 
@@ -123,6 +129,8 @@
 	[REG_BLK_WRITE_MULTIPLE] = validate_write_multi_lut_reg,
 };
 
+static struct sde_reg_dma_buffer *last_cmd_buf;
+
 static void get_decode_sel(unsigned long blk, u32 *decode_sel)
 {
 	int i = 0;
@@ -475,6 +483,11 @@
 		return -EINVAL;
 
 	reg_dma = cfg;
+	if (!last_cmd_buf) {
+		last_cmd_buf = alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
+		if (IS_ERR_OR_NULL(last_cmd_buf))
+			return -EINVAL;
+	}
 	reg_dma->ops.check_support = check_support_v1;
 	reg_dma->ops.setup_payload = setup_payload_v1;
 	reg_dma->ops.kick_off = kick_off_v1;
@@ -482,6 +495,7 @@
 	reg_dma->ops.alloc_reg_dma_buf = alloc_reg_dma_buf_v1;
 	reg_dma->ops.dealloc_reg_dma = dealloc_reg_dma_v1;
 	reg_dma->ops.reset_reg_dma_buf = reset_reg_dma_buffer_v1;
+	reg_dma->ops.last_command = last_cmd_v1;
 
 	reg_dma_ctl_queue_off[CTL_0] = REG_DMA_CTL0_QUEUE_0_CMD0_OFF;
 	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
@@ -571,6 +585,7 @@
 	struct sde_reg_dma_buffer *dma_buf = NULL;
 	u32 iova_aligned, offset;
 	u32 rsize = size + GUARD_BYTES;
+	struct msm_gem_address_space *aspace = NULL;
 	int rc = 0;
 
 	if (!size || SIZE_DWORD(size) > MAX_DWORDS_SZ) {
@@ -591,7 +606,15 @@
 		goto fail;
 	}
 
-	rc = msm_gem_get_iova(dma_buf->buf, 0, &dma_buf->iova);
+	aspace = msm_gem_smmu_address_space_get(reg_dma->drm_dev,
+			MSM_SMMU_DOMAIN_UNSECURE);
+	if (!aspace) {
+		DRM_ERROR("failed to get aspace\n");
+		rc = -EINVAL;
+		goto free_gem;
+	}
+
+	rc = msm_gem_get_iova(dma_buf->buf, aspace, &dma_buf->iova);
 	if (rc) {
 		DRM_ERROR("failed to get the iova rc %d\n", rc);
 		goto free_gem;
@@ -614,7 +637,7 @@
 	return dma_buf;
 
 put_iova:
-	msm_gem_put_iova(dma_buf->buf, 0);
+	msm_gem_put_iova(dma_buf->buf, aspace);
 free_gem:
 	msm_gem_free_object(dma_buf->buf);
 fail:
@@ -650,3 +673,76 @@
 	lut_buf->next_op_allowed = DECODE_SEL_OP;
 	return 0;
 }
+
+static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
+{
+	u32 remain_len, write_len;
+
+	remain_len = BUFFER_SPACE_LEFT(cfg);
+	write_len = sizeof(u32);
+	if (remain_len < write_len) {
+		DRM_ERROR("buffer is full sz %d needs %d bytes\n",
+				remain_len, write_len);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
+{
+	u32 *loc = NULL;
+
+	loc =  (u32 *)((u8 *)cfg->dma_buf->vaddr +
+			cfg->dma_buf->index);
+	loc[0] = REG_DMA_LAST_CMD;
+	loc[1] = BIT(0);
+	cfg->dma_buf->index = sizeof(u32) * 2;
+	cfg->dma_buf->ops_completed = REG_WRITE_OP | DECODE_SEL_OP;
+	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
+
+	return 0;
+}
+
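+/* queue a LAST_CMD opcode via the dedicated buffer and kick it off immediately */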
+static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q)
+{
+	struct sde_reg_dma_setup_ops_cfg cfg;
+	struct sde_reg_dma_kickoff_cfg kick_off;
+
+	if (!last_cmd_buf || !ctl || q >= DMA_CTL_QUEUE_MAX) {
+		DRM_ERROR("invalid param buf %pK ctl %pK q %d\n", last_cmd_buf,
+				ctl, q);
+		return -EINVAL;
+	}
+
+	cfg.dma_buf = last_cmd_buf;
+	reset_reg_dma_buffer_v1(last_cmd_buf);
+	if (validate_last_cmd(&cfg)) {
+		DRM_ERROR("validate buf failed\n");
+		return -EINVAL;
+	}
+
+	if (write_last_cmd(&cfg)) {
+		DRM_ERROR("write buf failed\n");
+		return -EINVAL;
+	}
+
+	kick_off.ctl = ctl;
+	kick_off.queue_select = q;
+	kick_off.trigger_mode = WRITE_IMMEDIATE;
+	kick_off.last_command = 1;
+	kick_off.op = REG_DMA_WRITE;
+	kick_off.dma_buf = last_cmd_buf;
+	if (kick_off_v1(&kick_off)) {
+		DRM_ERROR("kick off last cmd failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void deinit_v1(void)
+{
+	if (last_cmd_buf)
+		dealloc_reg_dma_v1(last_cmd_buf);
+	last_cmd_buf = NULL;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.h b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.h
index 8e37d38..4f9ab4e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.h
@@ -20,4 +20,8 @@
  */
 int init_v1(struct sde_hw_reg_dma *reg_dma);
 
+/**
+ * deinit_v1() - free up any resources allocated during the v1 reg dma init
+ */
+void deinit_v1(void);
 #endif /* _SDE_HW_REG_DMA_V1_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
index 0dcbb7e..285ef11 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
@@ -408,7 +408,6 @@
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GAMUT][ctx->idx],
 			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	kick_off.last_command = hw_cfg->last_feature;
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -505,7 +504,6 @@
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GAMUT][ctx->idx],
 			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	kick_off.last_command = hw_cfg->last_feature;
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -598,7 +596,6 @@
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GC][ctx->idx],
 			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	kick_off.last_command = hw_cfg->last_feature;
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc) {
 		DRM_ERROR("failed to kick off ret %d\n", rc);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.h b/drivers/gpu/drm/msm/sde/sde_hw_util.h
index 8f469b2..aa3d5b9 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_util.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_util.h
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include "sde_hw_mdss.h"
 
+#define REG_MASK(n)                     ((BIT(n)) - 1)
 struct sde_format_extended;
 
 /*
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index dd8b31e..7b620bf 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -644,7 +644,8 @@
 		.soft_reset   = dsi_display_soft_reset,
 		.pre_kickoff  = dsi_conn_pre_kickoff,
 		.clk_ctrl = dsi_display_clk_ctrl,
-		.get_topology = dsi_conn_get_topology
+		.get_topology = dsi_conn_get_topology,
+		.get_dst_format = dsi_display_get_dst_format
 	};
 	static const struct sde_connector_ops wb_ops = {
 		.post_init =    sde_wb_connector_post_init,
@@ -653,7 +654,8 @@
 		.set_property = sde_wb_connector_set_property,
 		.get_info =     sde_wb_get_info,
 		.soft_reset =   NULL,
-		.get_topology = sde_wb_get_topology
+		.get_topology = sde_wb_get_topology,
+		.get_dst_format = NULL
 	};
 	static const struct sde_connector_ops dp_ops = {
 		.post_init  = dp_connector_post_init,
@@ -1338,6 +1340,8 @@
 	if (sde_kms->mmio)
 		msm_iounmap(pdev, sde_kms->mmio);
 	sde_kms->mmio = NULL;
+
+	sde_reg_dma_deinit();
 }
 
 static void sde_kms_destroy(struct msm_kms *kms)
@@ -1372,6 +1376,71 @@
 		sde_crtc_cancel_pending_flip(priv->crtcs[i], file);
 }
 
+static int sde_kms_atomic_check(struct msm_kms *kms,
+		struct drm_atomic_state *state)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev = sde_kms->dev;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int rc, i;
+
+	if (!kms || !state)
+		return -EINVAL;
+
+	/*
+	 * Add planes (and other affected DRM objects, if any) to the new
+	 * state if idle power collapse occurred since the previous commit.
+	 * Since the atomic state is a delta from the last committed state,
+	 * if userspace did not request any changes on a plane/connector,
+	 * that object
+	 * will not be included in the new atomic state. Idle power collapse
+	 * is driver-autonomous, so the driver needs to ensure that all
+	 * hardware is reprogrammed as the power comes back on by forcing
+	 * the drm objects attached to the CRTC into the new atomic state.
+	 */
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct sde_crtc_state *cstate = to_sde_crtc_state(crtc_state);
+		struct sde_crtc_state *old_cstate =
+				to_sde_crtc_state(crtc->state);
+
+		if (cstate->idle_pc != old_cstate->idle_pc) {
+			SDE_DEBUG("crtc%d idle_pc:%d/%d\n",
+					crtc->base.id, cstate->idle_pc,
+					old_cstate->idle_pc);
+			SDE_EVT32(DRMID(crtc), cstate->idle_pc,
+					old_cstate->idle_pc);
+			rc = drm_atomic_add_affected_planes(state, crtc);
+			if (rc)
+				return rc;
+		}
+	}
+
+	return drm_atomic_helper_check(dev, state);
+}
+
+static struct msm_gem_address_space*
+_sde_kms_get_address_space(struct msm_kms *kms,
+		unsigned int domain)
+{
+	struct sde_kms *sde_kms;
+
+	if (!kms) {
+		SDE_ERROR("invalid kms\n");
+		return  NULL;
+	}
+
+	sde_kms = to_sde_kms(kms);
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde_kms\n");
+		return NULL;
+	}
+
+	if (domain >= MSM_SMMU_DOMAIN_MAX)
+		return NULL;
+
+	return sde_kms->aspace[domain];
+}
+
 static const struct msm_kms_funcs kms_funcs = {
 	.hw_init         = sde_kms_hw_init,
 	.postinit        = sde_kms_postinit,
@@ -1389,10 +1458,12 @@
 	.enable_vblank   = sde_kms_enable_vblank,
 	.disable_vblank  = sde_kms_disable_vblank,
 	.check_modified_format = sde_format_check_modified_format,
+	.atomic_check    = sde_kms_atomic_check,
 	.get_format      = sde_get_msm_format,
 	.round_pixclk    = sde_kms_round_pixclk,
 	.destroy         = sde_kms_destroy,
 	.register_events = _sde_kms_register_events,
+	.get_address_space = _sde_kms_get_address_space,
 };
 
 /* the caller api needs to turn on clock before calling it */
@@ -1406,17 +1477,17 @@
 	struct msm_mmu *mmu;
 	int i;
 
-	for (i = ARRAY_SIZE(sde_kms->mmu_id) - 1; i >= 0; i--) {
-		if (!sde_kms->mmu[i])
+	for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
+		if (!sde_kms->aspace[i])
 			continue;
 
-		mmu = sde_kms->mmu[i];
-		msm_unregister_mmu(sde_kms->dev, mmu);
+		mmu = sde_kms->aspace[i]->mmu;
+
 		mmu->funcs->detach(mmu, (const char **)iommu_ports,
 				ARRAY_SIZE(iommu_ports));
-		mmu->funcs->destroy(mmu);
-		sde_kms->mmu[i] = 0;
-		sde_kms->mmu_id[i] = 0;
+		msm_gem_address_space_destroy(sde_kms->aspace[i]);
+
+		sde_kms->aspace[i] = NULL;
 	}
 
 	return 0;
@@ -1428,6 +1499,8 @@
 	int i, ret;
 
 	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
+		struct msm_gem_address_space *aspace;
+
 		mmu = msm_smmu_new(sde_kms->dev->dev, i);
 		if (IS_ERR(mmu)) {
 			ret = PTR_ERR(mmu);
@@ -1436,25 +1509,24 @@
 			continue;
 		}
 
+		aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev,
+			mmu, "sde");
+		if (IS_ERR(aspace)) {
+			ret = PTR_ERR(aspace);
+			mmu->funcs->destroy(mmu);
+			goto fail;
+		}
+
+		sde_kms->aspace[i] = aspace;
+
 		ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
 				ARRAY_SIZE(iommu_ports));
 		if (ret) {
 			SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
-			mmu->funcs->destroy(mmu);
-			continue;
-		}
-
-		sde_kms->mmu_id[i] = msm_register_mmu(sde_kms->dev, mmu);
-		if (sde_kms->mmu_id[i] < 0) {
-			ret = sde_kms->mmu_id[i];
-			SDE_ERROR("failed to register sde iommu %d: %d\n",
-					i, ret);
-			mmu->funcs->detach(mmu, (const char **)iommu_ports,
-					ARRAY_SIZE(iommu_ports));
+			msm_gem_address_space_destroy(aspace);
 			goto fail;
 		}
 
-		sde_kms->mmu[i] = mmu;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 5894fe2..d818fdf 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -24,6 +24,7 @@
 #include "msm_drv.h"
 #include "msm_kms.h"
 #include "msm_mmu.h"
+#include "msm_gem.h"
 #include "sde_dbg.h"
 #include "sde_hw_catalog.h"
 #include "sde_hw_ctl.h"
@@ -158,8 +159,7 @@
 	int core_rev;
 	struct sde_mdss_cfg *catalog;
 
-	struct msm_mmu *mmu[MSM_SMMU_DOMAIN_MAX];
-	int mmu_id[MSM_SMMU_DOMAIN_MAX];
+	struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
 	struct sde_power_client *core_client;
 
 	struct ion_client *iclient;
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 246e401..5c73707 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -115,6 +115,7 @@
 
 /*
  * struct sde_plane - local sde plane structure
+ * @aspace: address space pointer
  * @csc_cfg: Decoded user configuration for csc
  * @csc_usr_ptr: Points to csc_cfg if valid user config available
  * @csc_ptr: Points to sde_csc_cfg structure to use for current
@@ -129,8 +130,6 @@
 struct sde_plane {
 	struct drm_plane base;
 
-	int mmu_id;
-
 	struct mutex lock;
 
 	enum sde_sspp pipe;
@@ -867,12 +866,62 @@
 	return ret;
 }
 
+/**
+ * _sde_plane_get_aspace - gets the address space based on the
+ *		fb_translation mode property
+ * @psde: pointer to sde plane
+ * @pstate: pointer to sde plane state
+ * @aspace: output pointer for the selected address space
+ */
+static int _sde_plane_get_aspace(
+		struct sde_plane *psde,
+		struct sde_plane_state *pstate,
+		struct msm_gem_address_space **aspace)
+{
+	struct sde_kms *kms;
+	int mode;
+
+	if (!psde || !pstate || !aspace) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	kms = _sde_plane_get_kms(&psde->base);
+	if (!kms) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	mode = sde_plane_get_property(pstate,
+			PLANE_PROP_FB_TRANSLATION_MODE);
+
+	switch (mode) {
+	case SDE_DRM_FB_NON_SEC:
+		*aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+		if (!*aspace)
+			return -EINVAL;
+		break;
+	case SDE_DRM_FB_SEC:
+		*aspace = kms->aspace[MSM_SMMU_DOMAIN_SECURE];
+		if (!*aspace)
+			return -EINVAL;
+		break;
+	case SDE_DRM_FB_SEC_DIR_TRANS:
+	case SDE_DRM_FB_NON_SEC_DIR_TRANS:
+		*aspace = NULL;
+		break;
+	default:
+		SDE_ERROR("invalid fb_translation mode:%d\n", mode);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
 static inline void _sde_plane_set_scanout(struct drm_plane *plane,
 		struct sde_plane_state *pstate,
 		struct sde_hw_pipe_cfg *pipe_cfg,
 		struct drm_framebuffer *fb)
 {
 	struct sde_plane *psde;
+	struct msm_gem_address_space *aspace = NULL;
 	int ret;
 
 	if (!plane || !pstate || !pipe_cfg || !fb) {
@@ -888,7 +937,13 @@
 		return;
 	}
 
-	ret = sde_format_populate_layout(psde->mmu_id, fb, &pipe_cfg->layout);
+	ret = _sde_plane_get_aspace(psde, pstate, &aspace);
+	if (ret) {
+		SDE_ERROR_PLANE(psde, "Failed to get aspace %d\n", ret);
+		return;
+	}
+
+	ret = sde_format_populate_layout(aspace, fb, &pipe_cfg->layout);
 	if (ret == -EAGAIN)
 		SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
 	else if (ret)
@@ -1741,6 +1796,7 @@
 	struct drm_crtc_state *cstate;
 	struct sde_crtc_state *sde_cstate;
 	int ret, i;
+	int fb_mode;
 
 	if (!plane || !state || !state->fb || !rstate->rot_hw) {
 		SDE_ERROR("invalid parameters\n");
@@ -1764,7 +1820,14 @@
 	rot_cmd->rot90 = rstate->rot90;
 	rot_cmd->hflip = rstate->hflip;
 	rot_cmd->vflip = rstate->vflip;
-	rot_cmd->secure = state->fb->flags & DRM_MODE_FB_SECURE ? true : false;
+	fb_mode = sde_plane_get_property(pstate,
+			PLANE_PROP_FB_TRANSLATION_MODE);
+	if ((fb_mode == SDE_DRM_FB_SEC) ||
+			(fb_mode == SDE_DRM_FB_SEC_DIR_TRANS))
+		rot_cmd->secure = true;
+	else
+		rot_cmd->secure = false;
+
 	rot_cmd->prefill_bw = sde_crtc_get_property(sde_cstate,
 			CRTC_PROP_ROT_PREFILL_BW);
 	rot_cmd->clkrate = sde_crtc_get_property(sde_cstate,
@@ -1801,7 +1864,7 @@
 		struct sde_hw_fmt_layout layout;
 
 		memset(&layout, 0, sizeof(struct sde_hw_fmt_layout));
-		sde_format_populate_layout(rstate->mmu_id, state->fb,
+		sde_format_populate_layout(rstate->aspace, state->fb,
 				&layout);
 		for (i = 0; i < ARRAY_SIZE(rot_cmd->src_iova); i++) {
 			rot_cmd->src_iova[i] = layout.plane_addr[i];
@@ -1810,7 +1873,7 @@
 		rot_cmd->src_planes = layout.num_planes;
 
 		memset(&layout, 0, sizeof(struct sde_hw_fmt_layout));
-		sde_format_populate_layout(rstate->mmu_id, rstate->out_fb,
+		sde_format_populate_layout(rstate->aspace, rstate->out_fb,
 				&layout);
 		for (i = 0; i < ARRAY_SIZE(rot_cmd->dst_iova); i++) {
 			rot_cmd->dst_iova[i] = layout.plane_addr[i];
@@ -2003,11 +2066,7 @@
 
 		SDE_DEBUG("plane%d.%d allocate fb/fbo\n", plane->base.id,
 				new_rstate->sequence_id);
-
-		if (new_state->fb->flags & DRM_MODE_FB_SECURE)
-			new_rstate->mmu_id = MSM_SMMU_DOMAIN_SECURE;
-		else
-			new_rstate->mmu_id = MSM_SMMU_DOMAIN_UNSECURE;
+		new_rstate->aspace = new_pstate->aspace;
 
 		/* check if out_fb is already attached to rotator */
 		new_rstate->out_fbo = sde_kms_fbo_alloc(plane->dev, fb_w, fb_h,
@@ -2046,7 +2105,7 @@
 	}
 
 	/* prepare rotator input buffer */
-	ret = msm_framebuffer_prepare(new_state->fb, new_rstate->mmu_id);
+	ret = msm_framebuffer_prepare(new_state->fb, new_rstate->aspace);
 	if (ret) {
 		SDE_ERROR("failed to prepare input framebuffer\n");
 		goto error_prepare_input_buffer;
@@ -2058,7 +2117,7 @@
 				new_rstate->sequence_id);
 
 		ret = msm_framebuffer_prepare(new_rstate->out_fb,
-				new_rstate->mmu_id);
+				new_rstate->aspace);
 		if (ret) {
 			SDE_ERROR("failed to prepare inline framebuffer\n");
 			goto error_prepare_output_buffer;
@@ -2068,7 +2127,7 @@
 	return 0;
 
 error_prepare_output_buffer:
-	msm_framebuffer_cleanup(new_state->fb, new_rstate->mmu_id);
+	msm_framebuffer_cleanup(new_state->fb, new_rstate->aspace);
 error_prepare_input_buffer:
 	sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
 			(u64) &new_rstate->rot_hw->base);
@@ -2124,7 +2183,7 @@
 	if (sde_plane_enabled(old_state)) {
 		if (old_rstate->out_fb) {
 			msm_framebuffer_cleanup(old_rstate->out_fb,
-					old_rstate->mmu_id);
+					old_rstate->aspace);
 			sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
 					(u64) &old_rstate->rot_hw->base);
 			old_rstate->out_fb = NULL;
@@ -2133,7 +2192,7 @@
 			old_rstate->out_fbo = NULL;
 		}
 
-		msm_framebuffer_cleanup(old_state->fb, old_rstate->mmu_id);
+		msm_framebuffer_cleanup(old_state->fb, old_rstate->aspace);
 	}
 }
 
@@ -2640,8 +2699,10 @@
 {
 	struct drm_framebuffer *fb = new_state->fb;
 	struct sde_plane *psde = to_sde_plane(plane);
+	struct sde_plane_state *pstate = to_sde_plane_state(new_state);
 	struct sde_plane_rot_state *new_rstate;
 	struct sde_hw_fmt_layout layout;
+	struct msm_gem_address_space *aspace;
 	int ret;
 
 	if (!new_state->fb)
@@ -2649,6 +2710,14 @@
 
 	SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
 
+	ret = _sde_plane_get_aspace(psde, pstate, &aspace);
+	if (ret) {
+		SDE_ERROR_PLANE(psde, "Failed to get aspace\n");
+		return ret;
+	}
+
+	/* cache the address space for later framebuffer operations */
+	pstate->aspace = aspace;
 	ret = sde_plane_rot_prepare_fb(plane, new_state);
 	if (ret) {
 		SDE_ERROR("failed to prepare rot framebuffer\n");
@@ -2657,14 +2726,14 @@
 
 	new_rstate = &to_sde_plane_state(new_state)->rot;
 
-	ret = msm_framebuffer_prepare(new_rstate->out_fb, new_rstate->mmu_id);
+	ret = msm_framebuffer_prepare(new_rstate->out_fb, pstate->aspace);
 	if (ret) {
 		SDE_ERROR("failed to prepare framebuffer\n");
 		return ret;
 	}
 
 	/* validate framebuffer layout before commit */
-	ret = sde_format_populate_layout(new_rstate->mmu_id,
+	ret = sde_format_populate_layout(pstate->aspace,
 			new_rstate->out_fb, &layout);
 	if (ret) {
 		SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret);
@@ -2687,7 +2756,7 @@
 
 	old_rstate = &to_sde_plane_state(old_state)->rot;
 
-	msm_framebuffer_cleanup(old_rstate->out_fb, old_rstate->mmu_id);
+	msm_framebuffer_cleanup(old_rstate->out_fb, old_rstate->aspace);
 
 	sde_plane_rot_cleanup_fb(plane, old_state);
 }
@@ -3134,7 +3203,7 @@
 static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
 				struct drm_plane_state *old_state)
 {
-	uint32_t nplanes, src_flags;
+	uint32_t nplanes, src_flags = 0x0;
 	struct sde_plane *psde;
 	struct drm_plane_state *state;
 	struct sde_plane_state *pstate;
@@ -3147,6 +3216,7 @@
 	const struct sde_rect *crtc_roi;
 	bool q16_data = true;
 	int idx;
+	int mode;
 
 	if (!plane) {
 		SDE_ERROR("invalid plane\n");
@@ -3228,6 +3298,9 @@
 		case PLANE_PROP_BLEND_OP:
 			/* no special action required */
 			break;
+		case PLANE_PROP_FB_TRANSLATION_MODE:
+			pstate->dirty |= SDE_PLANE_DIRTY_FB_TRANSLATION_MODE;
+			break;
 		case PLANE_PROP_PREFILL_SIZE:
 		case PLANE_PROP_PREFILL_TIME:
 			pstate->dirty |= SDE_PLANE_DIRTY_PERF;
@@ -3275,6 +3348,12 @@
 	psde->is_rt_pipe = (sde_crtc_get_client_type(crtc) != NRT_CLIENT);
 	_sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_PANIC_CTRL);
 
+	/* update secure session flag */
+	mode = sde_plane_get_property(pstate, PLANE_PROP_FB_TRANSLATION_MODE);
+	if ((mode == SDE_DRM_FB_SEC) ||
+			(mode == SDE_DRM_FB_SEC_DIR_TRANS))
+		src_flags |= SDE_SSPP_SECURE_OVERLAY_SESSION;
+
 	/* update roi config */
 	if (pstate->dirty & SDE_PLANE_DIRTY_RECTS) {
 		POPULATE_RECT(&src, rstate->out_src_x, rstate->out_src_y,
@@ -3352,9 +3431,9 @@
 					pstate->multirect_mode);
 	}
 
-	if ((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) &&
+	if (((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) ||
+			(src_flags & SDE_SSPP_SECURE_OVERLAY_SESSION)) &&
 			psde->pipe_hw->ops.setup_format) {
-		src_flags = 0x0;
 		SDE_DEBUG_PLANE(psde, "rotation 0x%X\n", rstate->out_rotation);
 		if (rstate->out_rotation & DRM_REFLECT_X)
 			src_flags |= SDE_SSPP_FLIP_LR;
@@ -3504,6 +3583,12 @@
 	static const struct drm_prop_enum_list e_src_config[] = {
 		{SDE_DRM_DEINTERLACE, "deinterlace"}
 	};
+	static const struct drm_prop_enum_list e_fb_translation_mode[] = {
+		{SDE_DRM_FB_NON_SEC, "non_sec"},
+		{SDE_DRM_FB_SEC, "sec"},
+		{SDE_DRM_FB_NON_SEC_DIR_TRANS, "non_sec_direct_translation"},
+		{SDE_DRM_FB_SEC_DIR_TRANS, "sec_direct_translation"},
+	};
 	const struct sde_format_extended *format_list;
 	struct sde_format_extended *virt_format_list = NULL;
 	struct sde_kms_info *info;
@@ -3722,6 +3807,12 @@
 		msm_property_install_blob(&psde->property_info, feature_name, 0,
 			PLANE_PROP_FOLIAGE_COLOR);
 	}
+
+	msm_property_install_enum(&psde->property_info, "fb_translation_mode",
+			0x0,
+			0, e_fb_translation_mode,
+			ARRAY_SIZE(e_fb_translation_mode),
+			PLANE_PROP_FB_TRANSLATION_MODE);
 }
 
 static inline void _sde_plane_set_csc_v1(struct sde_plane *psde, void *usr_ptr)
@@ -4481,7 +4572,6 @@
 	/* cache local stuff for later */
 	plane = &psde->base;
 	psde->pipe = pipe;
-	psde->mmu_id = kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
 	psde->is_virtual = (master_plane_id != 0);
 	psde->scaler_check_state = SDE_PLANE_SCLCHECK_NONE;
 	INIT_LIST_HEAD(&psde->mplane_list);
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index f83a891..ccbf005 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -34,7 +34,7 @@
  * @rot90: true if rotation of 90 degree is required
  * @hflip: true if horizontal flip is required
  * @vflip: true if vertical flip is required
- * @mmu_id: iommu identifier for input/output buffers
+ * @aspace:  pointer to the address space for input/output buffers
  * @rot_cmd: rotator configuration command
  * @nplane: total number of drm plane attached to rotator
  * @in_fb: input fb attached to rotator
@@ -64,7 +64,7 @@
 	bool rot90;
 	bool hflip;
 	bool vflip;
-	u32 mmu_id;
+	struct msm_gem_address_space *aspace;
 	struct sde_hw_rot_cmd rot_cmd;
 	int nplane;
 	/* input */
@@ -96,6 +96,7 @@
 #define SDE_PLANE_DIRTY_FORMAT	0x2
 #define SDE_PLANE_DIRTY_SHARPEN	0x4
 #define SDE_PLANE_DIRTY_PERF	0x8
+#define SDE_PLANE_DIRTY_FB_TRANSLATION_MODE	0x10
 #define SDE_PLANE_DIRTY_ALL	0xFFFFFFFF
 
 /**
@@ -103,6 +104,7 @@
  * @base:	base drm plane state object
  * @property_values:	cached plane property values
  * @property_blobs:	blob properties
+ * @aspace:	pointer to address space for input/output buffers
  * @input_fence:	dereferenced input fence pointer
  * @stage:	assigned by crtc blender
  * @excl_rect:	exclusion rect values
@@ -116,6 +118,7 @@
 	struct drm_plane_state base;
 	uint64_t property_values[PLANE_PROP_COUNT];
 	struct drm_property_blob *property_blobs[PLANE_PROP_BLOBCOUNT];
+	struct msm_gem_address_space *aspace;
 	void *input_fence;
 	enum sde_stage stage;
 	struct sde_rect excl_rect;
diff --git a/drivers/gpu/drm/msm/sde/sde_reg_dma.c b/drivers/gpu/drm/msm/sde/sde_reg_dma.c
index cc115c5..cc87aeb 100644
--- a/drivers/gpu/drm/msm/sde/sde_reg_dma.c
+++ b/drivers/gpu/drm/msm/sde/sde_reg_dma.c
@@ -62,10 +62,17 @@
 	return -EINVAL;
 }
 
+static int default_last_command(struct sde_hw_ctl *ctl,
+		enum sde_reg_dma_queue q)
+{
+	return 0;
+}
+
 static struct sde_hw_reg_dma reg_dma = {
 	.ops = {default_check_support, default_setup_payload,
 		default_kick_off, default_reset, default_alloc_reg_dma_buf,
-		default_dealloc_reg_dma, default_buf_reset_reg_dma},
+		default_dealloc_reg_dma, default_buf_reset_reg_dma,
+		default_last_command},
 };
 
 int sde_reg_dma_init(void __iomem *addr, struct sde_mdss_cfg *m,
@@ -103,3 +110,26 @@
 {
 	return &reg_dma.ops;
 }
+
+void sde_reg_dma_deinit(void)
+{
+	struct sde_hw_reg_dma op = {
+	.ops = {default_check_support, default_setup_payload,
+		default_kick_off, default_reset, default_alloc_reg_dma_buf,
+		default_dealloc_reg_dma, default_buf_reset_reg_dma,
+		default_last_command},
+	};
+
+	if (!reg_dma.drm_dev || !reg_dma.caps)
+		return;
+
+	switch (reg_dma.caps->version) {
+	case 1:
+		deinit_v1();
+		break;
+	default:
+		break;
+	}
+	memset(&reg_dma, 0, sizeof(reg_dma));
+	memcpy(&reg_dma.ops, &op.ops, sizeof(op.ops));
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_reg_dma.h b/drivers/gpu/drm/msm/sde/sde_reg_dma.h
index c8e464d..70d995a 100644
--- a/drivers/gpu/drm/msm/sde/sde_reg_dma.h
+++ b/drivers/gpu/drm/msm/sde/sde_reg_dma.h
@@ -251,6 +251,7 @@
  * @alloc_reg_dma_buf: allocate reg dma buffer
  * @dealloc_reg_dma: de-allocate reg dma buffer
  * @reset_reg_dma_buf: reset the buffer to init state
+ * @last_command: notify control that last command is queued
  */
 struct sde_hw_reg_dma_ops {
 	int (*check_support)(enum sde_reg_dma_features feature,
@@ -262,6 +263,7 @@
 	struct sde_reg_dma_buffer* (*alloc_reg_dma_buf)(u32 size);
 	int (*dealloc_reg_dma)(struct sde_reg_dma_buffer *lut_buf);
 	int (*reset_reg_dma_buf)(struct sde_reg_dma_buffer *buf);
+	int (*last_command)(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q);
 };
 
 /**
@@ -298,4 +300,9 @@
  *                            who call this api.
  */
 struct sde_hw_reg_dma_ops *sde_reg_dma_get_ops(void);
+
+/**
+ * sde_reg_dma_deinit() - de-initialize the reg dma
+ */
+void sde_reg_dma_deinit(void);
 #endif /* _SDE_REG_DMA_H */
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
index 12165e8..db36069 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.c
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -484,6 +484,40 @@
 	return rc;
 }
 
+u32 sde_get_sink_bpc(void *input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+	struct edid *edid = edid_ctrl->edid;
+
+	if ((edid->revision < 3) || !(edid->input & DRM_EDID_INPUT_DIGITAL))
+		return 0;
+
+	if (edid->revision < 4) {
+		if (edid->input & DRM_EDID_DIGITAL_TYPE_DVI)
+			return 8;
+		else
+			return 0;
+	}
+
+	switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
+	case DRM_EDID_DIGITAL_DEPTH_6:
+		return 6;
+	case DRM_EDID_DIGITAL_DEPTH_8:
+		return 8;
+	case DRM_EDID_DIGITAL_DEPTH_10:
+		return 10;
+	case DRM_EDID_DIGITAL_DEPTH_12:
+		return 12;
+	case DRM_EDID_DIGITAL_DEPTH_14:
+		return 14;
+	case DRM_EDID_DIGITAL_DEPTH_16:
+		return 16;
+	case DRM_EDID_DIGITAL_DEPTH_UNDEF:
+	default:
+		return 0;
+	}
+}
+
 bool sde_detect_hdmi_monitor(void *input)
 {
 	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h
index 1143dc2..eb68439 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.h
+++ b/drivers/gpu/drm/msm/sde_edid_parser.h
@@ -136,6 +136,14 @@
 bool sde_detect_hdmi_monitor(void *edid_ctrl);
 
 /**
+ * sde_get_sink_bpc() - return the bpc of the sink device.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ *
+ * Return: bpc supported by the sink.
+ */
+u32 sde_get_sink_bpc(void *edid_ctrl);
+
+/**
  * _sde_edid_update_modes() - populate EDID modes.
  * @edid_ctrl:     Handle to the edid_ctrl structure.
  *
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index afbf557..2c2b86d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -24,6 +24,7 @@
  *
  */
 
+#include <acpi/video.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
@@ -358,6 +359,57 @@
 	}                                                                      \
 } while(0)
 
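+/* hpd worker: wake the gpu, rescan connectors and re-enable output polling */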
+static void
+nouveau_display_hpd_work(struct work_struct *work)
+{
+	struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work);
+
+	pm_runtime_get_sync(drm->dev->dev);
+
+	drm_helper_hpd_irq_event(drm->dev);
+	/* enable polling for external displays */
+	drm_kms_helper_poll_enable(drm->dev);
+
+	pm_runtime_mark_last_busy(drm->dev->dev);
+	pm_runtime_put_sync(drm->dev->dev);
+}
+
+#ifdef CONFIG_ACPI
+
+/*
+ * Hans de Goede: This define belongs in acpi/video.h, I've submitted a patch
+ * to the acpi subsys to move it there from drivers/acpi/acpi_video.c .
+ * This should be dropped once that is merged.
+ */
+#ifndef ACPI_VIDEO_NOTIFY_PROBE
+#define ACPI_VIDEO_NOTIFY_PROBE			0x81
+#endif
+
+static int
+nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
+			  void *data)
+{
+	struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
+	struct acpi_bus_event *info = data;
+
+	if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
+		if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
+			/*
+			 * This may be the only indication we receive of a
+			 * connector hotplug on a runtime suspended GPU,
+			 * schedule hpd_work to check.
+			 */
+			schedule_work(&drm->hpd_work);
+
+			/* acpi-video should not generate keypresses for this */
+			return NOTIFY_BAD;
+		}
+	}
+
+	return NOTIFY_DONE;
+}
+#endif
+
 int
 nouveau_display_init(struct drm_device *dev)
 {
@@ -370,9 +422,6 @@
 	if (ret)
 		return ret;
 
-	/* enable polling for external displays */
-	drm_kms_helper_poll_enable(dev);
-
 	/* enable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
@@ -537,6 +586,12 @@
 	}
 
 	nouveau_backlight_init(dev);
+	INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work);
+#ifdef CONFIG_ACPI
+	drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy;
+	register_acpi_notifier(&drm->acpi_nb);
+#endif
+
 	return 0;
 
 vblank_err:
@@ -552,6 +607,9 @@
 {
 	struct nouveau_display *disp = nouveau_display(dev);
 
+#ifdef CONFIG_ACPI
+	unregister_acpi_notifier(&nouveau_drm(dev)->acpi_nb);
+#endif
 	nouveau_backlight_exit(dev);
 	nouveau_display_vblank_fini(dev);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 3100fd88..42829a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -483,6 +483,9 @@
 		pm_runtime_allow(dev->dev);
 		pm_runtime_mark_last_busy(dev->dev);
 		pm_runtime_put(dev->dev);
+	} else {
+		/* enable polling for external displays */
+		drm_kms_helper_poll_enable(dev);
 	}
 	return 0;
 
@@ -761,7 +764,7 @@
 	pci_set_master(pdev);
 
 	ret = nouveau_do_resume(drm_dev, true);
-	drm_kms_helper_poll_enable(drm_dev);
+
 	/* do magic */
 	nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 822a021..1e7f1e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -37,6 +37,8 @@
  *      - implemented limited ABI16/NVIF interop
  */
 
+#include <linux/notifier.h>
+
 #include <nvif/client.h>
 #include <nvif/device.h>
 #include <nvif/ioctl.h>
@@ -161,6 +163,12 @@
 	struct nvbios vbios;
 	struct nouveau_display *display;
 	struct backlight_device *backlight;
+	struct work_struct hpd_work;
+	struct work_struct fbcon_work;
+	int fbcon_new_state;
+#ifdef CONFIG_ACPI
+	struct notifier_block acpi_nb;
+#endif
 
 	/* power management */
 	struct nouveau_hwmon *hwmon;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 9f56927..2b79e27 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -491,19 +491,43 @@
 	.fb_probe = nouveau_fbcon_create,
 };
 
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+	struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
+	int state = READ_ONCE(drm->fbcon_new_state);
+
+	if (state == FBINFO_STATE_RUNNING)
+		pm_runtime_get_sync(drm->dev->dev);
+
+	console_lock();
+	if (state == FBINFO_STATE_RUNNING)
+		nouveau_fbcon_accel_restore(drm->dev);
+	drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
+	if (state != FBINFO_STATE_RUNNING)
+		nouveau_fbcon_accel_save_disable(drm->dev);
+	console_unlock();
+
+	if (state == FBINFO_STATE_RUNNING) {
+		pm_runtime_mark_last_busy(drm->dev->dev);
+		pm_runtime_put_sync(drm->dev->dev);
+	}
+}
+
 void
 nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	if (drm->fbcon) {
-		console_lock();
-		if (state == FBINFO_STATE_RUNNING)
-			nouveau_fbcon_accel_restore(dev);
-		drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
-		if (state != FBINFO_STATE_RUNNING)
-			nouveau_fbcon_accel_save_disable(dev);
-		console_unlock();
-	}
+
+	if (!drm->fbcon)
+		return;
+
+	drm->fbcon_new_state = state;
+	/* Since runtime resume can happen as a result of a sysfs operation,
+	 * it's possible we already have the console locked. So handle fbcon
+	 * init/deinit from a separate work thread
+	 */
+	schedule_work(&drm->fbcon_work);
 }
 
 int
@@ -524,6 +548,7 @@
 
 	fbcon->dev = dev;
 	drm->fbcon = fbcon;
+	INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
 
 	drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 64c4ce7..75e1f09 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -100,6 +100,7 @@
 	struct nouveau_bo *bo;
 	struct nouveau_bo *bo_gart;
 	u32 *suspend;
+	struct mutex mutex;
 };
 
 u64  nv84_fence_crtc(struct nouveau_channel *, int);
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 08f9c6f..1fba386 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -313,7 +313,8 @@
 	if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
 		/* block access to objects not created via this interface */
 		owner = argv->v0.owner;
-		if (argv->v0.object == 0ULL)
+		if (argv->v0.object == 0ULL &&
+		    argv->v0.type != NVIF_IOCTL_V0_DEL)
 			argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
 		else
 			argv->v0.owner = NVDRM_OBJECT_USIF;
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 18bde9d..90a5dd6 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -121,8 +121,10 @@
 	}
 
 	nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
+	mutex_lock(&priv->mutex);
 	nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
 	nouveau_bo_vma_del(priv->bo, &fctx->vma);
+	mutex_unlock(&priv->mutex);
 	nouveau_fence_context_del(&fctx->base);
 	chan->fence = NULL;
 	nouveau_fence_context_free(&fctx->base);
@@ -148,11 +150,13 @@
 	fctx->base.sync32 = nv84_fence_sync32;
 	fctx->base.sequence = nv84_fence_read(chan);
 
+	mutex_lock(&priv->mutex);
 	ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
 	if (ret == 0) {
 		ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
 					&fctx->vma_gart);
 	}
+	mutex_unlock(&priv->mutex);
 
 	/* map display semaphore buffers into channel's vm */
 	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
@@ -232,6 +236,8 @@
 	priv->base.context_base = fence_context_alloc(priv->base.contexts);
 	priv->base.uevent = true;
 
+	mutex_init(&priv->mutex);
+
 	/* Use VRAM if there is any ; otherwise fallback to system memory */
 	domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
 			 /*
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 432480f..3178ba0 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3393,6 +3393,13 @@
 	    rdev->pdev->subsystem_vendor == 0x103c &&
 	    rdev->pdev->subsystem_device == 0x280a)
 		return;
+	/* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS400 &&
+	    rdev->pdev->subsystem_vendor == 0x1179 &&
+	    rdev->pdev->subsystem_device == 0xff31)
+		return;
 
 	/* DYN CLK 1 */
 	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 621af06..3b21ca5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -136,6 +136,10 @@
 	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
 	 */
 	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
+	 */
+	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
 	/* macbook pro 8.2 */
 	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
 	{ 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 3f6704c..ec9023b 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -313,6 +313,14 @@
 		goto out;
 	}
 
+	/* If this object was partially constructed but CMA allocation
+	 * had failed, just free it.
+	 */
+	if (!bo->base.vaddr) {
+		vc4_bo_destroy(bo);
+		goto out;
+	}
+
 	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
 	if (!cache_list) {
 		vc4_bo_destroy(bo);
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index f4552b6..32ebe0c 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -875,6 +875,7 @@
 #define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS	0x23B0C
 #define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2	0x23B0D
 #define A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK	0x23B0E
+#define A6XX_GMU_AO_AHB_FENCE_CTRL		0x23B10
 #define A6XX_GMU_AHB_FENCE_STATUS		0x23B13
 #define A6XX_GMU_RBBM_INT_UNMASKED_STATUS	0x23B15
 #define A6XX_GMU_AO_SPARE_CNTL			0x23B16
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 86a1e88..f8bf780 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -327,8 +327,7 @@
 		.minor = 0,
 		.patchid = ANY_ID,
 		.features = ADRENO_64BIT | ADRENO_RPMH |
-			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION |
-			ADRENO_SPTP_PC,
+			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION,
 		.sqefw_name = "a630_sqe.fw",
 		.zap_name = "a630_zap",
 		.gpudev = &adreno_a6xx_gpudev,
@@ -341,4 +340,21 @@
 		.gpmu_tsens = 0x000C000D,
 		.max_power = 5448,
 	},
+	{
+		.gpurev = ADRENO_REV_A615,
+		.core = 6,
+		.major = 1,
+		.minor = 5,
+		.patchid = ANY_ID,
+		.features = ADRENO_64BIT | ADRENO_RPMH,
+		.sqefw_name = "a630_sqe.fw",
+		.zap_name = "a615_zap",
+		.gpudev = &adreno_a6xx_gpudev,
+		.gmem_size = SZ_512K,
+		.num_protected_regs = 0x20,
+		.busy_mask = 0xFFFFFFFE,
+		.gpmufw_name = "a630_gmu.bin",
+		.gpmu_major = 0x0,
+		.gpmu_minor = 0x005,
+	},
 };
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 3672273..6f465aa 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2645,10 +2645,9 @@
 	void __iomem *reg;
 	struct gmu_device *gmu = &device->gmu;
 
-	offsetwords -= gmu->gmu2gpu_offset;
-
 	trace_kgsl_regwrite(device, offsetwords, value);
 
+	offsetwords -= gmu->gmu2gpu_offset;
 	reg = gmu->reg_virt + (offsetwords << 2);
 
 	/*
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index c7e3ad7..4900b3a 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -195,6 +195,7 @@
 	ADRENO_REV_A512 = 512,
 	ADRENO_REV_A530 = 530,
 	ADRENO_REV_A540 = 540,
+	ADRENO_REV_A615 = 615,
 	ADRENO_REV_A630 = 630,
 };
 
@@ -875,6 +876,8 @@
 				unsigned int fsynr1);
 	int (*reset)(struct kgsl_device *, int fault);
 	int (*soft_reset)(struct adreno_device *);
+	bool (*gx_is_on)(struct adreno_device *);
+	bool (*sptprac_is_on)(struct adreno_device *);
 };
 
 /**
@@ -1144,6 +1147,7 @@
 			ADRENO_GPUREV(adreno_dev) < 700;
 }
 
+ADRENO_TARGET(a615, ADRENO_REV_A615)
 ADRENO_TARGET(a630, ADRENO_REV_A630)
 
 static inline int adreno_is_a630v1(struct adreno_device *adreno_dev)
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index fb745ad..c1d2407 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -64,10 +64,10 @@
 	{A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
 	{A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
 	{A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
-	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
-	{A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
-	{A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
-	{A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
+	{A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
+	{A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
+	{A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
 	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x0000F3CF},
 	{A6XX_RBBM_CLOCK_DELAY_SP1, 0x0000F3CF},
 	{A6XX_RBBM_CLOCK_DELAY_SP2, 0x0000F3CF},
@@ -172,7 +172,8 @@
 	const struct kgsl_hwcg_reg *regs;
 	unsigned int count;
 } a6xx_hwcg_registers[] = {
-	{adreno_is_a630, a630_hwcg_regs, ARRAY_SIZE(a630_hwcg_regs)}
+	{adreno_is_a630, a630_hwcg_regs, ARRAY_SIZE(a630_hwcg_regs)},
+	{adreno_is_a615, a630_hwcg_regs, ARRAY_SIZE(a630_hwcg_regs)},
 };
 
 static struct a6xx_protected_regs {
@@ -338,7 +339,7 @@
 	regs = a6xx_hwcg_registers[i].regs;
 
 	/* Disable SP clock before programming HWCG registers */
-	kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 0);
+	kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
 
 	for (j = 0; j < a6xx_hwcg_registers[i].count; j++)
 		kgsl_regwrite(device, regs[j].off, on ? regs[j].val : 0);
@@ -1187,6 +1188,56 @@
 	return regulator_disable(gmu->gx_gdsc);
 }
 
+#define SPTPRAC_POWER_OFF	BIT(2)
+#define SP_CLK_OFF		BIT(4)
+#define GX_GDSC_POWER_OFF	BIT(6)
+#define GX_CLK_OFF		BIT(7)
+
+/*
+ * a6xx_gx_is_on() - Check if GX is on using pwr status register
+ * @adreno_dev - Pointer to adreno_device
+ * This check should only be performed if the keepalive bit is set or it
+ * can be guaranteed that the power state of the GPU will remain unchanged
+ */
+static bool a6xx_gx_is_on(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	unsigned int val;
+	bool state;
+
+	if (!kgsl_gmu_isenabled(device))
+		return true;
+
+	kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
+	state = !(val & (GX_GDSC_POWER_OFF | GX_CLK_OFF));
+
+	/* If GMU is holding on to the fence then we cannot dump any GX stuff */
+	kgsl_gmu_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val);
+	if (val)
+		return false;
+
+	return state;
+}
+
+/*
+ * a6xx_sptprac_is_on() - Check if SPTP is on using pwr status register
+ * @adreno_dev - Pointer to adreno_device
+ * This check should only be performed if the keepalive bit is set or it
+ * can be guaranteed that the power state of the GPU will remain unchanged
+ */
+static bool a6xx_sptprac_is_on(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	unsigned int val;
+
+	if (!kgsl_gmu_isenabled(device))
+		return true;
+
+	kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
+	return !(val & (SPTPRAC_POWER_OFF | SP_CLK_OFF));
+}
+
 /*
  * a6xx_hm_sptprac_enable() - Turn on HM and SPTPRAC
  * @device: Pointer to KGSL device
@@ -2777,4 +2828,6 @@
 	.preemption_set_marker = a6xx_preemption_set_marker,
 	.preemption_context_init = a6xx_preemption_context_init,
 	.preemption_context_destroy = a6xx_preemption_context_destroy,
+	.gx_is_on = a6xx_gx_is_on,
+	.sptprac_is_on = a6xx_sptprac_is_on,
 };
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 54acd73..ed0129f 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -257,16 +257,17 @@
 	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
 	0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
 	0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
-	0x0100, 0x011D, 0x0200, 0x020D, 0x0210, 0x0213, 0x0218, 0x023D,
-	0x0400, 0x04F9, 0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511,
-	0x0533, 0x0533, 0x0540, 0x0555,
+	0x0100, 0x011D, 0x0200, 0x020D, 0x0218, 0x023D, 0x0400, 0x04F9,
+	0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, 0x0533, 0x0533,
+	0x0540, 0x0555,
 	/* CP */
-	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0827,
-	0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F, 0x0880, 0x088A,
-	0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD, 0x08F0, 0x08F3,
-	0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E, 0x0942, 0x094D,
-	0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E, 0x09A0, 0x09A6,
-	0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8, 0x0A00, 0x0A03,
+	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
+	0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F,
+	0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD,
+	0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E,
+	0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E,
+	0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8,
+	0x0A00, 0x0A03,
 	/* VSC */
 	0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
 	/* UCHE */
@@ -290,6 +291,18 @@
 	0xA630, 0xA630,
 };
 
+/*
+ * Set of registers to dump for A6XX before actually triggering crash dumper.
+ * Registers in pairs - first value is the start offset, second
+ * is the stop offset (inclusive)
+ */
+static const unsigned int a6xx_pre_crashdumper_registers[] = {
+	/* RBBM: RBBM_STATUS - RBBM_STATUS3 */
+	0x210, 0x213,
+	/* CP: CP_STATUS_1 */
+	0x825, 0x825,
+};
+
 enum a6xx_debugbus_id {
 	A6XX_DBGBUS_CP           = 0x1,
 	A6XX_DBGBUS_RBBM         = 0x2,
@@ -562,6 +575,17 @@
 	return (count * 8) + sizeof(*header);
 }
 
+static size_t a6xx_snapshot_pre_crashdump_regs(struct kgsl_device *device,
+		u8 *buf, size_t remain, void *priv)
+{
+	struct kgsl_snapshot_registers pre_cdregs = {
+			.regs = a6xx_pre_crashdumper_registers,
+			.count = ARRAY_SIZE(a6xx_pre_crashdumper_registers)/2,
+	};
+
+	return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
+}
+
 static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
 		u8 *buf, size_t remain, void *priv)
 {
@@ -1384,17 +1408,35 @@
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
+	bool sptprac_on;
 
-	/* Try to run the crash dumper */
-	_a6xx_do_crashdump(device);
+	/* GMU TCM data dumped through AHB */
+	a6xx_snapshot_gmu(device, snapshot);
 
+	sptprac_on = gpudev->sptprac_is_on(adreno_dev);
+
+	/* Return if the GX is off */
+	if (!gpudev->gx_is_on(adreno_dev)) {
+		pr_err("GX is off. Only dumping GMU data in snapshot\n");
+		return;
+	}
+
+	/* Dump the registers that are affected by the crash dumper trigger */
 	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
-		snapshot, a6xx_snapshot_registers, NULL);
+		snapshot, a6xx_snapshot_pre_crashdump_regs, NULL);
 
+	/* Also dump the VBIF registers, which are affected by the crash dumper */
 	adreno_snapshot_vbif_registers(device, snapshot,
 		a6xx_vbif_snapshot_registers,
 		ARRAY_SIZE(a6xx_vbif_snapshot_registers));
 
+	/* Try to run the crash dumper */
+	if (sptprac_on)
+		_a6xx_do_crashdump(device);
+
+	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
+		snapshot, a6xx_snapshot_registers, NULL);
+
 	/* CP_SQE indexed registers */
 	kgsl_snapshot_indexed_registers(device, snapshot,
 		A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
@@ -1422,19 +1464,19 @@
 	/* Mempool debug data */
 	a6xx_snapshot_mempool(device, snapshot);
 
-	/* Shader memory */
-	a6xx_snapshot_shader(device, snapshot);
+	if (sptprac_on) {
+		/* Shader memory */
+		a6xx_snapshot_shader(device, snapshot);
 
-	/* MVC register section */
-	a6xx_snapshot_mvc_regs(device, snapshot);
+		/* MVC register section */
+		a6xx_snapshot_mvc_regs(device, snapshot);
 
-	/* registers dumped through DBG AHB */
-	a6xx_snapshot_dbgahb_regs(device, snapshot);
+		/* registers dumped through DBG AHB */
+		a6xx_snapshot_dbgahb_regs(device, snapshot);
+	}
 
 	a6xx_snapshot_debugbus(device, snapshot);
 
-	/* GMU TCM data dumped through AHB */
-	a6xx_snapshot_gmu(device, snapshot);
 }
 
 static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
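For reference, the pre-crashdumper table added above is a flat array of inclusive [start, stop] pairs, which is why a6xx_snapshot_pre_crashdump_regs() passes ARRAY_SIZE(...)/2 as the pair count. A minimal sketch (not part of the patch) of how such a table is walked; read_gpu_reg() is a hypothetical read helper:

static void dump_register_pairs(const unsigned int *pairs, size_t num_pairs)
{
	size_t i;
	unsigned int off;

	/* each pair is a start offset followed by an inclusive stop offset */
	for (i = 0; i < num_pairs; i++)
		for (off = pairs[2 * i]; off <= pairs[2 * i + 1]; off++)
			pr_debug("reg 0x%x = 0x%x\n", off, read_gpu_reg(off));
}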
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index e8b1c67..422c434 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -2060,11 +2060,25 @@
 	int ret, i;
 	int fault;
 	int halt;
+	bool gx_on = true;
 
 	fault = atomic_xchg(&dispatcher->fault, 0);
 	if (fault == 0)
 		return 0;
 
+	/* Mask all GMU interrupts */
+	if (kgsl_gmu_isenabled(device)) {
+		adreno_write_gmureg(adreno_dev,
+			ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
+			0xFFFFFFFF);
+		adreno_write_gmureg(adreno_dev,
+			ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
+			0xFFFFFFFF);
+	}
+
+	if (gpudev->gx_is_on)
+		gx_on = gpudev->gx_is_on(adreno_dev);
+
 	/*
 	 * In the very unlikely case that the power is off, do nothing - the
 	 * state will be reset on power up and everybody will be happy
@@ -2084,7 +2098,8 @@
 	 * else return early to give the fault handler a chance to run.
 	 */
 	if (!(fault & ADRENO_IOMMU_PAGE_FAULT) &&
-		(adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev))) {
+		(adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev)) &&
+		gx_on) {
 		unsigned int val;
 
 		mutex_lock(&device->mutex);
@@ -2106,14 +2121,15 @@
 
 	mutex_lock(&device->mutex);
 
-	adreno_readreg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
-		ADRENO_REG_CP_RB_BASE_HI, &base);
+	if (gx_on)
+		adreno_readreg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
+			ADRENO_REG_CP_RB_BASE_HI, &base);
 
 	/*
 	 * Force the CP off for anything but a hard fault to make sure it is
 	 * good and stopped
 	 */
-	if (!(fault & ADRENO_HARD_FAULT)) {
+	if (!(fault & ADRENO_HARD_FAULT) && gx_on) {
 		adreno_readreg(adreno_dev, ADRENO_REG_CP_ME_CNTL, &reg);
 		if (adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev))
 			reg |= 1 | (1 << 1);
@@ -2149,8 +2165,9 @@
 		trace_adreno_cmdbatch_fault(cmdobj, fault);
 	}
 
-	adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
-		ADRENO_REG_CP_IB1_BASE_HI, &base);
+	if (gx_on)
+		adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
+			ADRENO_REG_CP_IB1_BASE_HI, &base);
 
 	do_header_and_snapshot(device, hung_rb, cmdobj);
 
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index 92b541d..0840aba 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -840,6 +840,15 @@
 	setup_fault_process(device, snapshot,
 			context ? context->proc_priv : NULL);
 
+	/* Add GPU specific sections - registers mainly, but other stuff too */
+	if (gpudev->snapshot)
+		gpudev->snapshot(adreno_dev, snapshot);
+
+	/* Dumping these buffers is useless if the GX is not on */
+	if (gpudev->gx_is_on)
+		if (!gpudev->gx_is_on(adreno_dev))
+			return;
+
 	adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
 			ADRENO_REG_CP_IB1_BASE_HI, &snapshot->ib1base);
 	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ, &snapshot->ib1size);
@@ -862,10 +871,6 @@
 		adreno_snapshot_ringbuffer(device, snapshot,
 			adreno_dev->next_rb);
 
-	/* Add GPU specific sections - registers mainly, but other stuff too */
-	if (gpudev->snapshot)
-		gpudev->snapshot(adreno_dev, snapshot);
-
 	/* Dump selected global buffers */
 	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
 			snapshot, snapshot_global, &device->memstore);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 7b8cdc2..4e67efb 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -254,13 +254,6 @@
 	kgsl_mem_entry_put(entry);
 }
 
-static inline void
-kgsl_mem_entry_put_deferred(struct kgsl_mem_entry *entry)
-{
-	if (entry)
-		queue_work(kgsl_driver.mem_workqueue, &entry->work);
-}
-
 static inline struct kgsl_mem_entry *
 kgsl_mem_entry_create(void)
 {
@@ -270,7 +263,6 @@
 		kref_init(&entry->refcount);
 		/* put this ref in userspace memory alloc and map ioctls */
 		kref_get(&entry->refcount);
-		INIT_WORK(&entry->work, _deferred_put);
 	}
 
 	return entry;
@@ -577,8 +569,10 @@
 	context->tid = task_pid_nr(current);
 
 	ret = kgsl_sync_timeline_create(context);
-	if (ret)
+	if (ret) {
+		kgsl_process_private_put(dev_priv->process_priv);
 		goto out;
+	}
 
 	snprintf(name, sizeof(name), "context-%d", id);
 	kgsl_add_event_group(&context->events, context, name,
@@ -1880,7 +1874,7 @@
 		return -EINVAL;
 
 	ret = gpumem_free_entry(entry);
-	kgsl_mem_entry_put_deferred(entry);
+	kgsl_mem_entry_put(entry);
 
 	return ret;
 }
@@ -1898,7 +1892,7 @@
 		return -EINVAL;
 
 	ret = gpumem_free_entry(entry);
-	kgsl_mem_entry_put_deferred(entry);
+	kgsl_mem_entry_put(entry);
 
 	return ret;
 }
@@ -1935,7 +1929,8 @@
 {
 	struct kgsl_mem_entry *entry = priv;
 
-	kgsl_mem_entry_put_deferred(entry);
+	INIT_WORK(&entry->work, _deferred_put);
+	queue_work(kgsl_driver.mem_workqueue, &entry->work);
 }
 
 static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv,
@@ -1999,7 +1994,7 @@
 	else
 		ret = -EINVAL;
 
-	kgsl_mem_entry_put_deferred(entry);
+	kgsl_mem_entry_put(entry);
 	return ret;
 }
 
@@ -3379,13 +3374,7 @@
 	if (entry == NULL)
 		return -EINVAL;
 
-	if (!kgsl_mem_entry_set_pend(entry)) {
-		kgsl_mem_entry_put(entry);
-		return -EBUSY;
-	}
-
 	if (entry->memdesc.cur_bindings != 0) {
-		kgsl_mem_entry_unset_pend(entry);
 		kgsl_mem_entry_put(entry);
 		return -EINVAL;
 	}
@@ -3394,7 +3383,7 @@
 
 	/* One put for find_id(), one put for the kgsl_mem_entry_create() */
 	kgsl_mem_entry_put(entry);
-	kgsl_mem_entry_put_deferred(entry);
+	kgsl_mem_entry_put(entry);
 
 	return 0;
 }
@@ -3454,13 +3443,7 @@
 	if (entry == NULL)
 		return -EINVAL;
 
-	if (!kgsl_mem_entry_set_pend(entry)) {
-		kgsl_mem_entry_put(entry);
-		return -EBUSY;
-	}
-
 	if (entry->bind_tree.rb_node != NULL) {
-		kgsl_mem_entry_unset_pend(entry);
 		kgsl_mem_entry_put(entry);
 		return -EINVAL;
 	}
@@ -3469,7 +3452,7 @@
 
 	/* One put for find_id(), one put for the kgsl_mem_entry_create() */
 	kgsl_mem_entry_put(entry);
-	kgsl_mem_entry_put_deferred(entry);
+	kgsl_mem_entry_put(entry);
 
 	return 0;
 }
@@ -4867,7 +4850,7 @@
 		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
 
 	kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
-		WQ_MEM_RECLAIM, 0);
+		WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 
 	kgsl_events_init();
 
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 324840d..c9f1483 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1202,14 +1202,16 @@
 static int gmu_disable_clks(struct gmu_device *gmu)
 {
 	int ret, j = 0;
+	unsigned int gmu_freq;
 
 	if (IS_ERR_OR_NULL(gmu->clks[0]))
 		return 0;
 
-	ret = clk_set_rate(gmu->clks[0], gmu->gmu_freqs[0]);
+	gmu_freq = gmu->gmu_freqs[gmu->num_gmupwrlevels - 1];
+	ret = clk_set_rate(gmu->clks[0], gmu_freq);
 	if (ret) {
 		dev_err(&gmu->pdev->dev, "fail to reset GMU clk freq %d\n",
-				gmu->gmu_freqs[0]);
+				gmu_freq);
 		return ret;
 	}
 
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index 067b276..cc878aa 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -177,6 +177,7 @@
 {
 	struct kgsl_hfi *hfi = &gmu->hfi;
 	struct pending_msg *msg = NULL, *next;
+	bool in_queue = false;
 
 	trace_kgsl_hfi_receive(rsp->ret_hdr.id,
 		rsp->ret_hdr.size,
@@ -185,11 +186,13 @@
 	spin_lock(&hfi->msglock);
 	list_for_each_entry_safe(msg, next, &hfi->msglist, node) {
 		if (msg->msg_id == rsp->ret_hdr.id &&
-				msg->seqnum == rsp->ret_hdr.seqnum)
+				msg->seqnum == rsp->ret_hdr.seqnum) {
+			in_queue = true;
 			break;
+		}
 	}
 
-	if (msg == NULL) {
+	if (!in_queue) {
 		spin_unlock(&hfi->msglock);
 		dev_err(&gmu->pdev->dev,
 				"Cannot find receiver of ack msg with id=%d\n",
diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h
index 8eedbfa2..47d07d9 100644
--- a/drivers/gpu/msm/kgsl_hfi.h
+++ b/drivers/gpu/msm/kgsl_hfi.h
@@ -115,7 +115,7 @@
 	HFI_F2H_QPRI_DEBUG = 40,
 };
 
-#define HFI_RSP_TIMEOUT 100 /* msec */
+#define HFI_RSP_TIMEOUT 500 /* msec */
 #define HFI_H2F_CMD_IRQ_MASK BIT(0)
 
 enum hfi_msg_type {
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 73c0d71..c02046a 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -34,6 +34,8 @@
 #include "kgsl_trace.h"
 #include "kgsl_pwrctrl.h"
 
+#define CP_APERTURE_REG	0
+
 #define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))
 
 #define ADDR_IN_GLOBAL(_a) \
@@ -1220,6 +1222,19 @@
 		"System cache not enabled for GPU pagetable walks: %d\n", ret);
 }
 
+static int program_smmu_aperture(unsigned int cb, unsigned int aperture_reg)
+{
+	struct scm_desc desc = {0};
+
+	desc.args[0] = 0xFFFF0000 | ((aperture_reg & 0xff) << 8) | (cb & 0xff);
+	desc.args[1] = 0xFFFFFFFF;
+	desc.args[2] = 0xFFFFFFFF;
+	desc.args[3] = 0xFFFFFFFF;
+	desc.arginfo = SCM_ARGS(4);
+
+	return scm_call2(SCM_SIP_FNID(SCM_SVC_MP, 0x1B), &desc);
+}
+
 static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
 {
 	int ret = 0;
@@ -1260,6 +1275,15 @@
 		goto done;
 	}
 
+	if (!MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE)) {
+		ret = program_smmu_aperture(cb_num, CP_APERTURE_REG);
+		if (ret) {
+			pr_err("SMMU aperture programming call failed with error %d\n",
+									ret);
+			return ret;
+		}
+	}
+
 	ctx->cb_num = cb_num;
 	ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET
 			+ (cb_num << KGSL_IOMMU_CB_SHIFT);
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index bb92b8b..c31a85b 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -280,6 +280,17 @@
 	return -ENOMEM;
 }
 
+static int kgsl_pool_get_retry_order(unsigned int order)
+{
+	int i;
+
+	for (i = kgsl_num_pools-1; i > 0; i--)
+		if (order >= kgsl_pools[i].pool_order)
+			return kgsl_pools[i].pool_order;
+
+	return 0;
+}
+
 /**
  * kgsl_pool_alloc_page() - Allocate a page of requested size
  * @page_size: Size of the page to be allocated
@@ -326,7 +337,7 @@
 	if (pool == NULL) {
 		/* Retry with lower order pages */
 		if (order > 0) {
-			size = PAGE_SIZE << --order;
+			size = PAGE_SIZE << kgsl_pool_get_retry_order(order);
 			goto eagain;
 		} else {
 			/*
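Worked example of the new retry path (the pool orders here are assumptions for illustration): with pools configured at orders 9, 4, 2 and 0, a failed order-8 allocation previously stepped down to order 7, which no pool backs; kgsl_pool_get_retry_order() instead jumps straight to the largest configured order that does not exceed the request, i.e. order 4, and falls back to order 0 if the request is smaller than every higher-order pool.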
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index eb67657..f0f202b 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -20,6 +20,7 @@
 #include <linux/scatterlist.h>
 #include <soc/qcom/scm.h>
 #include <soc/qcom/secure_buffer.h>
+#include <linux/ratelimit.h>
 
 #include "kgsl.h"
 #include "kgsl_sharedmem.h"
@@ -700,6 +701,10 @@
 	size_t len;
 	unsigned int align;
 
+	static DEFINE_RATELIMIT_STATE(_rs,
+					DEFAULT_RATELIMIT_INTERVAL,
+					DEFAULT_RATELIMIT_BURST);
+
 	size = PAGE_ALIGN(size);
 	if (size == 0 || size > UINT_MAX)
 		return -EINVAL;
@@ -762,7 +767,8 @@
 			 */
 			memdesc->size = (size - len);
 
-			if (sharedmem_noretry_flag != true)
+			if (sharedmem_noretry_flag != true &&
+					__ratelimit(&_rs))
 				KGSL_CORE_ERR(
 					"Out of memory: only allocated %lldKB of %lldKB requested\n",
 					(size - len) >> 10, size >> 10);
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index 96873c4..817a6b1 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -52,6 +52,10 @@
 	fence_init(&kfence->fence, &kgsl_sync_fence_ops, &ktimeline->lock,
 		ktimeline->fence_context, timestamp);
 
+	/*
+	 * sync_file_create() takes a refcount to the fence. This refcount is
+	 * put when the fence is signaled.
+	 */
 	kfence->sync_file = sync_file_create(&kfence->fence);
 
 	if (kfence->sync_file == NULL) {
@@ -61,9 +65,6 @@
 		return NULL;
 	}
 
-	/* Get a refcount to the fence. Put when signaled */
-	fence_get(&kfence->fence);
-
 	spin_lock_irqsave(&ktimeline->lock, flags);
 	list_add_tail(&kfence->child_list, &ktimeline->child_list_head);
 	spin_unlock_irqrestore(&ktimeline->lock, flags);
@@ -707,6 +708,14 @@
 	list_add_tail(&sfence->child_list, &syncsource->child_list_head);
 	spin_unlock(&syncsource->lock);
 out:
+	/*
+	 * We're transferring ownership of the fence to the sync file.
+	 * The sync file takes an extra refcount when it is created, so put
+	 * our refcount.
+	 */
+	if (sync_file)
+		fence_put(&sfence->fence);
+
 	if (ret) {
 		if (sync_file)
 			fput(sync_file->file);
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 2e04608..cb2e85c 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -440,6 +440,9 @@
 		.driver_data = APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI),
 		.driver_data = APPLE_HAS_FN },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+		USB_DEVICE_ID_APPLE_ALU_ANSI),
+		.driver_data = APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO),
 		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index a5dd7e6..d7f6cf0 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1791,6 +1791,8 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ISO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_JIS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+		USB_DEVICE_ID_APPLE_ALU_ANSI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
@@ -2056,7 +2058,6 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index da93077..cfca43f 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -311,6 +311,9 @@
 #define USB_VENDOR_ID_DELCOM		0x0fc5
 #define USB_DEVICE_ID_DELCOM_VISUAL_IND	0xb080
 
+#define USB_VENDOR_ID_DELL				0x413c
+#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE	0x301a
+
 #define USB_VENDOR_ID_DELORME		0x1163
 #define USB_DEVICE_ID_DELORME_EARTHMATE	0x0100
 #define USB_DEVICE_ID_DELORME_EM_LT20	0x0200
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 97dbb25..2b16207 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -81,6 +81,7 @@
 	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
+	{ USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 966a988..d0ae889 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -771,33 +771,47 @@
 
 	mutex_lock(&drvdata->mem_lock);
 
-	/*
-	 * ETR DDR memory is not allocated until user enables
-	 * tmc at least once. If user specifies different ETR
-	 * DDR size than the default size or switches between
-	 * contiguous or scatter-gather memory type after
-	 * enabling tmc; the new selection will be honored from
-	 * next tmc enable session.
-	 */
-	if (drvdata->size != drvdata->mem_size ||
-	    drvdata->memtype != drvdata->mem_type) {
-		tmc_etr_free_mem(drvdata);
-		drvdata->size = drvdata->mem_size;
-		drvdata->memtype = drvdata->mem_type;
-	}
-	ret = tmc_etr_alloc_mem(drvdata);
-	if (ret) {
-		pm_runtime_put(drvdata->dev);
-		mutex_unlock(&drvdata->mem_lock);
-		return ret;
-	}
-	mutex_unlock(&drvdata->mem_lock);
-
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 	if (drvdata->reading) {
 		ret = -EBUSY;
-		goto out;
+		spin_unlock_irqrestore(&drvdata->spinlock, flags);
+		mutex_unlock(&drvdata->mem_lock);
+		return ret;
 	}
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+	if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
+		/*
+		 * ETR DDR memory is not allocated until user enables
+		 * tmc at least once. If user specifies different ETR
+		 * DDR size than the default size or switches between
+		 * contiguous or scatter-gather memory type after
+		 * enabling tmc; the new selection will be honored from
+		 * next tmc enable session.
+		 */
+		if (drvdata->size != drvdata->mem_size ||
+		    drvdata->memtype != drvdata->mem_type) {
+			tmc_etr_free_mem(drvdata);
+			drvdata->size = drvdata->mem_size;
+			drvdata->memtype = drvdata->mem_type;
+		}
+		ret = tmc_etr_alloc_mem(drvdata);
+		if (ret) {
+			mutex_unlock(&drvdata->mem_lock);
+			return ret;
+		}
+	} else {
+		drvdata->usbch = usb_qdss_open("qdss", drvdata,
+					       usb_notifier);
+		if (IS_ERR_OR_NULL(drvdata->usbch)) {
+			dev_err(drvdata->dev, "usb_qdss_open failed\n");
+			ret = PTR_ERR(drvdata->usbch);
+			mutex_unlock(&drvdata->mem_lock);
+			return ret;
+		}
+	}
+
+	spin_lock_irqsave(&drvdata->spinlock, flags);
 
 	val = local_xchg(&drvdata->mode, mode);
 	/*
@@ -808,9 +822,14 @@
 	if (val == CS_MODE_SYSFS)
 		goto out;
 
-	tmc_etr_enable_hw(drvdata);
+	if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
+		tmc_etr_enable_hw(drvdata);
+
+	drvdata->enable = true;
+	drvdata->sticky_enable = true;
 out:
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+	mutex_unlock(&drvdata->mem_lock);
 
 	if (!ret)
 		dev_info(drvdata->dev, "TMC-ETR enabled\n");
@@ -880,8 +899,15 @@
 
 	val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
 	/* Disable the TMC only if it needs to */
-	if (val != CS_MODE_DISABLED)
-		tmc_etr_disable_hw(drvdata);
+	if (val != CS_MODE_DISABLED) {
+		if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
+			__tmc_etr_disable_to_bam(drvdata);
+			tmc_etr_bam_disable(drvdata);
+			usb_qdss_close(drvdata->usbch);
+		} else {
+			tmc_etr_disable_hw(drvdata);
+		}
+	}
 
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
@@ -913,6 +939,11 @@
 		goto out;
 	}
 
+	if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	val = local_read(&drvdata->mode);
 	/* Don't interfere if operated from Perf */
 	if (val == CS_MODE_PERF) {
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 077cb45..012c56e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -540,6 +540,7 @@
 			drvdata->memtype  = TMC_ETR_MEM_TYPE_CONTIG;
 		drvdata->mem_size = drvdata->size;
 		drvdata->mem_type = drvdata->memtype;
+		drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
 	} else {
 		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
 	}
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index e34d82e..c21ca7b 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -58,7 +58,7 @@
 #define SMBSLVDAT	(0xC + piix4_smba)
 
 /* count for request_region */
-#define SMBIOSIZE	8
+#define SMBIOSIZE	9
 
 /* PCI Address Constants */
 #define SMBBA		0x090
@@ -592,6 +592,8 @@
 	u8 port;
 	int retval;
 
+	mutex_lock(&piix4_mutex_sb800);
+
 	/* Request the SMBUS semaphore, avoid conflicts with the IMC */
 	smbslvcnt  = inb_p(SMBSLVCNT);
 	do {
@@ -605,10 +607,10 @@
 		usleep_range(1000, 2000);
 	} while (--retries);
 	/* SMBus is still owned by the IMC, we give up */
-	if (!retries)
+	if (!retries) {
+		mutex_unlock(&piix4_mutex_sb800);
 		return -EBUSY;
-
-	mutex_lock(&piix4_mutex_sb800);
+	}
 
 	outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
 	smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
@@ -623,11 +625,11 @@
 
 	outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1);
 
-	mutex_unlock(&piix4_mutex_sb800);
-
 	/* Release the semaphore */
 	outb_p(smbslvcnt | 0x20, SMBSLVCNT);
 
+	mutex_unlock(&piix4_mutex_sb800);
+
 	return retval;
 }
 
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index b521df6..b055ff6 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -331,8 +331,8 @@
 	int64_t temp;
 
 	/* K = code/4 */
-	temp = div64_s64(adc_code, FG_ADC_RR_BATT_THERM_LSB_K);
-	temp *= FG_ADC_SCALE_MILLI_FACTOR;
+	temp = ((int64_t)adc_code * FG_ADC_SCALE_MILLI_FACTOR);
+	temp = div64_s64(temp, FG_ADC_RR_BATT_THERM_LSB_K);
 	*result_millidegc = temp - FG_ADC_KELVINMIL_CELSIUSMIL;
 
 	return 0;
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 2de1f52..62b0dec 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -484,7 +484,7 @@
 		return -EINVAL;
 	}
 
-	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*indio_dev));
+	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
 	if (indio_dev == NULL) {
 		dev_err(&pdev->dev, "failed to allocate iio device\n");
 		return -ENOMEM;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index b9fcbf1..5faea37 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -41,6 +41,7 @@
 static const struct inv_mpu6050_reg_map reg_set_6500 = {
 	.sample_rate_div	= INV_MPU6050_REG_SAMPLE_RATE_DIV,
 	.lpf                    = INV_MPU6050_REG_CONFIG,
+	.accel_lpf              = INV_MPU6500_REG_ACCEL_CONFIG_2,
 	.user_ctrl              = INV_MPU6050_REG_USER_CTRL,
 	.fifo_en                = INV_MPU6050_REG_FIFO_EN,
 	.gyro_config            = INV_MPU6050_REG_GYRO_CONFIG,
@@ -205,6 +206,37 @@
 EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg);
 
 /**
+ *  inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent
+ *
+ *  MPU60xx/MPU9150 use only 1 register for accelerometer + gyroscope
+ *  MPU6500 and above have a dedicated register for accelerometer
+ */
+static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st,
+				    enum inv_mpu6050_filter_e val)
+{
+	int result;
+
+	result = regmap_write(st->map, st->reg->lpf, val);
+	if (result)
+		return result;
+
+	switch (st->chip_type) {
+	case INV_MPU6050:
+	case INV_MPU6000:
+	case INV_MPU9150:
+		/* old chips, nothing to do */
+		result = 0;
+		break;
+	default:
+		/* set accel lpf */
+		result = regmap_write(st->map, st->reg->accel_lpf, val);
+		break;
+	}
+
+	return result;
+}
+
+/**
  *  inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
  *
  *  Initial configuration:
@@ -227,8 +259,7 @@
 	if (result)
 		return result;
 
-	d = INV_MPU6050_FILTER_20HZ;
-	result = regmap_write(st->map, st->reg->lpf, d);
+	result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ);
 	if (result)
 		return result;
 
@@ -531,6 +562,8 @@
  *                  would be alising. This function basically search for the
  *                  correct low pass parameters based on the fifo rate, e.g,
  *                  sampling frequency.
+ *
+ *  lpf is set automatically when setting the sampling rate to avoid aliasing.
  */
 static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
 {
@@ -546,7 +579,7 @@
 	while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
 		i++;
 	data = d[i];
-	result = regmap_write(st->map, st->reg->lpf, data);
+	result = inv_mpu6050_set_lpf_regs(st, data);
 	if (result)
 		return result;
 	st->chip_config.lpf = data;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index f0e8c5d..d851581 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -28,6 +28,7 @@
  *  struct inv_mpu6050_reg_map - Notable registers.
  *  @sample_rate_div:	Divider applied to gyro output rate.
  *  @lpf:		Configures internal low pass filter.
+ *  @accel_lpf:		Configures accelerometer low pass filter.
  *  @user_ctrl:		Enables/resets the FIFO.
  *  @fifo_en:		Determines which data will appear in FIFO.
  *  @gyro_config:	gyro config register.
@@ -47,6 +48,7 @@
 struct inv_mpu6050_reg_map {
 	u8 sample_rate_div;
 	u8 lpf;
+	u8 accel_lpf;
 	u8 user_ctrl;
 	u8 fifo_en;
 	u8 gyro_config;
@@ -187,6 +189,7 @@
 #define INV_MPU6050_FIFO_THRESHOLD           500
 
 /* mpu6500 registers */
+#define INV_MPU6500_REG_ACCEL_CONFIG_2      0x1D
 #define INV_MPU6500_REG_ACCEL_OFFSET        0x77
 
 /* delay time in milliseconds */
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 55df9a7..44e46c1 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -227,7 +227,7 @@
 		.address = ST_PRESS_1_OUT_XL_ADDR,
 		.scan_index = 0,
 		.scan_type = {
-			.sign = 'u',
+			.sign = 's',
 			.realbits = 24,
 			.storagebits = 32,
 			.endianness = IIO_LE,
@@ -240,7 +240,7 @@
 		.address = ST_TEMP_1_OUT_L_ADDR,
 		.scan_index = 1,
 		.scan_type = {
-			.sign = 'u',
+			.sign = 's',
 			.realbits = 16,
 			.storagebits = 16,
 			.endianness = IIO_LE,
@@ -259,7 +259,7 @@
 		.address = ST_PRESS_LPS001WP_OUT_L_ADDR,
 		.scan_index = 0,
 		.scan_type = {
-			.sign = 'u',
+			.sign = 's',
 			.realbits = 16,
 			.storagebits = 16,
 			.endianness = IIO_LE,
@@ -273,7 +273,7 @@
 		.address = ST_TEMP_LPS001WP_OUT_L_ADDR,
 		.scan_index = 1,
 		.scan_type = {
-			.sign = 'u',
+			.sign = 's',
 			.realbits = 16,
 			.storagebits = 16,
 			.endianness = IIO_LE,
@@ -291,7 +291,7 @@
 		.address = ST_PRESS_1_OUT_XL_ADDR,
 		.scan_index = 0,
 		.scan_type = {
-			.sign = 'u',
+			.sign = 's',
 			.realbits = 24,
 			.storagebits = 32,
 			.endianness = IIO_LE,
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 268210e..24fb543 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -269,8 +269,6 @@
 
 static void calibrate_as3935(struct as3935_state *st)
 {
-	mutex_lock(&st->lock);
-
 	/* mask disturber interrupt bit */
 	as3935_write(st, AS3935_INT, BIT(5));
 
@@ -280,8 +278,6 @@
 
 	mdelay(2);
 	as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
-
-	mutex_unlock(&st->lock);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -318,6 +314,8 @@
 	val &= ~AS3935_AFE_PWR_BIT;
 	ret = as3935_write(st, AS3935_AFE_GAIN, val);
 
+	calibrate_as3935(st);
+
 err_resume:
 	mutex_unlock(&st->lock);
 
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 11bfa27..282c9fb 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1105,13 +1105,13 @@
 	 * pretend we don't support reading the HCA's core clock. This is also
 	 * forced by mmap function.
 	 */
-	if (PAGE_SIZE <= 4096 &&
-	    field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
-		resp.comp_mask |=
-			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
-		resp.hca_core_clock_offset =
-			offsetof(struct mlx5_init_seg, internal_timer_h) %
-			PAGE_SIZE;
+	if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+		if (PAGE_SIZE <= 4096) {
+			resp.comp_mask |=
+				MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
+			resp.hca_core_clock_offset =
+				offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
+		}
 		resp.response_length += sizeof(resp.hca_core_clock_offset) +
 					sizeof(resp.reserved2);
 	}
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 7b74d09..58e92bc 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -792,6 +792,9 @@
 		if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
 			goto sysfs_err;
 
+	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+
 	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
 	return dev;
 
@@ -824,11 +827,10 @@
 	ib_dealloc_device(&dev->ibdev);
 }
 
-static int qedr_close(struct qedr_dev *dev)
+static void qedr_close(struct qedr_dev *dev)
 {
-	qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
-
-	return 0;
+	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
 }
 
 static void qedr_shutdown(struct qedr_dev *dev)
@@ -837,6 +839,12 @@
 	qedr_remove(dev);
 }
 
+static void qedr_open(struct qedr_dev *dev)
+{
+	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+}
+
 static void qedr_mac_address_change(struct qedr_dev *dev)
 {
 	union ib_gid *sgid = &dev->sgid_tbl[0];
@@ -863,7 +871,7 @@
 
 	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
 
-	qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
 
 	if (rc)
 		DP_ERR(dev, "Error updating mac filter\n");
@@ -877,7 +885,7 @@
 {
 	switch (event) {
 	case QEDE_UP:
-		qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+		qedr_open(dev);
 		break;
 	case QEDE_DOWN:
 		qedr_close(dev);
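With the enet_state bit, the UP/DOWN callbacks become edge-triggered: qedr_open() dispatches IB_EVENT_PORT_ACTIVE only when QEDR_ENET_STATE_BIT transitions from clear to set, and qedr_close() dispatches IB_EVENT_PORT_ERR only on the reverse transition, so repeated QEDE_UP or QEDE_DOWN notifications no longer generate duplicate port events.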
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 620badd..f669d0b 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -113,6 +113,8 @@
 	struct qed_rdma_events events;
 };
 
+#define QEDR_ENET_STATE_BIT	(0)
+
 struct qedr_dev {
 	struct ib_device	ibdev;
 	struct qed_dev		*cdev;
@@ -153,6 +155,8 @@
 	struct qedr_cq		*gsi_sqcq;
 	struct qedr_cq		*gsi_rqcq;
 	struct qedr_qp		*gsi_qp;
+
+	unsigned long enet_state;
 };
 
 #define QEDR_MAX_SQ_PBL			(0x8000)
@@ -188,6 +192,7 @@
 #define QEDR_ROCE_MAX_CNQ_SIZE		(0x4000)
 
 #define QEDR_MAX_PORT			(1)
+#define QEDR_PORT			(1)
 
 #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
 
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index a615142..4ba019e 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -471,8 +471,6 @@
 			    struct ib_ucontext *context, struct ib_udata *udata)
 {
 	struct qedr_dev *dev = get_qedr_dev(ibdev);
-	struct qedr_ucontext *uctx = NULL;
-	struct qedr_alloc_pd_uresp uresp;
 	struct qedr_pd *pd;
 	u16 pd_id;
 	int rc;
@@ -489,21 +487,33 @@
 	if (!pd)
 		return ERR_PTR(-ENOMEM);
 
-	dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+	if (rc)
+		goto err;
 
-	uresp.pd_id = pd_id;
 	pd->pd_id = pd_id;
 
 	if (udata && context) {
+		struct qedr_alloc_pd_uresp uresp;
+
+		uresp.pd_id = pd_id;
+
 		rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
-		if (rc)
+		if (rc) {
 			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
-		uctx = get_qedr_ucontext(context);
-		uctx->pd = pd;
-		pd->uctx = uctx;
+			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
+			goto err;
+		}
+
+		pd->uctx = get_qedr_ucontext(context);
+		pd->uctx->pd = pd;
 	}
 
 	return &pd->ibpd;
+
+err:
+	kfree(pd);
+	return ERR_PTR(rc);
 }
 
 int qedr_dealloc_pd(struct ib_pd *ibpd)
@@ -1719,6 +1729,14 @@
 		/* ERR->XXX */
 		switch (new_state) {
 		case QED_ROCE_QP_STATE_RESET:
+			if ((qp->rq.prod != qp->rq.cons) ||
+			    (qp->sq.prod != qp->sq.cons)) {
+				DP_NOTICE(dev,
+					  "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
+					  qp->rq.prod, qp->rq.cons, qp->sq.prod,
+					  qp->sq.cons);
+				status = -EINVAL;
+			}
 			break;
 		default:
 			status = -EINVAL;
@@ -2014,7 +2032,7 @@
 	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
-	qp_attr->cap.max_inline_data = qp->max_inline_data;
+	qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
 	qp_init_attr->cap = qp_attr->cap;
 
 	memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
@@ -3220,9 +3238,10 @@
 				  IB_WC_SUCCESS, 0);
 		break;
 	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
-		DP_ERR(dev,
-		       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
-		       cq->icid, qp->icid);
+		if (qp->state != QED_ROCE_QP_STATE_ERR)
+			DP_ERR(dev,
+			       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+			       cq->icid, qp->icid);
 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
 				  IB_WC_WR_FLUSH_ERR, 0);
 		break;
diff --git a/drivers/input/misc/hbtp_input.c b/drivers/input/misc/hbtp_input.c
index fe7cc70..c9ea89d 100644
--- a/drivers/input/misc/hbtp_input.c
+++ b/drivers/input/misc/hbtp_input.c
@@ -87,6 +87,7 @@
 	u32 power_on_delay;
 	u32 power_off_delay;
 	bool manage_pin_ctrl;
+	struct kobject *sysfs_kobject;
 };
 
 static struct hbtp_data *hbtp;
@@ -1350,6 +1351,39 @@
 	},
 };
 
+static ssize_t hbtp_display_pwr_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	u32 status;
+	ssize_t ret;
+	char *envp[2] = {HBTP_EVENT_TYPE_DISPLAY, NULL};
+
+	ret = kstrtou32(buf, 10, &status);
+	if (ret) {
+		pr_err("hbtp: failed to parse input: %zd\n", ret);
+		return ret;
+	}
+	if (!hbtp || !hbtp->input_dev) {
+		pr_err("hbtp: hbtp or hbtp->input_dev not ready!\n");
+		return -ENODEV;
+	}
+	mutex_lock(&hbtp->mutex);
+	if (status) {
+		pr_debug("hbtp: display power on!\n");
+		kobject_uevent_env(&hbtp->input_dev->dev.kobj,
+			KOBJ_ONLINE, envp);
+	} else {
+		pr_debug("hbtp: display power off!\n");
+		kobject_uevent_env(&hbtp->input_dev->dev.kobj,
+			KOBJ_OFFLINE, envp);
+	}
+	mutex_unlock(&hbtp->mutex);
+	return count;
+}
+
+static struct kobj_attribute hbtp_display_attribute =
+		__ATTR(display_pwr, 0660, NULL, hbtp_display_pwr_store);
+
 static int __init hbtp_init(void)
 {
 	int error;
@@ -1382,6 +1416,16 @@
 		goto err_platform_drv_reg;
 	}
 
+	hbtp->sysfs_kobject = kobject_create_and_add("hbtp", kernel_kobj);
+	if (!hbtp->sysfs_kobject)
+		pr_err("%s: Could not create sysfs kobject\n", __func__);
+	else {
+		error = sysfs_create_file(hbtp->sysfs_kobject,
+			&hbtp_display_attribute.attr);
+		if (error)
+			pr_err("failed to create the display_pwr sysfs\n");
+	}
+
 	return 0;
 
 err_platform_drv_reg:
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index e7b96f1..5be14ad 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -788,6 +788,13 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
 		},
 	},
+	{
+		/* Fujitsu UH554 laptop */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
+		},
+	},
 	{ }
 };
 
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index bb3ac5f..72a391e 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -142,7 +142,7 @@
 int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
 {
 	struct irq_domain *root_domain =
-		irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+		irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
 				&xtensa_mx_irq_domain_ops,
 				&xtensa_mx_irq_chip);
 	irq_set_default_host(root_domain);
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index 472ae17..f728755 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -89,7 +89,7 @@
 int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
 {
 	struct irq_domain *root_domain =
-		irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+		irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
 				&xtensa_irq_domain_ops, &xtensa_irq_chip);
 	irq_set_default_host(root_domain);
 	return 0;
diff --git a/drivers/mailbox/msm_qmp.c b/drivers/mailbox/msm_qmp.c
index dd022d3..f0bb0bc 100644
--- a/drivers/mailbox/msm_qmp.c
+++ b/drivers/mailbox/msm_qmp.c
@@ -26,7 +26,6 @@
 #define QMP_MAGIC	0x4d41494c	/* MAIL */
 #define QMP_VERSION	0x1
 #define QMP_FEATURES	0x0
-#define QMP_NUM_CHANS	0x1
 #define QMP_TOUT_MS	5000
 #define QMP_TX_TOUT_MS	2000
 
@@ -107,63 +106,89 @@
 };
 
 /**
- * struct qmp_device - local information for managing a single mailbox
- * @dev:		The device that corresponds to this mailbox
- * @mbox:		The mbox controller for this mailbox
- * @name:		The name of this mailbox
- * @local_state:	Current state of the mailbox protocol
- * @link_complete:	Use to block until link negotiation with remote proc
- *			is complete
- * @ch_complete:	Use to block until the channel is fully opened
+ * struct qmp_mbox - local information for managing a single mailbox
+ * @list:		List head for adding mbox to linked list
+ * @ctrl:		Controller for this mailbox
+ * @priority:		Priority of mailbox in the linked list
+ * @num_assigned:	Number of channels assigned for allocated pool
+ * @num_shutdown:	Number of channels that have shutdown
+ * @desc:		Reference to the mailbox descriptor in SMEM
+ * @rx_disabled:	Disable rx if multiple clients are sending from this mbox
  * @tx_sent:		True if tx is sent and remote proc has not sent ack
- * @ch_in_use:		True if this mailbox's channel owned by a client
- * @rx_buf:		buffer to pass to client, holds copied data from mailbox
- * @version:		Version and features received during link negotiation
+ * @idx_in_flight:	Current channel idx whose tx is in flight
  * @mcore_mbox_offset:	Offset of mcore mbox from the msgram start
  * @mcore_mbox_size:	Size of the mcore mbox
- * @desc:		Reference to the mailbox descriptor in SMEM
+ * @rx_pkt:		buffer to pass to client, holds copied data from mailbox
+ * @version:		Version and features received during link negotiation
+ * @local_state:	Current state of the mailbox protocol
+ * @state_lock:		Serialize mailbox state changes
+ * @tx_lock:		Serialize access for writes to mailbox
+ * @link_complete:	Use to block until link negotiation with remote proc
+ * @ch_complete:	Use to block until the channel is fully opened
+ * @mdev:		Back pointer to the qmp edge device for this mailbox
+ * @dwork:		Delayed work to detect timed out tx
+ */
+struct qmp_mbox {
+	struct list_head list;
+	struct mbox_controller ctrl;
+	int priority;
+	u32 num_assigned;
+	u32 num_shutdown;
+
+	void __iomem *desc;
+	bool rx_disabled;
+	bool tx_sent;
+	u32 idx_in_flight;
+	u32 mcore_mbox_offset;
+	u32 mcore_mbox_size;
+	struct qmp_pkt rx_pkt;
+
+	struct qmp_core_version version;
+	enum qmp_local_state local_state;
+	struct mutex state_lock;
+	spinlock_t tx_lock;
+
+	struct completion link_complete;
+	struct completion ch_complete;
+	struct delayed_work dwork;
+	struct qmp_device *mdev;
+};
+
+/**
+ * struct qmp_device - local information for managing a single qmp edge
+ * @dev:		The device that corresponds to this edge
+ * @name:		The name of this edge
+ * @mboxes:		List of mailboxes on this edge
  * @msgram:		Reference to the start of msgram
- * @irq_mask:		Mask written to @tx_irq_reg to trigger irq
  * @tx_irq_reg:		Reference to the register to send an irq to remote proc
  * @rx_reset_reg:	Reference to the register to reset the rx irq, if
  *			applicable
- * @rx_irq_line:	The incoming interrupt line
- * @tx_irq_count:	Number of tx interrupts triggered
- * @rx_irq_count:	Number of rx interrupts received
- * @kwork:		Work to be executed when an irq is received
+ * @kwork:		kwork for rx handling
  * @kworker:		Handle to entitiy to process incoming data
  * @task:		Handle to task context used to run @kworker
- * @state_lock:		Serialize mailbox state changes
- * @dwork:		Delayed work to detect timed out tx
- * @tx_lock:		Serialize access for writes to mailbox
+ * @irq_mask:		Mask written to @tx_irq_reg to trigger irq
+ * @rx_irq_line:	The incoming interrupt line
+ * @rx_work:		Work to be executed when an irq is received
+ * @tx_irq_count:	Number of tx interrupts triggered
+ * @rx_irq_count:	Number of rx interrupts received
  */
 struct qmp_device {
 	struct device *dev;
-	struct mbox_controller *mbox;
 	const char *name;
-	enum qmp_local_state local_state;
-	struct completion link_complete;
-	struct completion ch_complete;
-	bool tx_sent;
-	bool ch_in_use;
-	struct qmp_pkt rx_pkt;
-	struct qmp_core_version version;
-	u32 mcore_mbox_offset;
-	u32 mcore_mbox_size;
-	void __iomem *desc;
+	struct list_head mboxes;
+
 	void __iomem *msgram;
-	u32 irq_mask;
 	void __iomem *tx_irq_reg;
 	void __iomem *rx_reset_reg;
-	u32 rx_irq_line;
-	u32 tx_irq_count;
-	u32 rx_irq_count;
+
 	struct kthread_work kwork;
 	struct kthread_worker kworker;
 	struct task_struct *task;
-	struct mutex state_lock;
-	struct delayed_work dwork;
-	spinlock_t tx_lock;
+
+	u32 irq_mask;
+	u32 rx_irq_line;
+	u32 tx_irq_count;
+	u32 rx_irq_count;
 };
 
 /**
@@ -181,25 +206,7 @@
 	mdev->tx_irq_count++;
 }
 
-/**
- * qmp_irq_handler() - handle irq from remote entitity.
- * @irq:	irq number for the trggered interrupt.
- * @priv:	private pointer to qmp mbox device.
- */
-irqreturn_t qmp_irq_handler(int irq, void *priv)
-{
-	struct qmp_device *mdev = (struct qmp_device *)priv;
-
-	if (mdev->rx_reset_reg)
-		writel_relaxed(mdev->irq_mask, mdev->rx_reset_reg);
-
-	kthread_queue_work(&mdev->kworker, &mdev->kwork);
-	mdev->rx_irq_count++;
-
-	return IRQ_HANDLED;
-}
-
-static void memcpy32_toio(void *dest, void *src, size_t size)
+static void memcpy32_toio(void __iomem *dest, void *src, size_t size)
 {
 	u32 *dest_local = (u32 *)dest;
 	u32 *src_local = (u32 *)src;
@@ -210,7 +217,7 @@
 		iowrite32(*src_local++, dest_local++);
 }
 
-static void memcpy32_fromio(void *dest, void *src, size_t size)
+static void memcpy32_fromio(void *dest, void __iomem *src, size_t size)
 {
 	u32 *dest_local = (u32 *)dest;
 	u32 *src_local = (u32 *)src;
@@ -222,62 +229,75 @@
 }
 
 /**
- * set_ucore_link_ack() - set the link ack in the ucore channel desc.
- * @mdev:	the mailbox for the field that is being set.
- * @state:	the value to set the ack field to.
- */
-static void set_ucore_link_ack(struct qmp_device *mdev, u32 state)
-{
-	u32 offset;
-
-	offset = offsetof(struct mbox_desc, ucore);
-	offset += offsetof(struct channel_desc, link_state_ack);
-	iowrite32(state, mdev->desc + offset);
-}
-
-/**
- * set_ucore_ch_ack() - set the channel ack in the ucore channel desc.
- * @mdev:	the mailbox for the field that is being set.
- * @state:	the value to set the ack field to.
- */
-static void set_ucore_ch_ack(struct qmp_device *mdev, u32 state)
-{
-	u32 offset;
-
-	offset = offsetof(struct mbox_desc, ucore);
-	offset += offsetof(struct channel_desc, ch_state_ack);
-	iowrite32(state, mdev->desc + offset);
-}
-
-/**
- * set_mcore_ch() - set the channel state in the mcore channel desc.
- * @mdev:	the mailbox for the field that is being set.
- * @state:	the value to set the channel field to.
- */
-static void set_mcore_ch(struct qmp_device *mdev, u32 state)
-{
-	u32 offset;
-
-	offset = offsetof(struct mbox_desc, mcore);
-	offset += offsetof(struct channel_desc, ch_state);
-	iowrite32(state, mdev->desc + offset);
-}
-
-/**
  * qmp_notify_timeout() - Notify client of tx timeout with -EIO
  * @work:	Structure for work that was scheduled.
  */
 static void qmp_notify_timeout(struct work_struct *work)
 {
 	struct delayed_work *dwork = to_delayed_work(work);
-	struct qmp_device *mdev = container_of(dwork, struct qmp_device, dwork);
-	struct mbox_chan *chan = &mdev->mbox->chans[0];
+	struct qmp_mbox *mbox = container_of(dwork, struct qmp_mbox, dwork);
+	struct mbox_chan *chan = &mbox->ctrl.chans[mbox->idx_in_flight];
 	int err = -EIO;
+	unsigned long flags;
 
-	pr_err("%s: qmp tx timeout for %s\n", __func__, mdev->name);
+	spin_lock_irqsave(&mbox->tx_lock, flags);
+	if (!mbox->tx_sent) {
+		spin_unlock_irqrestore(&mbox->tx_lock, flags);
+		return;
+	}
+	pr_err("%s: qmp tx timeout for %d\n", __func__, mbox->idx_in_flight);
+	mbox->tx_sent = false;
+	spin_unlock_irqrestore(&mbox->tx_lock, flags);
 	mbox_chan_txdone(chan, err);
 }
 
+static inline void qmp_schedule_tx_timeout(struct qmp_mbox *mbox)
+{
+	schedule_delayed_work(&mbox->dwork, msecs_to_jiffies(QMP_TX_TOUT_MS));
+}
+
+/**
+ * set_ucore_link_ack() - set the link ack in the ucore channel desc.
+ * @mbox:	the mailbox for the field that is being set.
+ * @state:	the value to set the ack field to.
+ */
+static void set_ucore_link_ack(struct qmp_mbox *mbox, u32 state)
+{
+	u32 offset;
+
+	offset = offsetof(struct mbox_desc, ucore);
+	offset += offsetof(struct channel_desc, link_state_ack);
+	iowrite32(state, mbox->desc + offset);
+}
+
+/**
+ * set_ucore_ch_ack() - set the channel ack in the ucore channel desc.
+ * @mbox:	the mailbox for the field that is being set.
+ * @state:	the value to set the ack field to.
+ */
+static void set_ucore_ch_ack(struct qmp_mbox *mbox, u32 state)
+{
+	u32 offset;
+
+	offset = offsetof(struct mbox_desc, ucore);
+	offset += offsetof(struct channel_desc, ch_state_ack);
+	iowrite32(state, mbox->desc + offset);
+}
+
+/**
+ * set_mcore_ch() - set the channel state in the mcore channel desc.
+ * @mbox:	the mailbox for the field that is being set.
+ * @state:	the value to set the channel field to.
+ */
+static void set_mcore_ch(struct qmp_mbox *mbox, u32 state)
+{
+	u32 offset;
+
+	offset = offsetof(struct mbox_desc, mcore);
+	offset += offsetof(struct channel_desc, ch_state);
+	iowrite32(state, mbox->desc + offset);
+}
+
 /**
  * qmp_startup() - Start qmp mailbox channel for communication. Waits for
  *			remote subsystem to open channel if link is not
@@ -288,35 +308,27 @@
  */
 static int qmp_startup(struct mbox_chan *chan)
 {
-	struct qmp_device *mdev = chan->con_priv;
+	struct qmp_mbox *mbox = chan->con_priv;
 
-	if (!mdev)
+	if (!mbox)
 		return -EINVAL;
 
-	mutex_lock(&mdev->state_lock);
-	if (mdev->local_state == CHANNEL_CONNECTED) {
-		mutex_unlock(&mdev->state_lock);
-		return -EINVAL;
-	}
-	if (!completion_done(&mdev->link_complete)) {
-		mutex_unlock(&mdev->state_lock);
+	mutex_lock(&mbox->state_lock);
+	if (!completion_done(&mbox->link_complete)) {
+		mutex_unlock(&mbox->state_lock);
 		return -EAGAIN;
 	}
 
-	set_mcore_ch(mdev, QMP_MBOX_CH_CONNECTED);
-	mdev->local_state = LOCAL_CONNECTING;
-	mutex_unlock(&mdev->state_lock);
+	set_mcore_ch(mbox, QMP_MBOX_CH_CONNECTED);
+	mbox->local_state = LOCAL_CONNECTING;
+	mutex_unlock(&mbox->state_lock);
 
-	send_irq(mdev);
-	wait_for_completion_interruptible_timeout(&mdev->ch_complete,
+	send_irq(mbox->mdev);
+	wait_for_completion_interruptible_timeout(&mbox->ch_complete,
 					msecs_to_jiffies(QMP_TOUT_MS));
 	return 0;
 }
 
-static inline void qmp_schedule_tx_timeout(struct qmp_device *mdev)
-{
-	schedule_delayed_work(&mdev->dwork, msecs_to_jiffies(QMP_TX_TOUT_MS));
-}
 
 /**
  * qmp_send_data() - Copy the data to the channel's mailbox and notify
@@ -331,31 +343,39 @@
  */
 static int qmp_send_data(struct mbox_chan *chan, void *data)
 {
-	struct qmp_device *mdev = chan->con_priv;
+	struct qmp_mbox *mbox = chan->con_priv;
+	struct qmp_device *mdev;
 	struct qmp_pkt *pkt = (struct qmp_pkt *)data;
 	void __iomem *addr;
 	unsigned long flags;
+	int i;
 
-	if (!mdev || !data || mdev->local_state != CHANNEL_CONNECTED)
+	if (!mbox || !data || mbox->local_state != CHANNEL_CONNECTED)
 		return -EINVAL;
+	mdev = mbox->mdev;
 
-	spin_lock_irqsave(&mdev->tx_lock, flags);
-	addr = mdev->msgram + mdev->mcore_mbox_offset;
-	if (ioread32(addr)) {
-		spin_unlock_irqrestore(&mdev->tx_lock, flags);
+	spin_lock_irqsave(&mbox->tx_lock, flags);
+	addr = mdev->msgram + mbox->mcore_mbox_offset;
+	if (mbox->tx_sent) {
+		spin_unlock_irqrestore(&mbox->tx_lock, flags);
 		return -EBUSY;
 	}
 
-	if (pkt->size + sizeof(pkt->size) > mdev->mcore_mbox_size) {
-		spin_unlock_irqrestore(&mdev->tx_lock, flags);
+	if (pkt->size + sizeof(pkt->size) > mbox->mcore_mbox_size) {
+		spin_unlock_irqrestore(&mbox->tx_lock, flags);
 		return -EINVAL;
 	}
+
 	memcpy32_toio(addr + sizeof(pkt->size), pkt->data, pkt->size);
 	iowrite32(pkt->size, addr);
-	mdev->tx_sent = true;
+	mbox->tx_sent = true;
+	for (i = 0; i < mbox->ctrl.num_chans; i++) {
+		if (chan == &mbox->ctrl.chans[i])
+			mbox->idx_in_flight = i;
+	}
 	send_irq(mdev);
-	qmp_schedule_tx_timeout(mdev);
-	spin_unlock_irqrestore(&mdev->tx_lock, flags);
+	qmp_schedule_tx_timeout(mbox);
+	spin_unlock_irqrestore(&mbox->tx_lock, flags);
 	return 0;
 }
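For context, a hedged sketch of the consumer side of this transmit path through the generic mailbox client API. The helper names, client device, channel index and timeout are illustrative; struct qmp_pkt is the size/data pair that qmp_send_data() above consumes, taken from the driver's QMP header.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

static void example_qmp_tx_done(struct mbox_client *cl, void *mssg, int r)
{
	if (r)
		dev_err(cl->dev, "qmp tx failed: %d\n", r);
}

static int example_qmp_send(struct device *dev, void *payload, u32 len)
{
	struct qmp_pkt pkt = { .size = len, .data = payload };
	struct mbox_client cl = {
		.dev = dev,
		.tx_block = true,
		.tx_tout = 1000,	/* ms, illustrative */
		.tx_done = example_qmp_tx_done,
	};
	struct mbox_chan *chan;
	int ret;

	chan = mbox_request_channel(&cl, 0);	/* first "mboxes" entry */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, &pkt);	/* ends up in qmp_send_data() */
	mbox_free_channel(chan);

	return ret < 0 ? ret : 0;
}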
 
@@ -367,16 +387,23 @@
  */
 static void qmp_shutdown(struct mbox_chan *chan)
 {
-	struct qmp_device *mdev = chan->con_priv;
+	struct qmp_mbox *mbox = chan->con_priv;
 
-	mutex_lock(&mdev->state_lock);
-	if (mdev->local_state != LINK_DISCONNECTED) {
-		mdev->local_state = LOCAL_DISCONNECTING;
-		set_mcore_ch(mdev, QMP_MBOX_CH_DISCONNECTED);
-		send_irq(mdev);
+	mutex_lock(&mbox->state_lock);
+	mbox->num_shutdown++;
+	if (mbox->num_shutdown < mbox->num_assigned) {
+		mutex_unlock(&mbox->state_lock);
+		return;
 	}
-	mdev->ch_in_use = false;
-	mutex_unlock(&mdev->state_lock);
+
+	if (mbox->local_state != LINK_DISCONNECTED) {
+		mbox->local_state = LOCAL_DISCONNECTING;
+		set_mcore_ch(mbox, QMP_MBOX_CH_DISCONNECTED);
+		send_irq(mbox->mdev);
+	}
+	mbox->num_shutdown = 0;
+	mbox->num_assigned = 0;
+	mutex_unlock(&mbox->state_lock);
 }
 
 /**
@@ -396,33 +423,34 @@
 /**
  * qmp_recv_data() - received notification that data is available in the
  *			mailbox. Copy data from mailbox and pass to client.
- * @mdev:	mailbox device that received the notification.
+ * @mbox:		mailbox device that received the notification.
  * @mbox_of:	offset of mailbox from msgram start.
  */
-static void qmp_recv_data(struct qmp_device *mdev, u32 mbox_of)
+static void qmp_recv_data(struct qmp_mbox *mbox, u32 mbox_of)
 {
 	void __iomem *addr;
 	struct qmp_pkt *pkt;
 
-	addr = mdev->msgram + mbox_of;
-	pkt = &mdev->rx_pkt;
+	addr = mbox->mdev->msgram + mbox_of;
+	pkt = &mbox->rx_pkt;
 	pkt->size = ioread32(addr);
 
-	if (pkt->size > mdev->mcore_mbox_size)
+	if (pkt->size > mbox->mcore_mbox_size)
 		pr_err("%s: Invalid mailbox packet\n", __func__);
 	else {
 		memcpy32_fromio(pkt->data, addr + sizeof(pkt->size), pkt->size);
-		mbox_chan_received_data(&mdev->mbox->chans[0], &pkt);
+		mbox_chan_received_data(&mbox->ctrl.chans[mbox->idx_in_flight],
+				pkt);
 	}
 	iowrite32(0, addr);
-	send_irq(mdev);
+	send_irq(mbox->mdev);
 }
 
 /**
  * init_mcore_state() - initialize the mcore state of a mailbox.
  * @mdev:	mailbox device to be initialized.
  */
-static void init_mcore_state(struct qmp_device *mdev)
+static void init_mcore_state(struct qmp_mbox *mbox)
 {
 	struct channel_desc mcore;
 	u32 offset = offsetof(struct mbox_desc, mcore);
@@ -431,40 +459,60 @@
 	mcore.link_state_ack = QMP_MBOX_LINK_DOWN;
 	mcore.ch_state = QMP_MBOX_CH_DISCONNECTED;
 	mcore.ch_state_ack = QMP_MBOX_CH_DISCONNECTED;
-	mcore.mailbox_size = mdev->mcore_mbox_size;
-	mcore.mailbox_offset = mdev->mcore_mbox_offset;
-	memcpy32_toio(mdev->desc + offset, &mcore, sizeof(mcore));
+	mcore.mailbox_size = mbox->mcore_mbox_size;
+	mcore.mailbox_offset = mbox->mcore_mbox_offset;
+	memcpy32_toio(mbox->desc + offset, &mcore, sizeof(mcore));
+}
+
+/**
+ * qmp_irq_handler() - handle irq from remote entity.
+ * @irq:	irq number for the triggered interrupt.
+ * @priv:	private pointer to qmp mbox device.
+ */
+static irqreturn_t qmp_irq_handler(int irq, void *priv)
+{
+	struct qmp_device *mdev = (struct qmp_device *)priv;
+
+	if (mdev->rx_reset_reg)
+		writel_relaxed(mdev->irq_mask, mdev->rx_reset_reg);
+
+	kthread_queue_work(&mdev->kworker, &mdev->kwork);
+	mdev->rx_irq_count++;
+
+	return IRQ_HANDLED;
 }
 
 /**
  * __qmp_rx_worker() - Handle incoming messages from remote processor.
- * @mdev:	mailbox device that received notification.
+ * @mbox:	mailbox device that received notification.
  */
-static void __qmp_rx_worker(struct qmp_device *mdev)
+static void __qmp_rx_worker(struct qmp_mbox *mbox)
 {
-	u32 msg_len;
+	u32 msg_len, idx;
 	struct mbox_desc desc;
+	struct qmp_device *mdev = mbox->mdev;
+	unsigned long flags;
 
-	memcpy_fromio(&desc, mdev->desc, sizeof(desc));
+	memcpy_fromio(&desc, mbox->desc, sizeof(desc));
 	if (desc.magic != QMP_MAGIC)
 		return;
 
-	mutex_lock(&mdev->state_lock);
-	switch (mdev->local_state) {
+	mutex_lock(&mbox->state_lock);
+	switch (mbox->local_state) {
 	case LINK_DISCONNECTED:
-		mdev->version.version = desc.version;
-		mdev->version.features = desc.features;
-		set_ucore_link_ack(mdev, desc.ucore.link_state);
+		mbox->version.version = desc.version;
+		mbox->version.features = desc.features;
+		set_ucore_link_ack(mbox, desc.ucore.link_state);
 		if (desc.mcore.mailbox_size) {
-			mdev->mcore_mbox_size = desc.mcore.mailbox_size;
-			mdev->mcore_mbox_offset = desc.mcore.mailbox_offset;
+			mbox->mcore_mbox_size = desc.mcore.mailbox_size;
+			mbox->mcore_mbox_offset = desc.mcore.mailbox_offset;
 		}
-		init_mcore_state(mdev);
-		mdev->local_state = LINK_NEGOTIATION;
-		mdev->rx_pkt.data = devm_kzalloc(mdev->dev,
+		init_mcore_state(mbox);
+		mbox->local_state = LINK_NEGOTIATION;
+		mbox->rx_pkt.data = devm_kzalloc(mdev->dev,
 						 desc.ucore.mailbox_size,
 						 GFP_KERNEL);
-		if (!mdev->rx_pkt.data) {
+		if (!mbox->rx_pkt.data) {
 			pr_err("In %s: failed to allocate rx pkt\n", __func__);
 			break;
 		}
@@ -477,8 +525,8 @@
 					__func__);
 			break;
 		}
-		mdev->local_state = LINK_CONNECTED;
-		complete_all(&mdev->link_complete);
+		mbox->local_state = LINK_CONNECTED;
+		complete_all(&mbox->link_complete);
 		break;
 	case LINK_CONNECTED:
 		if (desc.ucore.ch_state == desc.ucore.ch_state_ack) {
@@ -486,23 +534,23 @@
 					__func__);
 			break;
 		}
-		set_ucore_ch_ack(mdev, desc.ucore.ch_state);
+		set_ucore_ch_ack(mbox, desc.ucore.ch_state);
 		send_irq(mdev);
 		break;
 	case LOCAL_CONNECTING:
 		if (desc.mcore.ch_state_ack == QMP_MBOX_CH_CONNECTED &&
 				desc.mcore.ch_state == QMP_MBOX_CH_CONNECTED)
-			mdev->local_state = LOCAL_CONNECTED;
+			mbox->local_state = LOCAL_CONNECTED;
 
 		if (desc.ucore.ch_state != desc.ucore.ch_state_ack) {
-			set_ucore_ch_ack(mdev, desc.ucore.ch_state);
+			set_ucore_ch_ack(mbox, desc.ucore.ch_state);
 			send_irq(mdev);
 		}
-		if (mdev->local_state == LOCAL_CONNECTED &&
+		if (mbox->local_state == LOCAL_CONNECTED &&
 				desc.mcore.ch_state == QMP_MBOX_CH_CONNECTED &&
 				desc.ucore.ch_state == QMP_MBOX_CH_CONNECTED) {
-			mdev->local_state = CHANNEL_CONNECTED;
-			complete_all(&mdev->ch_complete);
+			mbox->local_state = CHANNEL_CONNECTED;
+			complete_all(&mbox->ch_complete);
 		}
 		break;
 	case LOCAL_CONNECTED:
@@ -511,50 +559,58 @@
 					__func__);
 			break;
 		}
-		set_ucore_ch_ack(mdev, desc.ucore.ch_state);
-		mdev->local_state = CHANNEL_CONNECTED;
+		set_ucore_ch_ack(mbox, desc.ucore.ch_state);
+		mbox->local_state = CHANNEL_CONNECTED;
 		send_irq(mdev);
-		complete_all(&mdev->ch_complete);
+		complete_all(&mbox->ch_complete);
 		break;
 	case CHANNEL_CONNECTED:
 		if (desc.ucore.ch_state == QMP_MBOX_CH_DISCONNECTED) {
-			set_ucore_ch_ack(mdev, desc.ucore.ch_state);
-			mdev->local_state = LOCAL_CONNECTED;
+			set_ucore_ch_ack(mbox, desc.ucore.ch_state);
+			mbox->local_state = LOCAL_CONNECTED;
 			send_irq(mdev);
 		}
 
 		msg_len = ioread32(mdev->msgram + desc.ucore.mailbox_offset);
-		if (msg_len)
-			qmp_recv_data(mdev, desc.ucore.mailbox_offset);
+		if (msg_len && !mbox->rx_disabled)
+			qmp_recv_data(mbox, desc.ucore.mailbox_offset);
 
-		if (mdev->tx_sent) {
+		spin_lock_irqsave(&mbox->tx_lock, flags);
+		idx = mbox->idx_in_flight;
+		if (mbox->tx_sent) {
 			msg_len = ioread32(mdev->msgram +
-						mdev->mcore_mbox_offset);
+						mbox->mcore_mbox_offset);
 			if (msg_len == 0) {
-				mdev->tx_sent = false;
-				cancel_delayed_work(&mdev->dwork);
-				mbox_chan_txdone(&mdev->mbox->chans[0], 0);
+				mbox->tx_sent = false;
+				cancel_delayed_work(&mbox->dwork);
+				spin_unlock_irqrestore(&mbox->tx_lock, flags);
+				mbox_chan_txdone(&mbox->ctrl.chans[idx], 0);
+				spin_lock_irqsave(&mbox->tx_lock, flags);
 			}
 		}
+		spin_unlock_irqrestore(&mbox->tx_lock, flags);
 		break;
 	case LOCAL_DISCONNECTING:
 		if (desc.mcore.ch_state_ack == QMP_MBOX_CH_DISCONNECTED &&
 				desc.mcore.ch_state == desc.mcore.ch_state_ack)
-			mdev->local_state = LINK_CONNECTED;
-		reinit_completion(&mdev->ch_complete);
+			mbox->local_state = LINK_CONNECTED;
+		reinit_completion(&mbox->ch_complete);
 		break;
 	default:
 		pr_err("In %s: Local Channel State corrupted\n", __func__);
 	}
-	mutex_unlock(&mdev->state_lock);
+	mutex_unlock(&mbox->state_lock);
 }
 
 static void rx_worker(struct kthread_work *work)
 {
 	struct qmp_device *mdev;
+	struct qmp_mbox *mbox;
 
 	mdev = container_of(work, struct qmp_device, kwork);
-	__qmp_rx_worker(mdev);
+	list_for_each_entry(mbox, &mdev->mboxes, list) {
+		__qmp_rx_worker(mbox);
+	}
 }
 
 /**
@@ -566,48 +622,207 @@
 static struct mbox_chan *qmp_mbox_of_xlate(struct mbox_controller *mbox,
 		const struct of_phandle_args *spec)
 {
-	struct qmp_device *mdev = dev_get_drvdata(mbox->dev);
-	unsigned int channel = spec->args[0];
+	struct qmp_mbox *dev = container_of(mbox, struct qmp_mbox, ctrl);
+	struct mbox_chan *chan;
 
-	if (!mdev || channel >= mbox->num_chans)
-		return ERR_PTR(-EINVAL);
-
-	mutex_lock(&mdev->state_lock);
-	if (mdev->ch_in_use) {
-		pr_err("%s, mbox channel already in use %s\n", __func__,
-								mdev->name);
-		mutex_unlock(&mdev->state_lock);
-		return ERR_PTR(-EBUSY);
+	if (dev->num_assigned >= mbox->num_chans || !dev->ctrl.chans) {
+		pr_err("%s: QMP out of channels\n", __func__);
+		return ERR_PTR(-ENOMEM);
 	}
-	mdev->ch_in_use = true;
-	mutex_unlock(&mdev->state_lock);
-	return &mbox->chans[0];
+
+	mutex_lock(&dev->state_lock);
+	chan = &dev->ctrl.chans[dev->num_assigned++];
+	mutex_unlock(&dev->state_lock);
+
+	return chan;
 }
 
 /**
- * parse_devicetree() - Parse the device tree information for QMP, map io
- *			memory and register for needed interrupts
- * @pdev:	platform device for this driver.
- * @mdev:	mailbox device to hold the device tree configuration.
+ * cleanup_workqueue() - Flush all work and stop the thread for this mailbox.
+ * @mdev:	mailbox device to cleanup.
+ */
+static void cleanup_workqueue(struct qmp_device *mdev)
+{
+	kthread_flush_worker(&mdev->kworker);
+	kthread_stop(mdev->task);
+	mdev->task = NULL;
+}
+
+static int qmp_mbox_remove(struct platform_device *pdev)
+{
+	struct qmp_device *mdev = platform_get_drvdata(pdev);
+	struct qmp_mbox *mbox = NULL;
+
+	disable_irq(mdev->rx_irq_line);
+	cleanup_workqueue(mdev);
+
+	list_for_each_entry(mbox, &mdev->mboxes, list) {
+		mbox_controller_unregister(&mbox->ctrl);
+	}
+	return 0;
+}
+
+/**
+ * get_mbox_num_chans() - Find how many mbox channels need to be allocated
+ *
+ * @node:	device node for this mailbox.
+ *
+ * Return: number of client nodes referencing this mailbox node, at least 1
+ */
+static u32 get_mbox_num_chans(struct device_node *node)
+{
+	int i, j, ret;
+	u32 num_chans = 0;
+	struct device_node *np;
+	struct of_phandle_args p;
+
+	for_each_node_with_property(np, "mboxes") {
+		if (!of_device_is_available(np))
+			continue;
+		i = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
+		for (j = 0; j < i; j++) {
+			ret = of_parse_phandle_with_args(np, "mboxes",
+							"#mbox-cells", j, &p);
+			if (!ret && p.np == node) {
+				num_chans++;
+				break;
+			}
+		}
+	}
+	if (num_chans)
+		return num_chans;
+
+	return 1;
+}
+
+/**
+ * mdev_add_mbox() - Add a mailbox to qmp device based on priority
+ *
+ * @mdev:	qmp device to add mailbox to.
+ * @new:	new mailbox to add to qmp device.
+ */
+static void mdev_add_mbox(struct qmp_device *mdev, struct qmp_mbox *new)
+{
+	struct qmp_mbox *mbox;
+
+	list_for_each_entry(mbox, &mdev->mboxes, list) {
+		if (mbox->priority > new->priority)
+			continue;
+		list_add_tail(&new->list, &mbox->list);
+		return;
+	}
+	list_add_tail(&new->list, &mdev->mboxes);
+}
+
+static struct mbox_chan_ops qmp_mbox_ops = {
+	.startup = qmp_startup,
+	.shutdown = qmp_shutdown,
+	.send_data = qmp_send_data,
+	.last_tx_done = qmp_last_tx_done,
+};
+
+static const struct of_device_id qmp_mbox_match_table[] = {
+	{ .compatible = "qcom,qmp-mbox" },
+	{},
+};
+
+/**
+ * qmp_mbox_init() - Parse the device tree for qmp mailbox and init structure
+ *
+ * @n:		child device node representing a mailbox.
+ * @mdev:	device structure for the edge this mailbox belongs to.
  *
  * Return: 0 on succes or standard Linux error code.
  */
-static int qmp_parse_devicetree(struct platform_device *pdev,
-					struct qmp_device *mdev)
+static int qmp_mbox_init(struct device_node *n, struct qmp_device *mdev)
 {
+	int rc, i;
+	char *key;
+	struct qmp_mbox *mbox;
+	struct mbox_chan *chans;
+	u32 mbox_of, mbox_size, desc_of, priority, num_chans;
+
+	key = "mbox-desc-offset";
+	rc = of_property_read_u32(n, key, &desc_of);
+	if (rc) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		return 0;
+	}
+	key = "priority";
+	rc = of_property_read_u32(n, key, &priority);
+	if (rc) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		return 0;
+	}
+	mbox = devm_kzalloc(mdev->dev, sizeof(*mbox), GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	rc = of_property_read_u32(n, "mbox-offset", &mbox_of);
+	if (!rc)
+		mbox->mcore_mbox_offset = mbox_of;
+	rc = of_property_read_u32(n, "mbox-size", &mbox_size);
+	if (!rc)
+		mbox->mcore_mbox_size = mbox_size;
+
+	mbox->mdev = mdev;
+	mbox->priority = priority;
+	mbox->desc = mdev->msgram + desc_of;
+	num_chans = get_mbox_num_chans(n);
+	mbox->rx_disabled = (num_chans > 1) ? true : false;
+	chans = devm_kzalloc(mdev->dev, sizeof(*chans) * num_chans, GFP_KERNEL);
+	if (!chans)
+		return -ENOMEM;
+
+	for (i = 0; i < num_chans; i++)
+		chans[i].con_priv = mbox;
+
+	mbox->ctrl.dev = mdev->dev;
+	mbox->ctrl.ops = &qmp_mbox_ops;
+	mbox->ctrl.chans = chans;
+	mbox->ctrl.num_chans = num_chans;
+	mbox->ctrl.txdone_irq = true;
+	mbox->ctrl.txdone_poll = false;
+	mbox->ctrl.of_xlate = qmp_mbox_of_xlate;
+
+	rc = mbox_controller_register(&mbox->ctrl);
+	if (rc) {
+		pr_err("%s: failed to register mbox controller %d\n", __func__,
+				rc);
+		return rc;
+	}
+	spin_lock_init(&mbox->tx_lock);
+	mutex_init(&mbox->state_lock);
+	mbox->local_state = LINK_DISCONNECTED;
+	init_completion(&mbox->link_complete);
+	init_completion(&mbox->ch_complete);
+	mbox->tx_sent = false;
+	mbox->num_assigned = 0;
+	INIT_DELAYED_WORK(&mbox->dwork, qmp_notify_timeout);
+
+	mdev_add_mbox(mdev, mbox);
+	return 0;
+}
+
+
+/**
+ * qmp_edge_init() - Parse the device tree information for QMP, map io
+ *			memory and register for needed interrupts
+ * @pdev:	platform device for this driver.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int qmp_edge_init(struct platform_device *pdev)
+{
+	struct qmp_device *mdev = platform_get_drvdata(pdev);
 	struct device_node *node = pdev->dev.of_node;
+	struct resource *msgram_r, *tx_irq_reg_r;
 	char *key;
 	int rc;
-	const char *subsys_name;
-	u32 rx_irq_line, tx_irq_mask;
-	u32 desc_of = 0;
-	u32 mbox_of = 0;
-	u32 mbox_size = 0;
-	struct resource *msgram_r, *tx_irq_reg_r;
 
 	key = "label";
-	subsys_name = of_get_property(node, key, NULL);
-	if (!subsys_name) {
+	mdev->name = of_get_property(node, key, NULL);
+	if (!mdev->name) {
 		pr_err("%s: missing key %s\n", __func__, key);
 		return -ENODEV;
 	}
@@ -627,143 +842,60 @@
 	}
 
 	key = "qcom,irq-mask";
-	rc = of_property_read_u32(node, key, &tx_irq_mask);
+	rc = of_property_read_u32(node, key, &mdev->irq_mask);
 	if (rc) {
 		pr_err("%s: missing key %s\n", __func__, key);
 		return -ENODEV;
 	}
 
 	key = "interrupts";
-	rx_irq_line = irq_of_parse_and_map(node, 0);
-	if (!rx_irq_line) {
+	mdev->rx_irq_line = irq_of_parse_and_map(node, 0);
+	if (!mdev->rx_irq_line) {
 		pr_err("%s: missing key %s\n", __func__, key);
 		return -ENODEV;
 	}
 
-	key = "mbox-desc-offset";
-	rc = of_property_read_u32(node, key, &desc_of);
-	if (rc) {
-		pr_err("%s: missing key %s\n", __func__, key);
-		return -ENODEV;
-	}
-
-	key = "mbox-offset";
-	rc = of_property_read_u32(node, key, &mbox_of);
-	if (!rc)
-		mdev->mcore_mbox_offset = mbox_of;
-
-	key = "mbox-size";
-	rc = of_property_read_u32(node, key, &mbox_size);
-	if (!rc)
-		mdev->mcore_mbox_size = mbox_size;
-
-	mdev->name = subsys_name;
-	mdev->msgram = devm_ioremap_nocache(&pdev->dev, msgram_r->start,
-						resource_size(msgram_r));
-	if (!mdev->msgram)
-		return -ENOMEM;
-
-	mdev->desc = mdev->msgram + desc_of;
-	if (!mdev->desc)
-		return -ENOMEM;
-
-	mdev->irq_mask = tx_irq_mask;
+	mdev->dev = &pdev->dev;
 	mdev->tx_irq_reg = devm_ioremap_nocache(&pdev->dev, tx_irq_reg_r->start,
 						resource_size(tx_irq_reg_r));
-	if (!mdev->tx_irq_reg)
-		return -ENOMEM;
+	mdev->msgram = devm_ioremap_nocache(&pdev->dev, msgram_r->start,
+						resource_size(msgram_r));
+	if (!mdev->msgram || !mdev->tx_irq_reg)
+		return -EIO;
 
-	mdev->rx_irq_line = rx_irq_line;
+	INIT_LIST_HEAD(&mdev->mboxes);
 	return 0;
 }
 
-/**
- * cleanup_workqueue() - Flush all work and stop the thread for this mailbox.
- * @mdev:	mailbox device to cleanup.
- */
-static void cleanup_workqueue(struct qmp_device *mdev)
-{
-	kthread_flush_worker(&mdev->kworker);
-	kthread_stop(mdev->task);
-	mdev->task = NULL;
-}
-
-static struct mbox_chan_ops qmp_mbox_ops = {
-	.startup = qmp_startup,
-	.shutdown = qmp_shutdown,
-	.send_data = qmp_send_data,
-	.last_tx_done = qmp_last_tx_done,
-};
-
-static const struct of_device_id qmp_mbox_match_table[] = {
-	{ .compatible = "qcom,qmp-mbox" },
-	{},
-};
-
 static int qmp_mbox_probe(struct platform_device *pdev)
 {
-	struct device_node *node = pdev->dev.of_node;
-	struct mbox_controller *mbox;
+	struct device_node *edge_node = pdev->dev.of_node;
 	struct qmp_device *mdev;
-	struct mbox_chan *chans;
 	int ret = 0;
 
 	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
 	if (!mdev)
 		return -ENOMEM;
-	platform_set_drvdata(pdev, mdev);
 
-	ret = qmp_parse_devicetree(pdev, mdev);
+	platform_set_drvdata(pdev, mdev);
+	ret = qmp_edge_init(pdev);
 	if (ret)
 		return ret;
 
-	mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
-	if (!mbox)
-		return -ENOMEM;
+	ret = qmp_mbox_init(edge_node, mdev);
+	if (ret)
+		return ret;
 
-	chans = devm_kzalloc(&pdev->dev, sizeof(*chans) * QMP_NUM_CHANS,
-								GFP_KERNEL);
-	if (!chans)
-		return -ENOMEM;
-
-	mbox->dev = &pdev->dev;
-	mbox->ops = &qmp_mbox_ops;
-	mbox->chans = chans;
-	mbox->chans[0].con_priv = mdev;
-	mbox->num_chans = QMP_NUM_CHANS;
-	mbox->txdone_irq = true;
-	mbox->txdone_poll = false;
-	mbox->of_xlate = qmp_mbox_of_xlate;
-
-	mdev->dev = &pdev->dev;
-	mdev->mbox = mbox;
-	spin_lock_init(&mdev->tx_lock);
-	mutex_init(&mdev->state_lock);
-	mdev->local_state = LINK_DISCONNECTED;
 	kthread_init_work(&mdev->kwork, rx_worker);
 	kthread_init_worker(&mdev->kworker);
 	mdev->task = kthread_run(kthread_worker_fn, &mdev->kworker, "qmp_%s",
 								mdev->name);
-	init_completion(&mdev->link_complete);
-	init_completion(&mdev->ch_complete);
-	mdev->tx_sent = false;
-	mdev->ch_in_use = false;
-	INIT_DELAYED_WORK(&mdev->dwork, qmp_notify_timeout);
-
-	ret = mbox_controller_register(mbox);
-	if (ret) {
-		cleanup_workqueue(mdev);
-		pr_err("%s: failed to register mbox controller %d\n", __func__,
-									ret);
-		return ret;
-	}
 
 	ret = devm_request_irq(&pdev->dev, mdev->rx_irq_line, qmp_irq_handler,
 		IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
-		node->name, mdev);
+		edge_node->name, mdev);
 	if (ret < 0) {
-		cleanup_workqueue(mdev);
-		mbox_controller_unregister(mdev->mbox);
+		qmp_mbox_remove(pdev);
 		pr_err("%s: request irq on %d failed: %d\n", __func__,
 							mdev->rx_irq_line, ret);
 		return ret;
@@ -773,19 +905,11 @@
 		pr_err("%s: enable_irq_wake on %d failed: %d\n", __func__,
 							mdev->rx_irq_line, ret);
 
+	/* Trigger RX */
 	qmp_irq_handler(0, mdev);
 	return 0;
 }
 
-static int qmp_mbox_remove(struct platform_device *pdev)
-{
-	struct qmp_device *mdev = platform_get_drvdata(pdev);
-
-	cleanup_workqueue(mdev);
-	mbox_controller_unregister(mdev->mbox);
-	return 0;
-}
-
 static struct platform_driver qmp_mbox_driver = {
 	.probe = qmp_mbox_probe,
 	.remove = qmp_mbox_remove,
diff --git a/drivers/media/dvb-core/demux.h b/drivers/media/dvb-core/demux.h
index aeda2b6..fbe0165 100644
--- a/drivers/media/dvb-core/demux.h
+++ b/drivers/media/dvb-core/demux.h
@@ -40,6 +40,8 @@
  * Common definitions
  */
 
+#define DMX_EVENT_QUEUE_SIZE 500 /* number of events */
+
 /*
  * DMX_MAX_FILTER_SIZE: Maximum length (in bytes) of a section/PES filter.
  */
@@ -61,6 +63,104 @@
 #endif
 
 /*
+ * enum dmx_success: Success codes for the Demux Callback API.
+ */
+enum dmx_success {
+	DMX_OK = 0, /* Received Ok */
+	DMX_OK_PES_END, /* Received OK, data reached end of PES packet */
+	DMX_OK_PCR, /* Received OK, data with new PCR/STC pair */
+	DMX_OK_EOS, /* Received OK, reached End-of-Stream (EOS) */
+	DMX_OK_MARKER, /* Received OK, reached a data Marker */
+	DMX_LENGTH_ERROR, /* Incorrect length */
+	DMX_OVERRUN_ERROR, /* Receiver ring buffer overrun */
+	DMX_CRC_ERROR, /* Incorrect CRC */
+	DMX_FRAME_ERROR, /* Frame alignment error */
+	DMX_FIFO_ERROR, /* Receiver FIFO overrun */
+	DMX_MISSED_ERROR, /* Receiver missed packet */
+	DMX_OK_DECODER_BUF, /* Received OK, new ES data in decoder buffer */
+	DMX_OK_IDX, /* Received OK, new index event */
+	DMX_OK_SCRAMBLING_STATUS, /* Received OK, new scrambling status */
+};
+
+
+/*
+ * struct dmx_data_ready: Parameters for event notification callback.
+ * Event notification notifies demux device that data is written
+ * and available in the device's output buffer or provides
+ * notification on errors and other events. In the latter case
+ * data_length is zero.
+ */
+struct dmx_data_ready {
+	enum dmx_success status;
+
+	/*
+	 * data_length may be 0 in case of DMX_OK_PES_END or DMX_OK_EOS
+	 * and in non-DMX_OK_XXX events. In DMX_OK_PES_END,
+	 * data_length is for data coming after the end of PES.
+	 */
+	int data_length;
+
+	union {
+		struct {
+			int start_gap;
+			int actual_length;
+			int disc_indicator_set;
+			int pes_length_mismatch;
+			u64 stc;
+			u32 tei_counter;
+			u32 cont_err_counter;
+			u32 ts_packets_num;
+		} pes_end;
+
+		struct {
+			u64 pcr;
+			u64 stc;
+			int disc_indicator_set;
+		} pcr;
+
+		struct {
+			int handle;
+			int cookie;
+			u32 offset;
+			u32 len;
+			int pts_exists;
+			u64 pts;
+			int dts_exists;
+			u64 dts;
+			u32 tei_counter;
+			u32 cont_err_counter;
+			u32 ts_packets_num;
+			u32 ts_dropped_bytes;
+			u64 stc;
+		} buf;
+
+		struct {
+			u64 id;
+		} marker;
+
+		struct dmx_index_event_info idx_event;
+		struct dmx_scrambling_status_event_info scrambling_bits;
+	};
+};
+
+/*
+ * struct data_buffer: Parameters of buffer allocated by
+ * demux device for input/output. Can be used to directly map the
+ * demux-device buffer to HW output if HW supports it.
+ */
+struct data_buffer {
+	/* dvb_ringbuffer managed by demux-device */
+	const struct dvb_ringbuffer *ringbuff;
+
+
+	/*
+	 * Private handle returned by kernel demux when
+	 * map_buffer is called in case external buffer
+	 * is used. NULL if buffer is allocated internally.
+	 */
+	void *priv_handle;
+};
+/*
  * TS packet reception
  */
 
@@ -95,10 +195,18 @@
  * Using this API, the client can set the filtering properties to start/stop
  * filtering TS packets on a particular TS feed.
  */
+struct dmx_ts_feed;
+
+typedef int (*dmx_ts_data_ready_cb)(
+		struct dmx_ts_feed *source,
+		struct dmx_data_ready *dmx_data_ready);
+
 struct dmx_ts_feed {
 	int is_filtering;
 	struct dmx_demux *parent;
+	struct data_buffer buffer;
 	void *priv;
+	struct dmx_decoder_buffers *decoder_buffers;
 	int (*set)(struct dmx_ts_feed *feed,
 		   u16 pid,
 		   int type,
@@ -107,6 +215,34 @@
 		   ktime_t timeout);
 	int (*start_filtering)(struct dmx_ts_feed *feed);
 	int (*stop_filtering)(struct dmx_ts_feed *feed);
+	int (*set_video_codec)(struct dmx_ts_feed *feed,
+				enum dmx_video_codec video_codec);
+	int (*set_idx_params)(struct dmx_ts_feed *feed,
+				struct dmx_indexing_params *idx_params);
+	int (*get_decoder_buff_status)(
+			struct dmx_ts_feed *feed,
+			struct dmx_buffer_status *dmx_buffer_status);
+	int (*reuse_decoder_buffer)(
+			struct dmx_ts_feed *feed,
+			int cookie);
+	int (*data_ready_cb)(struct dmx_ts_feed *feed,
+			dmx_ts_data_ready_cb callback);
+	int (*notify_data_read)(struct dmx_ts_feed *feed,
+			u32 bytes_num);
+	int (*set_tsp_out_format)(struct dmx_ts_feed *feed,
+				enum dmx_tsp_format_t tsp_format);
+	int (*set_secure_mode)(struct dmx_ts_feed *feed,
+				struct dmx_secure_mode *sec_mode);
+	int (*set_cipher_ops)(struct dmx_ts_feed *feed,
+				struct dmx_cipher_operations *cipher_ops);
+	int (*oob_command)(struct dmx_ts_feed *feed,
+			struct dmx_oob_command *cmd);
+	int (*ts_insertion_init)(struct dmx_ts_feed *feed);
+	int (*ts_insertion_terminate)(struct dmx_ts_feed *feed);
+	int (*ts_insertion_insert_buffer)(struct dmx_ts_feed *feed,
+			char *data, size_t size);
+	int (*get_scrambling_bits)(struct dmx_ts_feed *feed, u8 *value);
+	int (*flush_buffer)(struct dmx_ts_feed *feed, size_t length);
 };
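As an illustration of the new callback plumbing, here is a hedged sketch of a dmx_ts_data_ready_cb implementation that a demux client might register through the data_ready_cb op above; the handler name and body are illustrative only and use only the fields introduced in this header.

static int example_ts_data_ready(struct dmx_ts_feed *source,
				 struct dmx_data_ready *dmx_data_ready)
{
	switch (dmx_data_ready->status) {
	case DMX_OK:
		/* dmx_data_ready->data_length bytes are now in the output */
		break;
	case DMX_OK_PES_END:
		pr_debug("PES complete, actual length %d\n",
			 dmx_data_ready->pes_end.actual_length);
		break;
	case DMX_OVERRUN_ERROR:
		/* output ring buffer overflowed; caller may flush and resume */
		break;
	default:
		break;
	}
	return 0;
}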
 
 /*
@@ -131,14 +267,21 @@
  * corresponding bits are compared. The filter only accepts sections that are
  * equal to filter_value in all the tested bit positions.
  */
+
+struct dmx_section_feed;
 struct dmx_section_filter {
 	u8 filter_value[DMX_MAX_FILTER_SIZE];
 	u8 filter_mask[DMX_MAX_FILTER_SIZE];
 	u8 filter_mode[DMX_MAX_FILTER_SIZE];
 	struct dmx_section_feed *parent; /* Back-pointer */
+	struct data_buffer buffer;
 	void *priv; /* Pointer to private data of the API client */
 };
 
+typedef int (*dmx_section_data_ready_cb)(
+		struct dmx_section_filter *source,
+		struct dmx_data_ready *dmx_data_ready);
+
 /**
  * struct dmx_section_feed - Structure that contains a section feed filter
  *
@@ -189,8 +332,24 @@
 			      struct dmx_section_filter *filter);
 	int (*start_filtering)(struct dmx_section_feed *feed);
 	int (*stop_filtering)(struct dmx_section_feed *feed);
+	int (*data_ready_cb)(struct dmx_section_feed *feed,
+			dmx_section_data_ready_cb callback);
+	int (*notify_data_read)(struct dmx_section_filter *filter,
+			u32 bytes_num);
+	int (*set_secure_mode)(struct dmx_section_feed *feed,
+				struct dmx_secure_mode *sec_mode);
+	int (*set_cipher_ops)(struct dmx_section_feed *feed,
+				struct dmx_cipher_operations *cipher_ops);
+	int (*oob_command)(struct dmx_section_feed *feed,
+				struct dmx_oob_command *cmd);
+	int (*get_scrambling_bits)(struct dmx_section_feed *feed, u8 *value);
+	int (*flush_buffer)(struct dmx_section_feed *feed, size_t length);
 };
 
+/*
+ * Callback functions
+ */
+
 /**
  * typedef dmx_ts_cb - DVB demux TS filter callback function prototype
  *
@@ -295,9 +454,19 @@
 			      size_t buffer2_len,
 			      struct dmx_section_filter *source);
 
-/*
- * DVB Front-End
- */
+typedef int (*dmx_ts_fullness) (
+				struct dmx_ts_feed *source,
+				int required_space,
+				int wait);
+
+typedef int (*dmx_section_fullness) (
+				struct dmx_section_filter *source,
+				int required_space,
+				int wait);
+
+/*--------------------------------------------------------------------------*/
+/* DVB Front-End */
+/*--------------------------------------------------------------------------*/
 
 /**
  * enum dmx_frontend_source - Used to identify the type of frontend
@@ -312,6 +481,13 @@
 enum dmx_frontend_source {
 	DMX_MEMORY_FE,
 	DMX_FRONTEND_0,
+	DMX_FRONTEND_1,
+	DMX_FRONTEND_2,
+	DMX_FRONTEND_3,
+	DMX_STREAM_0,    /* external stream input, e.g. LVDS */
+	DMX_STREAM_1,
+	DMX_STREAM_2,
+	DMX_STREAM_3
 };
 
 /**
@@ -345,14 +521,24 @@
  */
 enum dmx_demux_caps {
 	DMX_TS_FILTERING = 1,
+	DMX_PES_FILTERING = 2,
 	DMX_SECTION_FILTERING = 4,
 	DMX_MEMORY_BASED_FILTERING = 8,
+	DMX_CRC_CHECKING = 16,
+	DMX_TS_DESCRAMBLING = 32
 };
 
 /*
  * Demux resource type identifier.
  */
 
+/*
+ * DMX_FE_ENTRY(): Casts elements in the list of registered
+ * front-ends from the generic type struct list_head
+ * to the type * struct dmx_frontend.
+ *
+ */
+
 /**
  * DMX_FE_ENTRY - Casts elements in the list of registered
  *		  front-ends from the generic type struct list_head
@@ -557,6 +743,10 @@
 	enum dmx_demux_caps capabilities;
 	struct dmx_frontend *frontend;
 	void *priv;
+	struct data_buffer dvr_input; /* DVR input buffer */
+	int dvr_input_protected;
+	struct dentry *debugfs_demux_dir; /* debugfs dir */
+
 	int (*open)(struct dmx_demux *demux);
 	int (*close)(struct dmx_demux *demux);
 	int (*write)(struct dmx_demux *demux, const char __user *buf,
@@ -582,15 +772,31 @@
 
 	int (*get_pes_pids)(struct dmx_demux *demux, u16 *pids);
 
-	/* private: */
+	int (*get_caps)(struct dmx_demux *demux, struct dmx_caps *caps);
 
-	/*
-	 * Only used at av7110, to read some data from firmware.
-	 * As this was never documented, we have no clue about what's
-	 * there, and its usage on other drivers aren't encouraged.
-	 */
+	int (*set_source)(struct dmx_demux *demux, const dmx_source_t *src);
+
+	int (*set_tsp_format)(struct dmx_demux *demux,
+				enum dmx_tsp_format_t tsp_format);
+
+	int (*set_playback_mode)(struct dmx_demux *demux,
+				 enum dmx_playback_mode_t mode,
+				 dmx_ts_fullness ts_fullness_callback,
+				 dmx_section_fullness sec_fullness_callback);
+
+	int (*write_cancel)(struct dmx_demux *demux);
+
 	int (*get_stc)(struct dmx_demux *demux, unsigned int num,
 		       u64 *stc, unsigned int *base);
+
+	int (*map_buffer)(struct dmx_demux *demux,
+			struct dmx_buffer *dmx_buffer,
+			void **priv_handle, void **mem);
+
+	int (*unmap_buffer)(struct dmx_demux *demux,
+			void *priv_handle);
+
+	int (*get_tsp_size)(struct dmx_demux *demux);
 };
 
 #endif /* #ifndef __DEMUX_H */
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 7b67e1d..e868f92 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -28,15 +28,74 @@
 #include <linux/poll.h>
 #include <linux/ioctl.h>
 #include <linux/wait.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/compat.h>
+#include <linux/mm.h>
 #include "dmxdev.h"
 
-static int debug;
+static int overflow_auto_flush = 1;
+module_param(overflow_auto_flush, int, 0644);
+MODULE_PARM_DESC(overflow_auto_flush,
+	"Automatically flush buffer on overflow (default: on)");
 
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
+#define DMX_DEFAULT_DECODER_BUFFER_SIZE (32768)
 
-#define dprintk	if (debug) printk
+static inline int dvb_dmxdev_verify_buffer_size(u32 size, u32 max_size,
+	u32 size_align)
+{
+	if (size_align)
+		return size <= max_size && !(size % size_align);
+	else
+		return size <= max_size;
+}
+
+static int dvb_filter_verify_buffer_size(struct dmxdev_filter *filter)
+{
+	struct dmx_caps caps;
+	size_t size = filter->buffer.size;
+
+	/*
+	 * For backward compatibility, if no demux capabilities can
+	 * be retrieved assume size is ok.
+	 * Decoder filter buffer size is verified when decoder buffer is set.
+	 */
+	if (filter->dev->demux->get_caps) {
+		filter->dev->demux->get_caps(filter->dev->demux, &caps);
+
+		if (filter->type == DMXDEV_TYPE_SEC)
+			return dvb_dmxdev_verify_buffer_size(
+				size,
+				caps.section.max_size,
+				caps.section.size_alignment);
+
+		if (filter->params.pes.output == DMX_OUT_TAP)
+			return dvb_dmxdev_verify_buffer_size(
+				size,
+				caps.pes.max_size,
+				caps.pes.size_alignment);
+
+		size = (filter->params.pes.output == DMX_OUT_TS_TAP) ?
+			filter->dev->dvr_buffer.size : size;
+
+		if (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP ||
+			filter->params.pes.output == DMX_OUT_TS_TAP) {
+			if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188)
+				return dvb_dmxdev_verify_buffer_size(
+					size,
+					caps.recording_188_tsp.max_size,
+					caps.recording_188_tsp.size_alignment);
+
+			return dvb_dmxdev_verify_buffer_size(
+					size,
+					caps.recording_192_tsp.max_size,
+					caps.recording_192_tsp.size_alignment);
+		}
+	}
+
+	return 1;
+}
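A small worked example of the size check above, using made-up capability limits (a 188-byte alignment corresponds to one TS packet):

static void example_buffer_size_check(void)
{
	u32 max_size = 4 * 1024 * 1024;		/* hypothetical cap */
	u32 align = 188;			/* one 188-byte TS packet */

	/* 2 MiB is rejected: 2097152 % 188 == 12 */
	pr_info("2 MiB ok? %d\n",
		dvb_dmxdev_verify_buffer_size(2 * 1024 * 1024, max_size, align));
	/* 188 * 11155 = 2097140 is accepted: within the cap and aligned */
	pr_info("2097140 ok? %d\n",
		dvb_dmxdev_verify_buffer_size(2097140, max_size, align));
}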
 
 static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
 				   const u8 *src, size_t len)
@@ -50,16 +109,400 @@
 
 	free = dvb_ringbuffer_free(buf);
 	if (len > free) {
-		dprintk("dmxdev: buffer overflow\n");
+		pr_debug("dmxdev: buffer overflow\n");
 		return -EOVERFLOW;
 	}
 
 	return dvb_ringbuffer_write(buf, src, len);
 }
 
-static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
-				      int non_blocking, char __user *buf,
-				      size_t count, loff_t *ppos)
+static inline void dvb_dmxdev_notify_data_read(struct dmxdev_filter *filter,
+					int bytes_read)
+{
+	if (!filter)
+		return;
+
+	if (filter->type == DMXDEV_TYPE_SEC) {
+		if (filter->feed.sec.feed->notify_data_read)
+			filter->feed.sec.feed->notify_data_read(
+						filter->filter.sec,
+						bytes_read);
+	} else {
+		struct dmxdev_feed *feed;
+
+		/*
+		 * All feeds of same demux-handle share the same output
+		 * buffer, it is enough to notify on the buffer status
+		 * on one of the feeds
+		 */
+		feed = list_first_entry(&filter->feed.ts,
+					struct dmxdev_feed, next);
+
+		if (feed->ts->notify_data_read)
+			feed->ts->notify_data_read(
+						feed->ts,
+						bytes_read);
+	}
+}
+
+static inline u32 dvb_dmxdev_advance_event_idx(u32 index)
+{
+	index++;
+	if (index >= DMX_EVENT_QUEUE_SIZE)
+		index = 0;
+
+	return index;
+}
+
+static inline int dvb_dmxdev_events_is_full(struct dmxdev_events_queue *events)
+{
+	int new_write_index;
+
+	new_write_index = dvb_dmxdev_advance_event_idx(events->write_index);
+	if (new_write_index == events->read_index)
+		return 1;
+
+	return 0;
+
+}
+
+static inline void dvb_dmxdev_flush_events(struct dmxdev_events_queue *events)
+{
+	events->read_index = 0;
+	events->write_index = 0;
+	events->notified_index = 0;
+	events->bytes_read_no_event = 0;
+	events->current_event_data_size = 0;
+	events->wakeup_events_counter = 0;
+}
+
+static inline void dvb_dmxdev_flush_output(struct dvb_ringbuffer *buffer,
+					struct dmxdev_events_queue *events)
+{
+	dvb_dmxdev_flush_events(events);
+	dvb_ringbuffer_flush(buffer);
+}
+
+static int dvb_dmxdev_update_pes_event(struct dmx_filter_event *event,
+					int bytes_read)
+{
+	int start_delta;
+
+	if (event->params.pes.total_length <= bytes_read)
+		return event->params.pes.total_length;
+
+	/*
+	 * only part of the data relevant to this event was read.
+	 * Update the event's information to reflect the new state.
+	 */
+	event->params.pes.total_length -= bytes_read;
+
+	start_delta = event->params.pes.start_offset -
+		event->params.pes.base_offset;
+
+	if (bytes_read <= start_delta) {
+		event->params.pes.base_offset +=
+			bytes_read;
+	} else {
+		start_delta =
+			bytes_read - start_delta;
+
+		event->params.pes.start_offset += start_delta;
+		event->params.pes.actual_length -= start_delta;
+
+		event->params.pes.base_offset =
+			event->params.pes.start_offset;
+	}
+
+	return 0;
+}
+
+static int dvb_dmxdev_update_section_event(struct dmx_filter_event *event,
+					int bytes_read)
+{
+	int start_delta;
+
+	if (event->params.section.total_length <= bytes_read)
+		return event->params.section.total_length;
+
+	/*
+	 * only part of the data relevant to this event was read.
+	 * Update the event's information to reflect the new state.
+	 */
+
+	event->params.section.total_length -= bytes_read;
+
+	start_delta = event->params.section.start_offset -
+		event->params.section.base_offset;
+
+	if (bytes_read <= start_delta) {
+		event->params.section.base_offset +=
+			bytes_read;
+	} else {
+		start_delta =
+			bytes_read - start_delta;
+
+		event->params.section.start_offset += start_delta;
+		event->params.section.actual_length -= start_delta;
+
+		event->params.section.base_offset =
+			event->params.section.start_offset;
+	}
+
+	return 0;
+}
+
+static int dvb_dmxdev_update_rec_event(struct dmx_filter_event *event,
+					int bytes_read)
+{
+	if (event->params.recording_chunk.size <= bytes_read)
+		return event->params.recording_chunk.size;
+
+	/*
+	 * only part of the data relevant to this event was read.
+	 * Update the event's information to reflect the new state.
+	 */
+	event->params.recording_chunk.size -= bytes_read;
+	event->params.recording_chunk.offset += bytes_read;
+
+	return 0;
+}
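A worked example of the partial-read bookkeeping, using the recording-chunk variant; the field names and event type are the ones referenced above, the numbers and the helper name are made up.

static void example_partial_read(void)
{
	struct dmx_filter_event ev = {
		.type = DMX_EVENT_NEW_REC_CHUNK,
		.params.recording_chunk = { .offset = 0, .size = 1000 },
	};

	/* 300 of the 1000 bytes were consumed: the event shrinks in place */
	WARN_ON(dvb_dmxdev_update_rec_event(&ev, 300) != 0);
	WARN_ON(ev.params.recording_chunk.offset != 300);
	WARN_ON(ev.params.recording_chunk.size != 700);

	/*
	 * The remaining 700 bytes were consumed: the return value tells the
	 * caller how many bytes this event accounted for, so the event can
	 * be dropped from the queue.
	 */
	WARN_ON(dvb_dmxdev_update_rec_event(&ev, 700) != 700);
}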
+
+static int dvb_dmxdev_add_event(struct dmxdev_events_queue *events,
+					struct dmx_filter_event *event)
+{
+	int res;
+	int new_write_index;
+	int data_event;
+
+	/* Check if the event is disabled */
+	if (events->event_mask.disable_mask & event->type)
+		return 0;
+
+	/* Check if the user already read the data of the event being added */
+	if (events->bytes_read_no_event) {
+		data_event = 1;
+
+		if (event->type == DMX_EVENT_NEW_PES)
+			res = dvb_dmxdev_update_pes_event(event,
+						events->bytes_read_no_event);
+		else if (event->type == DMX_EVENT_NEW_SECTION)
+			res = dvb_dmxdev_update_section_event(event,
+						events->bytes_read_no_event);
+		else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
+			res = dvb_dmxdev_update_rec_event(event,
+						events->bytes_read_no_event);
+		else
+			data_event = 0;
+
+		if (data_event) {
+			if (res) {
+				/*
+				 * Data relevant to this event was fully
+				 * consumed already, discard event.
+				 */
+				events->bytes_read_no_event -= res;
+				return 0;
+			}
+			events->bytes_read_no_event = 0;
+		} else {
+			/*
+			 * data was read beyond the non-data event,
+			 * making it not relevant anymore
+			 */
+			return 0;
+		}
+	}
+
+	new_write_index = dvb_dmxdev_advance_event_idx(events->write_index);
+	if (new_write_index == events->read_index) {
+		pr_err("dmxdev: events overflow\n");
+		return -EOVERFLOW;
+	}
+
+	events->queue[events->write_index] = *event;
+	events->write_index = new_write_index;
+
+	if (!(events->event_mask.no_wakeup_mask & event->type))
+		events->wakeup_events_counter++;
+
+	return 0;
+}
+
+static int dvb_dmxdev_remove_event(struct dmxdev_events_queue *events,
+					struct dmx_filter_event *event)
+{
+	if (events->notified_index == events->write_index)
+		return -ENODATA;
+
+	*event = events->queue[events->notified_index];
+
+	events->notified_index =
+		dvb_dmxdev_advance_event_idx(events->notified_index);
+
+	if (!(events->event_mask.no_wakeup_mask & event->type))
+		events->wakeup_events_counter--;
+
+	return 0;
+}
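The queue above is driven by three cursors: entries between read_index and notified_index were already delivered to the user but their payload is still pending in the output buffer; entries between notified_index and write_index are queued but not yet delivered; one slot is kept free so a full queue is distinguishable from an empty one. A rough sketch of those invariants (helper names are made up, the conditions mirror the functions above):

static bool example_events_pending(const struct dmxdev_events_queue *events)
{
	/* same condition dvb_dmxdev_remove_event() uses for -ENODATA */
	return events->notified_index != events->write_index;
}

static bool example_events_full(const struct dmxdev_events_queue *events)
{
	/* mirrors dvb_dmxdev_events_is_full() */
	return dvb_dmxdev_advance_event_idx(events->write_index) ==
		events->read_index;
}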
+
+static int dvb_dmxdev_update_events(struct dmxdev_events_queue *events,
+					int bytes_read)
+{
+	struct dmx_filter_event *event;
+	int res;
+	int data_event;
+
+	/*
+	 * If data events are not enabled on this filter,
+	 * there's nothing to update.
+	 */
+	if (events->data_read_event_masked)
+		return 0;
+
+	/*
+	 * Go through all events that were notified and
+	 * remove them from the events queue if their respective
+	 * data was read.
+	 */
+	while ((events->read_index != events->notified_index) &&
+		   (bytes_read)) {
+		event = events->queue + events->read_index;
+
+		data_event = 1;
+
+		if (event->type == DMX_EVENT_NEW_PES)
+			res = dvb_dmxdev_update_pes_event(event, bytes_read);
+		else if (event->type == DMX_EVENT_NEW_SECTION)
+			res = dvb_dmxdev_update_section_event(event,
+								bytes_read);
+		else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
+			res = dvb_dmxdev_update_rec_event(event, bytes_read);
+		else
+			data_event = 0;
+
+		if (data_event) {
+			if (res) {
+				/*
+				 * Data relevant to this event was
+				 * fully consumed, remove it from the queue.
+				 */
+				bytes_read -= res;
+				events->read_index =
+					dvb_dmxdev_advance_event_idx(
+						events->read_index);
+			} else {
+				bytes_read = 0;
+			}
+		} else {
+			/*
+			 * non-data event was already notified,
+			 * no need to keep it
+			 */
+			events->read_index = dvb_dmxdev_advance_event_idx(
+						events->read_index);
+		}
+	}
+
+	if (!bytes_read)
+		return 0;
+
+	/*
+	 * If we reached here it means:
+	 * bytes_read != 0
+	 * events->read_index == events->notified_index
+	 * Check if there are pending events in the queue
+	 * which the user didn't read while their relevant data
+	 * was read.
+	 */
+	while ((events->notified_index != events->write_index) &&
+		   (bytes_read)) {
+		event = events->queue + events->notified_index;
+
+		data_event = 1;
+
+		if (event->type == DMX_EVENT_NEW_PES)
+			res = dvb_dmxdev_update_pes_event(event, bytes_read);
+		else if (event->type == DMX_EVENT_NEW_SECTION)
+			res = dvb_dmxdev_update_section_event(event,
+								bytes_read);
+		else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
+			res = dvb_dmxdev_update_rec_event(event, bytes_read);
+		else
+			data_event = 0;
+
+		if (data_event) {
+			if (res) {
+				/*
+				 * Data relevant to this event was
+				 * fully consumed, remove it from the queue.
+				 */
+				bytes_read -= res;
+				events->notified_index =
+					dvb_dmxdev_advance_event_idx(
+						events->notified_index);
+				if (!(events->event_mask.no_wakeup_mask &
+					event->type))
+					events->wakeup_events_counter--;
+			} else {
+				bytes_read = 0;
+			}
+		} else {
+			if (bytes_read) {
+				/*
+				 * data was read beyond the non-data event,
+				 * making it not relevant anymore
+				 */
+				events->notified_index =
+					dvb_dmxdev_advance_event_idx(
+						events->notified_index);
+				if (!(events->event_mask.no_wakeup_mask &
+					event->type))
+					events->wakeup_events_counter--;
+			}
+		}
+
+		events->read_index = events->notified_index;
+	}
+
+	/*
+	 * Check if data was read without having a respective
+	 * event in the events-queue
+	 */
+	if (bytes_read)
+		events->bytes_read_no_event += bytes_read;
+
+	return 0;
+}
+
+static inline int dvb_dmxdev_check_data(struct dmxdev_filter *filter,
+			struct dvb_ringbuffer *src)
+{
+	int data_status_change;
+
+	if (filter)
+		if (mutex_lock_interruptible(&filter->mutex))
+			return -ERESTARTSYS;
+
+	if (!src->data ||
+		!dvb_ringbuffer_empty(src) ||
+		src->error ||
+		(filter &&
+		 (filter->state != DMXDEV_STATE_GO) &&
+		 (filter->state != DMXDEV_STATE_DONE)))
+		data_status_change = 1;
+	else
+		data_status_change = 0;
+
+	if (filter)
+		mutex_unlock(&filter->mutex);
+
+	return data_status_change;
+}
+
+static ssize_t dvb_dmxdev_buffer_read(struct dmxdev_filter *filter,
+					struct dvb_ringbuffer *src,
+					int non_blocking, char __user *buf,
+					size_t count, loff_t *ppos)
 {
 	size_t todo;
 	ssize_t avail;
@@ -70,7 +513,7 @@
 
 	if (src->error) {
 		ret = src->error;
-		dvb_ringbuffer_flush(src);
+		src->error = 0;
 		return ret;
 	}
 
@@ -80,15 +523,35 @@
 			break;
 		}
 
+		if (filter) {
+			if ((filter->state == DMXDEV_STATE_DONE) &&
+				dvb_ringbuffer_empty(src))
+				break;
+
+			mutex_unlock(&filter->mutex);
+		}
+
 		ret = wait_event_interruptible(src->queue,
-					       !dvb_ringbuffer_empty(src) ||
-					       (src->error != 0));
+				dvb_dmxdev_check_data(filter, src));
+
+		if (filter) {
+			if (mutex_lock_interruptible(&filter->mutex))
+				return -ERESTARTSYS;
+
+			if ((filter->state != DMXDEV_STATE_GO) &&
+				(filter->state != DMXDEV_STATE_DONE))
+				return -ENODEV;
+		}
+
 		if (ret < 0)
 			break;
 
+		if (!src->data)
+			return 0;
+
 		if (src->error) {
 			ret = src->error;
-			dvb_ringbuffer_flush(src);
+			src->error = 0;
 			break;
 		}
 
@@ -103,6 +566,9 @@
 		buf += ret;
 	}
 
+	if (count - todo) /* some data was read? */
+		wake_up_all(&src->queue);
+
 	return (count - todo) ? (count - todo) : ret;
 }
 
@@ -120,13 +586,238 @@
 	return NULL;
 }
 
+static void dvb_dvr_oob_cmd(struct dmxdev *dmxdev, struct dmx_oob_command *cmd)
+{
+	int i;
+	struct dmxdev_filter *filter;
+	struct dmxdev_feed *feed;
+
+	for (i = 0; i < dmxdev->filternum; i++) {
+		filter = &dmxdev->filter[i];
+		if (!filter || filter->state != DMXDEV_STATE_GO)
+			continue;
+
+		switch (filter->type) {
+		case DMXDEV_TYPE_SEC:
+			filter->feed.sec.feed->oob_command(
+				filter->feed.sec.feed, cmd);
+			break;
+		case DMXDEV_TYPE_PES:
+			feed = list_first_entry(&filter->feed.ts,
+						struct dmxdev_feed, next);
+			feed->ts->oob_command(feed->ts, cmd);
+			break;
+		case DMXDEV_TYPE_NONE:
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+static int dvb_dvr_feed_cmd(struct dmxdev *dmxdev, struct dvr_command *dvr_cmd)
+{
+	int ret = 0;
+	size_t todo;
+	int bytes_written = 0;
+	size_t split;
+	size_t tsp_size;
+	u8 *data_start;
+	struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;
+
+	todo = dvr_cmd->cmd.data_feed_count;
+
+	if (dmxdev->demux->get_tsp_size)
+		tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux);
+	else
+		tsp_size = 188;
+
+	while (todo >= tsp_size) {
+		/* wait for input */
+		ret = wait_event_interruptible(
+			src->queue,
+			(dvb_ringbuffer_avail(src) >= tsp_size) ||
+			dmxdev->dvr_in_exit || src->error);
+
+		if (ret < 0)
+			break;
+
+		spin_lock(&dmxdev->dvr_in_lock);
+
+		if (dmxdev->exit || dmxdev->dvr_in_exit) {
+			spin_unlock(&dmxdev->dvr_in_lock);
+			ret = -ENODEV;
+			break;
+		}
+
+		if (src->error) {
+			spin_unlock(&dmxdev->dvr_in_lock);
+			wake_up_all(&src->queue);
+			ret = -EINVAL;
+			break;
+		}
+
+		dmxdev->dvr_processing_input = 1;
+
+		split = (src->pread + todo > src->size) ?
+			src->size - src->pread : 0;
+
+		/*
+		 * In DVR PULL mode, write might block.
+		 * The lock on the DVR buffer is released before calling
+		 * write; if the DVR was released meanwhile, dvr_in_exit is
+		 * set. The lock is taken again when updating the read pointer
+		 * to keep the read/write pointers consistent.
+		 *
+		 * In protected input mode, DVR input buffer is not mapped
+		 * to kernel memory. Underlying demux implementation
+		 * should trigger HW to read from DVR input buffer
+		 * based on current read offset.
+		 */
+		if (split > 0) {
+			data_start = (dmxdev->demux->dvr_input_protected) ?
+						NULL : (src->data + src->pread);
+
+			spin_unlock(&dmxdev->dvr_in_lock);
+			ret = dmxdev->demux->write(dmxdev->demux,
+						data_start,
+						split);
+
+			if (ret < 0) {
+				pr_err("dmxdev: dvr write error %d\n", ret);
+				continue;
+			}
+
+			if (dmxdev->dvr_in_exit) {
+				ret = -ENODEV;
+				break;
+			}
+
+			spin_lock(&dmxdev->dvr_in_lock);
+
+			todo -= ret;
+			bytes_written += ret;
+			DVB_RINGBUFFER_SKIP(src, ret);
+			if (ret < split) {
+				dmxdev->dvr_processing_input = 0;
+				spin_unlock(&dmxdev->dvr_in_lock);
+				wake_up_all(&src->queue);
+				continue;
+			}
+		}
+
+		data_start = (dmxdev->demux->dvr_input_protected) ?
+			NULL : (src->data + src->pread);
+
+		spin_unlock(&dmxdev->dvr_in_lock);
+		ret = dmxdev->demux->write(dmxdev->demux,
+			data_start, todo);
+
+		if (ret < 0) {
+			pr_err("dmxdev: dvr write error %d\n", ret);
+			continue;
+		}
+
+		if (dmxdev->dvr_in_exit) {
+			ret = -ENODEV;
+			break;
+		}
+
+		spin_lock(&dmxdev->dvr_in_lock);
+
+		todo -= ret;
+		bytes_written += ret;
+		DVB_RINGBUFFER_SKIP(src, ret);
+		dmxdev->dvr_processing_input = 0;
+		spin_unlock(&dmxdev->dvr_in_lock);
+
+		wake_up_all(&src->queue);
+	}
+
+	if (ret < 0)
+		return ret;
+
+	return bytes_written;
+}
+
+static int dvr_input_thread_entry(void *arg)
+{
+	struct dmxdev *dmxdev = arg;
+	struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
+	struct dvr_command dvr_cmd;
+	int leftover = 0;
+	int ret;
+
+	while (1) {
+		/* wait for input */
+		ret = wait_event_interruptible(
+			cmdbuf->queue,
+			(!cmdbuf->data) ||
+			(dvb_ringbuffer_avail(cmdbuf) >= sizeof(dvr_cmd)) ||
+			(dmxdev->dvr_in_exit));
+
+		if (ret < 0)
+			break;
+
+		spin_lock(&dmxdev->dvr_in_lock);
+
+		if (!cmdbuf->data || dmxdev->exit || dmxdev->dvr_in_exit) {
+			spin_unlock(&dmxdev->dvr_in_lock);
+			break;
+		}
+
+		dvb_ringbuffer_read(cmdbuf, (u8 *)&dvr_cmd, sizeof(dvr_cmd));
+
+		spin_unlock(&dmxdev->dvr_in_lock);
+
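+		/*
+		 * A feed command may be only partially processed (e.g. when
+		 * less than a full TS packet is left); carry the unprocessed
+		 * remainder over into the next feed command.
+		 */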
+		if (dvr_cmd.type == DVR_DATA_FEED_CMD) {
+			dvr_cmd.cmd.data_feed_count += leftover;
+
+			ret = dvb_dvr_feed_cmd(dmxdev, &dvr_cmd);
+			if (ret < 0) {
+				pr_debug("%s: DVR data feed failed, ret=%d\n",
+					__func__, ret);
+				continue;
+			}
+
+			leftover = dvr_cmd.cmd.data_feed_count - ret;
+		} else {
+			/*
+			 * For EOS, try to process leftover data in the input
+			 * buffer.
+			 */
+			if (dvr_cmd.cmd.oobcmd.type == DMX_OOB_CMD_EOS) {
+				struct dvr_command feed_cmd;
+
+				feed_cmd.type = DVR_DATA_FEED_CMD;
+				feed_cmd.cmd.data_feed_count =
+					dvb_ringbuffer_avail(
+						&dmxdev->dvr_input_buffer);
+				dvb_dvr_feed_cmd(dmxdev, &feed_cmd);
+			}
+
+			dvb_dvr_oob_cmd(dmxdev, &dvr_cmd.cmd.oobcmd);
+		}
+	}
+
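+	/* Sleep until kthread_stop() is called, then exit the thread */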
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		schedule();
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+	set_current_state(TASK_RUNNING);
+
+	return 0;
+}
+
 static int dvb_dvr_open(struct inode *inode, struct file *file)
 {
 	struct dvb_device *dvbdev = file->private_data;
 	struct dmxdev *dmxdev = dvbdev->priv;
 	struct dmx_frontend *front;
+	void *mem;
 
-	dprintk("function : %s\n", __func__);
+	pr_debug("function : %s(%X)\n", __func__, (file->f_flags & O_ACCMODE));
 
 	if (mutex_lock_interruptible(&dmxdev->mutex))
 		return -ERESTARTSYS;
@@ -144,21 +835,28 @@
 	}
 
 	if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
-		void *mem;
 		if (!dvbdev->readers) {
 			mutex_unlock(&dmxdev->mutex);
 			return -EBUSY;
 		}
-		mem = vmalloc(DVR_BUFFER_SIZE);
+		mem = vmalloc_user(DVR_BUFFER_SIZE);
 		if (!mem) {
 			mutex_unlock(&dmxdev->mutex);
 			return -ENOMEM;
 		}
 		dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
-		dvbdev->readers--;
-	}
+		dvb_dmxdev_flush_events(&dmxdev->dvr_output_events);
+		dmxdev->dvr_output_events.event_mask.disable_mask = 0;
+		dmxdev->dvr_output_events.event_mask.no_wakeup_mask = 0;
+		dmxdev->dvr_output_events.event_mask.wakeup_threshold = 1;
+		dmxdev->dvr_feeds_count = 0;
+		dmxdev->dvr_buffer_mode = DMX_BUFFER_MODE_INTERNAL;
+		dmxdev->dvr_priv_buff_handle = NULL;
 
-	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
+		dvbdev->readers--;
+	} else if (!dvbdev->writers) {
+		dmxdev->dvr_in_exit = 0;
+		dmxdev->dvr_processing_input = 0;
 		dmxdev->dvr_orig_fe = dmxdev->demux->frontend;
 
 		if (!dmxdev->demux->write) {
@@ -172,9 +870,51 @@
 			mutex_unlock(&dmxdev->mutex);
 			return -EINVAL;
 		}
+
+		mem = vmalloc_user(DVR_BUFFER_SIZE);
+		if (!mem) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ENOMEM;
+		}
+
 		dmxdev->demux->disconnect_frontend(dmxdev->demux);
 		dmxdev->demux->connect_frontend(dmxdev->demux, front);
+		dmxdev->dvr_input_buffer_mode = DMX_BUFFER_MODE_INTERNAL;
+
+		dvb_ringbuffer_init(&dmxdev->dvr_input_buffer,
+							mem,
+							DVR_BUFFER_SIZE);
+
+		dmxdev->demux->dvr_input.priv_handle = NULL;
+		dmxdev->demux->dvr_input.ringbuff = &dmxdev->dvr_input_buffer;
+		dmxdev->demux->dvr_input_protected = 0;
+		mem = vmalloc(DVR_CMDS_BUFFER_SIZE);
+		if (!mem) {
+			vfree(dmxdev->dvr_input_buffer.data);
+			dmxdev->dvr_input_buffer.data = NULL;
+			mutex_unlock(&dmxdev->mutex);
+			return -ENOMEM;
+		}
+		dvb_ringbuffer_init(&dmxdev->dvr_cmd_buffer, mem,
+			DVR_CMDS_BUFFER_SIZE);
+		dvbdev->writers--;
+
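+		/* Start the thread that feeds queued DVR commands to the demux */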
+		dmxdev->dvr_input_thread =
+			kthread_run(
+				dvr_input_thread_entry,
+				(void *)dmxdev,
+				"dvr_input");
+
+		if (IS_ERR(dmxdev->dvr_input_thread)) {
+			vfree(dmxdev->dvr_input_buffer.data);
+			vfree(dmxdev->dvr_cmd_buffer.data);
+			dmxdev->dvr_input_buffer.data = NULL;
+			dmxdev->dvr_cmd_buffer.data = NULL;
+			mutex_unlock(&dmxdev->mutex);
+			return -ENOMEM;
+		}
 	}
+
 	dvbdev->users++;
 	mutex_unlock(&dmxdev->mutex);
 	return 0;
@@ -187,11 +927,6 @@
 
 	mutex_lock(&dmxdev->mutex);
 
-	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
-		dmxdev->demux->disconnect_frontend(dmxdev->demux);
-		dmxdev->demux->connect_frontend(dmxdev->demux,
-						dmxdev->dvr_orig_fe);
-	}
 	if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
 		dvbdev->readers++;
 		if (dmxdev->dvr_buffer.data) {
@@ -200,12 +935,100 @@
 			spin_lock_irq(&dmxdev->lock);
 			dmxdev->dvr_buffer.data = NULL;
 			spin_unlock_irq(&dmxdev->lock);
+			wake_up_all(&dmxdev->dvr_buffer.queue);
+
+			if (dmxdev->dvr_buffer_mode == DMX_BUFFER_MODE_INTERNAL)
+				vfree(mem);
+		}
+
+		if ((dmxdev->dvr_buffer_mode == DMX_BUFFER_MODE_EXTERNAL) &&
+			dmxdev->dvr_priv_buff_handle) {
+			dmxdev->demux->unmap_buffer(dmxdev->demux,
+					dmxdev->dvr_priv_buff_handle);
+			dmxdev->dvr_priv_buff_handle = NULL;
+		}
+	} else {
+		int i;
+
+		spin_lock(&dmxdev->dvr_in_lock);
+		dmxdev->dvr_in_exit = 1;
+		spin_unlock(&dmxdev->dvr_in_lock);
+
+		wake_up_all(&dmxdev->dvr_cmd_buffer.queue);
+
+		/*
+		 * There might be demux filters currently reading from the DVR
+		 * device; in PULL mode they might also be stalled on output.
+		 * Signal to them that the DVR is exiting.
+		 */
+		if (dmxdev->playback_mode == DMX_PB_MODE_PULL) {
+			wake_up_all(&dmxdev->dvr_buffer.queue);
+
+			for (i = 0; i < dmxdev->filternum; i++)
+				if (dmxdev->filter[i].state == DMXDEV_STATE_GO)
+					wake_up_all(
+					&dmxdev->filter[i].buffer.queue);
+		}
+
+		/* notify kernel demux that we are canceling */
+		if (dmxdev->demux->write_cancel)
+			dmxdev->demux->write_cancel(dmxdev->demux);
+
+		/*
+		 * Now stop the dvr-input thread so that nothing processes
+		 * data from the DVR input buffer before it gets freed.
+		 */
+		kthread_stop(dmxdev->dvr_input_thread);
+
+		dvbdev->writers++;
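+		/* Restore the frontend that was connected before DVR playback */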
+		dmxdev->demux->disconnect_frontend(dmxdev->demux);
+		dmxdev->demux->connect_frontend(dmxdev->demux,
+						dmxdev->dvr_orig_fe);
+
+		if (dmxdev->dvr_input_buffer.data) {
+			void *mem = dmxdev->dvr_input_buffer.data;
+			/*
+			 * Ensure all the operations on the DVR input buffer
+			 * are completed before it gets freed.
+			 */
+			mb();
+			spin_lock_irq(&dmxdev->dvr_in_lock);
+			dmxdev->dvr_input_buffer.data = NULL;
+			spin_unlock_irq(&dmxdev->dvr_in_lock);
+
+			if (dmxdev->dvr_input_buffer_mode ==
+				DMX_BUFFER_MODE_INTERNAL)
+				vfree(mem);
+		}
+
+		if ((dmxdev->dvr_input_buffer_mode ==
+			DMX_BUFFER_MODE_EXTERNAL) &&
+			(dmxdev->demux->dvr_input.priv_handle)) {
+			if (!dmxdev->demux->dvr_input_protected)
+				dmxdev->demux->unmap_buffer(dmxdev->demux,
+					dmxdev->demux->dvr_input.priv_handle);
+			dmxdev->demux->dvr_input.priv_handle = NULL;
+		}
+
+		if (dmxdev->dvr_cmd_buffer.data) {
+			void *mem = dmxdev->dvr_cmd_buffer.data;
+			/*
+			 * Ensure all the operations on the DVR command buffer
+			 * are completed before it gets freed.
+			 */
+			mb();
+			spin_lock_irq(&dmxdev->dvr_in_lock);
+			dmxdev->dvr_cmd_buffer.data = NULL;
+			spin_unlock_irq(&dmxdev->dvr_in_lock);
 			vfree(mem);
 		}
 	}
 	/* TODO */
 	dvbdev->users--;
 	if (dvbdev->users == 1 && dmxdev->exit == 1) {
+		fops_put(file->f_op);
+		file->f_op = NULL;
 		mutex_unlock(&dmxdev->mutex);
 		wake_up(&dvbdev->wait_queue);
 	} else
@@ -214,17 +1037,21 @@
 	return 0;
 }
 
-static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
-			     size_t count, loff_t *ppos)
+
+static int dvb_dvr_mmap(struct file *filp, struct vm_area_struct *vma)
 {
-	struct dvb_device *dvbdev = file->private_data;
+	struct dvb_device *dvbdev = filp->private_data;
 	struct dmxdev *dmxdev = dvbdev->priv;
+	struct dvb_ringbuffer *buffer;
+	enum dmx_buffer_mode buffer_mode;
+	int vma_size;
+	int buffer_size;
 	int ret;
 
-	if (!dmxdev->demux->write)
-		return -EOPNOTSUPP;
-	if ((file->f_flags & O_ACCMODE) != O_WRONLY)
+	if (((filp->f_flags & O_ACCMODE) == O_RDONLY) &&
+		(vma->vm_flags & VM_WRITE))
 		return -EINVAL;
+
 	if (mutex_lock_interruptible(&dmxdev->mutex))
 		return -ERESTARTSYS;
 
@@ -232,58 +1059,693 @@
 		mutex_unlock(&dmxdev->mutex);
 		return -ENODEV;
 	}
-	ret = dmxdev->demux->write(dmxdev->demux, buf, count);
+
+	if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
+		buffer = &dmxdev->dvr_buffer;
+		buffer_mode = dmxdev->dvr_buffer_mode;
+	} else {
+		buffer = &dmxdev->dvr_input_buffer;
+		buffer_mode = dmxdev->dvr_input_buffer_mode;
+	}
+
+	if (buffer_mode == DMX_BUFFER_MODE_EXTERNAL) {
+		mutex_unlock(&dmxdev->mutex);
+		return -EINVAL;
+	}
+
+	vma_size = vma->vm_end - vma->vm_start;
+
+	/* Requested mapping must exactly match the page-aligned buffer size */
+	buffer_size = buffer->size + (PAGE_SIZE-1);
+	buffer_size = buffer_size & ~(PAGE_SIZE-1);
+
+	if (vma_size != buffer_size) {
+		mutex_unlock(&dmxdev->mutex);
+		return -EINVAL;
+	}
+
+	ret = remap_vmalloc_range(vma, buffer->data, 0);
+	if (ret) {
+		mutex_unlock(&dmxdev->mutex);
+		return ret;
+	}
+
+	vma->vm_flags |= VM_DONTDUMP;
+	vma->vm_flags |= VM_DONTEXPAND;
+
 	mutex_unlock(&dmxdev->mutex);
 	return ret;
 }
 
+static void dvb_dvr_queue_data_feed(struct dmxdev *dmxdev, size_t count)
+{
+	struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
+	struct dvr_command *dvr_cmd;
+	int last_dvr_cmd;
+
+	spin_lock(&dmxdev->dvr_in_lock);
+
+	/* Peek at the last DVR command queued, try to coalesce FEED commands */
+	if (dvb_ringbuffer_avail(cmdbuf) >= sizeof(*dvr_cmd)) {
+		last_dvr_cmd = cmdbuf->pwrite - sizeof(*dvr_cmd);
+		if (last_dvr_cmd < 0)
+			last_dvr_cmd += cmdbuf->size;
+
+		dvr_cmd = (struct dvr_command *)&cmdbuf->data[last_dvr_cmd];
+		if (dvr_cmd->type == DVR_DATA_FEED_CMD) {
+			dvr_cmd->cmd.data_feed_count += count;
+			spin_unlock(&dmxdev->dvr_in_lock);
+			return;
+		}
+	}
+
+	/*
+	 * We assume the command buffer is large enough that overflow should
+	 * not happen. An overflow of the command buffer means data previously
+	 * written to the input buffer is orphaned - it has no matching FEED
+	 * command. Issue a warning if this ever happens.
+	 * Orphaned data might still be processed if EOS is issued.
+	 */
+	if (dvb_ringbuffer_free(cmdbuf) < sizeof(*dvr_cmd)) {
+		pr_err("%s: DVR command buffer overflow\n", __func__);
+		spin_unlock(&dmxdev->dvr_in_lock);
+		return;
+	}
+
+	dvr_cmd = (struct dvr_command *)&cmdbuf->data[cmdbuf->pwrite];
+	dvr_cmd->type = DVR_DATA_FEED_CMD;
+	dvr_cmd->cmd.data_feed_count = count;
+	DVB_RINGBUFFER_PUSH(cmdbuf, sizeof(*dvr_cmd));
+	spin_unlock(&dmxdev->dvr_in_lock);
+
+	wake_up_all(&cmdbuf->queue);
+}
+
+static int dvb_dvr_external_input_only(struct dmxdev *dmxdev)
+{
+	struct dmx_caps caps;
+	int is_external_only;
+	int flags;
+	size_t tsp_size;
+
+	if (dmxdev->demux->get_tsp_size)
+		tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux);
+	else
+		tsp_size = 188;
+
+	/*
+	 * For backward compatibility, the default assumes that
+	 * external-only buffers are not supported.
+	 */
+	flags = 0;
+	if (dmxdev->demux->get_caps) {
+		dmxdev->demux->get_caps(dmxdev->demux, &caps);
+
+		if (tsp_size == 188)
+			flags = caps.playback_188_tsp.flags;
+		else
+			flags = caps.playback_192_tsp.flags;
+	}
+
+	if (!(flags & DMX_BUFFER_INTERNAL_SUPPORT) &&
+		(flags & DMX_BUFFER_EXTERNAL_SUPPORT))
+		is_external_only = 1;
+	else
+		is_external_only = 0;
+
+	return is_external_only;
+}
+
+static int dvb_dvr_verify_buffer_size(struct dmxdev *dmxdev,
+	unsigned int f_flags,
+	unsigned long size)
+{
+	struct dmx_caps caps;
+	int tsp_size;
+
+	if (!dmxdev->demux->get_caps)
+		return 1;
+
+	if (dmxdev->demux->get_tsp_size)
+		tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux);
+	else
+		tsp_size = 188;
+
+	dmxdev->demux->get_caps(dmxdev->demux, &caps);
+	if ((f_flags & O_ACCMODE) == O_RDONLY)
+		return (tsp_size == 188 && dvb_dmxdev_verify_buffer_size(size,
+				caps.recording_188_tsp.max_size,
+				caps.recording_188_tsp.size_alignment)) ||
+			(tsp_size == 192 && dvb_dmxdev_verify_buffer_size(size,
+				caps.recording_192_tsp.max_size,
+				caps.recording_192_tsp.size_alignment));
+
+	return (tsp_size == 188 && dvb_dmxdev_verify_buffer_size(size,
+		caps.playback_188_tsp.max_size,
+		caps.playback_188_tsp.size_alignment)) ||
+		(tsp_size == 192 && dvb_dmxdev_verify_buffer_size(size,
+			caps.playback_192_tsp.max_size,
+			caps.playback_192_tsp.size_alignment));
+}
+
+static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	struct dvb_device *dvbdev = file->private_data;
+	struct dmxdev *dmxdev = dvbdev->priv;
+	struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;
+	struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
+	int ret;
+	size_t todo;
+	ssize_t free_space;
+
+	if (!dmxdev->demux->write)
+		return -EOPNOTSUPP;
+
+	if (!dvb_dvr_verify_buffer_size(dmxdev, file->f_flags, src->size) ||
+		((file->f_flags & O_ACCMODE) == O_RDONLY) ||
+		!src->data || !cmdbuf->data ||
+		(dvb_dvr_external_input_only(dmxdev) &&
+		 (dmxdev->dvr_input_buffer_mode == DMX_BUFFER_MODE_INTERNAL)))
+		return -EINVAL;
+
+	if ((file->f_flags & O_NONBLOCK) &&
+		(dvb_ringbuffer_free(src) == 0))
+		return -EWOULDBLOCK;
+
+	ret = 0;
+	for (todo = count; todo > 0; todo -= ret) {
+		ret = wait_event_interruptible(src->queue,
+			(dvb_ringbuffer_free(src)) ||
+			!src->data || !cmdbuf->data ||
+			(src->error != 0) || dmxdev->dvr_in_exit);
+
+		if (ret < 0)
+			return ret;
+
+		if (mutex_lock_interruptible(&dmxdev->mutex))
+			return -ERESTARTSYS;
+
+		if ((!src->data) || (!cmdbuf->data)) {
+			mutex_unlock(&dmxdev->mutex);
+			return 0;
+		}
+
+		if (dmxdev->exit || dmxdev->dvr_in_exit) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ENODEV;
+		}
+
+		if (src->error) {
+			ret = src->error;
+			dvb_ringbuffer_flush(src);
+			mutex_unlock(&dmxdev->mutex);
+			wake_up_all(&src->queue);
+			return ret;
+		}
+
+		free_space = dvb_ringbuffer_free(src);
+
+		if (free_space > todo)
+			free_space = todo;
+
+		ret = dvb_ringbuffer_write_user(src, buf, free_space);
+
+		if (ret < 0) {
+			mutex_unlock(&dmxdev->mutex);
+			return ret;
+		}
+
+		buf += ret;
+
+		dvb_dvr_queue_data_feed(dmxdev, ret);
+
+		mutex_unlock(&dmxdev->mutex);
+	}
+
+	return (count - todo) ? (count - todo) : ret;
+}
+
+static int dvb_dmxdev_flush_data(struct dmxdev_filter *filter, size_t length)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	struct dvb_ringbuffer *buffer = &filter->buffer;
+	struct dmxdev_events_queue *events = &filter->events;
+
+	if (filter->type == DMXDEV_TYPE_PES &&
+		filter->params.pes.output == DMX_OUT_TS_TAP) {
+		buffer = &filter->dev->dvr_buffer;
+		events = &filter->dev->dvr_output_events;
+	}
+
+	/*
+	 * Drop 'length' pending data bytes from the ringbuffer and update
+	 * event queue accordingly, similarly to dvb_dmxdev_release_data().
+	 */
+	spin_lock_irqsave(&filter->dev->lock, flags);
+	DVB_RINGBUFFER_SKIP(buffer, length);
+	buffer->error = 0;
+	dvb_dmxdev_flush_events(events);
+	events->current_event_start_offset = buffer->pwrite;
+	spin_unlock_irqrestore(&filter->dev->lock, flags);
+
+	if (filter->type == DMXDEV_TYPE_PES) {
+		struct dmxdev_feed *feed;
+
+		feed = list_first_entry(&filter->feed.ts,
+			struct dmxdev_feed, next);
+
+		if (feed->ts->flush_buffer)
+			return feed->ts->flush_buffer(feed->ts, length);
+	} else if (filter->type == DMXDEV_TYPE_SEC &&
+		filter->feed.sec.feed->flush_buffer) {
+		return filter->feed.sec.feed->flush_buffer(
+			filter->feed.sec.feed, length);
+	}
+
+	return ret;
+}
+
+static inline void dvb_dmxdev_auto_flush_buffer(struct dmxdev_filter *filter,
+	struct dvb_ringbuffer *buf)
+{
+	size_t flush_len;
+
+	/*
+	 * When the buffer overflows, demux-dev marks the buffer as being in
+	 * an error state. If auto-flush is enabled, discard the data
+	 * currently pending in the buffer.
+	 */
+	if (overflow_auto_flush) {
+		flush_len = dvb_ringbuffer_avail(buf);
+		dvb_dmxdev_flush_data(filter, flush_len);
+	}
+}
+
 static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
 			    loff_t *ppos)
 {
+	ssize_t res;
 	struct dvb_device *dvbdev = file->private_data;
 	struct dmxdev *dmxdev = dvbdev->priv;
+	unsigned long flags;
 
 	if (dmxdev->exit)
 		return -ENODEV;
 
-	return dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
-				      file->f_flags & O_NONBLOCK,
-				      buf, count, ppos);
+	if (!dvb_dvr_verify_buffer_size(dmxdev, file->f_flags,
+		dmxdev->dvr_buffer.size))
+		return -EINVAL;
+
+	res = dvb_dmxdev_buffer_read(NULL, &dmxdev->dvr_buffer,
+				file->f_flags & O_NONBLOCK,
+				buf, count, ppos);
+
+	if (res > 0) {
+		dvb_dmxdev_notify_data_read(dmxdev->dvr_feed, res);
+		spin_lock_irqsave(&dmxdev->lock, flags);
+		dvb_dmxdev_update_events(&dmxdev->dvr_output_events, res);
+		spin_unlock_irqrestore(&dmxdev->lock, flags);
+
+		/*
+		 * In PULL mode, we might be stalled on the event queue,
+		 * so we need to wake up waiters.
+		 */
+		if (dmxdev->playback_mode == DMX_PB_MODE_PULL)
+			wake_up_all(&dmxdev->dvr_buffer.queue);
+	} else if (res == -EOVERFLOW) {
+		dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed,
+			&dmxdev->dvr_buffer);
+	}
+
+	return res;
+}
+
+/*
+ * dvb_dvr_push_oob_cmd
+ *
+ * Note: this function assumes dmxdev->mutex is held, so the command buffer
+ * cannot be released during its operation.
+ */
+static int dvb_dvr_push_oob_cmd(struct dmxdev *dmxdev, unsigned int f_flags,
+		struct dmx_oob_command *cmd)
+{
+	struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
+	struct dvr_command *dvr_cmd;
+
+	if ((f_flags & O_ACCMODE) == O_RDONLY ||
+		dmxdev->source < DMX_SOURCE_DVR0)
+		return -EPERM;
+
+	if (dvb_ringbuffer_free(cmdbuf) < sizeof(*dvr_cmd))
+		return -ENOMEM;
+
+	dvr_cmd = (struct dvr_command *)&cmdbuf->data[cmdbuf->pwrite];
+	dvr_cmd->type = DVR_OOB_CMD;
+	dvr_cmd->cmd.oobcmd = *cmd;
+	DVB_RINGBUFFER_PUSH(cmdbuf, sizeof(*dvr_cmd));
+	wake_up_all(&cmdbuf->queue);
+
+	return 0;
+}
+
+static int dvb_dvr_flush_buffer(struct dmxdev *dmxdev, unsigned int f_flags)
+{
+	size_t flush_len;
+	int ret;
+
+	if ((f_flags & O_ACCMODE) != O_RDONLY)
+		return -EINVAL;
+
+	flush_len = dvb_ringbuffer_avail(&dmxdev->dvr_buffer);
+	ret = dvb_dmxdev_flush_data(dmxdev->dvr_feed, flush_len);
+
+	return ret;
 }
 
 static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
-				      unsigned long size)
+						unsigned int f_flags,
+						unsigned long size)
 {
-	struct dvb_ringbuffer *buf = &dmxdev->dvr_buffer;
+	struct dvb_ringbuffer *buf;
 	void *newmem;
 	void *oldmem;
+	spinlock_t *lock;
+	enum dmx_buffer_mode buffer_mode;
 
-	dprintk("function : %s\n", __func__);
+	pr_debug("function : %s\n", __func__);
+
+	if ((f_flags & O_ACCMODE) == O_RDONLY) {
+		buf = &dmxdev->dvr_buffer;
+		lock = &dmxdev->lock;
+		buffer_mode = dmxdev->dvr_buffer_mode;
+	} else {
+		buf = &dmxdev->dvr_input_buffer;
+		lock = &dmxdev->dvr_in_lock;
+		buffer_mode = dmxdev->dvr_input_buffer_mode;
+	}
 
 	if (buf->size == size)
 		return 0;
-	if (!size)
+	if (!size || (buffer_mode == DMX_BUFFER_MODE_EXTERNAL))
 		return -EINVAL;
 
-	newmem = vmalloc(size);
+	newmem = vmalloc_user(size);
 	if (!newmem)
 		return -ENOMEM;
 
 	oldmem = buf->data;
 
-	spin_lock_irq(&dmxdev->lock);
+	spin_lock_irq(lock);
+
+	if (((f_flags & O_ACCMODE) != O_RDONLY) &&
+		(dmxdev->dvr_processing_input)) {
+		spin_unlock_irq(lock);
+		vfree(oldmem);
+		return -EBUSY;
+	}
+
 	buf->data = newmem;
 	buf->size = size;
 
 	/* reset and not flush in case the buffer shrinks */
 	dvb_ringbuffer_reset(buf);
-	spin_unlock_irq(&dmxdev->lock);
+
+	spin_unlock_irq(lock);
 
 	vfree(oldmem);
 
 	return 0;
 }
 
+static int dvb_dvr_set_buffer_mode(struct dmxdev *dmxdev,
+			unsigned int f_flags, enum dmx_buffer_mode mode)
+{
+	struct dvb_ringbuffer *buf;
+	spinlock_t *lock;
+	enum dmx_buffer_mode *buffer_mode;
+	void **buff_handle;
+	void *oldmem;
+	int *is_protected;
+
+	if ((mode != DMX_BUFFER_MODE_INTERNAL) &&
+		(mode != DMX_BUFFER_MODE_EXTERNAL))
+		return -EINVAL;
+
+	if ((mode == DMX_BUFFER_MODE_EXTERNAL) &&
+		(!dmxdev->demux->map_buffer || !dmxdev->demux->unmap_buffer))
+		return -EINVAL;
+
+	if ((f_flags & O_ACCMODE) == O_RDONLY) {
+		buf = &dmxdev->dvr_buffer;
+		lock = &dmxdev->lock;
+		buffer_mode = &dmxdev->dvr_buffer_mode;
+		buff_handle = &dmxdev->dvr_priv_buff_handle;
+		is_protected = NULL;
+	} else {
+		buf = &dmxdev->dvr_input_buffer;
+		lock = &dmxdev->dvr_in_lock;
+		buffer_mode = &dmxdev->dvr_input_buffer_mode;
+		buff_handle = &dmxdev->demux->dvr_input.priv_handle;
+		is_protected = &dmxdev->demux->dvr_input_protected;
+	}
+
+	if (mode == *buffer_mode)
+		return 0;
+
+	oldmem = buf->data;
+	spin_lock_irq(lock);
+	buf->data = NULL;
+	spin_unlock_irq(lock);
+
+	*buffer_mode = mode;
+
+	if (mode == DMX_BUFFER_MODE_INTERNAL) {
+		/* switched from external to internal */
+		if (*buff_handle) {
+			dmxdev->demux->unmap_buffer(dmxdev->demux,
+				*buff_handle);
+			*buff_handle = NULL;
+		}
+
+		if (is_protected)
+			*is_protected = 0;
+
+		/* set default internal buffer */
+		dvb_dvr_set_buffer_size(dmxdev, f_flags, DVR_BUFFER_SIZE);
+	} else if (oldmem) {
+		/* switched from internal to external */
+		vfree(oldmem);
+	}
+
+	return 0;
+}
+
+static int dvb_dvr_set_buffer(struct dmxdev *dmxdev,
+			unsigned int f_flags, struct dmx_buffer *dmx_buffer)
+{
+	struct dvb_ringbuffer *buf;
+	spinlock_t *lock;
+	enum dmx_buffer_mode buffer_mode;
+	void **buff_handle;
+	void *newmem;
+	void *oldmem;
+	int *is_protected;
+	struct dmx_caps caps;
+
+	if (dmxdev->demux->get_caps)
+		dmxdev->demux->get_caps(dmxdev->demux, &caps);
+	else
+		caps.caps = 0;
+
+	if ((f_flags & O_ACCMODE) == O_RDONLY) {
+		buf = &dmxdev->dvr_buffer;
+		lock = &dmxdev->lock;
+		buffer_mode = dmxdev->dvr_buffer_mode;
+		buff_handle = &dmxdev->dvr_priv_buff_handle;
+		is_protected = NULL;
+	} else {
+		buf = &dmxdev->dvr_input_buffer;
+		lock = &dmxdev->dvr_in_lock;
+		buffer_mode = dmxdev->dvr_input_buffer_mode;
+		buff_handle = &dmxdev->demux->dvr_input.priv_handle;
+		is_protected = &dmxdev->demux->dvr_input_protected;
+		if (!(caps.caps & DMX_CAP_SECURED_INPUT_PLAYBACK) &&
+			dmx_buffer->is_protected)
+			return -EINVAL;
+	}
+
+	if (!dmx_buffer->size ||
+		(buffer_mode == DMX_BUFFER_MODE_INTERNAL))
+		return -EINVAL;
+
+	oldmem = *buff_handle;
+
+	/*
+	 * A protected buffer is relevant only for the DVR input buffer when
+	 * the DVR device is opened for write. In that case, the buffer is
+	 * mapped only if it is not a protected buffer.
+	 */
+	if (!is_protected || !dmx_buffer->is_protected) {
+		if (dmxdev->demux->map_buffer(dmxdev->demux, dmx_buffer,
+					buff_handle, &newmem))
+			return -ENOMEM;
+	} else {
+		newmem = NULL;
+		*buff_handle = NULL;
+	}
+
+	spin_lock_irq(lock);
+	buf->data = newmem;
+	buf->size = dmx_buffer->size;
+	if (is_protected)
+		*is_protected = dmx_buffer->is_protected;
+	dvb_ringbuffer_reset(buf);
+	spin_unlock_irq(lock);
+
+	if (oldmem)
+		dmxdev->demux->unmap_buffer(dmxdev->demux, oldmem);
+
+	return 0;
+}
+
+static int dvb_dvr_get_event(struct dmxdev *dmxdev,
+				unsigned int f_flags,
+				struct dmx_filter_event *event)
+{
+	int res = 0;
+
+	if (!((f_flags & O_ACCMODE) == O_RDONLY))
+		return -EINVAL;
+
+	spin_lock_irq(&dmxdev->lock);
+
+	if (dmxdev->dvr_buffer.error == -EOVERFLOW) {
+		event->type = DMX_EVENT_BUFFER_OVERFLOW;
+		dmxdev->dvr_buffer.error = 0;
+	} else {
+		res = dvb_dmxdev_remove_event(&dmxdev->dvr_output_events,
+			event);
+		if (res) {
+			spin_unlock_irq(&dmxdev->lock);
+			return res;
+		}
+	}
+
+	spin_unlock_irq(&dmxdev->lock);
+
+	if (event->type == DMX_EVENT_BUFFER_OVERFLOW)
+		dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed,
+			&dmxdev->dvr_buffer);
+
+	/*
+	 * In PULL mode, we might be stalled on the event queue,
+	 * so we need to wake up waiters.
+	 */
+	if (dmxdev->playback_mode == DMX_PB_MODE_PULL)
+		wake_up_all(&dmxdev->dvr_buffer.queue);
+
+	return res;
+}
+
+static int dvb_dvr_get_buffer_status(struct dmxdev *dmxdev,
+				unsigned int f_flags,
+				struct dmx_buffer_status *dmx_buffer_status)
+{
+	struct dvb_ringbuffer *buf;
+	spinlock_t *lock;
+
+	if ((f_flags & O_ACCMODE) == O_RDONLY) {
+		buf = &dmxdev->dvr_buffer;
+		lock = &dmxdev->lock;
+	} else {
+		buf = &dmxdev->dvr_input_buffer;
+		lock = &dmxdev->dvr_in_lock;
+	}
+
+	spin_lock_irq(lock);
+
+	dmx_buffer_status->error = buf->error;
+	dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf);
+	dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf);
+	dmx_buffer_status->read_offset = buf->pread;
+	dmx_buffer_status->write_offset = buf->pwrite;
+	dmx_buffer_status->size = buf->size;
+	buf->error = 0;
+
+	spin_unlock_irq(lock);
+
+	if (dmx_buffer_status->error == -EOVERFLOW)
+		dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed, buf);
+
+	return 0;
+}
+
+static int dvb_dvr_release_data(struct dmxdev *dmxdev,
+					unsigned int f_flags,
+					u32 bytes_count)
+{
+	ssize_t buff_fullness;
+
+	if (!((f_flags & O_ACCMODE) == O_RDONLY))
+		return -EINVAL;
+
+	if (!bytes_count)
+		return 0;
+
+	buff_fullness = dvb_ringbuffer_avail(&dmxdev->dvr_buffer);
+
+	if (bytes_count > buff_fullness)
+		return -EINVAL;
+
+	DVB_RINGBUFFER_SKIP(&dmxdev->dvr_buffer, bytes_count);
+
+	dvb_dmxdev_notify_data_read(dmxdev->dvr_feed, bytes_count);
+	spin_lock_irq(&dmxdev->lock);
+	dvb_dmxdev_update_events(&dmxdev->dvr_output_events, bytes_count);
+	spin_unlock_irq(&dmxdev->lock);
+
+	wake_up_all(&dmxdev->dvr_buffer.queue);
+	return 0;
+}
+
+/*
+ * dvb_dvr_feed_data - Notify new data in DVR input buffer
+ *
+ * @dmxdev - demux device instance
+ * @f_flags - demux device file flag (access mode)
+ * @bytes_count - how many bytes were written to the input buffer
+ *
+ * Note: this function assumes dmxdev->mutex is held, so the buffer cannot
+ * be released during its operation.
+ */
+static int dvb_dvr_feed_data(struct dmxdev *dmxdev,
+	unsigned int f_flags,
+	u32 bytes_count)
+{
+	ssize_t free_space;
+	struct dvb_ringbuffer *buffer = &dmxdev->dvr_input_buffer;
+
+	if ((f_flags & O_ACCMODE) == O_RDONLY)
+		return -EINVAL;
+
+	if (!bytes_count)
+		return 0;
+
+	free_space = dvb_ringbuffer_free(buffer);
+
+	if (bytes_count > free_space)
+		return -EINVAL;
+
+	DVB_RINGBUFFER_PUSH(buffer, bytes_count);
+
+	dvb_dvr_queue_data_feed(dmxdev, bytes_count);
+
+	return 0;
+}
+
 static inline void dvb_dmxdev_filter_state_set(struct dmxdev_filter
 					       *dmxdevfilter, int state)
 {
@@ -301,12 +1763,13 @@
 
 	if (buf->size == size)
 		return 0;
-	if (!size)
+	if (!size ||
+		(dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL))
 		return -EINVAL;
 	if (dmxdevfilter->state >= DMXDEV_STATE_GO)
 		return -EBUSY;
 
-	newmem = vmalloc(size);
+	newmem = vmalloc_user(size);
 	if (!newmem)
 		return -ENOMEM;
 
@@ -325,15 +1788,805 @@
 	return 0;
 }
 
+static int dvb_dmxdev_set_buffer_mode(struct dmxdev_filter *dmxdevfilter,
+					enum dmx_buffer_mode mode)
+{
+	struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
+	struct dmxdev *dmxdev = dmxdevfilter->dev;
+	void *oldmem;
+
+	if (dmxdevfilter->state >= DMXDEV_STATE_GO)
+		return -EBUSY;
+
+	if ((mode != DMX_BUFFER_MODE_INTERNAL) &&
+		(mode != DMX_BUFFER_MODE_EXTERNAL))
+		return -EINVAL;
+
+	if ((mode == DMX_BUFFER_MODE_EXTERNAL) &&
+		(!dmxdev->demux->map_buffer || !dmxdev->demux->unmap_buffer))
+		return -EINVAL;
+
+	if (mode == dmxdevfilter->buffer_mode)
+		return 0;
+
+	oldmem = buf->data;
+	spin_lock_irq(&dmxdevfilter->dev->lock);
+	buf->data = NULL;
+	spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+	dmxdevfilter->buffer_mode = mode;
+
+	if (mode == DMX_BUFFER_MODE_INTERNAL) {
+		/* switched from external to internal */
+		if (dmxdevfilter->priv_buff_handle) {
+			dmxdev->demux->unmap_buffer(dmxdev->demux,
+				dmxdevfilter->priv_buff_handle);
+			dmxdevfilter->priv_buff_handle = NULL;
+		}
+	} else if (oldmem) {
+		/* switched from internal to external */
+		vfree(oldmem);
+	}
+
+	return 0;
+}
+
+static int dvb_dmxdev_set_buffer(struct dmxdev_filter *dmxdevfilter,
+					struct dmx_buffer *buffer)
+{
+	struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
+	struct dmxdev *dmxdev = dmxdevfilter->dev;
+	void *newmem;
+	void *oldmem;
+
+	if (dmxdevfilter->state >= DMXDEV_STATE_GO)
+		return -EBUSY;
+
+	if ((!buffer->size) ||
+		(dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_INTERNAL))
+		return -EINVAL;
+
+	oldmem = dmxdevfilter->priv_buff_handle;
+	if (dmxdev->demux->map_buffer(dmxdev->demux, buffer,
+			&dmxdevfilter->priv_buff_handle, &newmem))
+		return -ENOMEM;
+
+	spin_lock_irq(&dmxdevfilter->dev->lock);
+	buf->data = newmem;
+	buf->size = buffer->size;
+	dvb_ringbuffer_reset(buf);
+	spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+	if (oldmem)
+		dmxdev->demux->unmap_buffer(dmxdev->demux, oldmem);
+
+	return 0;
+}
+
+static int dvb_dmxdev_set_tsp_out_format(struct dmxdev_filter *dmxdevfilter,
+				enum dmx_tsp_format_t dmx_tsp_format)
+{
+	if (dmxdevfilter->state >= DMXDEV_STATE_GO)
+		return -EBUSY;
+
+	if ((dmx_tsp_format > DMX_TSP_FORMAT_192_HEAD) ||
+		(dmx_tsp_format < DMX_TSP_FORMAT_188))
+		return -EINVAL;
+
+	dmxdevfilter->dmx_tsp_format = dmx_tsp_format;
+
+	return 0;
+}
+
+static int dvb_dmxdev_set_decoder_buffer_size(
+	struct dmxdev_filter *dmxdevfilter,
+	unsigned long size)
+{
+	struct dmx_caps caps;
+	struct dmx_demux *demux = dmxdevfilter->dev->demux;
+
+	if (demux->get_caps) {
+		demux->get_caps(demux, &caps);
+		if (!dvb_dmxdev_verify_buffer_size(size, caps.decoder.max_size,
+			caps.decoder.size_alignment))
+			return -EINVAL;
+	}
+
+	if (size == 0)
+		return -EINVAL;
+
+	if (dmxdevfilter->decoder_buffers.buffers_size == size)
+		return 0;
+
+	if (dmxdevfilter->state >= DMXDEV_STATE_GO)
+		return -EBUSY;
+
+	/*
+	 * If the decoder buffers were previously set to some external
+	 * buffers, setting the decoder buffer size alone implies a
+	 * transition back to internal buffer mode.
+	 */
+	dmxdevfilter->decoder_buffers.buffers_size = size;
+	dmxdevfilter->decoder_buffers.buffers_num = 0;
+	dmxdevfilter->decoder_buffers.is_linear = 0;
+	return 0;
+}
+
+static int dvb_dmxdev_set_source(struct dmxdev_filter *dmxdevfilter,
+					dmx_source_t *source)
+{
+	int ret = 0;
+	struct dmxdev *dev;
+
+	if (dmxdevfilter->state == DMXDEV_STATE_GO)
+		return -EBUSY;
+
+	dev = dmxdevfilter->dev;
+	if (dev->demux->set_source)
+		ret = dev->demux->set_source(dev->demux, source);
+
+	if (!ret)
+		dev->source = *source;
+
+	return ret;
+}
+
+static int dvb_dmxdev_reuse_decoder_buf(struct dmxdev_filter *dmxdevfilter,
+						int cookie)
+{
+	struct dmxdev_feed *feed;
+
+	if (dmxdevfilter->state != DMXDEV_STATE_GO ||
+		(dmxdevfilter->type != DMXDEV_TYPE_PES) ||
+		(dmxdevfilter->params.pes.output != DMX_OUT_DECODER) ||
+		(dmxdevfilter->events.event_mask.disable_mask &
+			DMX_EVENT_NEW_ES_DATA))
+		return -EPERM;
+
+	/* Only one feed should be in the list in case of decoder */
+	feed = list_first_entry(&dmxdevfilter->feed.ts,
+				struct dmxdev_feed, next);
+	if (feed && feed->ts && feed->ts->reuse_decoder_buffer)
+		return feed->ts->reuse_decoder_buffer(feed->ts, cookie);
+
+	return -ENODEV;
+}
+
+static int dvb_dmxdev_set_event_mask(struct dmxdev_filter *dmxdevfilter,
+				struct dmx_events_mask *event_mask)
+{
+	if (!event_mask ||
+		(event_mask->wakeup_threshold >= DMX_EVENT_QUEUE_SIZE))
+		return -EINVAL;
+
+	if (dmxdevfilter->state == DMXDEV_STATE_GO)
+		return -EBUSY;
+
+	/*
+	 * The overflow event is not allowed to be masked.
+	 * This is because if overflow occurs, the demux stops outputting data
+	 * until the user is notified. If the user is using events to read the
+	 * data, the overflow event must always be enabled, otherwise we would
+	 * never recover from the overflow state.
+	 */
+	event_mask->disable_mask &= ~(u32)DMX_EVENT_BUFFER_OVERFLOW;
+	event_mask->no_wakeup_mask &= ~(u32)DMX_EVENT_BUFFER_OVERFLOW;
+
+	dmxdevfilter->events.event_mask = *event_mask;
+
+	return 0;
+}
+
+static int dvb_dmxdev_get_event_mask(struct dmxdev_filter *dmxdevfilter,
+				struct dmx_events_mask *event_mask)
+{
+	if (!event_mask)
+		return -EINVAL;
+
+	*event_mask = dmxdevfilter->events.event_mask;
+
+	return 0;
+}
+
+static int dvb_dmxdev_set_indexing_params(struct dmxdev_filter *dmxdevfilter,
+				struct dmx_indexing_params *idx_params)
+{
+	int found_pid;
+	struct dmxdev_feed *feed;
+	struct dmxdev_feed *ts_feed = NULL;
+	struct dmx_caps caps;
+	int ret = 0;
+
+	if (!dmxdevfilter->dev->demux->get_caps)
+		return -EINVAL;
+
+	dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps);
+
+	if (!idx_params ||
+		!(caps.caps & DMX_CAP_VIDEO_INDEXING) ||
+		(dmxdevfilter->state < DMXDEV_STATE_SET) ||
+		(dmxdevfilter->type != DMXDEV_TYPE_PES) ||
+		((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) &&
+		 (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP)))
+		return -EINVAL;
+
+	if (idx_params->enable && !idx_params->types)
+		return -EINVAL;
+
+	found_pid = 0;
+	list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
+		if (feed->pid == idx_params->pid) {
+			found_pid = 1;
+			ts_feed = feed;
+			ts_feed->idx_params = *idx_params;
+			if ((dmxdevfilter->state == DMXDEV_STATE_GO) &&
+				ts_feed->ts->set_idx_params)
+				ret = ts_feed->ts->set_idx_params(
+						ts_feed->ts, idx_params);
+			break;
+		}
+	}
+
+	if (!found_pid)
+		return -EINVAL;
+
+	return ret;
+}
+
+static int dvb_dmxdev_get_scrambling_bits(struct dmxdev_filter *filter,
+	struct dmx_scrambling_bits *scrambling_bits)
+{
+	struct dmxdev_feed *feed;
+
+	if (!scrambling_bits ||
+		(filter->state != DMXDEV_STATE_GO))
+		return -EINVAL;
+
+	if (filter->type == DMXDEV_TYPE_SEC) {
+		if (filter->feed.sec.feed->get_scrambling_bits)
+			return filter->feed.sec.feed->get_scrambling_bits(
+						filter->feed.sec.feed,
+						&scrambling_bits->value);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(feed, &filter->feed.ts, next) {
+		if (feed->pid == scrambling_bits->pid) {
+			if (feed->ts->get_scrambling_bits)
+				return feed->ts->get_scrambling_bits(feed->ts,
+						&scrambling_bits->value);
+			return -EINVAL;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static void dvb_dmxdev_ts_insertion_work(struct work_struct *worker)
+{
+	struct ts_insertion_buffer *ts_buffer =
+		container_of(to_delayed_work(worker),
+			struct ts_insertion_buffer, dwork);
+	struct dmxdev_feed *feed;
+	size_t free_bytes;
+	struct dmx_ts_feed *ts;
+
+	mutex_lock(&ts_buffer->dmxdevfilter->mutex);
+
+	if (ts_buffer->abort ||
+		(ts_buffer->dmxdevfilter->state != DMXDEV_STATE_GO)) {
+		mutex_unlock(&ts_buffer->dmxdevfilter->mutex);
+		return;
+	}
+
+	feed = list_first_entry(&ts_buffer->dmxdevfilter->feed.ts,
+				struct dmxdev_feed, next);
+	ts = feed->ts;
+	free_bytes = dvb_ringbuffer_free(&ts_buffer->dmxdevfilter->buffer);
+
+	mutex_unlock(&ts_buffer->dmxdevfilter->mutex);
+
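+	/* Insert the TS packets only if the filter's output buffer has room */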
+	if (ts_buffer->size < free_bytes)
+		ts->ts_insertion_insert_buffer(ts,
+			ts_buffer->buffer, ts_buffer->size);
+
+	if (ts_buffer->repetition_time && !ts_buffer->abort)
+		schedule_delayed_work(&ts_buffer->dwork,
+				msecs_to_jiffies(ts_buffer->repetition_time));
+}
+
+static void dvb_dmxdev_queue_ts_insertion(
+		struct ts_insertion_buffer *ts_buffer)
+{
+	size_t tsp_size;
+
+	if (ts_buffer->dmxdevfilter->dmx_tsp_format == DMX_TSP_FORMAT_188)
+		tsp_size = 188;
+	else
+		tsp_size = 192;
+
+	if (ts_buffer->size % tsp_size) {
+		pr_err("%s: Wrong buffer alignment, size=%zu, tsp_size=%zu\n",
+			__func__, ts_buffer->size, tsp_size);
+		return;
+	}
+
+	ts_buffer->abort = 0;
+	schedule_delayed_work(&ts_buffer->dwork, 0);
+}
+
+static void dvb_dmxdev_cancel_ts_insertion(
+		struct ts_insertion_buffer *ts_buffer)
+{
+	/*
+	 * This function assumes it is called while the mutex of the demux
+	 * filter is held. Since the work in the workqueue takes the filter's
+	 * mutex to protect against the DB, the mutex must be released before
+	 * waiting for the work to finish, otherwise the work in the workqueue
+	 * will never finish.
+	 */
+	if (!mutex_is_locked(&ts_buffer->dmxdevfilter->mutex)) {
+		pr_err("%s: mutex is not locked!\n", __func__);
+		return;
+	}
+
+	ts_buffer->abort = 1;
+
+	mutex_unlock(&ts_buffer->dmxdevfilter->mutex);
+	cancel_delayed_work_sync(&ts_buffer->dwork);
+	mutex_lock(&ts_buffer->dmxdevfilter->mutex);
+}
+
+static int dvb_dmxdev_set_ts_insertion(struct dmxdev_filter *dmxdevfilter,
+		struct dmx_set_ts_insertion *params)
+{
+	int ret = 0;
+	int first_buffer;
+	struct dmxdev_feed *feed;
+	struct ts_insertion_buffer *ts_buffer;
+	struct dmx_caps caps;
+
+	if (!dmxdevfilter->dev->demux->get_caps)
+		return -EINVAL;
+
+	dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps);
+
+	if (!params ||
+		!params->size ||
+		!(caps.caps & DMX_CAP_TS_INSERTION) ||
+		(dmxdevfilter->state < DMXDEV_STATE_SET) ||
+		(dmxdevfilter->type != DMXDEV_TYPE_PES) ||
+		((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) &&
+		 (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP)))
+		return -EINVAL;
+
+	ts_buffer = vmalloc(sizeof(struct ts_insertion_buffer));
+	if (!ts_buffer)
+		return -ENOMEM;
+
+	ts_buffer->buffer = vmalloc(params->size);
+	if (!ts_buffer->buffer) {
+		vfree(ts_buffer);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(ts_buffer->buffer,
+			params->ts_packets, params->size)) {
+		vfree(ts_buffer->buffer);
+		vfree(ts_buffer);
+		return -EFAULT;
+	}
+
+	if (params->repetition_time &&
+		params->repetition_time < DMX_MIN_INSERTION_REPETITION_TIME)
+		params->repetition_time = DMX_MIN_INSERTION_REPETITION_TIME;
+
+	ts_buffer->size = params->size;
+	ts_buffer->identifier = params->identifier;
+	ts_buffer->repetition_time = params->repetition_time;
+	ts_buffer->dmxdevfilter = dmxdevfilter;
+	INIT_DELAYED_WORK(&ts_buffer->dwork, dvb_dmxdev_ts_insertion_work);
+
+	first_buffer = list_empty(&dmxdevfilter->insertion_buffers);
+	list_add_tail(&ts_buffer->next, &dmxdevfilter->insertion_buffers);
+
+	if (dmxdevfilter->state != DMXDEV_STATE_GO)
+		return 0;
+
+	feed = list_first_entry(&dmxdevfilter->feed.ts,
+				struct dmxdev_feed, next);
+
+	if (first_buffer && feed->ts->ts_insertion_init)
+		ret = feed->ts->ts_insertion_init(feed->ts);
+
+	if (!ret) {
+		dvb_dmxdev_queue_ts_insertion(ts_buffer);
+	} else {
+		list_del(&ts_buffer->next);
+		vfree(ts_buffer->buffer);
+		vfree(ts_buffer);
+	}
+
+	return ret;
+}
+
+static int dvb_dmxdev_abort_ts_insertion(struct dmxdev_filter *dmxdevfilter,
+		struct dmx_abort_ts_insertion *params)
+{
+	int ret = 0;
+	int found_buffer;
+	struct dmxdev_feed *feed;
+	struct ts_insertion_buffer *ts_buffer, *tmp;
+	struct dmx_caps caps;
+
+	if (!dmxdevfilter->dev->demux->get_caps)
+		return -EINVAL;
+
+	dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps);
+
+	if (!params ||
+		!(caps.caps & DMX_CAP_TS_INSERTION) ||
+		(dmxdevfilter->state < DMXDEV_STATE_SET) ||
+		(dmxdevfilter->type != DMXDEV_TYPE_PES) ||
+		((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) &&
+		 (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP)))
+		return -EINVAL;
+
+	found_buffer = 0;
+	list_for_each_entry_safe(ts_buffer, tmp,
+			&dmxdevfilter->insertion_buffers, next) {
+		if (ts_buffer->identifier == params->identifier) {
+			list_del(&ts_buffer->next);
+			found_buffer = 1;
+			break;
+		}
+	}
+
+	if (!found_buffer)
+		return -EINVAL;
+
+	if (dmxdevfilter->state == DMXDEV_STATE_GO) {
+		dvb_dmxdev_cancel_ts_insertion(ts_buffer);
+		if (list_empty(&dmxdevfilter->insertion_buffers)) {
+			feed = list_first_entry(&dmxdevfilter->feed.ts,
+						struct dmxdev_feed, next);
+			if (feed->ts->ts_insertion_terminate)
+				ret = feed->ts->ts_insertion_terminate(
+							feed->ts);
+		}
+	}
+
+	vfree(ts_buffer->buffer);
+	vfree(ts_buffer);
+
+	return ret;
+}
+
+static int dvb_dmxdev_ts_fullness_callback(struct dmx_ts_feed *filter,
+				int required_space, int wait)
+{
+	struct dmxdev_filter *dmxdevfilter = filter->priv;
+	struct dvb_ringbuffer *src;
+	struct dmxdev_events_queue *events;
+	int ret;
+
+	if (!dmxdevfilter) {
+		pr_err("%s: NULL demux filter object!\n", __func__);
+		return -ENODEV;
+	}
+
+	if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
+		src = &dmxdevfilter->buffer;
+		events = &dmxdevfilter->events;
+	} else {
+		src = &dmxdevfilter->dev->dvr_buffer;
+		events = &dmxdevfilter->dev->dvr_output_events;
+	}
+
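+	/*
+	 * Wait (if allowed) until the output buffer and event queue have
+	 * room for the required data, or until an error/exit condition.
+	 */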
+	do {
+		ret = 0;
+
+		if (dmxdevfilter->dev->dvr_in_exit)
+			return -ENODEV;
+
+		spin_lock(&dmxdevfilter->dev->lock);
+
+		if ((!src->data) ||
+			(dmxdevfilter->state != DMXDEV_STATE_GO))
+			ret = -EINVAL;
+		else if (src->error)
+			ret = src->error;
+
+		if (ret) {
+			spin_unlock(&dmxdevfilter->dev->lock);
+			return ret;
+		}
+
+		if ((required_space <= dvb_ringbuffer_free(src)) &&
+			(!dvb_dmxdev_events_is_full(events))) {
+			spin_unlock(&dmxdevfilter->dev->lock);
+			return 0;
+		}
+
+		spin_unlock(&dmxdevfilter->dev->lock);
+
+		if (!wait)
+			return -ENOSPC;
+
+		ret = wait_event_interruptible(src->queue,
+				(!src->data) ||
+				((dvb_ringbuffer_free(src) >= required_space) &&
+				 (!dvb_dmxdev_events_is_full(events))) ||
+				(src->error != 0) ||
+				(dmxdevfilter->state != DMXDEV_STATE_GO) ||
+				dmxdevfilter->dev->dvr_in_exit);
+
+		if (ret < 0)
+			return ret;
+	} while (1);
+}
+
+static int dvb_dmxdev_sec_fullness_callback(
+				struct dmx_section_filter *filter,
+				int required_space, int wait)
+{
+	struct dmxdev_filter *dmxdevfilter = filter->priv;
+	struct dvb_ringbuffer *src;
+	struct dmxdev_events_queue *events;
+	int ret;
+
+	if (!dmxdevfilter) {
+		pr_err("%s: NULL demux filter object!\n", __func__);
+		return -ENODEV;
+	}
+
+	src = &dmxdevfilter->buffer;
+	events = &dmxdevfilter->events;
+
+	do {
+		ret = 0;
+
+		if (dmxdevfilter->dev->dvr_in_exit)
+			return -ENODEV;
+
+		spin_lock(&dmxdevfilter->dev->lock);
+
+		if ((!src->data) ||
+			(dmxdevfilter->state != DMXDEV_STATE_GO))
+			ret = -EINVAL;
+		else if (src->error)
+			ret = src->error;
+
+		if (ret) {
+			spin_unlock(&dmxdevfilter->dev->lock);
+			return ret;
+		}
+
+		if ((required_space <= dvb_ringbuffer_free(src)) &&
+			(!dvb_dmxdev_events_is_full(events))) {
+			spin_unlock(&dmxdevfilter->dev->lock);
+			return 0;
+		}
+
+		spin_unlock(&dmxdevfilter->dev->lock);
+
+		if (!wait)
+			return -ENOSPC;
+
+		ret = wait_event_interruptible(src->queue,
+				(!src->data) ||
+				((dvb_ringbuffer_free(src) >= required_space) &&
+				 (!dvb_dmxdev_events_is_full(events))) ||
+				(src->error != 0) ||
+				(dmxdevfilter->state != DMXDEV_STATE_GO) ||
+				dmxdevfilter->dev->dvr_in_exit);
+
+		if (ret < 0)
+			return ret;
+	} while (1);
+}
+
+static int dvb_dmxdev_set_playback_mode(struct dmxdev_filter *dmxdevfilter,
+					enum dmx_playback_mode_t playback_mode)
+{
+	struct dmxdev *dmxdev = dmxdevfilter->dev;
+	struct dmx_caps caps;
+
+	if (dmxdev->demux->get_caps)
+		dmxdev->demux->get_caps(dmxdev->demux, &caps);
+	else
+		caps.caps = 0;
+
+	if ((playback_mode != DMX_PB_MODE_PUSH) &&
+		(playback_mode != DMX_PB_MODE_PULL))
+		return -EINVAL;
+
+	if (dmxdev->demux->set_playback_mode == NULL)
+		return -EINVAL;
+
+	if (((dmxdev->source < DMX_SOURCE_DVR0) ||
+		 !(caps.caps & DMX_CAP_PULL_MODE)) &&
+		 (playback_mode == DMX_PB_MODE_PULL))
+		return -EPERM;
+
+	if (dmxdevfilter->state == DMXDEV_STATE_GO)
+		return -EBUSY;
+
+	dmxdev->playback_mode = playback_mode;
+
+	return dmxdev->demux->set_playback_mode(
+				dmxdev->demux,
+				dmxdev->playback_mode,
+				dvb_dmxdev_ts_fullness_callback,
+				dvb_dmxdev_sec_fullness_callback);
+}
+
+static int dvb_dmxdev_flush_buffer(struct dmxdev_filter *filter)
+{
+	size_t flush_len;
+	int ret;
+
+	if (filter->state != DMXDEV_STATE_GO)
+		return -EINVAL;
+
+	flush_len = dvb_ringbuffer_avail(&filter->buffer);
+	ret = dvb_dmxdev_flush_data(filter, flush_len);
+
+	return ret;
+}
+
+static int dvb_dmxdev_get_buffer_status(
+		struct dmxdev_filter *dmxdevfilter,
+		struct dmx_buffer_status *dmx_buffer_status)
+{
+	struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
+
+	/*
+	 * Note: Taking the dmxdevfilter->dev->lock spinlock is required only
+	 * when getting the status of the demux-to-userspace data ringbuffer.
+	 * When getting the status of a decoder buffer, taking this spinlock
+	 * is not required and in fact might lead to a deadlock.
+	 */
+	if ((dmxdevfilter->type == DMXDEV_TYPE_PES) &&
+		(dmxdevfilter->params.pes.output == DMX_OUT_DECODER)) {
+		struct dmxdev_feed *feed;
+		int ret;
+
+		/* Only one feed should be in the list in case of decoder */
+		feed = list_first_entry(&dmxdevfilter->feed.ts,
+					struct dmxdev_feed, next);
+
+		/* Ask for status of decoder's buffer from underlying HW */
+		if (feed->ts->get_decoder_buff_status)
+			ret = feed->ts->get_decoder_buff_status(
+					feed->ts,
+					dmx_buffer_status);
+		else
+			ret = -ENODEV;
+
+		return ret;
+	}
+
+	spin_lock_irq(&dmxdevfilter->dev->lock);
+
+	if (!buf->data) {
+		spin_unlock_irq(&dmxdevfilter->dev->lock);
+		return -EINVAL;
+	}
+
+	dmx_buffer_status->error = buf->error;
+	dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf);
+	dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf);
+	dmx_buffer_status->read_offset = buf->pread;
+	dmx_buffer_status->write_offset = buf->pwrite;
+	dmx_buffer_status->size = buf->size;
+	buf->error = 0;
+
+	spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+	if (dmx_buffer_status->error == -EOVERFLOW)
+		dvb_dmxdev_auto_flush_buffer(dmxdevfilter, buf);
+
+	return 0;
+}
+
+static int dvb_dmxdev_release_data(struct dmxdev_filter *dmxdevfilter,
+					u32 bytes_count)
+{
+	ssize_t buff_fullness;
+
+	if (!dmxdevfilter->buffer.data)
+		return -EINVAL;
+
+	if (!bytes_count)
+		return 0;
+
+	buff_fullness = dvb_ringbuffer_avail(&dmxdevfilter->buffer);
+
+	if (bytes_count > buff_fullness)
+		return -EINVAL;
+
+	DVB_RINGBUFFER_SKIP(&dmxdevfilter->buffer, bytes_count);
+
+	dvb_dmxdev_notify_data_read(dmxdevfilter, bytes_count);
+	spin_lock_irq(&dmxdevfilter->dev->lock);
+	dvb_dmxdev_update_events(&dmxdevfilter->events, bytes_count);
+	spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+	wake_up_all(&dmxdevfilter->buffer.queue);
+
+	return 0;
+}
+
+static int dvb_dmxdev_get_event(struct dmxdev_filter *dmxdevfilter,
+					struct dmx_filter_event *event)
+{
+	int res = 0;
+
+	spin_lock_irq(&dmxdevfilter->dev->lock);
+
+	/* Check first for filter overflow */
+	if (dmxdevfilter->buffer.error == -EOVERFLOW) {
+		event->type = DMX_EVENT_BUFFER_OVERFLOW;
+	} else {
+		res = dvb_dmxdev_remove_event(&dmxdevfilter->events, event);
+		if (res) {
+			spin_unlock_irq(&dmxdevfilter->dev->lock);
+			return res;
+		}
+	}
+
+	/* clear buffer error now that user was notified */
+	if (event->type == DMX_EVENT_BUFFER_OVERFLOW ||
+		event->type == DMX_EVENT_SECTION_TIMEOUT)
+		dmxdevfilter->buffer.error = 0;
+
+	spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+	if (event->type == DMX_EVENT_BUFFER_OVERFLOW)
+		dvb_dmxdev_auto_flush_buffer(dmxdevfilter,
+			&dmxdevfilter->buffer);
+
+	spin_lock_irq(&dmxdevfilter->dev->lock);
+
+	/*
+	 * If no-data events are enabled on this filter, events can be
+	 * removed from the queue as soon as the user reads them.
+	 * For filters with data events enabled, an event is removed from
+	 * the queue only when the respective data is read.
+	 */
+	if (event->type != DMX_EVENT_BUFFER_OVERFLOW &&
+		dmxdevfilter->events.data_read_event_masked)
+		dmxdevfilter->events.read_index =
+			dvb_dmxdev_advance_event_idx(
+				dmxdevfilter->events.read_index);
+
+	spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+	/*
+	 * In PULL mode, we might be stalled on the event queue,
+	 * so we need to wake up waiters.
+	 */
+	if (dmxdevfilter->dev->playback_mode == DMX_PB_MODE_PULL)
+		wake_up_all(&dmxdevfilter->buffer.queue);
+
+	return res;
+}
+
 static void dvb_dmxdev_filter_timeout(unsigned long data)
 {
 	struct dmxdev_filter *dmxdevfilter = (struct dmxdev_filter *)data;
+	struct dmx_filter_event event;
 
 	dmxdevfilter->buffer.error = -ETIMEDOUT;
 	spin_lock_irq(&dmxdevfilter->dev->lock);
 	dmxdevfilter->state = DMXDEV_STATE_TIMEDOUT;
+	event.type = DMX_EVENT_SECTION_TIMEOUT;
+	dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
 	spin_unlock_irq(&dmxdevfilter->dev->lock);
-	wake_up(&dmxdevfilter->buffer.queue);
+	wake_up_all(&dmxdevfilter->buffer.queue);
 }
 
 static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
@@ -355,31 +2608,77 @@
 				       struct dmx_section_filter *filter)
 {
 	struct dmxdev_filter *dmxdevfilter = filter->priv;
-	int ret;
+	struct dmx_filter_event event;
+	ssize_t free;
 
-	if (dmxdevfilter->buffer.error) {
-		wake_up(&dmxdevfilter->buffer.queue);
-		return 0;
+
+	if (!dmxdevfilter) {
+		pr_err("%s: null filter.\n", __func__);
+		return -EINVAL;
 	}
+
 	spin_lock(&dmxdevfilter->dev->lock);
-	if (dmxdevfilter->state != DMXDEV_STATE_GO) {
+
+	if (dmxdevfilter->buffer.error ||
+		dmxdevfilter->state != DMXDEV_STATE_GO ||
+		dmxdevfilter->eos_state) {
 		spin_unlock(&dmxdevfilter->dev->lock);
 		return 0;
 	}
-	del_timer(&dmxdevfilter->timer);
-	dprintk("dmxdev: section callback %*ph\n", 6, buffer1);
-	ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1,
-				      buffer1_len);
-	if (ret == buffer1_len) {
-		ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2,
-					      buffer2_len);
+
+	/* Discard section data if event cannot be notified */
+	if (!(dmxdevfilter->events.event_mask.disable_mask &
+		DMX_EVENT_NEW_SECTION) &&
+		dvb_dmxdev_events_is_full(&dmxdevfilter->events)) {
+		spin_unlock(&dmxdevfilter->dev->lock);
+		return 0;
 	}
-	if (ret < 0)
-		dmxdevfilter->buffer.error = ret;
+
+	if ((buffer1_len + buffer2_len) == 0) {
+		if (buffer1 == NULL && buffer2 == NULL) {
+			/* Section was dropped due to CRC error */
+			event.type = DMX_EVENT_SECTION_CRC_ERROR;
+			dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+
+			spin_unlock(&dmxdevfilter->dev->lock);
+			wake_up_all(&dmxdevfilter->buffer.queue);
+		} else {
+			spin_unlock(&dmxdevfilter->dev->lock);
+		}
+
+		return 0;
+	}
+
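+	/* Record where this section will be written in the output buffer */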
+	event.params.section.base_offset = dmxdevfilter->buffer.pwrite;
+	event.params.section.start_offset = dmxdevfilter->buffer.pwrite;
+
+	del_timer(&dmxdevfilter->timer);
+
+	/* Verify output buffer has sufficient space, or report overflow */
+	free = dvb_ringbuffer_free(&dmxdevfilter->buffer);
+	if (free < (buffer1_len + buffer2_len)) {
+		pr_debug("%s: section filter overflow (pid=%u)\n",
+			__func__, dmxdevfilter->params.sec.pid);
+		dmxdevfilter->buffer.error = -EOVERFLOW;
+		spin_unlock(&dmxdevfilter->dev->lock);
+		wake_up_all(&dmxdevfilter->buffer.queue);
+		return 0;
+	}
+
+	dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1, buffer1_len);
+	dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2, buffer2_len);
+
+	event.type = DMX_EVENT_NEW_SECTION;
+	event.params.section.total_length = buffer1_len + buffer2_len;
+	event.params.section.actual_length =
+		event.params.section.total_length;
+
+	dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+
 	if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
 		dmxdevfilter->state = DMXDEV_STATE_DONE;
 	spin_unlock(&dmxdevfilter->dev->lock);
-	wake_up(&dmxdevfilter->buffer.queue);
+	wake_up_all(&dmxdevfilter->buffer.queue);
 	return 0;
 }
 
@@ -389,31 +2688,401 @@
 {
 	struct dmxdev_filter *dmxdevfilter = feed->priv;
 	struct dvb_ringbuffer *buffer;
-	int ret;
+	struct dmxdev_events_queue *events;
+	struct dmx_filter_event event;
+	ssize_t free;
 
+	if (!dmxdevfilter) {
+		pr_err("%s: null filter (feed->is_filtering=%d)\n",
+			__func__, feed->is_filtering);
+		return -EINVAL;
+	}
 	spin_lock(&dmxdevfilter->dev->lock);
-	if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
+
+	if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER ||
+		dmxdevfilter->state != DMXDEV_STATE_GO ||
+		dmxdevfilter->eos_state) {
 		spin_unlock(&dmxdevfilter->dev->lock);
 		return 0;
 	}
 
-	if (dmxdevfilter->params.pes.output == DMX_OUT_TAP
-	    || dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
+	if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
 		buffer = &dmxdevfilter->buffer;
-	else
+		events = &dmxdevfilter->events;
+	} else {
 		buffer = &dmxdevfilter->dev->dvr_buffer;
+		events = &dmxdevfilter->dev->dvr_output_events;
+	}
+
 	if (buffer->error) {
 		spin_unlock(&dmxdevfilter->dev->lock);
-		wake_up(&buffer->queue);
+		wake_up_all(&buffer->queue);
+		return buffer->error;
+	}
+
+	if (!events->current_event_data_size)
+		events->current_event_start_offset = buffer->pwrite;
+
+	/* Verify output buffer has sufficient space, or report overflow */
+	free = dvb_ringbuffer_free(buffer);
+	if (free < (buffer1_len + buffer2_len)) {
+		pr_debug("%s: buffer overflow error, pid=%u\n",
+			__func__, dmxdevfilter->params.pes.pid);
+		buffer->error = -EOVERFLOW;
+		spin_unlock(&dmxdevfilter->dev->lock);
+		wake_up_all(&buffer->queue);
+
+		return -EOVERFLOW;
+	}
+
+	if (buffer1_len + buffer2_len) {
+		dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
+		dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
+
+		events->current_event_data_size += (buffer1_len + buffer2_len);
+
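+		/*
+		 * Once a full recording chunk has accumulated, report it
+		 * to the user with a NEW_REC_CHUNK event.
+		 */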
+		if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP ||
+			dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
+			&& events->current_event_data_size >=
+				dmxdevfilter->params.pes.rec_chunk_size) {
+			event.type = DMX_EVENT_NEW_REC_CHUNK;
+			event.params.recording_chunk.offset =
+				events->current_event_start_offset;
+			event.params.recording_chunk.size =
+				events->current_event_data_size;
+
+			dvb_dmxdev_add_event(events, &event);
+			events->current_event_data_size = 0;
+		}
+	}
+
+	spin_unlock(&dmxdevfilter->dev->lock);
+	wake_up_all(&buffer->queue);
+	return 0;
+}
+
+static int dvb_dmxdev_section_event_cb(struct dmx_section_filter *filter,
+			struct dmx_data_ready *dmx_data_ready)
+{
+	int res = 0;
+	struct dmxdev_filter *dmxdevfilter = filter->priv;
+	struct dmx_filter_event event;
+	ssize_t free;
+
+	if (!dmxdevfilter) {
+		pr_err("%s: null filter. event type=%d (length=%d) will be discarded\n",
+			__func__, dmx_data_ready->status,
+			dmx_data_ready->data_length);
+		return -EINVAL;
+	}
+
+	spin_lock(&dmxdevfilter->dev->lock);
+
+	if (dmxdevfilter->buffer.error == -ETIMEDOUT ||
+		dmxdevfilter->state != DMXDEV_STATE_GO ||
+		dmxdevfilter->eos_state) {
+		spin_unlock(&dmxdevfilter->dev->lock);
 		return 0;
 	}
-	ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
-	if (ret == buffer1_len)
-		ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
-	if (ret < 0)
-		buffer->error = ret;
+
+	if (dmx_data_ready->data_length == 0) {
+		if (dmx_data_ready->status == DMX_CRC_ERROR) {
+			/* Section was dropped due to CRC error */
+			event.type = DMX_EVENT_SECTION_CRC_ERROR;
+			dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+
+			spin_unlock(&dmxdevfilter->dev->lock);
+			wake_up_all(&dmxdevfilter->buffer.queue);
+		} else if (dmx_data_ready->status == DMX_OK_EOS) {
+			event.type = DMX_EVENT_EOS;
+			dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+			spin_unlock(&dmxdevfilter->dev->lock);
+			wake_up_all(&dmxdevfilter->buffer.queue);
+		} else if (dmx_data_ready->status == DMX_OK_MARKER) {
+			event.type = DMX_EVENT_MARKER;
+			event.params.marker.id = dmx_data_ready->marker.id;
+			dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+			spin_unlock(&dmxdevfilter->dev->lock);
+			wake_up_all(&dmxdevfilter->buffer.queue);
+		} else if (dmx_data_ready->status == DMX_OK_SCRAMBLING_STATUS) {
+			event.type = DMX_EVENT_SCRAMBLING_STATUS_CHANGE;
+			event.params.scrambling_status =
+				dmx_data_ready->scrambling_bits;
+			dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+			spin_unlock(&dmxdevfilter->dev->lock);
+			wake_up_all(&dmxdevfilter->buffer.queue);
+		} else if (dmx_data_ready->status == DMX_OVERRUN_ERROR) {
+			pr_debug("dmxdev: section filter overflow (pid=%u)\n",
+				dmxdevfilter->params.sec.pid);
+			/* Set buffer error to notify user overflow occurred */
+			dmxdevfilter->buffer.error = -EOVERFLOW;
+			spin_unlock(&dmxdevfilter->dev->lock);
+			wake_up_all(&dmxdevfilter->buffer.queue);
+		} else {
+			spin_unlock(&dmxdevfilter->dev->lock);
+		}
+		return 0;
+	}
+
+	event.type = DMX_EVENT_NEW_SECTION;
+	event.params.section.base_offset = dmxdevfilter->buffer.pwrite;
+	event.params.section.start_offset = dmxdevfilter->buffer.pwrite;
+	event.params.section.total_length = dmx_data_ready->data_length;
+	event.params.section.actual_length = dmx_data_ready->data_length;
+
+	if (dmx_data_ready->status == DMX_MISSED_ERROR)
+		event.params.section.flags = DMX_FILTER_CC_ERROR;
+	else
+		event.params.section.flags = 0;
+
+	free = dvb_ringbuffer_free(&dmxdevfilter->buffer);
+	if (free < dmx_data_ready->data_length) {
+		pr_err("%s: invalid data length: data_length=%d > free=%zd\n",
+			__func__, dmx_data_ready->data_length, free);
+	} else {
+		res = dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+		DVB_RINGBUFFER_PUSH(&dmxdevfilter->buffer,
+			dmx_data_ready->data_length);
+	}
+
 	spin_unlock(&dmxdevfilter->dev->lock);
-	wake_up(&buffer->queue);
+	wake_up_all(&dmxdevfilter->buffer.queue);
+
+	return res;
+}
+
+static int dvb_dmxdev_ts_event_cb(struct dmx_ts_feed *feed,
+			struct dmx_data_ready *dmx_data_ready)
+{
+	struct dmxdev_filter *dmxdevfilter = feed->priv;
+	struct dvb_ringbuffer *buffer;
+	struct dmxdev_events_queue *events;
+	struct dmx_filter_event event;
+	ssize_t free;
+
+	if (!dmxdevfilter) {
+		pr_err("%s: null filter (feed->is_filtering=%d) event type=%d (length=%d) will be discarded\n",
+			__func__, feed->is_filtering,
+			dmx_data_ready->status,
+			dmx_data_ready->data_length);
+		return -EINVAL;
+	}
+
+	spin_lock(&dmxdevfilter->dev->lock);
+
+	if (dmxdevfilter->state != DMXDEV_STATE_GO ||
+		dmxdevfilter->eos_state) {
+		spin_unlock(&dmxdevfilter->dev->lock);
+		return 0;
+	}
+
+	if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
+		buffer = &dmxdevfilter->buffer;
+		events = &dmxdevfilter->events;
+	} else {
+		buffer = &dmxdevfilter->dev->dvr_buffer;
+		events = &dmxdevfilter->dev->dvr_output_events;
+	}
+
+	if (!buffer->error && dmx_data_ready->status == DMX_OVERRUN_ERROR) {
+		pr_debug("dmxdev: %s filter buffer overflow (pid=%u)\n",
+			dmxdevfilter->params.pes.output == DMX_OUT_DECODER ?
+				"decoder" : "",
+			dmxdevfilter->params.pes.pid);
+		/* Set buffer error to notify user overflow occurred */
+		buffer->error = -EOVERFLOW;
+		spin_unlock(&dmxdevfilter->dev->lock);
+		wake_up_all(&buffer->queue);
+		return 0;
+	}
+
+	if (dmx_data_ready->status == DMX_OK_EOS) {
+		/* Report partial recording chunk */
+		if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP ||
+			dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
+			&& events->current_event_data_size) {
+			event.type = DMX_EVENT_NEW_REC_CHUNK;
+			event.params.recording_chunk.offset =
+				events->current_event_start_offset;
+			event.params.recording_chunk.size =
+				events->current_event_data_size;
+			events->current_event_start_offset =
+				(events->current_event_start_offset +
+				events->current_event_data_size) %
+				buffer->size;
+			events->current_event_data_size = 0;
+			dvb_dmxdev_add_event(events, &event);
+		}
+
+		dmxdevfilter->eos_state = 1;
+		pr_debug("dmxdev: DMX_OK_EOS - entering EOS state\n");
+		event.type = DMX_EVENT_EOS;
+		dvb_dmxdev_add_event(events, &event);
+		spin_unlock(&dmxdevfilter->dev->lock);
+		wake_up_all(&buffer->queue);
+		return 0;
+	}
+
+	if (dmx_data_ready->status == DMX_OK_MARKER) {
+		pr_debug("dmxdev: DMX_OK_MARKER - id=%llu\n",
+			dmx_data_ready->marker.id);
+		event.type = DMX_EVENT_MARKER;
+		event.params.marker.id = dmx_data_ready->marker.id;
+		dvb_dmxdev_add_event(events, &event);
+		spin_unlock(&dmxdevfilter->dev->lock);
+		wake_up_all(&buffer->queue);
+		return 0;
+	}
+
+	if (dmx_data_ready->status == DMX_OK_PCR) {
+		pr_debug("dmxdev: event callback DMX_OK_PCR\n");
+		event.type = DMX_EVENT_NEW_PCR;
+		event.params.pcr.pcr = dmx_data_ready->pcr.pcr;
+		event.params.pcr.stc = dmx_data_ready->pcr.stc;
+		if (dmx_data_ready->pcr.disc_indicator_set)
+			event.params.pcr.flags =
+				DMX_FILTER_DISCONTINUITY_INDICATOR;
+		else
+			event.params.pcr.flags = 0;
+
+		dvb_dmxdev_add_event(events, &event);
+		spin_unlock(&dmxdevfilter->dev->lock);
+		wake_up_all(&buffer->queue);
+		return 0;
+	}
+
+	if (dmx_data_ready->status == DMX_OK_IDX) {
+		pr_debug("dmxdev: event callback DMX_OK_IDX\n");
+		event.type = DMX_EVENT_NEW_INDEX_ENTRY;
+		event.params.index = dmx_data_ready->idx_event;
+
+		dvb_dmxdev_add_event(events, &event);
+		spin_unlock(&dmxdevfilter->dev->lock);
+		wake_up_all(&buffer->queue);
+		return 0;
+	}
+
+	if (dmx_data_ready->status == DMX_OK_SCRAMBLING_STATUS) {
+		event.type = DMX_EVENT_SCRAMBLING_STATUS_CHANGE;
+		event.params.scrambling_status =
+			dmx_data_ready->scrambling_bits;
+		dvb_dmxdev_add_event(events, &event);
+		spin_unlock(&dmxdevfilter->dev->lock);
+		wake_up_all(&buffer->queue);
+		return 0;
+	}
+
+	if (dmx_data_ready->status == DMX_OK_DECODER_BUF) {
+		event.type = DMX_EVENT_NEW_ES_DATA;
+		event.params.es_data.buf_handle = dmx_data_ready->buf.handle;
+		event.params.es_data.cookie = dmx_data_ready->buf.cookie;
+		event.params.es_data.offset = dmx_data_ready->buf.offset;
+		event.params.es_data.data_len = dmx_data_ready->buf.len;
+		event.params.es_data.pts_valid = dmx_data_ready->buf.pts_exists;
+		event.params.es_data.pts = dmx_data_ready->buf.pts;
+		event.params.es_data.dts_valid = dmx_data_ready->buf.dts_exists;
+		event.params.es_data.dts = dmx_data_ready->buf.dts;
+		event.params.es_data.stc = dmx_data_ready->buf.stc;
+		event.params.es_data.transport_error_indicator_counter =
+				dmx_data_ready->buf.tei_counter;
+		event.params.es_data.continuity_error_counter =
+				dmx_data_ready->buf.cont_err_counter;
+		event.params.es_data.ts_packets_num =
+				dmx_data_ready->buf.ts_packets_num;
+		event.params.es_data.ts_dropped_bytes =
+				dmx_data_ready->buf.ts_dropped_bytes;
+		dvb_dmxdev_add_event(events, &event);
+		spin_unlock(&dmxdevfilter->dev->lock);
+		wake_up_all(&buffer->queue);
+		return 0;
+	}
+
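+	/*
+	 * Decoder output is consumed through decoder buffers (reported
+	 * above via DMX_OK_DECODER_BUF), so nothing is pushed to the
+	 * dmxdev ring buffer for it.
+	 */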
+	if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
+		spin_unlock(&dmxdevfilter->dev->lock);
+		wake_up_all(&buffer->queue);
+		return 0;
+	}
+
+	free = dvb_ringbuffer_free(buffer);
+	if (free < dmx_data_ready->data_length) {
+		pr_err("%s: invalid data length: data_length=%d > free=%zd\n",
+			__func__, dmx_data_ready->data_length, free);
+
+		spin_unlock(&dmxdevfilter->dev->lock);
+		wake_up_all(&buffer->queue);
+		return 0;
+	}
+
+	if (dmxdevfilter->params.pes.output == DMX_OUT_TAP) {
+		if (dmx_data_ready->status == DMX_OK &&
+			!events->current_event_data_size) {
+			events->current_event_start_offset = buffer->pwrite;
+		} else if (dmx_data_ready->status == DMX_OK_PES_END) {
+			event.type = DMX_EVENT_NEW_PES;
+
+			event.params.pes.base_offset =
+				events->current_event_start_offset;
+			event.params.pes.start_offset =
+				(events->current_event_start_offset +
+				dmx_data_ready->pes_end.start_gap) %
+				buffer->size;
+
+			event.params.pes.actual_length =
+				dmx_data_ready->pes_end.actual_length;
+			event.params.pes.total_length =
+				events->current_event_data_size;
+
+			event.params.pes.flags = 0;
+			if (dmx_data_ready->pes_end.disc_indicator_set)
+				event.params.pes.flags |=
+					DMX_FILTER_DISCONTINUITY_INDICATOR;
+			if (dmx_data_ready->pes_end.pes_length_mismatch)
+				event.params.pes.flags |=
+					DMX_FILTER_PES_LENGTH_ERROR;
+
+			event.params.pes.stc = dmx_data_ready->pes_end.stc;
+			event.params.pes.transport_error_indicator_counter =
+				dmx_data_ready->pes_end.tei_counter;
+			event.params.pes.continuity_error_counter =
+				dmx_data_ready->pes_end.cont_err_counter;
+			event.params.pes.ts_packets_num =
+				dmx_data_ready->pes_end.ts_packets_num;
+
+			/* Do not report zero length PES */
+			if (event.params.pes.total_length)
+				dvb_dmxdev_add_event(events, &event);
+
+			events->current_event_data_size = 0;
+		}
+	} else if (!events->current_event_data_size) {
+		events->current_event_start_offset = buffer->pwrite;
+	}
+
+	events->current_event_data_size += dmx_data_ready->data_length;
+	DVB_RINGBUFFER_PUSH(buffer, dmx_data_ready->data_length);
+
+	if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP) ||
+		(dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)) {
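+		/* Report a new recording-chunk event for every full chunk */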
+		while (events->current_event_data_size >=
+			dmxdevfilter->params.pes.rec_chunk_size) {
+			event.type = DMX_EVENT_NEW_REC_CHUNK;
+			event.params.recording_chunk.offset =
+				events->current_event_start_offset;
+			event.params.recording_chunk.size =
+				dmxdevfilter->params.pes.rec_chunk_size;
+			events->current_event_data_size =
+				events->current_event_data_size -
+				dmxdevfilter->params.pes.rec_chunk_size;
+			events->current_event_start_offset =
+				(events->current_event_start_offset +
+				dmxdevfilter->params.pes.rec_chunk_size) %
+				buffer->size;
+
+			dvb_dmxdev_add_event(events, &event);
+		}
+	}
+	spin_unlock(&dmxdevfilter->dev->lock);
+	wake_up_all(&buffer->queue);
 	return 0;
 }
 
@@ -427,11 +3096,18 @@
 	switch (dmxdevfilter->type) {
 	case DMXDEV_TYPE_SEC:
 		del_timer(&dmxdevfilter->timer);
-		dmxdevfilter->feed.sec->stop_filtering(dmxdevfilter->feed.sec);
+		dmxdevfilter->feed.sec.feed->stop_filtering(
+			dmxdevfilter->feed.sec.feed);
 		break;
 	case DMXDEV_TYPE_PES:
-		list_for_each_entry(feed, &dmxdevfilter->feed.ts, next)
+		list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
+			if (dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP) {
+				dmxdevfilter->dev->dvr_feeds_count--;
+				if (!dmxdevfilter->dev->dvr_feeds_count)
+					dmxdevfilter->dev->dvr_feed = NULL;
+			}
 			feed->ts->stop_filtering(feed->ts);
+		}
 		break;
 	default:
 		return -EINVAL;
@@ -449,7 +3125,8 @@
 
 	switch (filter->type) {
 	case DMXDEV_TYPE_SEC:
-		return filter->feed.sec->start_filtering(filter->feed.sec);
+		return filter->feed.sec.feed->start_filtering(
+			filter->feed.sec.feed);
 	case DMXDEV_TYPE_PES:
 		list_for_each_entry(feed, &filter->feed.ts, next) {
 			ret = feed->ts->start_filtering(feed->ts);
@@ -483,7 +3160,7 @@
 		}
 
 	filter->dev->demux->release_section_feed(dmxdev->demux,
-						 filter->feed.sec);
+						 filter->feed.sec.feed);
 
 	return 0;
 }
@@ -492,25 +3169,38 @@
 {
 	struct dmxdev_feed *feed;
 	struct dmx_demux *demux;
+	struct ts_insertion_buffer *ts_buffer;
 
 	if (dmxdevfilter->state < DMXDEV_STATE_GO)
 		return 0;
 
 	switch (dmxdevfilter->type) {
 	case DMXDEV_TYPE_SEC:
-		if (!dmxdevfilter->feed.sec)
+		if (!dmxdevfilter->feed.sec.feed)
 			break;
 		dvb_dmxdev_feed_stop(dmxdevfilter);
 		if (dmxdevfilter->filter.sec)
-			dmxdevfilter->feed.sec->
-			    release_filter(dmxdevfilter->feed.sec,
+			dmxdevfilter->feed.sec.feed->
+			    release_filter(dmxdevfilter->feed.sec.feed,
 					   dmxdevfilter->filter.sec);
 		dvb_dmxdev_feed_restart(dmxdevfilter);
-		dmxdevfilter->feed.sec = NULL;
+		dmxdevfilter->feed.sec.feed = NULL;
 		break;
 	case DMXDEV_TYPE_PES:
 		dvb_dmxdev_feed_stop(dmxdevfilter);
 		demux = dmxdevfilter->dev->demux;
+
+		if (!list_empty(&dmxdevfilter->insertion_buffers)) {
+			feed = list_first_entry(&dmxdevfilter->feed.ts,
+				struct dmxdev_feed, next);
+
+			list_for_each_entry(ts_buffer,
+					&dmxdevfilter->insertion_buffers, next)
+				dvb_dmxdev_cancel_ts_insertion(ts_buffer);
+			if (feed->ts->ts_insertion_terminate)
+				feed->ts->ts_insertion_terminate(feed->ts);
+		}
+
 		list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
 			demux->release_ts_feed(demux, feed->ts);
 			feed->ts = NULL;
@@ -522,7 +3212,13 @@
 		return -EINVAL;
 	}
 
-	dvb_ringbuffer_flush(&dmxdevfilter->buffer);
+	spin_lock_irq(&dmxdevfilter->dev->lock);
+	dvb_dmxdev_flush_output(&dmxdevfilter->buffer, &dmxdevfilter->events);
+	dvb_ringbuffer_reset(&dmxdevfilter->buffer);
+	spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+	wake_up_all(&dmxdevfilter->buffer.queue);
+
 	return 0;
 }
 
@@ -589,12 +3285,76 @@
 	tsfeed = feed->ts;
 	tsfeed->priv = filter;
 
-	ret = tsfeed->set(tsfeed, feed->pid, ts_type, ts_pes, 32768, timeout);
+	if (filter->params.pes.output == DMX_OUT_TS_TAP) {
+		tsfeed->buffer.ringbuff = &dmxdev->dvr_buffer;
+		tsfeed->buffer.priv_handle = dmxdev->dvr_priv_buff_handle;
+		if (!dmxdev->dvr_feeds_count)
+			dmxdev->dvr_feed = filter;
+		dmxdev->dvr_feeds_count++;
+	} else if (filter->params.pes.output == DMX_OUT_DECODER) {
+		tsfeed->buffer.ringbuff = &filter->buffer;
+		tsfeed->decoder_buffers = &filter->decoder_buffers;
+		tsfeed->buffer.priv_handle = filter->priv_buff_handle;
+	} else {
+		tsfeed->buffer.ringbuff = &filter->buffer;
+		tsfeed->buffer.priv_handle = filter->priv_buff_handle;
+	}
+
+	if (tsfeed->data_ready_cb) {
+		ret = tsfeed->data_ready_cb(tsfeed, dvb_dmxdev_ts_event_cb);
+
+		if (ret < 0) {
+			dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
+			return ret;
+		}
+	}
+
+	ret = tsfeed->set(tsfeed, feed->pid,
+					ts_type, ts_pes,
+					filter->decoder_buffers.buffers_size,
+					timeout);
 	if (ret < 0) {
 		dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
 		return ret;
 	}
 
+	if (tsfeed->set_tsp_out_format)
+		tsfeed->set_tsp_out_format(tsfeed, filter->dmx_tsp_format);
+
+	if (tsfeed->set_secure_mode)
+		tsfeed->set_secure_mode(tsfeed, &filter->sec_mode);
+
+	if (tsfeed->set_cipher_ops)
+		tsfeed->set_cipher_ops(tsfeed, &feed->cipher_ops);
+
+	if ((para->pes_type == DMX_PES_VIDEO0) ||
+	    (para->pes_type == DMX_PES_VIDEO1) ||
+	    (para->pes_type == DMX_PES_VIDEO2) ||
+	    (para->pes_type == DMX_PES_VIDEO3)) {
+		if (tsfeed->set_video_codec) {
+			ret = tsfeed->set_video_codec(tsfeed,
+							para->video_codec);
+
+			if (ret < 0) {
+				dmxdev->demux->release_ts_feed(dmxdev->demux,
+								tsfeed);
+				return ret;
+			}
+		}
+	}
+
+	if ((filter->params.pes.output == DMX_OUT_TS_TAP) ||
+		(filter->params.pes.output == DMX_OUT_TSDEMUX_TAP))
+		if (tsfeed->set_idx_params) {
+			ret = tsfeed->set_idx_params(
+					tsfeed, &feed->idx_params);
+			if (ret) {
+				dmxdev->demux->release_ts_feed(dmxdev->demux,
+					tsfeed);
+				return ret;
+			}
+		}
+
 	ret = tsfeed->start_filtering(tsfeed);
 	if (ret < 0) {
 		dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
@@ -604,12 +3364,50 @@
 	return 0;
 }
 
+static int dvb_filter_external_buffer_only(struct dmxdev *dmxdev,
+	struct dmxdev_filter *filter)
+{
+	struct dmx_caps caps;
+	int is_external_only;
+	int flags;
+
+	/*
+	 * For backward compatibility, the default assumes that
+	 * external-only buffers are not supported.
+	 */
+	flags = 0;
+	if (dmxdev->demux->get_caps) {
+		dmxdev->demux->get_caps(dmxdev->demux, &caps);
+
+		if (filter->type == DMXDEV_TYPE_SEC)
+			flags = caps.section.flags;
+		else if (filter->params.pes.output == DMX_OUT_DECODER)
+			/* For decoder filters dmxdev buffer is not required */
+			flags = 0;
+		else if (filter->params.pes.output == DMX_OUT_TAP)
+			flags = caps.pes.flags;
+		else if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188)
+			flags = caps.recording_188_tsp.flags;
+		else
+			flags = caps.recording_192_tsp.flags;
+	}
+
+	if (!(flags & DMX_BUFFER_INTERNAL_SUPPORT) &&
+		(flags & DMX_BUFFER_EXTERNAL_SUPPORT))
+		is_external_only = 1;
+	else
+		is_external_only = 0;
+
+	return is_external_only;
+}
+
 static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
 {
 	struct dmxdev *dmxdev = filter->dev;
 	struct dmxdev_feed *feed;
 	void *mem;
 	int ret, i;
+	size_t tsp_size;
 
 	if (filter->state < DMXDEV_STATE_SET)
 		return -EINVAL;
@@ -617,34 +3415,64 @@
 	if (filter->state >= DMXDEV_STATE_GO)
 		dvb_dmxdev_filter_stop(filter);
 
+	if (!dvb_filter_verify_buffer_size(filter))
+		return -EINVAL;
+
 	if (!filter->buffer.data) {
-		mem = vmalloc(filter->buffer.size);
+		/*
+		 * dmxdev buffer in decoder filters is not really used
+		 * to exchange data with applications. Decoder buffers
+		 * can be set using DMX_SET_DECODER_BUFFER, which
+		 * would not update the filter->buffer.data at all.
+		 * Therefore this filter should not be treated like
+		 * regular filters, and we should not fail here even
+		 * if the user sets the decoder filter's buffer as an
+		 * external buffer.
+		 */
+		if (filter->type == DMXDEV_TYPE_PES &&
+			(filter->params.pes.output == DMX_OUT_DECODER ||
+			filter->params.pes.output == DMX_OUT_TS_TAP))
+			filter->buffer_mode = DMX_BUFFER_MODE_INTERNAL;
+
+		if (!(filter->type == DMXDEV_TYPE_PES &&
+			filter->params.pes.output == DMX_OUT_TS_TAP) &&
+			(filter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL ||
+			dvb_filter_external_buffer_only(dmxdev, filter)))
+			return -ENOMEM;
+
+		mem = vmalloc_user(filter->buffer.size);
 		if (!mem)
 			return -ENOMEM;
 		spin_lock_irq(&filter->dev->lock);
 		filter->buffer.data = mem;
 		spin_unlock_irq(&filter->dev->lock);
+	} else if ((filter->buffer_mode == DMX_BUFFER_MODE_INTERNAL) &&
+			dvb_filter_external_buffer_only(dmxdev, filter)) {
+		return -ENOMEM;
 	}
 
-	dvb_ringbuffer_flush(&filter->buffer);
+	filter->eos_state = 0;
+
+	spin_lock_irq(&filter->dev->lock);
+	dvb_dmxdev_flush_output(&filter->buffer, &filter->events);
+	spin_unlock_irq(&filter->dev->lock);
 
 	switch (filter->type) {
 	case DMXDEV_TYPE_SEC:
 	{
 		struct dmx_sct_filter_params *para = &filter->params.sec;
 		struct dmx_section_filter **secfilter = &filter->filter.sec;
-		struct dmx_section_feed **secfeed = &filter->feed.sec;
+		struct dmx_section_feed **secfeed = &filter->feed.sec.feed;
 
 		*secfilter = NULL;
 		*secfeed = NULL;
 
-
 		/* find active filter/feed with same PID */
 		for (i = 0; i < dmxdev->filternum; i++) {
 			if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
 			    dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
 			    dmxdev->filter[i].params.sec.pid == para->pid) {
-				*secfeed = dmxdev->filter[i].feed.sec;
+				*secfeed = dmxdev->filter[i].feed.sec.feed;
 				break;
 			}
 		}
@@ -652,22 +3480,44 @@
 		/* if no feed found, try to allocate new one */
 		if (!*secfeed) {
 			ret = dmxdev->demux->allocate_section_feed(dmxdev->demux,
-								   secfeed,
-								   dvb_dmxdev_section_callback);
+						secfeed,
+						dvb_dmxdev_section_callback);
 			if (ret < 0) {
-				printk("DVB (%s): could not alloc feed\n",
+				pr_err("DVB (%s): could not alloc feed\n",
 				       __func__);
 				return ret;
 			}
 
+			if ((*secfeed)->data_ready_cb) {
+				ret = (*secfeed)->data_ready_cb(
+						*secfeed,
+						dvb_dmxdev_section_event_cb);
+
+				if (ret < 0) {
+					pr_err(
+						"DVB (%s): could not set event cb\n",
+						__func__);
+					dvb_dmxdev_feed_restart(filter);
+					return ret;
+				}
+			}
+
 			ret = (*secfeed)->set(*secfeed, para->pid, 32768,
 					      (para->flags & DMX_CHECK_CRC) ? 1 : 0);
 			if (ret < 0) {
-				printk("DVB (%s): could not set feed\n",
-				       __func__);
+				pr_err("DVB (%s): could not set feed\n",
+					__func__);
 				dvb_dmxdev_feed_restart(filter);
 				return ret;
 			}
+
+			if ((*secfeed)->set_secure_mode)
+				(*secfeed)->set_secure_mode(*secfeed,
+					&filter->sec_mode);
+
+			if ((*secfeed)->set_cipher_ops)
+				(*secfeed)->set_cipher_ops(*secfeed,
+					&filter->feed.sec.cipher_ops);
 		} else {
 			dvb_dmxdev_feed_stop(filter);
 		}
@@ -675,12 +3525,14 @@
 		ret = (*secfeed)->allocate_filter(*secfeed, secfilter);
 		if (ret < 0) {
 			dvb_dmxdev_feed_restart(filter);
-			filter->feed.sec->start_filtering(*secfeed);
-			dprintk("could not get filter\n");
+			filter->feed.sec.feed->start_filtering(*secfeed);
+			pr_debug("could not get filter\n");
 			return ret;
 		}
 
 		(*secfilter)->priv = filter;
+		(*secfilter)->buffer.ringbuff = &filter->buffer;
+		(*secfilter)->buffer.priv_handle = filter->priv_buff_handle;
 
 		memcpy(&((*secfilter)->filter_value[3]),
 		       &(para->filter.filter[1]), DMX_FILTER_SIZE - 1);
@@ -696,8 +3548,12 @@
 		(*secfilter)->filter_mask[2] = 0;
 
 		filter->todo = 0;
+		filter->events.data_read_event_masked =
+			filter->events.event_mask.disable_mask &
+			DMX_EVENT_NEW_SECTION;
 
-		ret = filter->feed.sec->start_filtering(filter->feed.sec);
+		ret = filter->feed.sec.feed->start_filtering(
+				filter->feed.sec.feed);
 		if (ret < 0)
 			return ret;
 
@@ -705,19 +3561,93 @@
 		break;
 	}
 	case DMXDEV_TYPE_PES:
+		if (filter->params.pes.rec_chunk_size <
+			DMX_REC_BUFF_CHUNK_MIN_SIZE)
+			filter->params.pes.rec_chunk_size =
+				DMX_REC_BUFF_CHUNK_MIN_SIZE;
+
+		if (filter->params.pes.rec_chunk_size >=
+			filter->buffer.size)
+			filter->params.pes.rec_chunk_size =
+				filter->buffer.size >> 2;
+
+		/* Align rec-chunk based on output format */
+		if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188)
+			tsp_size = 188;
+		else
+			tsp_size = 192;
+
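+		/* e.g. with 188-byte packets, a 65536-byte chunk is trimmed to 65424 */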
+		filter->params.pes.rec_chunk_size /= tsp_size;
+		filter->params.pes.rec_chunk_size *= tsp_size;
+
+		if (filter->params.pes.output == DMX_OUT_TS_TAP)
+			dmxdev->dvr_output_events.data_read_event_masked =
+			 dmxdev->dvr_output_events.event_mask.disable_mask &
+			 DMX_EVENT_NEW_REC_CHUNK;
+		else if (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
+			filter->events.data_read_event_masked =
+				filter->events.event_mask.disable_mask &
+				DMX_EVENT_NEW_REC_CHUNK;
+		else if (filter->params.pes.output == DMX_OUT_TAP)
+			filter->events.data_read_event_masked =
+				filter->events.event_mask.disable_mask &
+				DMX_EVENT_NEW_PES;
+		else
+			filter->events.data_read_event_masked = 1;
+
+		ret = 0;
 		list_for_each_entry(feed, &filter->feed.ts, next) {
 			ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
-			if (ret < 0) {
-				dvb_dmxdev_filter_stop(filter);
-				return ret;
+			if (ret)
+				break;
+		}
+
+		if (!ret)
+			break;
+
+		/* clean up feeds that were started before the failure */
+		list_for_each_entry(feed, &filter->feed.ts, next) {
+			if (!feed->ts)
+				continue;
+			feed->ts->stop_filtering(feed->ts);
+			dmxdev->demux->release_ts_feed(dmxdev->demux, feed->ts);
+			feed->ts = NULL;
+
+			if (filter->params.pes.output == DMX_OUT_TS_TAP) {
+				filter->dev->dvr_feeds_count--;
+				if (!filter->dev->dvr_feeds_count)
+					filter->dev->dvr_feed = NULL;
 			}
 		}
-		break;
+		return ret;
+
 	default:
 		return -EINVAL;
 	}
 
 	dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);
+
+	if ((filter->type == DMXDEV_TYPE_PES) &&
+		!list_empty(&filter->insertion_buffers)) {
+		struct ts_insertion_buffer *ts_buffer;
+
+		feed = list_first_entry(&filter->feed.ts,
+			struct dmxdev_feed, next);
+
+		ret = 0;
+		if (feed->ts->ts_insertion_init)
+			ret = feed->ts->ts_insertion_init(feed->ts);
+		if (!ret) {
+			list_for_each_entry(ts_buffer,
+				&filter->insertion_buffers, next)
+				dvb_dmxdev_queue_ts_insertion(
+					ts_buffer);
+		} else {
+			pr_err("%s: ts_insertion_init failed, err %d\n",
+				__func__, ret);
+		}
+	}
+
 	return 0;
 }
 
@@ -747,11 +3677,28 @@
 	mutex_init(&dmxdevfilter->mutex);
 	file->private_data = dmxdevfilter;
 
+	memset(&dmxdevfilter->decoder_buffers,
+			0,
+			sizeof(dmxdevfilter->decoder_buffers));
+	dmxdevfilter->decoder_buffers.buffers_size =
+		DMX_DEFAULT_DECODER_BUFFER_SIZE;
+	dmxdevfilter->buffer_mode = DMX_BUFFER_MODE_INTERNAL;
+	dmxdevfilter->priv_buff_handle = NULL;
 	dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
+	dvb_dmxdev_flush_events(&dmxdevfilter->events);
+	dmxdevfilter->events.event_mask.disable_mask = DMX_EVENT_NEW_ES_DATA;
+	dmxdevfilter->events.event_mask.no_wakeup_mask = 0;
+	dmxdevfilter->events.event_mask.wakeup_threshold = 1;
+
 	dmxdevfilter->type = DMXDEV_TYPE_NONE;
 	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
 	init_timer(&dmxdevfilter->timer);
 
+	dmxdevfilter->sec_mode.is_secured = 0;
+
+	INIT_LIST_HEAD(&dmxdevfilter->insertion_buffers);
+
+	dmxdevfilter->dmx_tsp_format = DMX_TSP_FORMAT_188;
 	dvbdev->users++;
 
 	mutex_unlock(&dmxdev->mutex);
@@ -761,23 +3708,40 @@
 static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev,
 				  struct dmxdev_filter *dmxdevfilter)
 {
+	struct ts_insertion_buffer *ts_buffer, *tmp;
+
 	mutex_lock(&dmxdev->mutex);
 	mutex_lock(&dmxdevfilter->mutex);
 
 	dvb_dmxdev_filter_stop(dmxdevfilter);
 	dvb_dmxdev_filter_reset(dmxdevfilter);
 
+	list_for_each_entry_safe(ts_buffer, tmp,
+			&dmxdevfilter->insertion_buffers, next) {
+		list_del(&ts_buffer->next);
+		vfree(ts_buffer->buffer);
+		vfree(ts_buffer);
+	}
+
 	if (dmxdevfilter->buffer.data) {
 		void *mem = dmxdevfilter->buffer.data;
 
 		spin_lock_irq(&dmxdev->lock);
 		dmxdevfilter->buffer.data = NULL;
 		spin_unlock_irq(&dmxdev->lock);
-		vfree(mem);
+		if (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_INTERNAL)
+			vfree(mem);
+	}
+
+	if ((dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL) &&
+		dmxdevfilter->priv_buff_handle) {
+		dmxdev->demux->unmap_buffer(dmxdev->demux,
+			dmxdevfilter->priv_buff_handle);
+		dmxdevfilter->priv_buff_handle = NULL;
 	}
 
 	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_FREE);
-	wake_up(&dmxdevfilter->buffer.queue);
+	wake_up_all(&dmxdevfilter->buffer.queue);
 	mutex_unlock(&dmxdevfilter->mutex);
 	mutex_unlock(&dmxdev->mutex);
 	return 0;
@@ -795,6 +3759,7 @@
 			      struct dmxdev_filter *filter, u16 pid)
 {
 	struct dmxdev_feed *feed;
+	int ret = 0;
 
 	if ((filter->type != DMXDEV_TYPE_PES) ||
 	    (filter->state < DMXDEV_STATE_SET))
@@ -810,28 +3775,45 @@
 		return -ENOMEM;
 
 	feed->pid = pid;
-	list_add(&feed->next, &filter->feed.ts);
+	feed->cipher_ops.operations_count = 0;
+	feed->idx_params.enable = 0;
 
 	if (filter->state >= DMXDEV_STATE_GO)
-		return dvb_dmxdev_start_feed(dmxdev, filter, feed);
+		ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
 
-	return 0;
+	if (!ret)
+		list_add(&feed->next, &filter->feed.ts);
+	else
+		kfree(feed);
+
+	return ret;
 }
 
 static int dvb_dmxdev_remove_pid(struct dmxdev *dmxdev,
 				  struct dmxdev_filter *filter, u16 pid)
 {
+	int feed_count;
 	struct dmxdev_feed *feed, *tmp;
 
 	if ((filter->type != DMXDEV_TYPE_PES) ||
 	    (filter->state < DMXDEV_STATE_SET))
 		return -EINVAL;
 
+	feed_count = 0;
+	list_for_each_entry(tmp, &filter->feed.ts, next)
+		feed_count++;
+
+	if (feed_count <= 1)
+		return -EINVAL;
+
 	list_for_each_entry_safe(feed, tmp, &filter->feed.ts, next) {
-		if ((feed->pid == pid) && (feed->ts != NULL)) {
-			feed->ts->stop_filtering(feed->ts);
-			filter->dev->demux->release_ts_feed(filter->dev->demux,
-							    feed->ts);
+		if (feed->pid == pid) {
+			if (feed->ts != NULL) {
+				feed->ts->stop_filtering(feed->ts);
+				filter->dev->demux->release_ts_feed(
+							filter->dev->demux,
+							feed->ts);
+			}
 			list_del(&feed->next);
 			kfree(feed);
 		}
@@ -844,7 +3826,7 @@
 				 struct dmxdev_filter *dmxdevfilter,
 				 struct dmx_sct_filter_params *params)
 {
-	dprintk("function : %s, PID=0x%04x, flags=%02x, timeout=%d\n",
+	pr_debug("function : %s, PID=0x%04x, flags=%02x, timeout=%d\n",
 		__func__, params->pid, params->flags, params->timeout);
 
 	dvb_dmxdev_filter_stop(dmxdevfilter);
@@ -853,6 +3835,7 @@
 	memcpy(&dmxdevfilter->params.sec,
 	       params, sizeof(struct dmx_sct_filter_params));
 	invert_mode(&dmxdevfilter->params.sec.filter);
+	dmxdevfilter->feed.sec.cipher_ops.operations_count = 0;
 	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
 
 	if (params->flags & DMX_IMMEDIATE_START)
@@ -861,6 +3844,99 @@
 	return 0;
 }
 
+static int dvb_dmxdev_set_secure_mode(
+	struct dmxdev *dmxdev,
+	struct dmxdev_filter *filter,
+	struct dmx_secure_mode *sec_mode)
+{
+	if (!dmxdev || !filter || !sec_mode)
+		return -EINVAL;
+
+	if (filter->state == DMXDEV_STATE_GO) {
+		pr_err("%s: invalid filter state\n", __func__);
+		return -EBUSY;
+	}
+
+	pr_debug("%s: secure=%d\n", __func__, sec_mode->is_secured);
+
+	filter->sec_mode = *sec_mode;
+
+	return 0;
+}
+
+static int dvb_dmxdev_set_cipher(struct dmxdev *dmxdev,
+	struct dmxdev_filter *filter,
+	struct dmx_cipher_operations *cipher_ops)
+{
+	struct dmxdev_feed *feed;
+	struct dmxdev_feed *ts_feed = NULL;
+	struct dmxdev_sec_feed *sec_feed = NULL;
+	struct dmx_caps caps;
+
+	if (!dmxdev || !dmxdev->demux->get_caps)
+		return -EINVAL;
+
+	dmxdev->demux->get_caps(dmxdev->demux, &caps);
+
+	if (!filter || !cipher_ops ||
+		(cipher_ops->operations_count > caps.num_cipher_ops) ||
+		(cipher_ops->operations_count >
+			DMX_MAX_CIPHER_OPERATIONS_COUNT))
+		return -EINVAL;
+
+	pr_debug("%s: pid=%d, operations=%d\n", __func__,
+		cipher_ops->pid, cipher_ops->operations_count);
+
+	if (filter->state < DMXDEV_STATE_SET ||
+		filter->state > DMXDEV_STATE_GO) {
+		pr_err("%s: invalid filter state\n", __func__);
+		return -EPERM;
+	}
+
+	if (!filter->sec_mode.is_secured && cipher_ops->operations_count) {
+		pr_err("%s: secure mode must be enabled to set cipher ops\n",
+			__func__);
+		return -EPERM;
+	}
+
+	switch (filter->type) {
+	case DMXDEV_TYPE_PES:
+		list_for_each_entry(feed, &filter->feed.ts, next) {
+			if (feed->pid == cipher_ops->pid) {
+				ts_feed = feed;
+				ts_feed->cipher_ops = *cipher_ops;
+				if (filter->state == DMXDEV_STATE_GO &&
+					ts_feed->ts->set_cipher_ops)
+					ts_feed->ts->set_cipher_ops(
+						ts_feed->ts, cipher_ops);
+				break;
+			}
+		}
+		break;
+	case DMXDEV_TYPE_SEC:
+		if (filter->params.sec.pid == cipher_ops->pid) {
+			sec_feed = &filter->feed.sec;
+			sec_feed->cipher_ops = *cipher_ops;
+			if (filter->state == DMXDEV_STATE_GO &&
+				sec_feed->feed->set_cipher_ops)
+				sec_feed->feed->set_cipher_ops(sec_feed->feed,
+						cipher_ops);
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (!ts_feed && !sec_feed) {
+		pr_err("%s: pid %d is undefined for this filter\n",
+			__func__, cipher_ops->pid);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
 				     struct dmxdev_filter *dmxdevfilter,
 				     struct dmx_pes_filter_params *params)
@@ -891,6 +3967,55 @@
 	return 0;
 }
 
+static int dvb_dmxdev_set_decoder_buffer(struct dmxdev *dmxdev,
+		struct dmxdev_filter *filter,
+		struct dmx_decoder_buffers *buffs)
+{
+	int i;
+	struct dmx_decoder_buffers *dec_buffs;
+	struct dmx_caps caps;
+
+	if (!dmxdev || !filter || !buffs)
+		return -EINVAL;
+
+	dec_buffs = &filter->decoder_buffers;
+	if (!dmxdev->demux->get_caps)
+		return -EINVAL;
+
+	dmxdev->demux->get_caps(dmxdev->demux, &caps);
+	if (!dvb_dmxdev_verify_buffer_size(buffs->buffers_size,
+			caps.decoder.max_size, caps.decoder.size_alignment))
+		return -EINVAL;
+
+	if ((buffs->buffers_size == 0) ||
+		(buffs->is_linear &&
+		 ((buffs->buffers_num <= 1) ||
+		  (buffs->buffers_num > DMX_MAX_DECODER_BUFFER_NUM))))
+		return -EINVAL;
+
+	if (buffs->buffers_num == 0) {
+		/* Internal mode - linear buffers not supported in this mode */
+		if (!(caps.decoder.flags & DMX_BUFFER_INTERNAL_SUPPORT) ||
+			buffs->is_linear)
+			return -EINVAL;
+	} else {
+		/* External buffer(s) mode */
+		if ((!(caps.decoder.flags & DMX_BUFFER_LINEAR_GROUP_SUPPORT) &&
+			buffs->buffers_num > 1) ||
+			!(caps.decoder.flags & DMX_BUFFER_EXTERNAL_SUPPORT) ||
+			buffs->buffers_num > caps.decoder.max_buffer_num)
+			return -EINVAL;
+
+		dec_buffs->is_linear = buffs->is_linear;
+		dec_buffs->buffers_num = buffs->buffers_num;
+		dec_buffs->buffers_size = buffs->buffers_size;
+		for (i = 0; i < dec_buffs->buffers_num; i++)
+			dec_buffs->handles[i] = buffs->handles[i];
+	}
+
+	return 0;
+}
+
 static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
 				   struct file *file, char __user *buf,
 				   size_t count, loff_t *ppos)
@@ -902,7 +4027,7 @@
 		hcount = 3 + dfil->todo;
 		if (hcount > count)
 			hcount = count;
-		result = dvb_dmxdev_buffer_read(&dfil->buffer,
+		result = dvb_dmxdev_buffer_read(dfil, &dfil->buffer,
 						file->f_flags & O_NONBLOCK,
 						buf, hcount, ppos);
 		if (result < 0) {
@@ -923,7 +4048,7 @@
 	}
 	if (count > dfil->todo)
 		count = dfil->todo;
-	result = dvb_dmxdev_buffer_read(&dfil->buffer,
+	result = dvb_dmxdev_buffer_read(dfil, &dfil->buffer,
 					file->f_flags & O_NONBLOCK,
 					buf, count, ppos);
 	if (result < 0)
@@ -942,12 +4067,36 @@
 	if (mutex_lock_interruptible(&dmxdevfilter->mutex))
 		return -ERESTARTSYS;
 
+	if (dmxdevfilter->eos_state &&
+		dvb_ringbuffer_empty(&dmxdevfilter->buffer)) {
+		mutex_unlock(&dmxdevfilter->mutex);
+		return 0;
+	}
+
 	if (dmxdevfilter->type == DMXDEV_TYPE_SEC)
 		ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos);
 	else
-		ret = dvb_dmxdev_buffer_read(&dmxdevfilter->buffer,
-					     file->f_flags & O_NONBLOCK,
-					     buf, count, ppos);
+		ret = dvb_dmxdev_buffer_read(dmxdevfilter,
+					&dmxdevfilter->buffer,
+					file->f_flags & O_NONBLOCK,
+					buf, count, ppos);
+
+	if (ret > 0) {
+		dvb_dmxdev_notify_data_read(dmxdevfilter, ret);
+		spin_lock_irq(&dmxdevfilter->dev->lock);
+		dvb_dmxdev_update_events(&dmxdevfilter->events, ret);
+		spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+		/*
+		 * in PULL mode, we might be stalling on the
+		 * event queue, so we need to wake up waiters
+		 */
+		if (dmxdevfilter->dev->playback_mode == DMX_PB_MODE_PULL)
+			wake_up_all(&dmxdevfilter->buffer.queue);
+	} else if (ret == -EOVERFLOW) {
+		dvb_dmxdev_auto_flush_buffer(dmxdevfilter,
+			&dmxdevfilter->buffer);
+	}
 
 	mutex_unlock(&dmxdevfilter->mutex);
 	return ret;
@@ -1013,6 +4162,43 @@
 		mutex_unlock(&dmxdevfilter->mutex);
 		break;
 
+	case DMX_SET_BUFFER_MODE:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+		ret = dvb_dmxdev_set_buffer_mode(dmxdevfilter,
+				*(enum dmx_buffer_mode *)parg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_SET_BUFFER:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+		ret = dvb_dmxdev_set_buffer(dmxdevfilter, parg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_GET_BUFFER_STATUS:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+		ret = dvb_dmxdev_get_buffer_status(dmxdevfilter, parg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_RELEASE_DATA:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+		ret = dvb_dmxdev_release_data(dmxdevfilter, arg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
 	case DMX_GET_PES_PIDS:
 		if (!dmxdev->demux->get_pes_pids) {
 			ret = -EINVAL;
@@ -1021,9 +4207,6 @@
 		dmxdev->demux->get_pes_pids(dmxdev->demux, parg);
 		break;
 
-#if 0
-	/* Not used upstream and never documented */
-
 	case DMX_GET_CAPS:
 		if (!dmxdev->demux->get_caps) {
 			ret = -EINVAL;
@@ -1033,13 +4216,65 @@
 		break;
 
 	case DMX_SET_SOURCE:
-		if (!dmxdev->demux->set_source) {
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+		ret = dvb_dmxdev_set_source(dmxdevfilter, parg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_SET_TS_PACKET_FORMAT:
+		if (!dmxdev->demux->set_tsp_format) {
 			ret = -EINVAL;
 			break;
 		}
-		ret = dmxdev->demux->set_source(dmxdev->demux, parg);
+
+		if (dmxdevfilter->state >= DMXDEV_STATE_GO) {
+			ret = -EBUSY;
+			break;
+		}
+		ret = dmxdev->demux->set_tsp_format(
+				dmxdev->demux,
+				*(enum dmx_tsp_format_t *)parg);
 		break;
-#endif
+
+	case DMX_SET_TS_OUT_FORMAT:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+
+		ret = dvb_dmxdev_set_tsp_out_format(dmxdevfilter,
+				*(enum dmx_tsp_format_t *)parg);
+
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_SET_DECODER_BUFFER_SIZE:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+
+		ret = dvb_dmxdev_set_decoder_buffer_size(dmxdevfilter, arg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_SET_PLAYBACK_MODE:
+		ret = dvb_dmxdev_set_playback_mode(
+				dmxdevfilter,
+				*(enum dmx_playback_mode_t *)parg);
+		break;
+
+	case DMX_GET_EVENT:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+		ret = dvb_dmxdev_get_event(dmxdevfilter, parg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
 
 	case DMX_GET_STC:
 		if (!dmxdev->demux->get_stc) {
@@ -1070,8 +4305,109 @@
 		mutex_unlock(&dmxdevfilter->mutex);
 		break;
 
+	case DMX_SET_DECODER_BUFFER:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+		ret = dvb_dmxdev_set_decoder_buffer(dmxdev, dmxdevfilter, parg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_SET_SECURE_MODE:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+		ret = dvb_dmxdev_set_secure_mode(dmxdev, dmxdevfilter, parg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_SET_CIPHER:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+		ret = dvb_dmxdev_set_cipher(dmxdev, dmxdevfilter, parg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_REUSE_DECODER_BUFFER:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+		ret = dvb_dmxdev_reuse_decoder_buf(dmxdevfilter, arg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_SET_EVENTS_MASK:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+		ret = dvb_dmxdev_set_event_mask(dmxdevfilter, parg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_GET_EVENTS_MASK:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+		ret = dvb_dmxdev_get_event_mask(dmxdevfilter, parg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_SET_INDEXING_PARAMS:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+		ret = dvb_dmxdev_set_indexing_params(dmxdevfilter, parg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_SET_TS_INSERTION:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+		ret = dvb_dmxdev_set_ts_insertion(dmxdevfilter, parg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_ABORT_TS_INSERTION:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+		ret = dvb_dmxdev_abort_ts_insertion(dmxdevfilter, parg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_GET_SCRAMBLING_BITS:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+		ret = dvb_dmxdev_get_scrambling_bits(dmxdevfilter, parg);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
+	case DMX_FLUSH_BUFFER:
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
+		}
+		ret = dvb_dmxdev_flush_buffer(dmxdevfilter);
+		mutex_unlock(&dmxdevfilter->mutex);
+		break;
+
 	default:
-		ret = -EINVAL;
+		pr_err("%s: unknown ioctl code (0x%x)\n",
+			__func__, cmd);
+		ret = -ENOIOCTLCMD;
 		break;
 	}
 	mutex_unlock(&dmxdev->mutex);
@@ -1084,13 +4420,78 @@
 	return dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
 }
 
+#ifdef CONFIG_COMPAT
+
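+/*
+ * 32-bit layout of struct dmx_set_ts_insertion; the user pointer is
+ * carried as a compat_uptr_t and converted with compat_ptr().
+ */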
+struct dmx_set_ts_insertion32 {
+	__u32 identifier;
+	__u32 repetition_time;
+	compat_uptr_t ts_packets;
+	compat_size_t size;
+};
+
+static long dmx_set_ts_insertion32_wrapper(struct file *file, unsigned int cmd,
+	    unsigned long arg)
+{
+	int ret;
+	struct dmx_set_ts_insertion32 dmx_ts_insert32;
+	struct dmx_set_ts_insertion dmx_ts_insert;
+
+	ret = copy_from_user(&dmx_ts_insert32, (void __user *)arg,
+		sizeof(dmx_ts_insert32));
+	if (ret) {
+		pr_err(
+			"%s: copy dmx_set_ts_insertion32 from user failed, ret=%d\n",
+			__func__, ret);
+		return -EFAULT;
+	}
+
+	memset(&dmx_ts_insert, 0, sizeof(dmx_ts_insert));
+	dmx_ts_insert.identifier = dmx_ts_insert32.identifier;
+	dmx_ts_insert.repetition_time = dmx_ts_insert32.repetition_time;
+	dmx_ts_insert.ts_packets = compat_ptr(dmx_ts_insert32.ts_packets);
+	dmx_ts_insert.size = dmx_ts_insert32.size;
+
+	ret = dvb_demux_do_ioctl(file, DMX_SET_TS_INSERTION, &dmx_ts_insert);
+
+	return ret;
+}
+
+#define DMX_SET_TS_INSERTION32 _IOW('o', 70, struct dmx_set_ts_insertion32)
+
+/*
+ * compat ioctl is called whenever compatibility is required, i.e. when a
+ * 32-bit process calls an ioctl on a 64-bit kernel.
+ */
+static long dvb_demux_compat_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	long ret = 0;
+
+	switch (cmd) {
+	case DMX_SET_TS_INSERTION32:
+		ret = dmx_set_ts_insertion32_wrapper(file, cmd, arg);
+		break;
+	case DMX_SET_TS_INSERTION:
+		pr_err("%s: 64bit ioctl code (0x%lx) used by 32bit userspace\n",
+			__func__, DMX_SET_TS_INSERTION);
+		ret = -ENOIOCTLCMD;
+		break;
+	default:
+		/* use regular ioctl */
+		ret = dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
+	}
+
+	return ret;
+}
+#endif
+
 static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
 {
 	struct dmxdev_filter *dmxdevfilter = file->private_data;
 	unsigned int mask = 0;
 
-	if ((!dmxdevfilter) || dmxdevfilter->dev->exit)
-		return POLLERR;
+	if (!dmxdevfilter)
+		return -EINVAL;
 
 	poll_wait(file, &dmxdevfilter->buffer.queue, wait);
 
@@ -1099,20 +4500,80 @@
 	    dmxdevfilter->state != DMXDEV_STATE_TIMEDOUT)
 		return 0;
 
-	if (dmxdevfilter->buffer.error)
-		mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);
+	if (dmxdevfilter->buffer.error) {
+		mask |= (POLLIN | POLLRDNORM | POLLERR);
+		if (dmxdevfilter->buffer.error == -EOVERFLOW)
+			mask |= POLLPRI;
+	}
 
 	if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer))
-		mask |= (POLLIN | POLLRDNORM | POLLPRI);
+		mask |= (POLLIN | POLLRDNORM);
+
+	if (dmxdevfilter->events.wakeup_events_counter >=
+		dmxdevfilter->events.event_mask.wakeup_threshold)
+		mask |= POLLPRI;
 
 	return mask;
 }
 
+static int dvb_demux_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct dmxdev_filter *dmxdevfilter = filp->private_data;
+	struct dmxdev *dmxdev = dmxdevfilter->dev;
+	int ret;
+	int vma_size;
+	int buffer_size;
+
+	vma_size = vma->vm_end - vma->vm_start;
+
+	if (vma->vm_flags & VM_WRITE)
+		return -EINVAL;
+
+	if (mutex_lock_interruptible(&dmxdev->mutex))
+		return -ERESTARTSYS;
+
+	if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+		mutex_unlock(&dmxdev->mutex);
+		return -ERESTARTSYS;
+	}
+
+	if ((!dmxdevfilter->buffer.data) ||
+		(dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL)) {
+		mutex_unlock(&dmxdevfilter->mutex);
+		mutex_unlock(&dmxdev->mutex);
+		return -EINVAL;
+	}
+
+	/* The requested mapping must match the page-aligned buffer size */
+	buffer_size = dmxdevfilter->buffer.size + (PAGE_SIZE-1);
+	buffer_size = buffer_size & ~(PAGE_SIZE-1);
+
+	if (vma_size != buffer_size) {
+		mutex_unlock(&dmxdevfilter->mutex);
+		mutex_unlock(&dmxdev->mutex);
+		return -EINVAL;
+	}
+
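+	/*
+	 * Internal buffers are allocated with vmalloc_user(), as required
+	 * by remap_vmalloc_range().
+	 */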
+	ret = remap_vmalloc_range(vma, dmxdevfilter->buffer.data, 0);
+	if (ret) {
+		mutex_unlock(&dmxdevfilter->mutex);
+		mutex_unlock(&dmxdev->mutex);
+		return ret;
+	}
+
+	vma->vm_flags |= VM_DONTDUMP;
+	vma->vm_flags |= VM_DONTEXPAND;
+
+	mutex_unlock(&dmxdevfilter->mutex);
+	mutex_unlock(&dmxdev->mutex);
+
+	return 0;
+}
+
 static int dvb_demux_release(struct inode *inode, struct file *file)
 {
 	struct dmxdev_filter *dmxdevfilter = file->private_data;
 	struct dmxdev *dmxdev = dmxdevfilter->dev;
-
 	int ret;
 
 	ret = dvb_dmxdev_filter_free(dmxdev, dmxdevfilter);
@@ -1120,6 +4581,8 @@
 	mutex_lock(&dmxdev->mutex);
 	dmxdev->dvbdev->users--;
 	if(dmxdev->dvbdev->users==1 && dmxdev->exit==1) {
+		fops_put(file->f_op);
+		file->f_op = NULL;
 		mutex_unlock(&dmxdev->mutex);
 		wake_up(&dmxdev->dvbdev->wait_queue);
 	} else
@@ -1136,6 +4599,10 @@
 	.release = dvb_demux_release,
 	.poll = dvb_demux_poll,
 	.llseek = default_llseek,
+	.mmap = dvb_demux_mmap,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = dvb_demux_compat_ioctl,
+#endif
 };
 
 static const struct dvb_device dvbdev_demux = {
@@ -1161,11 +4628,44 @@
 
 	switch (cmd) {
 	case DMX_SET_BUFFER_SIZE:
-		ret = dvb_dvr_set_buffer_size(dmxdev, arg);
+		ret = dvb_dvr_set_buffer_size(dmxdev, file->f_flags, arg);
+		break;
+
+	case DMX_SET_BUFFER_MODE:
+		ret = dvb_dvr_set_buffer_mode(dmxdev, file->f_flags,
+			*(enum dmx_buffer_mode *)parg);
+		break;
+
+	case DMX_SET_BUFFER:
+		ret = dvb_dvr_set_buffer(dmxdev, file->f_flags, parg);
+		break;
+
+	case DMX_GET_BUFFER_STATUS:
+		ret = dvb_dvr_get_buffer_status(dmxdev, file->f_flags, parg);
+		break;
+
+	case DMX_RELEASE_DATA:
+		ret = dvb_dvr_release_data(dmxdev, file->f_flags, arg);
+		break;
+
+	case DMX_FEED_DATA:
+		ret = dvb_dvr_feed_data(dmxdev, file->f_flags, arg);
+		break;
+
+	case DMX_GET_EVENT:
+		ret = dvb_dvr_get_event(dmxdev, file->f_flags, parg);
+		break;
+
+	case DMX_PUSH_OOB_COMMAND:
+		ret = dvb_dvr_push_oob_cmd(dmxdev, file->f_flags, parg);
+		break;
+
+	case DMX_FLUSH_BUFFER:
+		ret = dvb_dvr_flush_buffer(dmxdev, file->f_flags);
 		break;
 
 	default:
-		ret = -EINVAL;
+		ret = -ENOIOCTLCMD;
 		break;
 	}
 	mutex_unlock(&dmxdev->mutex);
@@ -1173,32 +4673,50 @@
 }
 
 static long dvb_dvr_ioctl(struct file *file,
-			 unsigned int cmd, unsigned long arg)
+			unsigned int cmd, unsigned long arg)
 {
 	return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
 }
 
+#ifdef CONFIG_COMPAT
+static long dvb_dvr_compat_ioctl(struct file *file, unsigned int cmd,
+			unsigned long arg)
+{
+	return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
+}
+#endif
+
 static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
 {
 	struct dvb_device *dvbdev = file->private_data;
 	struct dmxdev *dmxdev = dvbdev->priv;
 	unsigned int mask = 0;
 
-	dprintk("function : %s\n", __func__);
-
-	if (dmxdev->exit)
-		return POLLERR;
-
-	poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
+	pr_debug("function : %s\n", __func__);
 
 	if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
-		if (dmxdev->dvr_buffer.error)
-			mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);
+		poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
+
+		if (dmxdev->dvr_buffer.error) {
+			mask |= (POLLIN | POLLRDNORM | POLLERR);
+			if (dmxdev->dvr_buffer.error == -EOVERFLOW)
+				mask |= POLLPRI;
+		}
 
 		if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer))
-			mask |= (POLLIN | POLLRDNORM | POLLPRI);
-	} else
-		mask |= (POLLOUT | POLLWRNORM | POLLPRI);
+			mask |= (POLLIN | POLLRDNORM);
+
+		if (dmxdev->dvr_output_events.wakeup_events_counter >=
+			dmxdev->dvr_output_events.event_mask.wakeup_threshold)
+			mask |= POLLPRI;
+	} else {
+		poll_wait(file, &dmxdev->dvr_input_buffer.queue, wait);
+		if (dmxdev->dvr_input_buffer.error)
+			mask |= (POLLOUT | POLLRDNORM | POLLPRI | POLLERR);
+
+		if (dvb_ringbuffer_free(&dmxdev->dvr_input_buffer))
+			mask |= (POLLOUT | POLLRDNORM | POLLPRI);
+	}
 
 	return mask;
 }
@@ -1207,7 +4725,11 @@
 	.owner = THIS_MODULE,
 	.read = dvb_dvr_read,
 	.write = dvb_dvr_write,
+	.mmap = dvb_dvr_mmap,
 	.unlocked_ioctl = dvb_dvr_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = dvb_dvr_compat_ioctl,
+#endif
 	.open = dvb_dvr_open,
 	.release = dvb_dvr_release,
 	.poll = dvb_dvr_poll,
@@ -1223,9 +4745,94 @@
 #endif
 	.fops = &dvb_dvr_fops
 };
+
+
+/*
+ * debugfs service to print information about the active filters.
+ */
+static int dvb_dmxdev_dbgfs_print(struct seq_file *s, void *p)
+{
+	int i;
+	struct dmxdev *dmxdev = s->private;
+	struct dmxdev_filter *filter;
+	int active_count = 0;
+	struct dmx_buffer_status buffer_status;
+	struct dmx_scrambling_bits scrambling_bits;
+	static const char * const pes_feeds[] = {"DEC", "PES", "DVR", "REC"};
+	int ret;
+
+	if (!dmxdev)
+		return 0;
+
+	for (i = 0; i < dmxdev->filternum; i++) {
+		filter = &dmxdev->filter[i];
+		if (filter->state >= DMXDEV_STATE_GO) {
+			active_count++;
+
+			seq_printf(s, "filter_%02d - ", i);
+
+			if (filter->type == DMXDEV_TYPE_SEC) {
+				seq_puts(s, "type: SEC, ");
+				seq_printf(s, "PID %04d ",
+						filter->params.sec.pid);
+				scrambling_bits.pid = filter->params.sec.pid;
+			} else {
+				seq_printf(s, "type: %s, ",
+					pes_feeds[filter->params.pes.output]);
+				seq_printf(s, "PID: %04d ",
+						filter->params.pes.pid);
+				scrambling_bits.pid = filter->params.pes.pid;
+			}
+
+			dvb_dmxdev_get_scrambling_bits(filter,
+				&scrambling_bits);
+
+			if (filter->type == DMXDEV_TYPE_PES &&
+				filter->params.pes.output == DMX_OUT_TS_TAP)
+				ret = dvb_dvr_get_buffer_status(dmxdev,
+					O_RDONLY, &buffer_status);
+			else
+				ret = dvb_dmxdev_get_buffer_status(filter,
+					&buffer_status);
+			if (!ret) {
+				seq_printf(s, "size: %08d, ",
+					buffer_status.size);
+				seq_printf(s, "fullness: %08d, ",
+					buffer_status.fullness);
+				seq_printf(s, "error: %d, ",
+					buffer_status.error);
+			}
+
+			seq_printf(s, "scramble: %d, ",
+				scrambling_bits.value);
+			seq_printf(s, "secured: %d\n",
+				filter->sec_mode.is_secured);
+		}
+	}
+
+	if (!active_count)
+		seq_puts(s, "No active filters\n");
+
+	return 0;
+}
+
+static int dvb_dmxdev_dbgfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dvb_dmxdev_dbgfs_print, inode->i_private);
+}
+
+static const struct file_operations dbgfs_filters_fops = {
+	.open = dvb_dmxdev_dbgfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
 int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
 {
 	int i;
+	struct dmx_caps caps;
 
 	if (dmxdev->demux->open(dmxdev->demux) < 0)
 		return -EUSERS;
@@ -1234,8 +4841,12 @@
 	if (!dmxdev->filter)
 		return -ENOMEM;
 
+	dmxdev->playback_mode = DMX_PB_MODE_PUSH;
+	dmxdev->demux->dvr_input_protected = 0;
+
 	mutex_init(&dmxdev->mutex);
 	spin_lock_init(&dmxdev->lock);
+	spin_lock_init(&dmxdev->dvr_in_lock);
 	for (i = 0; i < dmxdev->filternum; i++) {
 		dmxdev->filter[i].dev = dmxdev;
 		dmxdev->filter[i].buffer.data = NULL;
@@ -1244,11 +4855,24 @@
 	}
 
 	dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev,
-			    DVB_DEVICE_DEMUX, dmxdev->filternum);
+			    DVB_DEVICE_DEMUX, 0);
 	dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr,
-			    dmxdev, DVB_DEVICE_DVR, dmxdev->filternum);
+			    dmxdev, DVB_DEVICE_DVR, 0);
 
 	dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192);
+	dvb_ringbuffer_init(&dmxdev->dvr_input_buffer, NULL, 8192);
+
+	/* Disable auto buffer flushing if plugin does not allow it */
+	if (dmxdev->demux->get_caps) {
+		dmxdev->demux->get_caps(dmxdev->demux, &caps);
+		if (!(caps.caps & DMX_CAP_AUTO_BUFFER_FLUSH))
+			overflow_auto_flush = 0;
+	}
+
+	if (dmxdev->demux->debugfs_demux_dir)
+		debugfs_create_file("filters", 0444,
+			dmxdev->demux->debugfs_demux_dir, dmxdev,
+			&dbgfs_filters_fops);
 
 	return 0;
 }
diff --git a/drivers/media/dvb-core/dmxdev.h b/drivers/media/dvb-core/dmxdev.h
index 48c6cf9..ad007f4 100644
--- a/drivers/media/dvb-core/dmxdev.h
+++ b/drivers/media/dvb-core/dmxdev.h
@@ -33,7 +33,7 @@
 #include <linux/string.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-
+#include <linux/kthread.h>
 #include <linux/dvb/dmx.h>
 
 #include "dvbdev.h"
@@ -57,10 +57,87 @@
 
 struct dmxdev_feed {
 	u16 pid;
+	struct dmx_indexing_params idx_params;
+	struct dmx_cipher_operations cipher_ops;
 	struct dmx_ts_feed *ts;
 	struct list_head next;
 };
 
+struct dmxdev_sec_feed {
+	struct dmx_section_feed *feed;
+	struct dmx_cipher_operations cipher_ops;
+};
+
+struct dmxdev_events_queue {
+	/*
+	 * indices used to manage events queue.
+	 * read_index advanced when relevant data is read
+	 * from the buffer.
+	 * notified_index is the index from which next events
+	 * are returned.
+	 * read_index <= notified_index <= write_index
+	 *
+	 * If user reads the data without getting the respective
+	 * event first, the read/notified indices are updated
+	 * automatically to reflect the actual data that exist
+	 * in the buffer.
+	 */
+	u32 read_index;
+	u32 write_index;
+	u32 notified_index;
+
+	/* Bytes read by user without having respective event in the queue */
+	u32 bytes_read_no_event;
+
+	/* internal tracking of PES and recording events */
+	u32 current_event_data_size;
+	u32 current_event_start_offset;
+
+	/* current setting of the events masking */
+	struct dmx_events_mask event_mask;
+
+	/*
+	 * indicates if an event used for data-reading from demux
+	 * filter is enabled or not. These are events on which
+	 * filter is enabled or not. These are events the user
+	 * may wait for before calling read() on the demux filter.
+	int data_read_event_masked;
+
+	/*
+	 * holds the current number of pending events in the
+	 * events queue that are considered as a wake-up source
+	 */
+	u32 wakeup_events_counter;
+
+	struct dmx_filter_event queue[DMX_EVENT_QUEUE_SIZE];
+};
+
+#define DMX_MIN_INSERTION_REPETITION_TIME	25 /* in msec */
+struct ts_insertion_buffer {
+	/* work scheduled for insertion of this buffer */
+	struct delayed_work dwork;
+
+	struct list_head next;
+
+	/* buffer holding TS packets for insertion */
+	char *buffer;
+
+	/* buffer size */
+	size_t size;
+
+	/* buffer ID from user */
+	u32 identifier;
+
+	/* repetition time for the buffer insertion */
+	u32 repetition_time;
+
+	/* the recording filter to which this buffer belongs */
+	struct dmxdev_filter *dmxdevfilter;
+
+	/* indication whether insertion should be aborted */
+	int abort;
+};
+
 struct dmxdev_filter {
 	union {
 		struct dmx_section_filter *sec;
@@ -69,7 +146,7 @@
 	union {
 		/* list of TS and PES feeds (struct dmxdev_feed) */
 		struct list_head ts;
-		struct dmx_section_feed *sec;
+		struct dmxdev_sec_feed sec;
 	} feed;
 
 	union {
@@ -77,19 +154,37 @@
 		struct dmx_pes_filter_params pes;
 	} params;
 
+	struct dmxdev_events_queue events;
+
 	enum dmxdev_type type;
 	enum dmxdev_state state;
 	struct dmxdev *dev;
 	struct dvb_ringbuffer buffer;
+	void *priv_buff_handle;
+	enum dmx_buffer_mode buffer_mode;
 
 	struct mutex mutex;
 
+	/* for recording output */
+	enum dmx_tsp_format_t dmx_tsp_format;
+	u32 rec_chunk_size;
+
+	/* list of buffers used for insertion (struct ts_insertion_buffer) */
+	struct list_head insertion_buffers;
+
+	/* End-of-stream indication has been received */
+	int eos_state;
+
 	/* only for sections */
 	struct timer_list timer;
 	int todo;
 	u8 secheader[3];
-};
 
+	struct dmx_secure_mode sec_mode;
+
+	/* Decoder buffer(s) related */
+	struct dmx_decoder_buffers decoder_buffers;
+};
 
 struct dmxdev {
 	struct dvb_device *dvbdev;
@@ -100,18 +195,52 @@
 
 	int filternum;
 	int capabilities;
+#define DMXDEV_CAP_DUPLEX	0x01
+
+	enum dmx_playback_mode_t playback_mode;
+	dmx_source_t source;
 
 	unsigned int exit:1;
-#define DMXDEV_CAP_DUPLEX 1
+	unsigned int dvr_in_exit:1;
+	unsigned int dvr_processing_input:1;
+
 	struct dmx_frontend *dvr_orig_fe;
 
 	struct dvb_ringbuffer dvr_buffer;
+	void *dvr_priv_buff_handle;
+	enum dmx_buffer_mode dvr_buffer_mode;
+	struct dmxdev_events_queue dvr_output_events;
+	struct dmxdev_filter *dvr_feed;
+	int dvr_feeds_count;
+
+	struct dvb_ringbuffer dvr_input_buffer;
+	enum dmx_buffer_mode dvr_input_buffer_mode;
+	struct task_struct *dvr_input_thread;
+	/* DVR commands (data feed / OOB command) queue */
+	struct dvb_ringbuffer dvr_cmd_buffer;
+
 #define DVR_BUFFER_SIZE (10*188*1024)
 
 	struct mutex mutex;
 	spinlock_t lock;
+	spinlock_t dvr_in_lock;
 };
 
+enum dvr_cmd {
+	DVR_DATA_FEED_CMD,
+	DVR_OOB_CMD
+};
+
+struct dvr_command {
+	enum dvr_cmd type;
+	union {
+		struct dmx_oob_command oobcmd;
+		size_t data_feed_count;
+	} cmd;
+};
+
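+/* The DVR command queue can hold up to 500 pending commands */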
+#define DVR_CMDS_BUFFER_SIZE (sizeof(struct dvr_command)*500)
+
 
 int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *);
 void dvb_dmxdev_release(struct dmxdev *dmxdev);
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index a0cf7b0..474684f 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -55,11 +55,151 @@
 MODULE_PARM_DESC(dvb_demux_feed_err_pkts,
 		 "when set to 0, drop packets with the TEI bit set (1 by default)");
 
+/* counter advancing for each new dvb-demux device */
+static int dvb_demux_index;
+
+static int dvb_demux_performancecheck;
+module_param(dvb_demux_performancecheck, int, 0644);
+MODULE_PARM_DESC(dvb_demux_performancecheck,
+		"enable transport stream performance check, reported through debugfs");
+
 #define dprintk_tscheck(x...) do {                              \
 		if (dvb_demux_tscheck && printk_ratelimit())    \
 			printk(x);                              \
 	} while (0)
 
+static const struct dvb_dmx_video_patterns mpeg2_seq_hdr = {
+	{0x00, 0x00, 0x01, 0xB3},
+	{0xFF, 0xFF, 0xFF, 0xFF},
+	4,
+	DMX_IDX_MPEG_SEQ_HEADER
+};
+
+static const struct dvb_dmx_video_patterns mpeg2_gop = {
+	{0x00, 0x00, 0x01, 0xB8},
+	{0xFF, 0xFF, 0xFF, 0xFF},
+	4,
+	DMX_IDX_MPEG_GOP
+};
+
+static const struct dvb_dmx_video_patterns mpeg2_iframe = {
+	{0x00, 0x00, 0x01, 0x00, 0x00, 0x08},
+	{0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38},
+	6,
+	DMX_IDX_MPEG_I_FRAME_START
+};
+
+static const struct dvb_dmx_video_patterns mpeg2_pframe = {
+	{0x00, 0x00, 0x01, 0x00, 0x00, 0x10},
+	{0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38},
+	6,
+	DMX_IDX_MPEG_P_FRAME_START
+};
+
+static const struct dvb_dmx_video_patterns mpeg2_bframe = {
+	{0x00, 0x00, 0x01, 0x00, 0x00, 0x18},
+	{0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38},
+	6,
+	DMX_IDX_MPEG_B_FRAME_START
+};
+
+static const struct dvb_dmx_video_patterns h264_sps = {
+	{0x00, 0x00, 0x01, 0x07},
+	{0xFF, 0xFF, 0xFF, 0x1F},
+	4,
+	DMX_IDX_H264_SPS
+};
+
+static const struct dvb_dmx_video_patterns h264_pps = {
+	{0x00, 0x00, 0x01, 0x08},
+	{0xFF, 0xFF, 0xFF, 0x1F},
+	4,
+	DMX_IDX_H264_PPS
+};
+
+static const struct dvb_dmx_video_patterns h264_idr = {
+	{0x00, 0x00, 0x01, 0x05, 0x80},
+	{0xFF, 0xFF, 0xFF, 0x1F, 0x80},
+	5,
+	DMX_IDX_H264_IDR_START
+};
+
+static const struct dvb_dmx_video_patterns h264_non_idr = {
+	{0x00, 0x00, 0x01, 0x01, 0x80},
+	{0xFF, 0xFF, 0xFF, 0x1F, 0x80},
+	5,
+	DMX_IDX_H264_NON_IDR_START
+};
+
+/*
+ *  Forbidden (1 bit) + NAL idc (2 bits) + NAL type (5 bits)
+ *  I-Slice NAL idc = 3, NAL type = 5, 01100101 mask 0x7F
+ */
+static const struct dvb_dmx_video_patterns h264_idr_islice = {
+	{0x00, 0x00, 0x01, 0x65, 0x80},
+	{0xFF, 0xFF, 0xFF, 0x7F, 0x80},
+	5,
+	DMX_IDX_H264_IDR_ISLICE_START
+};
+
+/*
+ *  Forbidden (1 bit) + NAL idc (2 bits) + NAL type (5 bits)
+ *  P-Slice NAL idc = 2, NAL type = 1, 01000001 mask 0x7F
+ */
+static const struct dvb_dmx_video_patterns h264_non_idr_pslice = {
+	{0x00, 0x00, 0x01, 0x41, 0x80},
+	{0xFF, 0xFF, 0xFF, 0x7F, 0x80},
+	5,
+	DMX_IDX_H264_NON_IDR_PSLICE_START
+};
+
+/*
+ *  Forbidden (1 bit) + NAL idc (2 bits) + NAL type (5 bits)
+ *  B-Slice NAL idc = 0, NAL type = 1, 00000001  mask 0x7F
+ */
+static const struct dvb_dmx_video_patterns h264_non_idr_bslice = {
+	{0x00, 0x00, 0x01, 0x01, 0x80},
+	{0xFF, 0xFF, 0xFF, 0x7F, 0x80},
+	5,
+	DMX_IDX_H264_NON_IDR_BSLICE_START
+};
+
+static const struct dvb_dmx_video_patterns h264_non_access_unit_del = {
+	{0x00, 0x00, 0x01, 0x09},
+	{0xFF, 0xFF, 0xFF, 0x1F},
+	4,
+	DMX_IDX_H264_ACCESS_UNIT_DEL
+};
+
+static const struct dvb_dmx_video_patterns h264_non_sei = {
+	{0x00, 0x00, 0x01, 0x06},
+	{0xFF, 0xFF, 0xFF, 0x1F},
+	4,
+	DMX_IDX_H264_SEI
+};
+
+static const struct dvb_dmx_video_patterns vc1_seq_hdr = {
+	{0x00, 0x00, 0x01, 0x0F},
+	{0xFF, 0xFF, 0xFF, 0xFF},
+	4,
+	DMX_IDX_VC1_SEQ_HEADER
+};
+
+static const struct dvb_dmx_video_patterns vc1_entry_point = {
+	{0x00, 0x00, 0x01, 0x0E},
+	{0xFF, 0xFF, 0xFF, 0xFF},
+	4,
+	DMX_IDX_VC1_ENTRY_POINT
+};
+
+static const struct dvb_dmx_video_patterns vc1_frame = {
+	{0x00, 0x00, 0x01, 0x0D},
+	{0xFF, 0xFF, 0xFF, 0xFF},
+	4,
+	DMX_IDX_VC1_FRAME_START
+};
+
+
 /******************************************************************************
  * static inlined helper functions
  ******************************************************************************/
@@ -69,9 +209,9 @@
 	return 3 + ((buf[1] & 0x0f) << 8) + buf[2];
 }
 
-static inline u16 ts_pid(const u8 *buf)
+static inline u8 ts_scrambling_ctrl(const u8 *buf)
 {
-	return ((buf[1] & 0x1f) << 8) + buf[2];
+	return (buf[3] >> 6) & 0x3;
 }
 
 static inline u8 payload(const u8 *tsp)
@@ -100,37 +240,355 @@
 	memcpy(d, s, len);
 }
 
+static u32 dvb_dmx_calc_time_delta(ktime_t past_time)
+{
+	ktime_t curr_time = ktime_get();
+	s64 delta_time_us = ktime_us_delta(curr_time, past_time);
+
+	return (u32)delta_time_us;
+}
+
 /******************************************************************************
  * Software filter functions
  ******************************************************************************/
 
+/*
+ * Check if two patterns are identical, taking mask into consideration.
+ * @pattern1: the first byte pattern to compare.
+ * @pattern2: the second byte pattern to compare.
+ * @mask: the bit mask to use.
+ * @pattern_size: the length of both patterns and the mask, in bytes.
+ *
+ * Return: 1 if patterns match, 0 otherwise.
+ */
+static inline int dvb_dmx_patterns_match(const u8 *pattern1, const u8 *pattern2,
+					const u8 *mask, size_t pattern_size)
+{
+	int i;
+
+	/*
+	 * Assumption: it is OK to access pattern1, pattern2 and mask.
+	 * This function performs no sanity checks to keep things fast.
+	 */
+
+	for (i = 0; i < pattern_size; i++)
+		if ((pattern1[i] & mask[i]) != (pattern2[i] & mask[i]))
+			return 0;
+
+	return 1;
+}
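
/*
 * A minimal worked example (illustrative only) of the masked comparison used
 * by the H.264 patterns defined above: with mask byte 0x1F only the 5-bit NAL
 * unit type is compared, so an SPS NAL header matches regardless of its
 * nal_ref_idc bits.
 *
 *	static const u8 sps_pat[]  = {0x00, 0x00, 0x01, 0x07};
 *	static const u8 sps_mask[] = {0xFF, 0xFF, 0xFF, 0x1F};
 *	static const u8 stream[]   = {0x00, 0x00, 0x01, 0x67};
 *
 *	dvb_dmx_patterns_match(sps_pat, stream, sps_mask, 4) returns 1,
 *	because (0x67 & 0x1F) == (0x07 & 0x1F) == 0x07 (NAL type 7, SPS).
 */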
+
+/*
+ * dvb_dmx_video_pattern_search -
+ * search for framing patterns in a given buffer.
+ *
+ * Optimized version: first search for a common substring, e.g. 0x00 0x00 0x01.
+ * If this string is found, go over all the given patterns (all must start
+ * with this string) and search for their ending in the buffer.
+ *
+ * Assumption: the patterns we look for do not spread over more than two
+ * buffers.
+ *
+ * @patterns: the full pattern information to look for.
+ * @patterns_num: the number of patterns to look for.
+ * @buf: the buffer to search.
+ * @buf_size: the size of the buffer to search. We search the entire buffer.
+ * @prefix_size_masks: a bit mask (per pattern) of possible prefix sizes to use
+ * when searching for a pattern that started in the previous buffer.
+ * Updated in this function for use in the next lookup.
+ * @results: lookup results (offset, type, used_prefix_size) per found pattern,
+ * up to DVB_DMX_MAX_FOUND_PATTERNS.
+ *
+ * Return:
+ *   Number of patterns found (up to DVB_DMX_MAX_FOUND_PATTERNS).
+ *   0 if no pattern was found.
+ *   error value on failure.
+ */
+int dvb_dmx_video_pattern_search(
+		const struct dvb_dmx_video_patterns
+			*patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM],
+		int patterns_num,
+		const u8 *buf,
+		size_t buf_size,
+		struct dvb_dmx_video_prefix_size_masks *prefix_size_masks,
+		struct dvb_dmx_video_patterns_results *results)
+{
+	int i, j;
+	unsigned int current_size;
+	u32 prefix;
+	int found = 0;
+	int start_offset = 0;
+	/* the starting common substring to look for */
+	u8 string[] = {0x00, 0x00, 0x01};
+	/* the mask for the starting string */
+	u8 string_mask[] = {0xFF, 0xFF, 0xFF};
+	/* the size of the starting string (in bytes) */
+	size_t string_size = 3;
+
+	if ((patterns == NULL) || (patterns_num <= 0) || (buf == NULL))
+		return -EINVAL;
+
+	memset(results, 0, sizeof(struct dvb_dmx_video_patterns_results));
+
+	/*
+	 * handle prefix - disregard string, simply check all patterns,
+	 * looking for a matching suffix at the very beginning of the buffer.
+	 */
+	for (j = 0; (j < patterns_num) && !found; j++) {
+		prefix = prefix_size_masks->size_mask[j];
+		current_size = 32;
+		while (prefix) {
+			if (prefix & (0x1 << (current_size - 1))) {
+				/*
+				 * check that we don't look further
+				 * than buf_size boundary
+				 */
+				if ((int)(patterns[j]->size - current_size) >
+						buf_size)
+					break;
+
+				if (dvb_dmx_patterns_match(
+					(patterns[j]->pattern + current_size),
+					buf, (patterns[j]->mask + current_size),
+					(patterns[j]->size - current_size))) {
+
+					/*
+					 * pattern found using prefix at the
+					 * very beginning of the buffer, so
+					 * offset is 0, but we already zeroed
+					 * everything at the beginning of the
+					 * function; that's why the next line
+					 * is commented out.
+					 */
+					/* results->info[found].offset = 0; */
+					results->info[found].type =
+							patterns[j]->type;
+					results->info[found].used_prefix_size =
+							current_size;
+					found++;
+					/*
+					 * save offset to start looking from
+					 * in the buffer, to avoid reusing the
+					 * data of a pattern we already found.
+					 */
+					start_offset = (patterns[j]->size -
+							current_size);
+
+					if (found >= DVB_DMX_MAX_FOUND_PATTERNS)
+						goto next_prefix_lookup;
+					/*
+					 * we don't want to search for the same
+					 * pattern with several possible prefix
+					 * sizes if we have already found it,
+					 * so we break from the inner loop.
+					 * since we incremented 'found', we
+					 * will not search for additional
+					 * patterns using a prefix - that would
+					 * imply ambiguous patterns where one
+					 * pattern can be included in another.
+					 * the for loop will exit.
+					 */
+					break;
+				}
+			}
+			prefix &= ~(0x1 << (current_size - 1));
+			current_size--;
+		}
+	}
+
+	/*
+	 * Search buffer for entire pattern, starting with the string.
+	 * Note the external for loop does not execute if buf_size is
+	 * smaller than string_size (the cast to int is required, since
+	 * size_t is unsigned).
+	 */
+	for (i = start_offset; i < (int)(buf_size - string_size + 1); i++) {
+		if (dvb_dmx_patterns_match(string, (buf + i), string_mask,
+							string_size)) {
+			/* now search for patterns: */
+			for (j = 0; j < patterns_num; j++) {
+				/* avoid overflow to next buffer */
+				if ((i + patterns[j]->size) > buf_size)
+					continue;
+
+				if (dvb_dmx_patterns_match(
+					(patterns[j]->pattern + string_size),
+					(buf + i + string_size),
+					(patterns[j]->mask + string_size),
+					(patterns[j]->size - string_size))) {
+
+					results->info[found].offset = i;
+					results->info[found].type =
+						patterns[j]->type;
+					/*
+					 * save offset to start next prefix
+					 * lookup, to avoid reusing the data
+					 * of any pattern we already found.
+					 */
+					if ((i + patterns[j]->size) >
+							start_offset)
+						start_offset = (i +
+							patterns[j]->size);
+					/*
+					 * did not use a prefix to find this
+					 * pattern, but we zeroed everything
+					 * in the beginning of the function.
+					 * So no need to zero used_prefix_size
+					 * for results->info[found]
+					 */
+
+					found++;
+					if (found >= DVB_DMX_MAX_FOUND_PATTERNS)
+						goto next_prefix_lookup;
+					/*
+					 * theoretically we don't have to break
+					 * here, but we don't want to search
+					 * for the other matching patterns on
+					 * the very same place in the
+					 * buffer. That would mean the
+					 * (pattern & mask) combinations are
+					 * not unique. So we break from inner
+					 * loop and move on to the next place
+					 * in the buffer.
+					 */
+					break;
+				}
+			}
+		}
+	}
+
+next_prefix_lookup:
+	/* check for possible prefix sizes for the next buffer */
+	for (j = 0; j < patterns_num; j++) {
+		prefix_size_masks->size_mask[j] = 0;
+		for (i = 1; i < patterns[j]->size; i++) {
+			/*
+			 * avoid looking outside of the buffer
+			 * or reusing previously used data.
+			 */
+			if (i > (buf_size - start_offset))
+				break;
+
+			if (dvb_dmx_patterns_match(patterns[j]->pattern,
+					(buf + buf_size - i),
+					patterns[j]->mask, i)) {
+				prefix_size_masks->size_mask[j] |=
+						(1 << (i - 1));
+			}
+		}
+	}
+
+	return found;
+}
+EXPORT_SYMBOL(dvb_dmx_video_pattern_search);
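
/*
 * A minimal usage sketch (illustrative only; 'chunk' and 'chunk_len' are
 * hypothetical inputs): prefix_size_masks is persistent state carried from
 * one call to the next, so a pattern split across two consecutive payload
 * chunks is still reported.
 *
 *	const struct dvb_dmx_video_patterns *pat[2] = {
 *		dvb_dmx_get_pattern(DMX_IDX_H264_SPS),
 *		dvb_dmx_get_pattern(DMX_IDX_H264_IDR_START),
 *	};
 *	struct dvb_dmx_video_prefix_size_masks prefix;
 *	struct dvb_dmx_video_patterns_results res;
 *	int i, n;
 *
 *	memset(&prefix, 0, sizeof(prefix));
 *	n = dvb_dmx_video_pattern_search(pat, 2, chunk, chunk_len,
 *					 &prefix, &res);
 *	for (i = 0; i < n; i++)
 *		pr_debug("match %d: type=0x%llx offset=%d prefix=%u\n", i,
 *			 (unsigned long long)res.info[i].type,
 *			 (int)res.info[i].offset,
 *			 (unsigned int)res.info[i].used_prefix_size);
 */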
+
+/**
+ * dvb_dmx_notify_section_event() - Notify demux event for all filters of a
+ * specified section feed.
+ *
+ * @feed:		dvb_demux_feed object
+ * @event:		demux event to notify
+ * @should_lock:	specifies whether the function should lock the demux
+ *
+ * Caller is responsible for locking the demux properly, either by doing the
+ * locking itself and setting 'should_lock' to 0, or by having the function
+ * do it by setting 'should_lock' to 1.
+ */
+int dvb_dmx_notify_section_event(struct dvb_demux_feed *feed,
+	struct dmx_data_ready *event, int should_lock)
+{
+	struct dvb_demux_filter *f;
+
+	if (feed == NULL || event == NULL || feed->type != DMX_TYPE_SEC)
+		return -EINVAL;
+
+	if (!should_lock && !spin_is_locked(&feed->demux->lock))
+		return -EINVAL;
+
+	if (should_lock)
+		spin_lock(&feed->demux->lock);
+
+	f = feed->filter;
+	while (f && feed->feed.sec.is_filtering) {
+		feed->data_ready_cb.sec(&f->filter, event);
+		f = f->next;
+	}
+
+	if (should_lock)
+		spin_unlock(&feed->demux->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(dvb_dmx_notify_section_event);
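
/*
 * A minimal sketch (illustrative only) of the locking convention: a caller
 * that already holds demux->lock passes should_lock = 0, while a caller in
 * process context with the lock not held lets the function take it:
 *
 *	struct dmx_data_ready ev = {
 *		.status = DMX_OK_EOS,
 *		.data_length = 0,
 *	};
 *
 *	dvb_dmx_notify_section_event(feed, &ev, 1);
 */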
+
+static int dvb_dmx_check_pes_end(struct dvb_demux_feed *feed)
+{
+	struct dmx_data_ready data;
+
+	if (!feed->pusi_seen)
+		return 0;
+
+	data.status = DMX_OK_PES_END;
+	data.data_length = 0;
+	data.pes_end.start_gap = 0;
+	data.pes_end.actual_length = feed->peslen;
+	data.pes_end.disc_indicator_set = 0;
+	data.pes_end.pes_length_mismatch = 0;
+	data.pes_end.stc = 0;
+	data.pes_end.tei_counter = feed->pes_tei_counter;
+	data.pes_end.cont_err_counter = feed->pes_cont_err_counter;
+	data.pes_end.ts_packets_num = feed->pes_ts_packets_num;
+
+	return feed->data_ready_cb.ts(&feed->feed.ts, &data);
+}
+
 static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
 					   const u8 *buf)
 {
 	int count = payload(buf);
 	int p;
-	//int ccok;
-	//u8 cc;
+	int ccok;
+	u8 cc;
+	int ret;
 
 	if (count == 0)
 		return -1;
 
 	p = 188 - count;
 
-	/*
 	cc = buf[3] & 0x0f;
-	ccok = ((feed->cc + 1) & 0x0f) == cc;
+	if (feed->first_cc)
+		ccok = 1;
+	else
+		ccok = ((feed->cc + 1) & 0x0f) == cc;
+
+	feed->first_cc = 0;
 	feed->cc = cc;
-	if (!ccok)
-		printk("missed packet!\n");
-	*/
 
-	if (buf[1] & 0x40)	// PUSI ?
-		feed->peslen = 0xfffa;
+	/* PUSI ? */
+	if (buf[1] & 0x40) {
+		dvb_dmx_check_pes_end(feed);
+		feed->pusi_seen = 1;
+		feed->peslen = 0;
+		feed->pes_tei_counter = 0;
+		feed->pes_cont_err_counter = 0;
+		feed->pes_ts_packets_num = 0;
+	}
 
-	feed->peslen += count;
+	if (feed->pusi_seen == 0)
+		return 0;
 
-	return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts);
+	ret = feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts);
+
+	/* Verify TS packet was copied successfully */
+	if (!ret) {
+		feed->pes_cont_err_counter += !ccok;
+		feed->pes_tei_counter += (buf[1] & 0x80) ? 1 : 0;
+		feed->pes_ts_packets_num++;
+		feed->peslen += count;
+	}
+
+	return ret;
 }
 
 static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed,
@@ -169,10 +627,28 @@
 		return 0;
 
 	if (sec->check_crc) {
+		ktime_t pre_crc_time = ktime_set(0, 0);
+
+		if (dvb_demux_performancecheck)
+			pre_crc_time = ktime_get();
+
 		section_syntax_indicator = ((sec->secbuf[1] & 0x80) != 0);
 		if (section_syntax_indicator &&
-		    demux->check_crc32(feed, sec->secbuf, sec->seclen))
+		    demux->check_crc32(feed, sec->secbuf, sec->seclen)) {
+			if (dvb_demux_performancecheck)
+				demux->total_crc_time +=
+					dvb_dmx_calc_time_delta(pre_crc_time);
+
+			/* Notify on CRC error */
+			feed->cb.sec(NULL, 0, NULL, 0,
+				&f->filter);
+
 			return -1;
+		}
+
+		if (dvb_demux_performancecheck)
+			demux->total_crc_time +=
+				dvb_dmx_calc_time_delta(pre_crc_time);
 	}
 
 	do {
@@ -287,7 +763,7 @@
 	return 0;
 }
 
-static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
+static int dvb_dmx_swfilter_section_one_packet(struct dvb_demux_feed *feed,
 					   const u8 *buf)
 {
 	u8 p, count;
@@ -302,7 +778,16 @@
 	p = 188 - count;	/* payload start */
 
 	cc = buf[3] & 0x0f;
-	ccok = ((feed->cc + 1) & 0x0f) == cc;
+	if (feed->first_cc)
+		ccok = 1;
+	else
+		ccok = ((feed->cc + 1) & 0x0f) == cc;
+
+	/* discard TS packets holding sections with TEI bit set */
+	if (buf[1] & 0x80)
+		return -EINVAL;
+
+	feed->first_cc = 0;
 	feed->cc = cc;
 
 	if (buf[3] & 0x20) {
@@ -356,28 +841,668 @@
 	return 0;
 }
 
-static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed,
-						const u8 *buf)
+/*
+ * dvb_dmx_swfilter_section_packet - wrapper for section filtering of single
+ * TS packet.
+ *
+ * @feed: dvb demux feed
+ * @buf: buffer containing the TS packet
+ * @should_lock: specifies demux locking semantics: if not set, proper demux
+ * locking is expected to have been done by the caller.
+ *
+ * Return: error status
+ */
+int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
+	   const u8 *buf, int should_lock)
 {
+	int ret;
+
+	if (!should_lock && !spin_is_locked(&feed->demux->lock)) {
+		pr_err("%s: demux spinlock should have been locked\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (should_lock)
+		spin_lock(&feed->demux->lock);
+
+	ret = dvb_dmx_swfilter_section_one_packet(feed, buf);
+
+	if (should_lock)
+		spin_unlock(&feed->demux->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(dvb_dmx_swfilter_section_packet);
+
+static int dvb_demux_idx_event_sort(struct dmx_index_event_info *curr,
+	struct dmx_index_event_info *new)
+{
+	if (curr->match_tsp_num > new->match_tsp_num)
+		return 0;
+
+	if (curr->match_tsp_num < new->match_tsp_num)
+		return 1;
+	/*
+	 * In case TSP numbers are equal, sort according to event type giving
+	 * priority to PUSI events first, then RAI and finally framing events.
+	 */
+	if ((curr->type & DMX_IDX_RAI && new->type & DMX_IDX_PUSI) ||
+		(!(curr->type & DMX_IDX_PUSI) && !(curr->type & DMX_IDX_RAI) &&
+			new->type & (DMX_IDX_PUSI | DMX_IDX_RAI)))
+		return 0;
+
+	return 1;
+}
+
+static int dvb_demux_save_idx_event(struct dvb_demux_feed *feed,
+		struct dmx_index_event_info *idx_event,
+		int traverse_from_tail)
+{
+	struct dmx_index_entry *idx_entry;
+	struct dmx_index_entry *curr_entry;
+	struct list_head *pos;
+
+	/* get entry from free list */
+	if (list_empty(&feed->rec_info->idx_info.free_list)) {
+		pr_err("%s: index free list is empty\n", __func__);
+		return -ENOMEM;
+	}
+
+	idx_entry = list_first_entry(&feed->rec_info->idx_info.free_list,
+					struct dmx_index_entry, next);
+	list_del(&idx_entry->next);
+
+	idx_entry->event = *idx_event;
+
+	pos = &feed->rec_info->idx_info.ready_list;
+	if (traverse_from_tail) {
+		list_for_each_entry_reverse(curr_entry,
+			&feed->rec_info->idx_info.ready_list, next) {
+			if (dvb_demux_idx_event_sort(&curr_entry->event,
+				idx_event)) {
+				pos = &curr_entry->next;
+				break;
+			}
+		}
+	} else {
+		list_for_each_entry(curr_entry,
+			&feed->rec_info->idx_info.ready_list, next) {
+			if (!dvb_demux_idx_event_sort(&curr_entry->event,
+				idx_event)) {
+				pos = &curr_entry->next;
+				break;
+			}
+		}
+	}
+
+	if (traverse_from_tail)
+		list_add(&idx_entry->next, pos);
+	else
+		list_add_tail(&idx_entry->next, pos);
+
+	return 0;
+}
+
+int dvb_demux_push_idx_event(struct dvb_demux_feed *feed,
+		struct dmx_index_event_info *idx_event, int should_lock)
+{
+	int ret;
+
+	if (!should_lock && !spin_is_locked(&feed->demux->lock))
+		return -EINVAL;
+
+	if (should_lock)
+		spin_lock(&feed->demux->lock);
+	ret = dvb_demux_save_idx_event(feed, idx_event, 1);
+	if (should_lock)
+		spin_unlock(&feed->demux->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(dvb_demux_push_idx_event);
+
+static inline void dvb_dmx_notify_indexing(struct dvb_demux_feed *feed)
+{
+	struct dmx_data_ready dmx_data_ready;
+	struct dmx_index_entry *curr_entry;
+	struct list_head *n, *pos;
+
+	dmx_data_ready.status = DMX_OK_IDX;
+
+	list_for_each_safe(pos, n, &feed->rec_info->idx_info.ready_list) {
+		curr_entry = list_entry(pos, struct dmx_index_entry, next);
+
+		if ((feed->rec_info->idx_info.min_pattern_tsp_num == (u64)-1) ||
+			(curr_entry->event.match_tsp_num <=
+			 feed->rec_info->idx_info.min_pattern_tsp_num)) {
+			dmx_data_ready.idx_event = curr_entry->event;
+			feed->data_ready_cb.ts(&feed->feed.ts, &dmx_data_ready);
+			list_del(&curr_entry->next);
+			list_add_tail(&curr_entry->next,
+				&feed->rec_info->idx_info.free_list);
+		}
+	}
+}
+
+void dvb_dmx_notify_idx_events(struct dvb_demux_feed *feed, int should_lock)
+{
+	if (!should_lock && !spin_is_locked(&feed->demux->lock))
+		return;
+
+	if (should_lock)
+		spin_lock(&feed->demux->lock);
+	dvb_dmx_notify_indexing(feed);
+	if (should_lock)
+		spin_unlock(&feed->demux->lock);
+}
+EXPORT_SYMBOL(dvb_dmx_notify_idx_events);
+
+static void dvb_dmx_process_pattern_result(struct dvb_demux_feed *feed,
+		struct dvb_dmx_video_patterns_results *patterns, int pattern,
+		u64 curr_stc, u64 prev_stc,
+		u64 curr_match_tsp, u64 prev_match_tsp,
+		u64 curr_pusi_tsp, u64 prev_pusi_tsp)
+{
+	int mpeg_frame_start;
+	int h264_frame_start;
+	int vc1_frame_start;
+	int seq_start;
+	u64 frame_end_in_seq;
+	struct dmx_index_event_info idx_event;
+
+	idx_event.pid = feed->pid;
+	if (patterns->info[pattern].used_prefix_size) {
+		idx_event.match_tsp_num = prev_match_tsp;
+		idx_event.last_pusi_tsp_num = prev_pusi_tsp;
+		idx_event.stc = prev_stc;
+	} else {
+		idx_event.match_tsp_num = curr_match_tsp;
+		idx_event.last_pusi_tsp_num = curr_pusi_tsp;
+		idx_event.stc = curr_stc;
+	}
+
+	/* notify on frame-end if needed */
+	if (feed->prev_frame_valid) {
+		if (feed->prev_frame_type & DMX_IDX_MPEG_I_FRAME_START) {
+			idx_event.type = DMX_IDX_MPEG_I_FRAME_END;
+			frame_end_in_seq = DMX_IDX_MPEG_FIRST_SEQ_FRAME_END;
+		} else if (feed->prev_frame_type & DMX_IDX_MPEG_P_FRAME_START) {
+			idx_event.type = DMX_IDX_MPEG_P_FRAME_END;
+			frame_end_in_seq = DMX_IDX_MPEG_FIRST_SEQ_FRAME_END;
+		} else if (feed->prev_frame_type & DMX_IDX_MPEG_B_FRAME_START) {
+			idx_event.type = DMX_IDX_MPEG_B_FRAME_END;
+			frame_end_in_seq = DMX_IDX_MPEG_FIRST_SEQ_FRAME_END;
+		} else if (feed->prev_frame_type & DMX_IDX_H264_IDR_START) {
+			idx_event.type = DMX_IDX_H264_IDR_END;
+			frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END;
+		} else if (feed->prev_frame_type & DMX_IDX_H264_NON_IDR_START) {
+			idx_event.type = DMX_IDX_H264_NON_IDR_END;
+			frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END;
+		} else if (feed->prev_frame_type &
+			   DMX_IDX_H264_IDR_ISLICE_START) {
+			idx_event.type = DMX_IDX_H264_IDR_END;
+			frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END;
+		} else if (feed->prev_frame_type &
+			   DMX_IDX_H264_NON_IDR_PSLICE_START) {
+			idx_event.type = DMX_IDX_H264_NON_IDR_END;
+			frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END;
+		} else if (feed->prev_frame_type &
+			   DMX_IDX_H264_NON_IDR_BSLICE_START) {
+			idx_event.type = DMX_IDX_H264_NON_IDR_END;
+			frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END;
+		} else {
+			idx_event.type = DMX_IDX_VC1_FRAME_END;
+			frame_end_in_seq = DMX_IDX_VC1_FIRST_SEQ_FRAME_END;
+		}
+
+		if (feed->idx_params.types & idx_event.type)
+			dvb_demux_save_idx_event(feed, &idx_event, 1);
+
+		if (feed->first_frame_in_seq_notified &&
+			feed->idx_params.types & frame_end_in_seq) {
+			idx_event.type = frame_end_in_seq;
+			dvb_demux_save_idx_event(feed, &idx_event, 1);
+			feed->first_frame_in_seq_notified = 0;
+		}
+	}
+
+	seq_start = patterns->info[pattern].type &
+		(DMX_IDX_MPEG_SEQ_HEADER | DMX_IDX_H264_SPS |
+		 DMX_IDX_VC1_SEQ_HEADER);
+
+	/* did we find start of sequence/SPS? */
+	if (seq_start) {
+		feed->first_frame_in_seq = 1;
+		feed->first_frame_in_seq_notified = 0;
+		feed->prev_frame_valid = 0;
+		idx_event.type = patterns->info[pattern].type;
+		if (feed->idx_params.types & idx_event.type)
+			dvb_demux_save_idx_event(feed, &idx_event, 1);
+		return;
+	}
+
+	mpeg_frame_start = patterns->info[pattern].type &
+		(DMX_IDX_MPEG_I_FRAME_START |
+		 DMX_IDX_MPEG_P_FRAME_START |
+		 DMX_IDX_MPEG_B_FRAME_START);
+
+	h264_frame_start = patterns->info[pattern].type &
+		(DMX_IDX_H264_IDR_START | DMX_IDX_H264_NON_IDR_START);
+
+	vc1_frame_start = patterns->info[pattern].type &
+		DMX_IDX_VC1_FRAME_START;
+
+	if (!mpeg_frame_start && !h264_frame_start && !vc1_frame_start) {
+		/* neither sequence nor frame, notify on the entry if needed */
+		idx_event.type = patterns->info[pattern].type;
+		if (feed->idx_params.types & idx_event.type)
+			dvb_demux_save_idx_event(feed, &idx_event, 1);
+		feed->prev_frame_valid = 0;
+		return;
+	}
+
+	/* notify on first frame in sequence/sps if needed */
+	if (feed->first_frame_in_seq) {
+		feed->first_frame_in_seq = 0;
+		feed->first_frame_in_seq_notified = 1;
+		if (mpeg_frame_start)
+			idx_event.type = DMX_IDX_MPEG_FIRST_SEQ_FRAME_START;
+		else if (h264_frame_start)
+			idx_event.type = DMX_IDX_H264_FIRST_SPS_FRAME_START;
+		else
+			idx_event.type = DMX_IDX_VC1_FIRST_SEQ_FRAME_START;
+
+		if (feed->idx_params.types & idx_event.type)
+			dvb_demux_save_idx_event(feed, &idx_event, 1);
+	}
+
+	/* notify on frame start if needed */
+	idx_event.type = patterns->info[pattern].type;
+	if (feed->idx_params.types & idx_event.type)
+		dvb_demux_save_idx_event(feed, &idx_event, 1);
+
+	feed->prev_frame_valid = 1;
+	feed->prev_frame_type = patterns->info[pattern].type;
+}
+
+void dvb_dmx_process_idx_pattern(struct dvb_demux_feed *feed,
+		struct dvb_dmx_video_patterns_results *patterns, int pattern,
+		u64 curr_stc, u64 prev_stc,
+		u64 curr_match_tsp, u64 prev_match_tsp,
+		u64 curr_pusi_tsp, u64 prev_pusi_tsp)
+{
+	spin_lock(&feed->demux->lock);
+	dvb_dmx_process_pattern_result(feed,
+		patterns, pattern,
+		curr_stc, prev_stc,
+		curr_match_tsp, prev_match_tsp,
+		curr_pusi_tsp, prev_pusi_tsp);
+	spin_unlock(&feed->demux->lock);
+}
+EXPORT_SYMBOL(dvb_dmx_process_idx_pattern);
+
+static void dvb_dmx_index(struct dvb_demux_feed *feed,
+		const u8 *buf,
+		const u8 timestamp[TIMESTAMP_LEN])
+{
+	int i;
+	int p;
+	u64 stc;
+	int found_patterns;
+	int count = payload(buf);
+	u64 min_pattern_tsp_num;
+	struct dvb_demux_feed *tmp_feed;
+	struct dvb_demux *demux = feed->demux;
+	struct dmx_index_event_info idx_event;
+	struct dvb_dmx_video_patterns_results patterns;
+
+	if (feed->demux->convert_ts)
+		feed->demux->convert_ts(feed, timestamp, &stc);
+	else
+		stc = 0;
+
+	idx_event.pid = feed->pid;
+	idx_event.stc = stc;
+	idx_event.match_tsp_num = feed->rec_info->ts_output_count;
+
+	/* PUSI ? */
+	if (buf[1] & 0x40) {
+		feed->curr_pusi_tsp_num = feed->rec_info->ts_output_count;
+		if (feed->idx_params.types & DMX_IDX_PUSI) {
+			idx_event.type = DMX_IDX_PUSI;
+			idx_event.last_pusi_tsp_num =
+				feed->curr_pusi_tsp_num;
+			dvb_demux_save_idx_event(feed, &idx_event, 1);
+		}
+	}
+
+	/*
+	 * if we have not yet encountered a TS packet with PUSI indication,
+	 * we cannot report index entries yet, as we need to provide
+	 * the TS packet number with PUSI indication preceding the TS
+	 * packet pointed to by the reported index entry.
+	 */
+	if (feed->curr_pusi_tsp_num == (u64)-1) {
+		dvb_dmx_notify_indexing(feed);
+		return;
+	}
+
+	if ((feed->idx_params.types & DMX_IDX_RAI) && /* index RAI? */
+		(buf[3] & 0x20) && /* adaptation field exists? */
+		(buf[4] > 0) && /* adaptation field len > 0 ? */
+		(buf[5] & 0x40)) { /* RAI is set? */
+		idx_event.type = DMX_IDX_RAI;
+		idx_event.last_pusi_tsp_num =
+			feed->curr_pusi_tsp_num;
+		dvb_demux_save_idx_event(feed, &idx_event, 1);
+	}
+
+	/*
+	 * if no pattern search is required, or the TS packet has no payload,
+	 * pattern search is not executed.
+	 */
+	if (!feed->pattern_num || !count) {
+		dvb_dmx_notify_indexing(feed);
+		return;
+	}
+
+	p = 188 - count; /* payload start */
+
+	found_patterns =
+		dvb_dmx_video_pattern_search(feed->patterns,
+				feed->pattern_num, &buf[p], count,
+				&feed->prefix_size, &patterns);
+
+	for (i = 0; i < found_patterns; i++)
+		dvb_dmx_process_pattern_result(feed, &patterns, i,
+			stc, feed->prev_stc,
+			feed->rec_info->ts_output_count, feed->prev_tsp_num,
+			feed->curr_pusi_tsp_num, feed->prev_pusi_tsp_num);
+
+	feed->prev_tsp_num = feed->rec_info->ts_output_count;
+	feed->prev_pusi_tsp_num = feed->curr_pusi_tsp_num;
+	feed->prev_stc = stc;
+	feed->last_pattern_tsp_num = feed->rec_info->ts_output_count;
+
+	/*
+	 * it is possible for a TS packet to hold the prefix of a video
+	 * pattern while the pattern itself is only identified when the
+	 * next TS packet of that PID arrives. When that next packet
+	 * arrives, pattern-search detects a new index entry that starts
+	 * in the previous TS packet.
+	 * In order to notify the user of index entries with match_tsp_num
+	 * in ascending order, index events with match_tsp_num up to
+	 * last_pattern_tsp_num are notified to the user now; the rest
+	 * cannot be notified yet, since the scenario above could cause
+	 * events to be reported out of ascending match_tsp_num order.
+	 */
+	if (feed->rec_info->idx_info.pattern_search_feeds_num == 1) {
+		/*
+		 * optimization for the case where only one PID has
+		 * video pattern search enabled: min_pattern_tsp_num is
+		 * simply updated to the new TS packet number of that
+		 * PID.
+		 */
+		feed->rec_info->idx_info.min_pattern_tsp_num =
+			feed->last_pattern_tsp_num;
+		dvb_dmx_notify_indexing(feed);
+		return;
+	}
+
+	/*
+	 * if we have more than one PID with pattern search,
+	 * min_pattern_tsp_num needs to be updated now based on
+	 * last_pattern_tsp_num of all PIDs with pattern search.
+	 */
+	min_pattern_tsp_num = (u64)-1;
+	i = feed->rec_info->idx_info.pattern_search_feeds_num;
+	list_for_each_entry(tmp_feed, &demux->feed_list, list_head) {
+		if ((tmp_feed->state != DMX_STATE_GO) ||
+			(tmp_feed->type != DMX_TYPE_TS) ||
+			(tmp_feed->feed.ts.buffer.ringbuff !=
+			 feed->feed.ts.buffer.ringbuff))
+			continue;
+
+		if ((tmp_feed->last_pattern_tsp_num != (u64)-1) &&
+			((min_pattern_tsp_num == (u64)-1) ||
+			 (tmp_feed->last_pattern_tsp_num <
+			  min_pattern_tsp_num)))
+			min_pattern_tsp_num = tmp_feed->last_pattern_tsp_num;
+
+		if (tmp_feed->pattern_num) {
+			i--;
+			if (i == 0)
+				break;
+		}
+	}
+
+	feed->rec_info->idx_info.min_pattern_tsp_num = min_pattern_tsp_num;
+
+	/* notify all index entries up to min_pattern_tsp_num */
+	dvb_dmx_notify_indexing(feed);
+}
+
+static inline void dvb_dmx_swfilter_output_packet(
+	struct dvb_demux_feed *feed,
+	const u8 *buf,
+	const u8 timestamp[TIMESTAMP_LEN])
+{
+	/*
+	 * if we output a 192-byte packet with the timestamp at the head of
+	 * the packet, output the timestamp now, before the 188-byte TS packet
+	 */
+	if (feed->tsp_out_format == DMX_TSP_FORMAT_192_HEAD)
+		feed->cb.ts(timestamp, TIMESTAMP_LEN, NULL,
+			0, &feed->feed.ts);
+
+	feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
+
+	/*
+	 * if we output a 192-byte packet with the timestamp at the tail of
+	 * the packet, output the timestamp now, after the 188-byte TS packet
+	 */
+	if (feed->tsp_out_format == DMX_TSP_FORMAT_192_TAIL)
+		feed->cb.ts(timestamp, TIMESTAMP_LEN, NULL,
+			0, &feed->feed.ts);
+
+	if (feed->idx_params.enable)
+		dvb_dmx_index(feed, buf, timestamp);
+
+	feed->rec_info->ts_output_count++;
+}
+
+static inline void dvb_dmx_configure_decoder_fullness(
+						struct dvb_demux *demux,
+						int initialize)
+{
+	struct dvb_demux_feed *feed;
+	int j;
+
+	for (j = 0; j < demux->feednum; j++) {
+		feed = &demux->feed[j];
+
+		if ((feed->state != DMX_STATE_GO) ||
+			(feed->type != DMX_TYPE_TS) ||
+			!(feed->ts_type & TS_DECODER))
+			continue;
+
+		if (initialize) {
+			if (demux->decoder_fullness_init)
+				demux->decoder_fullness_init(feed);
+		} else {
+			if (demux->decoder_fullness_abort)
+				demux->decoder_fullness_abort(feed);
+		}
+	}
+}
+
+static inline int dvb_dmx_swfilter_buffer_check(
+					struct dvb_demux *demux,
+					u16 pid)
+{
+	int desired_space;
+	int ret;
+	struct dmx_ts_feed *ts;
+	struct dvb_demux_filter *f;
+	struct dvb_demux_feed *feed;
+	int was_locked;
+	int i, j;
+
+	if (likely(spin_is_locked(&demux->lock)))
+		was_locked = 1;
+	else
+		was_locked = 0;
+
+	/*
+	 * Check that there's enough free space for data output.
+	 * If there is no space, wait for it (block).
+	 * Since this function is called while the spinlock
+	 * is held, the lock must be released first.
+	 * Once we get control back, the lock is re-acquired
+	 * and we check that the filter is still valid.
+	 */
+	for (j = 0; j < demux->feednum; j++) {
+		feed = &demux->feed[j];
+
+		if (demux->sw_filter_abort)
+			return -ENODEV;
+
+		if ((feed->state != DMX_STATE_GO) ||
+			((feed->pid != pid) && (feed->pid != 0x2000)))
+			continue;
+
+		if (feed->secure_mode.is_secured &&
+			!dvb_dmx_is_rec_feed(feed))
+			return 0;
+
+		if (feed->type == DMX_TYPE_TS) {
+			desired_space = 192; /* upper bound */
+			ts = &feed->feed.ts;
+
+			if (feed->ts_type & TS_PACKET) {
+				if (likely(was_locked))
+					spin_unlock(&demux->lock);
+
+				ret = demux->buffer_ctrl.ts(ts,
+					desired_space, 1);
+
+				if (likely(was_locked))
+					spin_lock(&demux->lock);
+
+				if (ret < 0)
+					continue;
+			}
+
+			if (demux->sw_filter_abort)
+				return -ENODEV;
+
+			if (!ts->is_filtering)
+				continue;
+
+			if ((feed->ts_type & TS_DECODER) &&
+				(demux->decoder_fullness_wait)) {
+				if (likely(was_locked))
+					spin_unlock(&demux->lock);
+
+				ret = demux->decoder_fullness_wait(
+								feed,
+								desired_space);
+
+				if (likely(was_locked))
+					spin_lock(&demux->lock);
+
+				if (ret < 0)
+					continue;
+			}
+
+			continue;
+		}
+
+		/* else - section case */
+		desired_space = feed->feed.sec.tsfeedp + 188; /* upper bound */
+		for (i = 0; i < demux->filternum; i++) {
+			if (demux->sw_filter_abort)
+				return -EPERM;
+
+			if (!feed->feed.sec.is_filtering)
+				continue;
+
+			f = &demux->filter[i];
+			if (f->feed != feed)
+				continue;
+
+			if (likely(was_locked))
+				spin_unlock(&demux->lock);
+
+			ret = demux->buffer_ctrl.sec(&f->filter,
+				desired_space, 1);
+
+			if (likely(was_locked))
+				spin_lock(&demux->lock);
+
+			if (ret < 0)
+				break;
+		}
+	}
+
+	return 0;
+}
+
+static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed,
+			const u8 *buf, const u8 timestamp[TIMESTAMP_LEN])
+{
+	u16 pid = ts_pid(buf);
+	u8 scrambling_bits = ts_scrambling_ctrl(buf);
+	struct dmx_data_ready dmx_data_ready;
+
+	/*
+	 * Notify on scrambling status change only when we move
+	 * from clear (0) to non-clear and vice versa
+	 */
+	if ((scrambling_bits && !feed->scrambling_bits) ||
+		(!scrambling_bits && feed->scrambling_bits)) {
+		dmx_data_ready.status = DMX_OK_SCRAMBLING_STATUS;
+		dmx_data_ready.data_length = 0;
+		dmx_data_ready.scrambling_bits.pid = pid;
+		dmx_data_ready.scrambling_bits.old_value =
+			feed->scrambling_bits;
+		dmx_data_ready.scrambling_bits.new_value = scrambling_bits;
+
+		if (feed->type == DMX_TYPE_SEC)
+			dvb_dmx_notify_section_event(feed, &dmx_data_ready, 0);
+		else if (feed->feed.ts.is_filtering)
+			feed->data_ready_cb.ts(&feed->feed.ts, &dmx_data_ready);
+	}
+
+	feed->scrambling_bits = scrambling_bits;
+
 	switch (feed->type) {
 	case DMX_TYPE_TS:
 		if (!feed->feed.ts.is_filtering)
 			break;
 		if (feed->ts_type & TS_PACKET) {
-			if (feed->ts_type & TS_PAYLOAD_ONLY)
-				dvb_dmx_swfilter_payload(feed, buf);
-			else
-				feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
+			if (feed->ts_type & TS_PAYLOAD_ONLY) {
+				if (!feed->secure_mode.is_secured)
+					dvb_dmx_swfilter_payload(feed, buf);
+			} else {
+				dvb_dmx_swfilter_output_packet(feed,
+						buf, timestamp);
+			}
 		}
-		if (feed->ts_type & TS_DECODER)
+		if ((feed->ts_type & TS_DECODER) &&
+			!feed->secure_mode.is_secured)
 			if (feed->demux->write_to_decoder)
 				feed->demux->write_to_decoder(feed, buf, 188);
 		break;
 
 	case DMX_TYPE_SEC:
-		if (!feed->feed.sec.is_filtering)
+		if (!feed->feed.sec.is_filtering ||
+			feed->secure_mode.is_secured)
 			break;
-		if (dvb_dmx_swfilter_section_packet(feed, buf) < 0)
+		if (dvb_dmx_swfilter_section_one_packet(feed, buf) < 0)
 			feed->feed.sec.seclen = feed->feed.sec.secbufp = 0;
 		break;
 
@@ -391,7 +1516,8 @@
 	((f)->feed.ts.is_filtering) &&					\
 	(((f)->ts_type & (TS_PACKET | TS_DEMUX)) == TS_PACKET))
 
-static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
+static void dvb_dmx_swfilter_one_packet(struct dvb_demux *demux, const u8 *buf,
+				const u8 timestamp[TIMESTAMP_LEN])
 {
 	struct dvb_demux_feed *feed;
 	u16 pid = ts_pid(buf);
@@ -412,12 +1538,11 @@
 					* 188 * 8;
 				/* convert to 1024 basis */
 				speed_bytes = 1000 * div64_u64(speed_bytes,
-						1024);
+							       1024);
 				speed_timedelta = ktime_ms_delta(cur_time,
 							demux->speed_last_time);
-				printk(KERN_INFO "TS speed %llu Kbits/sec \n",
-						div64_u64(speed_bytes,
-							speed_timedelta));
+				pr_info("TS speed %llu Kbits/sec\n",
+				div64_u64(speed_bytes, speed_timedelta));
 			}
 
 			demux->speed_last_time = cur_time;
@@ -426,11 +1551,12 @@
 	}
 
 	if (buf[1] & 0x80) {
-		dprintk_tscheck("TEI detected. "
-				"PID=0x%x data1=0x%x\n",
-				pid, buf[1]);
-		/* data in this packet can't be trusted - drop it unless
-		 * module option dvb_demux_feed_err_pkts is set */
+		dprintk_tscheck("TEI detected. PID=0x%x data1=0x%x\n", pid,
+			buf[1]);
+		/*
+		 * data in this packet can't be trusted - drop it unless
+		 * module option dvb_demux_feed_err_pkts is set
+		 */
 		if (!dvb_demux_feed_err_pkts)
 			return;
 	} else /* if TEI bit is set, pid may be wrong- skip pkt counter */
@@ -439,10 +1565,12 @@
 			if (pid < MAX_PID) {
 				if (buf[3] & 0x10)
 					demux->cnt_storage[pid] =
-						(demux->cnt_storage[pid] + 1) & 0xf;
+						(demux->cnt_storage[pid] + 1) &
+						0xf;
 
 				if ((buf[3] & 0xf) != demux->cnt_storage[pid]) {
-					dprintk_tscheck("TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n",
+					dprintk_tscheck(
+						"TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n",
 						pid, demux->cnt_storage[pid],
 						buf[3] & 0xf);
 					demux->cnt_storage[pid] = buf[3] & 0xf;
@@ -451,48 +1579,76 @@
 			/* end check */
 		}
 
+	if (demux->playback_mode == DMX_PB_MODE_PULL)
+		if (dvb_dmx_swfilter_buffer_check(demux, pid) < 0)
+			return;
+
 	list_for_each_entry(feed, &demux->feed_list, list_head) {
 		if ((feed->pid != pid) && (feed->pid != 0x2000))
 			continue;
 
-		/* copy each packet only once to the dvr device, even
-		 * if a PID is in multiple filters (e.g. video + PCR) */
+		/*
+		 * copy each packet only once to the dvr device, even
+		 * if a PID is in multiple filters (e.g. video + PCR)
+		 */
 		if ((DVR_FEED(feed)) && (dvr_done++))
 			continue;
 
 		if (feed->pid == pid)
-			dvb_dmx_swfilter_packet_type(feed, buf);
-		else if (feed->pid == 0x2000)
-			feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
+			dvb_dmx_swfilter_packet_type(feed, buf, timestamp);
+		else if ((feed->pid == 0x2000) &&
+			     (feed->feed.ts.is_filtering))
+			dvb_dmx_swfilter_output_packet(feed, buf, timestamp);
 	}
 }
 
+void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf,
+				const u8 timestamp[TIMESTAMP_LEN])
+{
+	spin_lock(&demux->lock);
+	dvb_dmx_swfilter_one_packet(demux, buf, timestamp);
+	spin_unlock(&demux->lock);
+}
+EXPORT_SYMBOL(dvb_dmx_swfilter_packet);
+
 void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
 			      size_t count)
 {
-	unsigned long flags;
+	ktime_t pre_time = ktime_set(0, 0);
+	u8 timestamp[TIMESTAMP_LEN] = {0};
 
-	spin_lock_irqsave(&demux->lock, flags);
+	if (dvb_demux_performancecheck)
+		pre_time = ktime_get();
+
+	spin_lock(&demux->lock);
+
+	demux->sw_filter_abort = 0;
+	dvb_dmx_configure_decoder_fullness(demux, 1);
 
 	while (count--) {
 		if (buf[0] == 0x47)
-			dvb_dmx_swfilter_packet(demux, buf);
+			dvb_dmx_swfilter_one_packet(demux, buf, timestamp);
 		buf += 188;
 	}
 
-	spin_unlock_irqrestore(&demux->lock, flags);
-}
+	spin_unlock(&demux->lock);
 
+	if (dvb_demux_performancecheck)
+		demux->total_process_time += dvb_dmx_calc_time_delta(pre_time);
+}
 EXPORT_SYMBOL(dvb_dmx_swfilter_packets);
 
 static inline int find_next_packet(const u8 *buf, int pos, size_t count,
-				   const int pktsize)
+				   const int pktsize, const int leadingbytes)
 {
 	int start = pos, lost;
 
 	while (pos < count) {
-		if (buf[pos] == 0x47 ||
-		    (pktsize == 204 && buf[pos] == 0xB8))
+		if ((buf[pos] == 0x47 && !leadingbytes) ||
+		    (pktsize == 204 && buf[pos] == 0xB8) ||
+			(pktsize == 192 && leadingbytes &&
+			 (pos+leadingbytes < count) &&
+				buf[pos+leadingbytes] == 0x47))
 			break;
 		pos++;
 	}
@@ -501,8 +1657,11 @@
 	if (lost) {
 		/* This garbage is part of a valid packet? */
 		int backtrack = pos - pktsize;
+
 		if (backtrack >= 0 && (buf[backtrack] == 0x47 ||
-		    (pktsize == 204 && buf[backtrack] == 0xB8)))
+		    (pktsize == 204 && buf[backtrack] == 0xB8) ||
+			(pktsize == 192 &&
+			buf[backtrack+leadingbytes] == 0x47)))
 			return backtrack;
 	}
 
@@ -511,13 +1670,20 @@
 
 /* Filter all pktsize= 188 or 204 sized packets and skip garbage. */
 static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf,
-		size_t count, const int pktsize)
+		size_t count, const int pktsize, const int leadingbytes)
 {
 	int p = 0, i, j;
 	const u8 *q;
-	unsigned long flags;
+	ktime_t pre_time;
+	u8 timestamp[TIMESTAMP_LEN];
 
-	spin_lock_irqsave(&demux->lock, flags);
+	if (dvb_demux_performancecheck)
+		pre_time = ktime_get();
+
+	spin_lock(&demux->lock);
+
+	demux->sw_filter_abort = 0;
+	dvb_dmx_configure_decoder_fullness(demux, 1);
 
 	if (demux->tsbufp) { /* tsbuf[0] is now 0x47. */
 		i = demux->tsbufp;
@@ -528,14 +1694,36 @@
 			goto bailout;
 		}
 		memcpy(&demux->tsbuf[i], buf, j);
-		if (demux->tsbuf[0] == 0x47) /* double check */
-			dvb_dmx_swfilter_packet(demux, demux->tsbuf);
+
+		if (pktsize == 192) {
+			if (leadingbytes)
+				memcpy(timestamp, &demux->tsbuf[p],
+					TIMESTAMP_LEN);
+			else
+				memcpy(timestamp, &demux->tsbuf[188],
+					TIMESTAMP_LEN);
+		} else {
+			memset(timestamp, 0, TIMESTAMP_LEN);
+		}
+
+		if (pktsize == 192 &&
+			leadingbytes &&
+			demux->tsbuf[leadingbytes] == 0x47)  /* double check */
+			dvb_dmx_swfilter_one_packet(demux,
+				demux->tsbuf + TIMESTAMP_LEN, timestamp);
+		else if (demux->tsbuf[0] == 0x47) /* double check */
+			dvb_dmx_swfilter_one_packet(demux,
+					demux->tsbuf, timestamp);
 		demux->tsbufp = 0;
 		p += j;
 	}
 
 	while (1) {
-		p = find_next_packet(buf, p, count, pktsize);
+		p = find_next_packet(buf, p, count, pktsize, leadingbytes);
+
+		if (demux->sw_filter_abort)
+			goto bailout;
+
 		if (p >= count)
 			break;
 		if (count - p < pktsize)
@@ -548,7 +1736,19 @@
 			demux->tsbuf[0] = 0x47;
 			q = demux->tsbuf;
 		}
-		dvb_dmx_swfilter_packet(demux, q);
+
+		if (pktsize == 192) {
+			if (leadingbytes) {
+				q = &buf[p+leadingbytes];
+				memcpy(timestamp, &buf[p], TIMESTAMP_LEN);
+			} else {
+				memcpy(timestamp, &buf[p+188], TIMESTAMP_LEN);
+			}
+		} else {
+			memset(timestamp, 0, TIMESTAMP_LEN);
+		}
+
+		dvb_dmx_swfilter_one_packet(demux, q, timestamp);
 		p += pktsize;
 	}
 
@@ -561,33 +1761,65 @@
 	}
 
 bailout:
-	spin_unlock_irqrestore(&demux->lock, flags);
+	spin_unlock(&demux->lock);
+
+	if (dvb_demux_performancecheck)
+		demux->total_process_time += dvb_dmx_calc_time_delta(pre_time);
 }
 
 void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count)
 {
-	_dvb_dmx_swfilter(demux, buf, count, 188);
+	_dvb_dmx_swfilter(demux, buf, count, 188, 0);
 }
 EXPORT_SYMBOL(dvb_dmx_swfilter);
 
 void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count)
 {
-	_dvb_dmx_swfilter(demux, buf, count, 204);
+	_dvb_dmx_swfilter(demux, buf, count, 204, 0);
 }
 EXPORT_SYMBOL(dvb_dmx_swfilter_204);
 
 void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&demux->lock, flags);
+	spin_lock(&demux->lock);
 
 	demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts);
 
-	spin_unlock_irqrestore(&demux->lock, flags);
+	spin_unlock(&demux->lock);
 }
 EXPORT_SYMBOL(dvb_dmx_swfilter_raw);
 
+void dvb_dmx_swfilter_format(
+			struct dvb_demux *demux,
+			const u8 *buf,
+			size_t count,
+			enum dmx_tsp_format_t tsp_format)
+{
+	switch (tsp_format) {
+	case DMX_TSP_FORMAT_188:
+		_dvb_dmx_swfilter(demux, buf, count, 188, 0);
+		break;
+
+	case DMX_TSP_FORMAT_192_TAIL:
+		_dvb_dmx_swfilter(demux, buf, count, 192, 0);
+		break;
+
+	case DMX_TSP_FORMAT_192_HEAD:
+		_dvb_dmx_swfilter(demux, buf, count, 192, TIMESTAMP_LEN);
+		break;
+
+	case DMX_TSP_FORMAT_204:
+		_dvb_dmx_swfilter(demux, buf, count, 204, 0);
+		break;
+
+	default:
+		pr_err("%s: invalid TS packet format (format=%d)\n", __func__,
+			tsp_format);
+		break;
+	}
+}
+EXPORT_SYMBOL(dvb_dmx_swfilter_format);
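
/*
 * Illustrative call (a sketch; 'buf' and 'len' are hypothetical driver
 * buffers): for a stream of 192-byte packets carrying a 4-byte timestamp
 * before each 188-byte TS packet, a driver would use
 *
 *	dvb_dmx_swfilter_format(demux, buf, len, DMX_TSP_FORMAT_192_HEAD);
 *
 * which, per the switch above, filters with a pktsize of 192 and leadingbytes
 * equal to TIMESTAMP_LEN.
 */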
+
 static struct dvb_demux_filter *dvb_dmx_filter_alloc(struct dvb_demux *demux)
 {
 	int i;
@@ -620,6 +1852,311 @@
 	return &demux->feed[i];
 }
 
+const struct dvb_dmx_video_patterns *dvb_dmx_get_pattern(u64 dmx_idx_pattern)
+{
+	switch (dmx_idx_pattern) {
+	case DMX_IDX_MPEG_SEQ_HEADER:
+		return &mpeg2_seq_hdr;
+
+	case DMX_IDX_MPEG_GOP:
+		return &mpeg2_gop;
+
+	case DMX_IDX_MPEG_I_FRAME_START:
+		return &mpeg2_iframe;
+
+	case DMX_IDX_MPEG_P_FRAME_START:
+		return &mpeg2_pframe;
+
+	case DMX_IDX_MPEG_B_FRAME_START:
+		return &mpeg2_bframe;
+
+	case DMX_IDX_H264_SPS:
+		return &h264_sps;
+
+	case DMX_IDX_H264_PPS:
+		return &h264_pps;
+
+	case DMX_IDX_H264_IDR_START:
+		return &h264_idr;
+
+	case DMX_IDX_H264_NON_IDR_START:
+		return &h264_non_idr;
+
+	case DMX_IDX_H264_IDR_ISLICE_START:
+		return &h264_idr_islice;
+
+	case DMX_IDX_H264_NON_IDR_PSLICE_START:
+		return &h264_non_idr_pslice;
+
+	case DMX_IDX_H264_NON_IDR_BSLICE_START:
+		return &h264_non_idr_bslice;
+
+	case DMX_IDX_H264_ACCESS_UNIT_DEL:
+		return &h264_non_access_unit_del;
+
+	case DMX_IDX_H264_SEI:
+		return &h264_non_sei;
+
+	case DMX_IDX_VC1_SEQ_HEADER:
+		return &vc1_seq_hdr;
+
+	case DMX_IDX_VC1_ENTRY_POINT:
+		return &vc1_entry_point;
+
+	case DMX_IDX_VC1_FRAME_START:
+		return &vc1_frame;
+
+	default:
+		return NULL;
+	}
+}
+EXPORT_SYMBOL(dvb_dmx_get_pattern);
+
+static void dvb_dmx_init_idx_state(struct dvb_demux_feed *feed)
+{
+	feed->prev_tsp_num = (u64)-1;
+	feed->curr_pusi_tsp_num = (u64)-1;
+	feed->prev_pusi_tsp_num = (u64)-1;
+	feed->prev_frame_valid = 0;
+	feed->first_frame_in_seq = 0;
+	feed->first_frame_in_seq_notified = 0;
+	feed->last_pattern_tsp_num = (u64)-1;
+	feed->pattern_num = 0;
+	memset(&feed->prefix_size, 0,
+		sizeof(struct dvb_dmx_video_prefix_size_masks));
+
+	if (feed->idx_params.types &
+		(DMX_IDX_MPEG_SEQ_HEADER |
+		 DMX_IDX_MPEG_FIRST_SEQ_FRAME_START |
+		 DMX_IDX_MPEG_FIRST_SEQ_FRAME_END)) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_MPEG_SEQ_HEADER);
+		feed->pattern_num++;
+	}
+
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types & DMX_IDX_MPEG_GOP)) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_MPEG_GOP);
+		feed->pattern_num++;
+	}
+
+	/* MPEG2 I-frame */
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types &
+		 (DMX_IDX_MPEG_I_FRAME_START | DMX_IDX_MPEG_I_FRAME_END |
+		  DMX_IDX_MPEG_P_FRAME_END | DMX_IDX_MPEG_B_FRAME_END |
+		  DMX_IDX_MPEG_FIRST_SEQ_FRAME_START |
+		  DMX_IDX_MPEG_FIRST_SEQ_FRAME_END))) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_MPEG_I_FRAME_START);
+		feed->pattern_num++;
+	}
+
+	/* MPEG2 P-frame */
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types &
+		 (DMX_IDX_MPEG_P_FRAME_START | DMX_IDX_MPEG_P_FRAME_END |
+		  DMX_IDX_MPEG_I_FRAME_END | DMX_IDX_MPEG_B_FRAME_END |
+		  DMX_IDX_MPEG_FIRST_SEQ_FRAME_START |
+		  DMX_IDX_MPEG_FIRST_SEQ_FRAME_END))) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_MPEG_P_FRAME_START);
+		feed->pattern_num++;
+	}
+
+	/* MPEG2 B-frame */
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types &
+		 (DMX_IDX_MPEG_B_FRAME_START | DMX_IDX_MPEG_B_FRAME_END |
+		  DMX_IDX_MPEG_I_FRAME_END | DMX_IDX_MPEG_P_FRAME_END |
+		  DMX_IDX_MPEG_FIRST_SEQ_FRAME_START |
+		  DMX_IDX_MPEG_FIRST_SEQ_FRAME_END))) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_MPEG_B_FRAME_START);
+		feed->pattern_num++;
+	}
+
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types &
+		 (DMX_IDX_H264_SPS |
+		  DMX_IDX_H264_FIRST_SPS_FRAME_START |
+		  DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_H264_SPS);
+		feed->pattern_num++;
+	}
+
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types & DMX_IDX_H264_PPS)) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_H264_PPS);
+		feed->pattern_num++;
+	}
+
+	/* H264 IDR */
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types &
+		 (DMX_IDX_H264_IDR_START | DMX_IDX_H264_IDR_END |
+		  DMX_IDX_H264_NON_IDR_END |
+		  DMX_IDX_H264_FIRST_SPS_FRAME_START |
+		  DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_H264_IDR_START);
+		feed->pattern_num++;
+	}
+
+	/* H264 non-IDR */
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types &
+		 (DMX_IDX_H264_NON_IDR_START | DMX_IDX_H264_NON_IDR_END |
+		  DMX_IDX_H264_IDR_END |
+		  DMX_IDX_H264_FIRST_SPS_FRAME_START |
+		  DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_START);
+		feed->pattern_num++;
+	}
+
+	/* H264 IDR ISlice */
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types &
+		 (DMX_IDX_H264_IDR_ISLICE_START | DMX_IDX_H264_IDR_END |
+		  DMX_IDX_H264_NON_IDR_END |
+		  DMX_IDX_H264_FIRST_SPS_FRAME_START |
+		  DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_H264_IDR_ISLICE_START);
+		feed->pattern_num++;
+	}
+	/* H264 non-IDR PSlice */
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types &
+		 (DMX_IDX_H264_NON_IDR_PSLICE_START | DMX_IDX_H264_NON_IDR_END |
+		  DMX_IDX_H264_IDR_END |
+		  DMX_IDX_H264_FIRST_SPS_FRAME_START |
+		  DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_PSLICE_START);
+		feed->pattern_num++;
+	}
+	/* H264 non-IDR BSlice */
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types &
+		 (DMX_IDX_H264_NON_IDR_BSLICE_START | DMX_IDX_H264_NON_IDR_END |
+		  DMX_IDX_H264_IDR_END |
+		  DMX_IDX_H264_FIRST_SPS_FRAME_START |
+		  DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_BSLICE_START);
+		feed->pattern_num++;
+	}
+
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types & DMX_IDX_H264_ACCESS_UNIT_DEL)) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_H264_ACCESS_UNIT_DEL);
+		feed->pattern_num++;
+	}
+
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types & DMX_IDX_H264_SEI)) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_H264_SEI);
+		feed->pattern_num++;
+	}
+
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types &
+		 (DMX_IDX_VC1_SEQ_HEADER |
+		  DMX_IDX_VC1_FIRST_SEQ_FRAME_START |
+		  DMX_IDX_VC1_FIRST_SEQ_FRAME_END))) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_VC1_SEQ_HEADER);
+		feed->pattern_num++;
+	}
+
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types & DMX_IDX_VC1_ENTRY_POINT)) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_VC1_ENTRY_POINT);
+		feed->pattern_num++;
+	}
+
+	/* VC1 frame */
+	if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+		(feed->idx_params.types &
+		 (DMX_IDX_VC1_FRAME_START | DMX_IDX_VC1_FRAME_END |
+		  DMX_IDX_VC1_FIRST_SEQ_FRAME_START |
+		  DMX_IDX_VC1_FIRST_SEQ_FRAME_END))) {
+		feed->patterns[feed->pattern_num] =
+			dvb_dmx_get_pattern(DMX_IDX_VC1_FRAME_START);
+		feed->pattern_num++;
+	}
+
+	if (feed->pattern_num)
+		feed->rec_info->idx_info.pattern_search_feeds_num++;
+}
+
+static struct dvb_demux_rec_info *dvb_dmx_alloc_rec_info(
+					struct dmx_ts_feed *ts_feed)
+{
+	int i;
+	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+	struct dvb_demux *demux = feed->demux;
+	struct dvb_demux_rec_info *rec_info;
+	struct dvb_demux_feed *tmp_feed;
+
+	/* check if this feed shares a recording buffer with other active feeds */
+	list_for_each_entry(tmp_feed, &demux->feed_list, list_head) {
+		if ((tmp_feed->state == DMX_STATE_GO) &&
+			(tmp_feed->type == DMX_TYPE_TS) &&
+			(tmp_feed != feed) &&
+			(tmp_feed->feed.ts.buffer.ringbuff ==
+			 ts_feed->buffer.ringbuff)) {
+			/* indexing information is shared between the feeds */
+			tmp_feed->rec_info->ref_count++;
+			return tmp_feed->rec_info;
+		}
+	}
+
+	/* Need to allocate a new indexing info */
+	for (i = 0; i < demux->feednum; i++)
+		if (!demux->rec_info_pool[i].ref_count)
+			break;
+
+	if (i == demux->feednum)
+		return NULL;
+
+	rec_info = &demux->rec_info_pool[i];
+	rec_info->ref_count++;
+	INIT_LIST_HEAD(&rec_info->idx_info.free_list);
+	INIT_LIST_HEAD(&rec_info->idx_info.ready_list);
+
+	for (i = 0; i < DMX_IDX_EVENT_QUEUE_SIZE; i++)
+		list_add(&rec_info->idx_info.events[i].next,
+			&rec_info->idx_info.free_list);
+
+	rec_info->ts_output_count = 0;
+	rec_info->idx_info.min_pattern_tsp_num = (u64)-1;
+	rec_info->idx_info.pattern_search_feeds_num = 0;
+	rec_info->idx_info.indexing_feeds_num = 0;
+
+	return rec_info;
+}
+
+static void dvb_dmx_free_rec_info(struct dmx_ts_feed *ts_feed)
+{
+	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+
+	if (!feed->rec_info || !feed->rec_info->ref_count) {
+		pr_err("%s: invalid idx info state\n", __func__);
+		return;
+	}
+
+	feed->rec_info->ref_count--;
+}
+
 static int dvb_demux_feed_find(struct dvb_demux_feed *feed)
 {
 	struct dvb_demux_feed *entry;
@@ -635,7 +2172,7 @@
 {
 	spin_lock_irq(&feed->demux->lock);
 	if (dvb_demux_feed_find(feed)) {
-		printk(KERN_ERR "%s: feed already in list (type=%x state=%x pid=%x)\n",
+		pr_err("%s: feed already in list (type=%x state=%x pid=%x)\n",
 		       __func__, feed->type, feed->state, feed->pid);
 		goto out;
 	}
@@ -649,7 +2186,7 @@
 {
 	spin_lock_irq(&feed->demux->lock);
 	if (!(dvb_demux_feed_find(feed))) {
-		printk(KERN_ERR "%s: feed not in list (type=%x state=%x pid=%x)\n",
+		pr_err("%s: feed not in list (type=%x state=%x pid=%x)\n",
 		       __func__, feed->type, feed->state, feed->pid);
 		goto out;
 	}
@@ -733,7 +2270,34 @@
 		return -ENODEV;
 	}
 
-	if ((ret = demux->start_feed(feed)) < 0) {
+	feed->first_cc = 1;
+	feed->scrambling_bits = 0;
+
+	if ((feed->ts_type & TS_PACKET) &&
+		!(feed->ts_type & TS_PAYLOAD_ONLY)) {
+		feed->rec_info = dvb_dmx_alloc_rec_info(ts_feed);
+		if (!feed->rec_info) {
+			mutex_unlock(&demux->mutex);
+			return -ENOMEM;
+		}
+		if (feed->idx_params.enable) {
+			dvb_dmx_init_idx_state(feed);
+			feed->rec_info->idx_info.indexing_feeds_num++;
+			if (demux->set_indexing)
+				demux->set_indexing(feed);
+		}
+	} else {
+		feed->pattern_num = 0;
+		feed->rec_info = NULL;
+	}
+
+	ret = demux->start_feed(feed);
+	if (ret < 0) {
+		if ((feed->ts_type & TS_PACKET) &&
+		    !(feed->ts_type & TS_PAYLOAD_ONLY)) {
+			dvb_dmx_free_rec_info(ts_feed);
+			feed->rec_info = NULL;
+		}
 		mutex_unlock(&demux->mutex);
 		return ret;
 	}
@@ -771,11 +2335,337 @@
 	ts_feed->is_filtering = 0;
 	feed->state = DMX_STATE_ALLOCATED;
 	spin_unlock_irq(&demux->lock);
+
+	if (feed->rec_info) {
+		if (feed->pattern_num)
+			feed->rec_info->idx_info.pattern_search_feeds_num--;
+		if (feed->idx_params.enable)
+			feed->rec_info->idx_info.indexing_feeds_num--;
+		dvb_dmx_free_rec_info(ts_feed);
+		feed->rec_info = NULL;
+	}
+
 	mutex_unlock(&demux->mutex);
 
 	return ret;
 }
 
+static int dmx_ts_feed_decoder_buff_status(struct dmx_ts_feed *ts_feed,
+			struct dmx_buffer_status *dmx_buffer_status)
+{
+	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+	struct dvb_demux *demux = feed->demux;
+	int ret;
+
+	mutex_lock(&demux->mutex);
+
+	if (feed->state < DMX_STATE_GO) {
+		mutex_unlock(&demux->mutex);
+		return -EINVAL;
+	}
+
+	if (!demux->decoder_buffer_status) {
+		mutex_unlock(&demux->mutex);
+		return -ENODEV;
+	}
+
+	ret = demux->decoder_buffer_status(feed, dmx_buffer_status);
+
+	mutex_unlock(&demux->mutex);
+
+	return ret;
+}
+
+static int dmx_ts_feed_reuse_decoder_buffer(struct dmx_ts_feed *ts_feed,
+						int cookie)
+{
+	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+	struct dvb_demux *demux = feed->demux;
+	int ret;
+
+	mutex_lock(&demux->mutex);
+
+	if (feed->state < DMX_STATE_GO) {
+		mutex_unlock(&demux->mutex);
+		return -EINVAL;
+	}
+
+	if (!demux->reuse_decoder_buffer) {
+		mutex_unlock(&demux->mutex);
+		return -ENODEV;
+	}
+
+	ret = demux->reuse_decoder_buffer(feed, cookie);
+
+	mutex_unlock(&demux->mutex);
+
+	return ret;
+}
+
+static int dmx_ts_feed_data_ready_cb(struct dmx_ts_feed *feed,
+				dmx_ts_data_ready_cb callback)
+{
+	struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+	struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+	mutex_lock(&dvbdmx->mutex);
+
+	if (dvbdmxfeed->state == DMX_STATE_GO) {
+		mutex_unlock(&dvbdmx->mutex);
+		return -EINVAL;
+	}
+
+	dvbdmxfeed->data_ready_cb.ts = callback;
+
+	mutex_unlock(&dvbdmx->mutex);
+	return 0;
+}
+
+static int dmx_ts_set_secure_mode(struct dmx_ts_feed *feed,
+				struct dmx_secure_mode *secure_mode)
+{
+	struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+	struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+	if (mutex_lock_interruptible(&dvbdmx->mutex))
+		return -ERESTARTSYS;
+
+	if (dvbdmxfeed->state == DMX_STATE_GO) {
+		mutex_unlock(&dvbdmx->mutex);
+		return -EBUSY;
+	}
+
+	dvbdmxfeed->secure_mode = *secure_mode;
+	mutex_unlock(&dvbdmx->mutex);
+	return 0;
+}
+
+static int dmx_ts_set_cipher_ops(struct dmx_ts_feed *feed,
+				struct dmx_cipher_operations *cipher_ops)
+{
+	struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+	struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+	int ret = 0;
+
+	if (mutex_lock_interruptible(&dvbdmx->mutex))
+		return -ERESTARTSYS;
+
+	if ((dvbdmxfeed->state == DMX_STATE_GO) &&
+		dvbdmx->set_cipher_op)
+		ret = dvbdmx->set_cipher_op(dvbdmxfeed, cipher_ops);
+
+	if (!ret)
+		dvbdmxfeed->cipher_ops = *cipher_ops;
+
+	mutex_unlock(&dvbdmx->mutex);
+	return ret;
+}
+
+static int dmx_ts_set_video_codec(
+	struct dmx_ts_feed *ts_feed,
+	enum dmx_video_codec video_codec)
+{
+	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+
+	feed->video_codec = video_codec;
+
+	return 0;
+}
+
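+/* Update indexing params; adjust rec_info counters if the feed is active */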
+static int dmx_ts_set_idx_params(struct dmx_ts_feed *ts_feed,
+	struct dmx_indexing_params *idx_params)
+{
+	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+	struct dvb_demux *dvbdmx = feed->demux;
+	int idx_enabled;
+	int ret = 0;
+
+	mutex_lock(&dvbdmx->mutex);
+
+	if ((feed->state == DMX_STATE_GO) &&
+		!feed->rec_info) {
+		mutex_unlock(&dvbdmx->mutex);
+		return -EINVAL;
+	}
+
+	idx_enabled = feed->idx_params.enable;
+	feed->idx_params = *idx_params;
+
+	if (feed->state == DMX_STATE_GO) {
+		spin_lock_irq(&dvbdmx->lock);
+		if (feed->pattern_num)
+			feed->rec_info->idx_info.pattern_search_feeds_num--;
+		if (idx_enabled && !idx_params->enable)
+			feed->rec_info->idx_info.indexing_feeds_num--;
+		if (!idx_enabled && idx_params->enable)
+			feed->rec_info->idx_info.indexing_feeds_num++;
+		dvb_dmx_init_idx_state(feed);
+		spin_unlock_irq(&dvbdmx->lock);
+
+		if (dvbdmx->set_indexing)
+			ret = dvbdmx->set_indexing(feed);
+	}
+
+	mutex_unlock(&dvbdmx->mutex);
+
+	return ret;
+}
+
+static int dvbdmx_ts_feed_oob_cmd(struct dmx_ts_feed *ts_feed,
+		struct dmx_oob_command *cmd)
+{
+	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+	struct dmx_data_ready data;
+	struct dvb_demux *dvbdmx = feed->demux;
+	int ret = 0;
+	int secure_non_rec = feed->secure_mode.is_secured &&
+		!dvb_dmx_is_rec_feed(feed);
+
+	mutex_lock(&dvbdmx->mutex);
+
+	if (feed->state != DMX_STATE_GO) {
+		mutex_unlock(&dvbdmx->mutex);
+		return -EINVAL;
+	}
+
+	/* Decoder & non-recording secure feeds are handled by plug-in */
+	if ((feed->ts_type & TS_DECODER) || secure_non_rec) {
+		if (feed->demux->oob_command)
+			ret = feed->demux->oob_command(feed, cmd);
+	}
+
+	if (!(feed->ts_type & (TS_PAYLOAD_ONLY | TS_PACKET)) ||
+		secure_non_rec) {
+		mutex_unlock(&dvbdmx->mutex);
+		return ret;
+	}
+
+	data.data_length = 0;
+
+	switch (cmd->type) {
+	case DMX_OOB_CMD_EOS:
+		if (feed->ts_type & TS_PAYLOAD_ONLY)
+			dvb_dmx_check_pes_end(feed);
+
+		data.status = DMX_OK_EOS;
+		ret = feed->data_ready_cb.ts(&feed->feed.ts, &data);
+		break;
+
+	case DMX_OOB_CMD_MARKER:
+		data.status = DMX_OK_MARKER;
+		data.marker.id = cmd->params.marker.id;
+		ret = feed->data_ready_cb.ts(&feed->feed.ts, &data);
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&dvbdmx->mutex);
+	return ret;
+}
+
+static int dvbdmx_ts_get_scrambling_bits(struct dmx_ts_feed *ts_feed,
+			u8 *value)
+{
+	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+	struct dvb_demux *demux = feed->demux;
+
+	spin_lock(&demux->lock);
+
+	if (!ts_feed->is_filtering) {
+		spin_unlock(&demux->lock);
+		return -EINVAL;
+	}
+
+	*value = feed->scrambling_bits;
+	spin_unlock(&demux->lock);
+
+	return 0;
+}
+
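+/* Feed a caller-supplied TS buffer directly to the feed's TS callback */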
+static int dvbdmx_ts_insertion_insert_buffer(struct dmx_ts_feed *ts_feed,
+			char *data, size_t size)
+{
+	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+	struct dvb_demux *demux = feed->demux;
+
+	spin_lock(&demux->lock);
+	if (!ts_feed->is_filtering) {
+		spin_unlock(&demux->lock);
+		return 0;
+	}
+
+	feed->cb.ts(data, size, NULL, 0, ts_feed);
+
+	spin_unlock(&demux->lock);
+
+	return 0;
+}
+
+static int dmx_ts_set_tsp_out_format(
+	struct dmx_ts_feed *ts_feed,
+	enum dmx_tsp_format_t tsp_format)
+{
+	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+	struct dvb_demux *dvbdmx = feed->demux;
+
+	mutex_lock(&dvbdmx->mutex);
+
+	if (feed->state == DMX_STATE_GO) {
+		mutex_unlock(&dvbdmx->mutex);
+		return -EINVAL;
+	}
+
+	feed->tsp_out_format = tsp_format;
+	mutex_unlock(&dvbdmx->mutex);
+	return 0;
+}
+
+/**
+ * dvbdmx_ts_reset_pes_state() - Reset the current PES length and PES counters
+ *
+ * @feed: dvb demux feed object
+ */
+void dvbdmx_ts_reset_pes_state(struct dvb_demux_feed *feed)
+{
+	unsigned long flags;
+
+	/*
+	 * Reset PES state.
+	 * PUSI seen indication is kept so we can get partial PES.
+	 */
+	spin_lock_irqsave(&feed->demux->lock, flags);
+
+	feed->peslen = 0;
+	feed->pes_tei_counter = 0;
+	feed->pes_cont_err_counter = 0;
+	feed->pes_ts_packets_num = 0;
+
+	spin_unlock_irqrestore(&feed->demux->lock, flags);
+}
+EXPORT_SYMBOL(dvbdmx_ts_reset_pes_state);
+
+static int dvbdmx_ts_flush_buffer(struct dmx_ts_feed *ts_feed, size_t length)
+{
+	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+	struct dvb_demux *demux = feed->demux;
+	int ret = 0;
+
+	if (mutex_lock_interruptible(&demux->mutex))
+		return -ERESTARTSYS;
+
+	dvbdmx_ts_reset_pes_state(feed);
+
+	if ((feed->ts_type & TS_DECODER) && demux->flush_decoder_buffer)
+		/* Call decoder specific flushing if one exists */
+		ret = demux->flush_decoder_buffer(feed, length);
+
+	mutex_unlock(&demux->mutex);
+	return ret;
+}
+
 static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
 				   struct dmx_ts_feed **ts_feed,
 				   dmx_ts_cb callback)
@@ -795,8 +2685,21 @@
 	feed->cb.ts = callback;
 	feed->demux = demux;
 	feed->pid = 0xffff;
-	feed->peslen = 0xfffa;
+	feed->peslen = 0;
+	feed->pes_tei_counter = 0;
+	feed->pes_ts_packets_num = 0;
+	feed->pes_cont_err_counter = 0;
+	feed->secure_mode.is_secured = 0;
 	feed->buffer = NULL;
+	feed->tsp_out_format = DMX_TSP_FORMAT_188;
+	feed->idx_params.enable = 0;
+
+	/*
+	 * Default behaviour: pass the first PES data even if it is partial
+	 * data from a previous PES whose header was not received.
+	 * Override pusi_seen to 0 in your start_feed function to handle the
+	 * first PES differently.
+	 */
+	feed->pusi_seen = 1;
 
 	(*ts_feed) = &feed->feed.ts;
 	(*ts_feed)->parent = dmx;
@@ -805,6 +2708,22 @@
 	(*ts_feed)->start_filtering = dmx_ts_feed_start_filtering;
 	(*ts_feed)->stop_filtering = dmx_ts_feed_stop_filtering;
 	(*ts_feed)->set = dmx_ts_feed_set;
+	(*ts_feed)->set_video_codec = dmx_ts_set_video_codec;
+	(*ts_feed)->set_idx_params = dmx_ts_set_idx_params;
+	(*ts_feed)->set_tsp_out_format = dmx_ts_set_tsp_out_format;
+	(*ts_feed)->get_decoder_buff_status = dmx_ts_feed_decoder_buff_status;
+	(*ts_feed)->reuse_decoder_buffer = dmx_ts_feed_reuse_decoder_buffer;
+	(*ts_feed)->data_ready_cb = dmx_ts_feed_data_ready_cb;
+	(*ts_feed)->notify_data_read = NULL;
+	(*ts_feed)->set_secure_mode = dmx_ts_set_secure_mode;
+	(*ts_feed)->set_cipher_ops = dmx_ts_set_cipher_ops;
+	(*ts_feed)->oob_command = dvbdmx_ts_feed_oob_cmd;
+	(*ts_feed)->get_scrambling_bits = dvbdmx_ts_get_scrambling_bits;
+	(*ts_feed)->ts_insertion_init = NULL;
+	(*ts_feed)->ts_insertion_terminate = NULL;
+	(*ts_feed)->ts_insertion_insert_buffer =
+		dvbdmx_ts_insertion_insert_buffer;
+	(*ts_feed)->flush_buffer = dvbdmx_ts_flush_buffer;
 
 	if (!(feed->filter = dvb_dmx_filter_alloc(demux))) {
 		feed->state = DMX_STATE_FREE;
@@ -840,7 +2759,7 @@
 
 	feed->state = DMX_STATE_FREE;
 	feed->filter->state = DMX_STATE_FREE;
-
+	ts_feed->priv = NULL;
 	dvb_demux_feed_del(feed);
 
 	feed->pid = 0xffff;
@@ -966,6 +2885,8 @@
 	dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base;
 	dvbdmxfeed->feed.sec.secbufp = 0;
 	dvbdmxfeed->feed.sec.seclen = 0;
+	dvbdmxfeed->first_cc = 1;
+	dvbdmxfeed->scrambling_bits = 0;
 
 	if (!dvbdmx->start_feed) {
 		mutex_unlock(&dvbdmx->mutex);
@@ -996,6 +2917,11 @@
 
 	mutex_lock(&dvbdmx->mutex);
 
+	if (dvbdmxfeed->state < DMX_STATE_GO) {
+		mutex_unlock(&dvbdmx->mutex);
+		return -EINVAL;
+	}
+
 	if (!dvbdmx->stop_feed) {
 		mutex_unlock(&dvbdmx->mutex);
 		return -ENODEV;
@@ -1012,6 +2938,66 @@
 	return ret;
 }
 
+
+static int dmx_section_feed_data_ready_cb(struct dmx_section_feed *feed,
+				dmx_section_data_ready_cb callback)
+{
+	struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+	struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+	mutex_lock(&dvbdmx->mutex);
+
+	if (dvbdmxfeed->state == DMX_STATE_GO) {
+		mutex_unlock(&dvbdmx->mutex);
+		return -EINVAL;
+	}
+
+	dvbdmxfeed->data_ready_cb.sec = callback;
+
+	mutex_unlock(&dvbdmx->mutex);
+	return 0;
+}
+
+static int dmx_section_set_secure_mode(struct dmx_section_feed *feed,
+				struct dmx_secure_mode *secure_mode)
+{
+	struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+	struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+	mutex_lock(&dvbdmx->mutex);
+
+	if (dvbdmxfeed->state == DMX_STATE_GO) {
+		mutex_unlock(&dvbdmx->mutex);
+		return -EBUSY;
+	}
+
+	dvbdmxfeed->secure_mode = *secure_mode;
+	mutex_unlock(&dvbdmx->mutex);
+	return 0;
+}
+
+static int dmx_section_set_cipher_ops(struct dmx_section_feed *feed,
+				struct dmx_cipher_operations *cipher_ops)
+{
+	struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+	struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+	int ret = 0;
+
+	if (mutex_lock_interruptible(&dvbdmx->mutex))
+		return -ERESTARTSYS;
+
+	if ((dvbdmxfeed->state == DMX_STATE_GO) &&
+		dvbdmx->set_cipher_op) {
+		ret = dvbdmx->set_cipher_op(dvbdmxfeed, cipher_ops);
+	}
+
+	if (!ret)
+		dvbdmxfeed->cipher_ops = *cipher_ops;
+
+	mutex_unlock(&dvbdmx->mutex);
+	return ret;
+}
+
 static int dmx_section_feed_release_filter(struct dmx_section_feed *feed,
 					   struct dmx_section_filter *filter)
 {
@@ -1045,12 +3031,82 @@
 		f->next = f->next->next;
 	}
 
+	filter->priv = NULL;
 	dvbdmxfilter->state = DMX_STATE_FREE;
 	spin_unlock_irq(&dvbdmx->lock);
 	mutex_unlock(&dvbdmx->mutex);
 	return 0;
 }
 
+static int dvbdmx_section_feed_oob_cmd(struct dmx_section_feed *section_feed,
+		struct dmx_oob_command *cmd)
+{
+	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)section_feed;
+	struct dvb_demux *dvbdmx = feed->demux;
+	struct dmx_data_ready data;
+	int ret = 0;
+
+	data.data_length = 0;
+
+	mutex_lock(&dvbdmx->mutex);
+
+	if (feed->state != DMX_STATE_GO) {
+		mutex_unlock(&dvbdmx->mutex);
+		return -EINVAL;
+	}
+
+	/* Secure section feeds are handled by the plug-in */
+	if (feed->secure_mode.is_secured) {
+		if (feed->demux->oob_command)
+			ret = feed->demux->oob_command(feed, cmd);
+		else
+			ret = 0;
+
+		mutex_unlock(&dvbdmx->mutex);
+		return ret;
+	}
+
+	switch (cmd->type) {
+	case DMX_OOB_CMD_EOS:
+		data.status = DMX_OK_EOS;
+		break;
+
+	case DMX_OOB_CMD_MARKER:
+		data.status = DMX_OK_MARKER;
+		data.marker.id = cmd->params.marker.id;
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (!ret)
+		ret = dvb_dmx_notify_section_event(feed, &data, 1);
+
+	mutex_unlock(&dvbdmx->mutex);
+	return ret;
+}
+
+static int dvbdmx_section_get_scrambling_bits(
+	struct dmx_section_feed *section_feed, u8 *value)
+{
+	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)section_feed;
+	struct dvb_demux *demux = feed->demux;
+
+	spin_lock(&demux->lock);
+
+	if (!section_feed->is_filtering) {
+		spin_unlock(&demux->lock);
+		return -EINVAL;
+	}
+
+	*value = feed->scrambling_bits;
+	spin_unlock(&demux->lock);
+
+	return 0;
+}
+
 static int dvbdmx_allocate_section_feed(struct dmx_demux *demux,
 					struct dmx_section_feed **feed,
 					dmx_section_cb callback)
@@ -1070,11 +3126,14 @@
 	dvbdmxfeed->cb.sec = callback;
 	dvbdmxfeed->demux = dvbdmx;
 	dvbdmxfeed->pid = 0xffff;
+	dvbdmxfeed->secure_mode.is_secured = 0;
+	dvbdmxfeed->tsp_out_format = DMX_TSP_FORMAT_188;
 	dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base;
 	dvbdmxfeed->feed.sec.secbufp = dvbdmxfeed->feed.sec.seclen = 0;
 	dvbdmxfeed->feed.sec.tsfeedp = 0;
 	dvbdmxfeed->filter = NULL;
 	dvbdmxfeed->buffer = NULL;
+	dvbdmxfeed->idx_params.enable = 0;
 
 	(*feed) = &dvbdmxfeed->feed.sec;
 	(*feed)->is_filtering = 0;
@@ -1086,6 +3145,13 @@
 	(*feed)->start_filtering = dmx_section_feed_start_filtering;
 	(*feed)->stop_filtering = dmx_section_feed_stop_filtering;
 	(*feed)->release_filter = dmx_section_feed_release_filter;
+	(*feed)->data_ready_cb = dmx_section_feed_data_ready_cb;
+	(*feed)->notify_data_read = NULL;
+	(*feed)->set_secure_mode = dmx_section_set_secure_mode;
+	(*feed)->set_cipher_ops = dmx_section_set_cipher_ops;
+	(*feed)->oob_command = dvbdmx_section_feed_oob_cmd;
+	(*feed)->get_scrambling_bits = dvbdmx_section_get_scrambling_bits;
+	(*feed)->flush_buffer = NULL;
 
 	mutex_unlock(&dvbdmx->mutex);
 	return 0;
@@ -1108,7 +3174,7 @@
 	dvbdmxfeed->buffer = NULL;
 #endif
 	dvbdmxfeed->state = DMX_STATE_FREE;
-
+	feed->priv = NULL;
 	dvb_demux_feed_del(dvbdmxfeed);
 
 	dvbdmxfeed->pid = 0xffff;
@@ -1144,23 +3210,18 @@
 	return 0;
 }
 
-static int dvbdmx_write(struct dmx_demux *demux, const char __user *buf, size_t count)
+static int dvbdmx_write(struct dmx_demux *demux, const char *buf, size_t count)
 {
 	struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
-	void *p;
 
-	if ((!demux->frontend) || (demux->frontend->source != DMX_MEMORY_FE))
+	if (!demux->frontend || !buf || demux->dvr_input_protected ||
+		(demux->frontend->source != DMX_MEMORY_FE))
 		return -EINVAL;
-
-	p = memdup_user(buf, count);
-	if (IS_ERR(p))
-		return PTR_ERR(p);
-	if (mutex_lock_interruptible(&dvbdemux->mutex)) {
-		kfree(p);
+	if (mutex_lock_interruptible(&dvbdemux->mutex))
 		return -ERESTARTSYS;
-	}
-	dvb_dmx_swfilter(dvbdemux, p, count);
-	kfree(p);
+
+	dvb_dmx_swfilter_format(dvbdemux, buf, count, dvbdemux->tsp_format);
+
 	mutex_unlock(&dvbdemux->mutex);
 
 	if (signal_pending(current))
@@ -1168,6 +3229,40 @@
 	return count;
 }
 
+static int dvbdmx_write_cancel(struct dmx_demux *demux)
+{
+	struct dvb_demux *dvbdmx = (struct dvb_demux *)demux;
+
+	spin_lock_irq(&dvbdmx->lock);
+
+	/* cancel any pending wait for decoder's buffers */
+	dvbdmx->sw_filter_abort = 1;
+	dvbdmx->tsbufp = 0;
+	dvb_dmx_configure_decoder_fullness(dvbdmx, 0);
+
+	spin_unlock_irq(&dvbdmx->lock);
+
+	return 0;
+}
+
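+/* Set playback mode and the TS/section buffer fullness callbacks */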
+static int dvbdmx_set_playback_mode(struct dmx_demux *demux,
+				 enum dmx_playback_mode_t mode,
+				 dmx_ts_fullness ts_fullness_callback,
+				 dmx_section_fullness sec_fullness_callback)
+{
+	struct dvb_demux *dvbdmx = (struct dvb_demux *)demux;
+
+	mutex_lock(&dvbdmx->mutex);
+
+	dvbdmx->playback_mode = mode;
+	dvbdmx->buffer_ctrl.ts = ts_fullness_callback;
+	dvbdmx->buffer_ctrl.sec = sec_fullness_callback;
+
+	mutex_unlock(&dvbdmx->mutex);
+
+	return 0;
+}
+
 static int dvbdmx_add_frontend(struct dmx_demux *demux,
 			       struct dmx_frontend *frontend)
 {
@@ -1225,7 +3320,7 @@
 	struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
 
 	mutex_lock(&dvbdemux->mutex);
-
+	dvbdemux->sw_filter_abort = 0;
 	demux->frontend = NULL;
 	mutex_unlock(&dvbdemux->mutex);
 	return 0;
@@ -1235,7 +3330,50 @@
 {
 	struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
 
-	memcpy(pids, dvbdemux->pids, 5 * sizeof(u16));
+	/* 4 demux instances, each with a group of 5 PIDs */
+	memcpy(pids, dvbdemux->pids, DMX_PES_OTHER * sizeof(u16));
+	return 0;
+}
+
+static int dvbdmx_get_tsp_size(struct dmx_demux *demux)
+{
+	int tsp_size;
+	struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
+
+	mutex_lock(&dvbdemux->mutex);
+	tsp_size = dvbdemux->ts_packet_size;
+	mutex_unlock(&dvbdemux->mutex);
+
+	return tsp_size;
+}
+
+static int dvbdmx_set_tsp_format(
+	struct dmx_demux *demux,
+	enum dmx_tsp_format_t tsp_format)
+{
+	struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
+
+	if ((tsp_format > DMX_TSP_FORMAT_204) ||
+		(tsp_format < DMX_TSP_FORMAT_188))
+		return -EINVAL;
+
+	mutex_lock(&dvbdemux->mutex);
+
+	dvbdemux->tsp_format = tsp_format;
+	switch (tsp_format) {
+	case DMX_TSP_FORMAT_188:
+		dvbdemux->ts_packet_size = 188;
+		break;
+	case DMX_TSP_FORMAT_192_TAIL:
+	case DMX_TSP_FORMAT_192_HEAD:
+		dvbdemux->ts_packet_size = 192;
+		break;
+	case DMX_TSP_FORMAT_204:
+		dvbdemux->ts_packet_size = 204;
+		break;
+	}
+
+	mutex_unlock(&dvbdemux->mutex);
 	return 0;
 }
 
@@ -1257,13 +3395,50 @@
 		dvbdemux->filter = NULL;
 		return -ENOMEM;
 	}
+
+	dvbdemux->rec_info_pool = vmalloc(dvbdemux->feednum *
+		sizeof(struct dvb_demux_rec_info));
+	if (!dvbdemux->rec_info_pool) {
+		vfree(dvbdemux->feed);
+		vfree(dvbdemux->filter);
+		dvbdemux->feed = NULL;
+		dvbdemux->filter = NULL;
+		return -ENOMEM;
+	}
+
+	dvbdemux->sw_filter_abort = 0;
+	dvbdemux->total_process_time = 0;
+	dvbdemux->total_crc_time = 0;
+	snprintf(dvbdemux->alias,
+			MAX_DVB_DEMUX_NAME_LEN,
+			"demux%d",
+			dvb_demux_index++);
+
+	dvbdemux->dmx.debugfs_demux_dir =
+		debugfs_create_dir(dvbdemux->alias, NULL);
+
+	if (dvbdemux->dmx.debugfs_demux_dir != NULL) {
+		debugfs_create_u32(
+			"total_processing_time", 0664,
+			dvbdemux->dmx.debugfs_demux_dir,
+			&dvbdemux->total_process_time);
+
+		debugfs_create_u32(
+			"total_crc_time", 0664,
+			dvbdemux->dmx.debugfs_demux_dir,
+			&dvbdemux->total_crc_time);
+	}
+
 	for (i = 0; i < dvbdemux->filternum; i++) {
 		dvbdemux->filter[i].state = DMX_STATE_FREE;
 		dvbdemux->filter[i].index = i;
 	}
+
 	for (i = 0; i < dvbdemux->feednum; i++) {
 		dvbdemux->feed[i].state = DMX_STATE_FREE;
 		dvbdemux->feed[i].index = i;
+
+		dvbdemux->rec_info_pool[i].ref_count = 0;
 	}
 
 	dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
@@ -1283,6 +3458,9 @@
 	dvbdemux->recording = 0;
 	dvbdemux->tsbufp = 0;
 
+	dvbdemux->tsp_format = DMX_TSP_FORMAT_188;
+	dvbdemux->ts_packet_size = 188;
+
 	if (!dvbdemux->check_crc32)
 		dvbdemux->check_crc32 = dvb_dmx_crc32;
 
@@ -1294,10 +3472,14 @@
 	dmx->open = dvbdmx_open;
 	dmx->close = dvbdmx_close;
 	dmx->write = dvbdmx_write;
+	dmx->write_cancel = dvbdmx_write_cancel;
+	dmx->set_playback_mode = dvbdmx_set_playback_mode;
 	dmx->allocate_ts_feed = dvbdmx_allocate_ts_feed;
 	dmx->release_ts_feed = dvbdmx_release_ts_feed;
 	dmx->allocate_section_feed = dvbdmx_allocate_section_feed;
 	dmx->release_section_feed = dvbdmx_release_section_feed;
+	dmx->map_buffer = NULL;
+	dmx->unmap_buffer = NULL;
 
 	dmx->add_frontend = dvbdmx_add_frontend;
 	dmx->remove_frontend = dvbdmx_remove_frontend;
@@ -1306,6 +3488,9 @@
 	dmx->disconnect_frontend = dvbdmx_disconnect_frontend;
 	dmx->get_pes_pids = dvbdmx_get_pes_pids;
 
+	dmx->set_tsp_format = dvbdmx_set_tsp_format;
+	dmx->get_tsp_size = dvbdmx_get_tsp_size;
+
 	mutex_init(&dvbdemux->mutex);
 	spin_lock_init(&dvbdemux->lock);
 
@@ -1316,9 +3501,14 @@
 
 void dvb_dmx_release(struct dvb_demux *dvbdemux)
 {
+	if (dvbdemux->dmx.debugfs_demux_dir != NULL)
+		debugfs_remove_recursive(dvbdemux->dmx.debugfs_demux_dir);
+
+	dvb_demux_index--;
 	vfree(dvbdemux->cnt_storage);
 	vfree(dvbdemux->filter);
 	vfree(dvbdemux->feed);
+	vfree(dvbdemux->rec_info_pool);
 }
 
 EXPORT_SYMBOL(dvb_dmx_release);
diff --git a/drivers/media/dvb-core/dvb_demux.h b/drivers/media/dvb-core/dvb_demux.h
index 5ed3cab..7ba053d 100644
--- a/drivers/media/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb-core/dvb_demux.h
@@ -27,6 +27,7 @@
 #include <linux/timer.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
+#include <linux/debugfs.h>
 
 #include "demux.h"
 
@@ -44,6 +45,8 @@
 
 #define MAX_PID 0x1fff
 
+#define TIMESTAMP_LEN	4
+
 #define SPEED_PKTS_INTERVAL 50000
 
 struct dvb_demux_filter {
@@ -64,6 +67,92 @@
 
 #define DMX_FEED_ENTRY(pos) list_entry(pos, struct dvb_demux_feed, list_head)
 
+
+struct dmx_index_entry {
+	struct dmx_index_event_info event;
+	struct list_head next;
+};
+
+#define DMX_IDX_EVENT_QUEUE_SIZE	DMX_EVENT_QUEUE_SIZE
+
+struct dvb_demux_rec_info {
+	/* Reference counter for number of feeds using this information */
+	int ref_count;
+
+	/* Counter for number of TS packets output to recording buffer */
+	u64 ts_output_count;
+
+	/* Indexing information */
+	struct {
+		/*
+		 * Minimum TS packet number encountered in recording filter
+		 * among all feeds that search for video patterns
+		 */
+		u64 min_pattern_tsp_num;
+
+		/* Number of indexing-enabled feeds */
+		u8 indexing_feeds_num;
+
+		/* Number of feeds with video pattern search request */
+		u8 pattern_search_feeds_num;
+
+		/* Index entries pool */
+		struct dmx_index_entry events[DMX_IDX_EVENT_QUEUE_SIZE];
+
+		/* List of free entries that can be used for new index events */
+		struct list_head free_list;
+
+		/* List holding ready index entries not notified to user yet */
+		struct list_head ready_list;
+	} idx_info;
+};
+
+#define DVB_DMX_MAX_PATTERN_LEN			6
+struct dvb_dmx_video_patterns {
+	/* the byte pattern to look for */
+	u8 pattern[DVB_DMX_MAX_PATTERN_LEN];
+
+	/* the byte mask to use (same length as pattern) */
+	u8 mask[DVB_DMX_MAX_PATTERN_LEN];
+
+	/* the length of the pattern, in bytes */
+	size_t size;
+
+	/* the type of the pattern. One of DMX_IDX_* definitions */
+	u64 type;
+};
+
+#define DVB_DMX_MAX_FOUND_PATTERNS					20
+#define DVB_DMX_MAX_SEARCH_PATTERN_NUM				20
+struct dvb_dmx_video_prefix_size_masks {
+	/*
+	 * a bit mask (per pattern) of possible prefix sizes to use
+	 * when searching for a pattern that started in the previous TS packet.
+	 * Updated by dvb_dmx_video_pattern_search for use in the next lookup.
+	 */
+	u32 size_mask[DVB_DMX_MAX_FOUND_PATTERNS];
+};
+
+struct dvb_dmx_video_patterns_results {
+	struct {
+		/*
+		 * The offset in the buffer where the pattern was found.
+		 * If a pattern is found using a prefix (i.e. started on the
+		 * previous buffer), offset is zero.
+		 */
+		u32 offset;
+
+		/*
+		 * The type of the pattern found.
+		 * One of DMX_IDX_* definitions.
+		 */
+		u64 type;
+
+		/* The prefix size that was used to find this pattern */
+		u32 used_prefix_size;
+	} info[DVB_DMX_MAX_FOUND_PATTERNS];
+};
+
 struct dvb_demux_feed {
 	union {
 		struct dmx_ts_feed ts;
@@ -75,6 +164,11 @@
 		dmx_section_cb sec;
 	} cb;
 
+	union {
+		dmx_ts_data_ready_cb ts;
+		dmx_section_data_ready_cb sec;
+	} data_ready_cb;
+
 	struct dvb_demux *demux;
 	void *priv;
 	int type;
@@ -82,6 +176,9 @@
 	u16 pid;
 	u8 *buffer;
 	int buffer_size;
+	enum dmx_tsp_format_t tsp_out_format;
+	struct dmx_secure_mode secure_mode;
+	struct dmx_cipher_operations cipher_ops;
 
 	ktime_t timeout;
 	struct dvb_demux_filter *filter;
@@ -90,12 +187,34 @@
 	enum dmx_ts_pes pes_type;
 
 	int cc;
+	int first_cc;
 	int pusi_seen;		/* prevents feeding of garbage from previous section */
 
+	u8 scrambling_bits;
+
+	struct dvb_demux_rec_info *rec_info;
+	u64 prev_tsp_num;
+	u64 prev_stc;
+	u64 curr_pusi_tsp_num;
+	u64 prev_pusi_tsp_num;
+	int prev_frame_valid;
+	u64 prev_frame_type;
+	int first_frame_in_seq;
+	int first_frame_in_seq_notified;
+	u64 last_pattern_tsp_num;
+	int pattern_num;
+	const struct dvb_dmx_video_patterns
+		*patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM];
+	struct dvb_dmx_video_prefix_size_masks prefix_size;
 	u16 peslen;
+	u32 pes_tei_counter;
+	u32 pes_cont_err_counter;
+	u32 pes_ts_packets_num;
 
 	struct list_head list_head;
 	unsigned int index;	/* a unique index for each feed (can be used as hardware pid filter index) */
+
+	enum dmx_video_codec video_codec;
+	struct dmx_indexing_params idx_params;
 };
 
 struct dvb_demux {
@@ -107,10 +226,27 @@
 	int (*stop_feed)(struct dvb_demux_feed *feed);
 	int (*write_to_decoder)(struct dvb_demux_feed *feed,
 				 const u8 *buf, size_t len);
+	int (*decoder_fullness_init)(struct dvb_demux_feed *feed);
+	int (*decoder_fullness_wait)(struct dvb_demux_feed *feed,
+				 size_t required_space);
+	int (*decoder_fullness_abort)(struct dvb_demux_feed *feed);
+	int (*decoder_buffer_status)(struct dvb_demux_feed *feed,
+				struct dmx_buffer_status *dmx_buffer_status);
+	int (*reuse_decoder_buffer)(struct dvb_demux_feed *feed,
+				int cookie);
+	int (*set_cipher_op)(struct dvb_demux_feed *feed,
+				struct dmx_cipher_operations *cipher_ops);
 	u32 (*check_crc32)(struct dvb_demux_feed *feed,
 			    const u8 *buf, size_t len);
 	void (*memcopy)(struct dvb_demux_feed *feed, u8 *dst,
 			 const u8 *src, size_t len);
+	int (*oob_command)(struct dvb_demux_feed *feed,
+		struct dmx_oob_command *cmd);
+	void (*convert_ts)(struct dvb_demux_feed *feed,
+			 const u8 timestamp[TIMESTAMP_LEN],
+			 u64 *timestampIn27Mhz);
+	int (*set_indexing)(struct dvb_demux_feed *feed);
+	int (*flush_decoder_buffer)(struct dvb_demux_feed *feed, size_t length);
 
 	int users;
 #define MAX_DVB_DEMUX_USERS 10
@@ -136,10 +272,35 @@
 
 	ktime_t speed_last_time; /* for TS speed check */
 	uint32_t speed_pkts_cnt; /* for TS speed check */
+
+	enum dmx_tsp_format_t tsp_format;
+	size_t ts_packet_size;
+
+	enum dmx_playback_mode_t playback_mode;
+	int sw_filter_abort;
+
+	struct {
+		dmx_ts_fullness ts;
+		dmx_section_fullness sec;
+	} buffer_ctrl;
+
+	struct dvb_demux_rec_info *rec_info_pool;
+
+	/*
+	 * The following fields are used to expose dvb demux performance
+	 * information via debugfs.
+	 */
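+	/* Exposed as files under <debugfs>/demux<N>/ by dvb_dmx_init() */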
+#define MAX_DVB_DEMUX_NAME_LEN 10
+	char alias[MAX_DVB_DEMUX_NAME_LEN];
+
+	u32 total_process_time;
+	u32 total_crc_time;
 };
 
 int dvb_dmx_init(struct dvb_demux *dvbdemux);
 void dvb_dmx_release(struct dvb_demux *dvbdemux);
+int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, const u8 *buf,
+	int should_lock);
 void dvb_dmx_swfilter_packets(struct dvb_demux *dvbdmx, const u8 *buf,
 			      size_t count);
 void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count);
@@ -147,5 +308,141 @@
 			  size_t count);
 void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf,
 			  size_t count);
+void dvb_dmx_swfilter_format(
+			struct dvb_demux *demux, const u8 *buf,
+			size_t count,
+			enum dmx_tsp_format_t tsp_format);
+void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf,
+				const u8 timestamp[TIMESTAMP_LEN]);
+const struct dvb_dmx_video_patterns *dvb_dmx_get_pattern(u64 dmx_idx_pattern);
+int dvb_dmx_video_pattern_search(
+		const struct dvb_dmx_video_patterns
+			*patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM],
+		int patterns_num,
+		const u8 *buf, size_t buf_size,
+		struct dvb_dmx_video_prefix_size_masks *prefix_size_masks,
+		struct dvb_dmx_video_patterns_results *results);
+int dvb_demux_push_idx_event(struct dvb_demux_feed *feed,
+		struct dmx_index_event_info *idx_event, int should_lock);
+void dvb_dmx_process_idx_pattern(struct dvb_demux_feed *feed,
+		struct dvb_dmx_video_patterns_results *patterns, int pattern,
+		u64 curr_stc, u64 prev_stc,
+		u64 curr_match_tsp, u64 prev_match_tsp,
+		u64 curr_pusi_tsp, u64 prev_pusi_tsp);
+void dvb_dmx_notify_idx_events(struct dvb_demux_feed *feed, int should_lock);
+int dvb_dmx_notify_section_event(struct dvb_demux_feed *feed,
+	struct dmx_data_ready *event, int should_lock);
+void dvbdmx_ts_reset_pes_state(struct dvb_demux_feed *feed);
+
+/**
+ * dvb_dmx_is_video_feed - Returns whether the PES feed is a video feed
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is a video feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_video_feed(struct dvb_demux_feed *feed)
+{
+	if (feed->type != DMX_TYPE_TS)
+		return 0;
+
+	if (feed->ts_type & (~TS_DECODER))
+		return 0;
+
+	if ((feed->pes_type == DMX_PES_VIDEO0) ||
+		(feed->pes_type == DMX_PES_VIDEO1) ||
+		(feed->pes_type == DMX_PES_VIDEO2) ||
+		(feed->pes_type == DMX_PES_VIDEO3))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * dvb_dmx_is_audio_feed - Returns whether the PES feed is an audio feed
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is an audio feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_audio_feed(struct dvb_demux_feed *feed)
+{
+	if (feed->type != DMX_TYPE_TS)
+		return 0;
+
+	if (feed->ts_type & (~TS_DECODER))
+		return 0;
+
+	if ((feed->pes_type == DMX_PES_AUDIO0) ||
+		(feed->pes_type == DMX_PES_AUDIO1) ||
+		(feed->pes_type == DMX_PES_AUDIO2) ||
+		(feed->pes_type == DMX_PES_AUDIO3))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * dvb_dmx_is_pcr_feed - Returns whether the PES feed is a PCR feed
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is a PCR feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_pcr_feed(struct dvb_demux_feed *feed)
+{
+	if (feed->type != DMX_TYPE_TS)
+		return 0;
+
+	if (feed->ts_type & (~TS_DECODER))
+		return 0;
+
+	if ((feed->pes_type == DMX_PES_PCR0) ||
+		(feed->pes_type == DMX_PES_PCR1) ||
+		(feed->pes_type == DMX_PES_PCR2) ||
+		(feed->pes_type == DMX_PES_PCR3))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * dvb_dmx_is_sec_feed - Returns whether this is a section feed
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is a section feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_sec_feed(struct dvb_demux_feed *feed)
+{
+	return (feed->type == DMX_TYPE_SEC);
+}
+
+/**
+ * dvb_dmx_is_rec_feed - Returns whether this is a recording feed
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is recording feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_rec_feed(struct dvb_demux_feed *feed)
+{
+	if (feed->type != DMX_TYPE_TS)
+		return 0;
+
+	if (feed->ts_type & (TS_DECODER | TS_PAYLOAD_ONLY))
+		return 0;
+
+	return 1;
+}
+
+static inline u16 ts_pid(const u8 *buf)
+{
+	return ((buf[1] & 0x1f) << 8) + buf[2];
+}
+
 
 #endif /* _DVB_DEMUX_H_ */
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 9914f69..efb7d52 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -997,7 +997,7 @@
 		netdev_dbg(dev, "start filtering\n");
 		priv->secfeed->start_filtering(priv->secfeed);
 	} else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
-		ktime_t timeout = ns_to_ktime(10 * NSEC_PER_MSEC);
+		ktime_t timeout = ktime_set(0, 10*NSEC_PER_MSEC); /* 10 msec */
 
 		/* we have payloads encapsulated in TS */
 		netdev_dbg(dev, "alloc tsfeed\n");
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c
index 7df7fb3..d4514c1 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.c
+++ b/drivers/media/dvb-core/dvb_ringbuffer.c
@@ -37,6 +37,8 @@
 
 #define PKT_READY 0
 #define PKT_DISPOSED 1
+#define PKT_PENDING 2
+
 
 
 void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len)
@@ -209,18 +211,19 @@
 }
 
 ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
-				  const u8 __user *buf, size_t len)
+					const u8 __user *buf, size_t len)
 {
-	int status;
 	size_t todo = len;
 	size_t split;
+	ssize_t oldpwrite = rbuf->pwrite;
 
-	split = (rbuf->pwrite + len > rbuf->size) ? rbuf->size - rbuf->pwrite : 0;
+	split = (rbuf->pwrite + len > rbuf->size) ?
+			rbuf->size - rbuf->pwrite :
+			0;
 
 	if (split > 0) {
-		status = copy_from_user(rbuf->data+rbuf->pwrite, buf, split);
-		if (status)
-			return len - todo;
+		if (copy_from_user(rbuf->data + rbuf->pwrite, buf, split))
+			return -EFAULT;
 		buf += split;
 		todo -= split;
 		/* smp_store_release() for write pointer update to ensure that
@@ -230,9 +233,12 @@
 		 */
 		smp_store_release(&rbuf->pwrite, 0);
 	}
-	status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo);
-	if (status)
-		return len - todo;
+
+	if (copy_from_user(rbuf->data + rbuf->pwrite, buf, todo)) {
+		/* smp_store_release() for write pointer update */
+		smp_store_release(&rbuf->pwrite, oldpwrite);
+		return -EFAULT;
+	}
 	/* smp_store_release() for write pointer update, see above */
 	smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);
 
@@ -253,6 +259,31 @@
 	return status;
 }
 
+ssize_t dvb_ringbuffer_pkt_start(struct dvb_ringbuffer *rbuf, size_t len)
+{
+	ssize_t oldpwrite = rbuf->pwrite;
+
+	DVB_RINGBUFFER_WRITE_BYTE(rbuf, len >> 8);
+	DVB_RINGBUFFER_WRITE_BYTE(rbuf, len & 0xff);
+	DVB_RINGBUFFER_WRITE_BYTE(rbuf, PKT_PENDING);
+
+	return oldpwrite;
+}
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_start);
+
+int dvb_ringbuffer_pkt_close(struct dvb_ringbuffer *rbuf, ssize_t idx)
+{
+	idx = (idx + 2) % rbuf->size;
+
+	if (rbuf->data[idx] != PKT_PENDING)
+		return -EINVAL;
+
+	rbuf->data[idx] = PKT_READY;
+
+	return 0;
+}
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_close);
+
 ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx,
 				int offset, u8 __user *buf, size_t len)
 {
@@ -260,6 +291,9 @@
 	size_t split;
 	size_t pktlen;
 
+	if (DVB_RINGBUFFER_PEEK(rbuf, (idx+2)) != PKT_READY)
+		return -EINVAL;
+
 	pktlen = rbuf->data[idx] << 8;
 	pktlen |= rbuf->data[(idx + 1) % rbuf->size];
 	if (offset > pktlen) return -EINVAL;
@@ -280,6 +314,7 @@
 
 	return len;
 }
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_read_user);
 
 ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
 				int offset, u8* buf, size_t len)
@@ -288,6 +323,9 @@
 	size_t split;
 	size_t pktlen;
 
+	if (rbuf->data[(idx + 2) % rbuf->size] != PKT_READY)
+		return -EINVAL;
+
 	pktlen = rbuf->data[idx] << 8;
 	pktlen |= rbuf->data[(idx + 1) % rbuf->size];
 	if (offset > pktlen) return -EINVAL;
@@ -305,6 +343,7 @@
 	memcpy(buf, rbuf->data+idx, todo);
 	return len;
 }
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_read);
 
 void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx)
 {
@@ -324,6 +363,7 @@
 		}
 	}
 }
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_dispose);
 
 ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t* pktlen)
 {
@@ -339,7 +379,10 @@
 		idx = (idx + curpktlen + DVB_RINGBUFFER_PKTHDRSIZE) % rbuf->size;
 	}
 
-	consumed = (idx - rbuf->pread) % rbuf->size;
+	if (idx >= rbuf->pread)
+		consumed = idx - rbuf->pread;
+	else
+		consumed = rbuf->size - (rbuf->pread - idx);
 
 	while((dvb_ringbuffer_avail(rbuf) - consumed) > DVB_RINGBUFFER_PKTHDRSIZE) {
 
@@ -352,6 +395,9 @@
 			return idx;
 		}
 
+		if (curpktstatus == PKT_PENDING)
+			return -EFAULT;
+
 		consumed += curpktlen + DVB_RINGBUFFER_PKTHDRSIZE;
 		idx = (idx + curpktlen + DVB_RINGBUFFER_PKTHDRSIZE) % rbuf->size;
 	}
@@ -359,8 +405,7 @@
 	// no packets available
 	return -1;
 }
-
-
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_next);
 
 EXPORT_SYMBOL(dvb_ringbuffer_init);
 EXPORT_SYMBOL(dvb_ringbuffer_empty);
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.h b/drivers/media/dvb-core/dvb_ringbuffer.h
index bbe9487..900630e 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.h
+++ b/drivers/media/dvb-core/dvb_ringbuffer.h
@@ -124,6 +124,9 @@
  */
 #define DVB_RINGBUFFER_PEEK(rbuf, offs)	\
 			((rbuf)->data[((rbuf)->pread + (offs)) % (rbuf)->size])
+#define DVB_RINGBUFFER_PUSH(rbuf, num)	\
+			((rbuf)->pwrite = (((rbuf)->pwrite+(num))%(rbuf)->size))
+
 
 /**
  * DVB_RINGBUFFER_SKIP - advance read ptr by @num bytes
@@ -274,7 +277,35 @@
  *          in bytes.
  * returns Packet index (if >=0), or -1 if no packets available.
  */
-extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf,
-				       size_t idx, size_t *pktlen);
+extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx,
+				       size_t *pktlen);
+
+
+/**
+ * Start a new packet that will be written directly by the user to the packet
+ * buffer.
+ * The function only writes the header of the packet into the packet buffer,
+ * and the packet is in pending state (can't be read by the reader) until it is
+ * closed using dvb_ringbuffer_pkt_close. You must write the data into the
+ * packet buffer using dvb_ringbuffer_write followed by
+ * dvb_ringbuffer_pkt_close.
+ *
+ * @rbuf: Ringbuffer concerned.
+ * @len: Size of the packet's data
+ * returns Index of the packet's header that was started.
+ */
+extern ssize_t dvb_ringbuffer_pkt_start(struct dvb_ringbuffer *rbuf,
+						size_t len);
+
+/**
+ * Close a packet that was started using dvb_ringbuffer_pkt_start.
+ * The packet will be marked as ready to be read.
+ *
+ * @rbuf: Ringbuffer concerned.
+ * @idx: Packet index that was returned by dvb_ringbuffer_pkt_start
+ * returns error status, -EINVAL if the provided index is invalid
+ */
+extern int dvb_ringbuffer_pkt_close(struct dvb_ringbuffer *rbuf, ssize_t idx);
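+
+/*
+ * Illustrative usage sketch (not part of this patch): writing one packet
+ * with the pending/ready protocol described above. 'rbuf', 'data' and 'len'
+ * are assumed to be a valid ringbuffer, source buffer and data length.
+ *
+ *	ssize_t idx = dvb_ringbuffer_pkt_start(rbuf, len);
+ *	dvb_ringbuffer_write(rbuf, data, len);
+ *	dvb_ringbuffer_pkt_close(rbuf, idx);
+ */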
+
 
 #endif /* _DVB_RINGBUFFER_H_ */
diff --git a/drivers/media/platform/msm/Kconfig b/drivers/media/platform/msm/Kconfig
index d5d873c..484819d 100644
--- a/drivers/media/platform/msm/Kconfig
+++ b/drivers/media/platform/msm/Kconfig
@@ -16,3 +16,5 @@
 source "drivers/media/platform/msm/vidc/Kconfig"
 
 source "drivers/media/platform/msm/sde/Kconfig"
+source "drivers/media/platform/msm/dvb/Kconfig"
+source "drivers/media/platform/msm/broadcast/Kconfig"
diff --git a/drivers/media/platform/msm/Makefile b/drivers/media/platform/msm/Makefile
index adeb2aa..e64bcd1 100644
--- a/drivers/media/platform/msm/Makefile
+++ b/drivers/media/platform/msm/Makefile
@@ -5,3 +5,5 @@
 obj-$(CONFIG_MSM_VIDC_V4L2) += vidc/
 obj-y += sde/
 obj-$(CONFIG_SPECTRA_CAMERA) += camera/
+obj-y += broadcast/
+obj-$(CONFIG_DVB_MPQ) += dvb/
diff --git a/drivers/media/platform/msm/broadcast/Kconfig b/drivers/media/platform/msm/broadcast/Kconfig
new file mode 100644
index 0000000..cdd1b20
--- /dev/null
+++ b/drivers/media/platform/msm/broadcast/Kconfig
@@ -0,0 +1,14 @@
+#
+# MSM Broadcast subsystem drivers
+#
+
+config TSPP
+	depends on ARCH_QCOM
+	tristate "TSPP (Transport Stream Packet Processor) Support"
+	---help---
+	Transport Stream Packet Processor v1 is used to offload the
+	processing of MPEG transport streams from the main processor.
+	It is used to process incoming transport streams from TSIF
+	to support use cases such as transport stream live play
+	and recording.
+	This can also be compiled as a loadable module.
diff --git a/drivers/media/platform/msm/broadcast/Makefile b/drivers/media/platform/msm/broadcast/Makefile
new file mode 100644
index 0000000..3735bdc
--- /dev/null
+++ b/drivers/media/platform/msm/broadcast/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for MSM Broadcast subsystem drivers.
+#
+obj-$(CONFIG_TSPP) += tspp.o
diff --git a/drivers/media/platform/msm/broadcast/tspp.c b/drivers/media/platform/msm/broadcast/tspp.c
new file mode 100644
index 0000000..43b426de
--- /dev/null
+++ b/drivers/media/platform/msm/broadcast/tspp.c
@@ -0,0 +1,3252 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>        /* Just for modules */
+#include <linux/kernel.h>        /* Only for KERN_INFO */
+#include <linux/err.h>           /* Error macros */
+#include <linux/list.h>          /* Linked list */
+#include <linux/cdev.h>
+#include <linux/init.h>          /* Needed for the macros */
+#include <linux/io.h>            /* IO macros */
+#include <linux/device.h>        /* Device drivers need this */
+#include <linux/sched.h>         /* Externally defined globals */
+#include <linux/pm_runtime.h>    /* Runtime power management */
+#include <linux/fs.h>
+#include <linux/uaccess.h>       /* copy_to_user */
+#include <linux/slab.h>          /* kfree, kzalloc */
+#include <linux/ioport.h>        /* XXX_ mem_region */
+#include <linux/dma-mapping.h>   /* dma_XXX */
+#include <linux/dmapool.h>       /* DMA pools */
+#include <linux/delay.h>         /* msleep */
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/poll.h>          /* poll() file op */
+#include <linux/wait.h>          /* wait() macros, sleeping */
+#include <linux/bitops.h>        /* BIT() macro */
+#include <linux/regulator/consumer.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <linux/msm-sps.h>            /* BAM stuff */
+#include <linux/wakelock.h>      /* Wakeup sources */
+#include <linux/timer.h>         /* Timer services */
+#include <linux/jiffies.h>       /* Jiffies counter */
+#include <linux/qcom_tspp.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/string.h>
+#include <linux/msm-bus.h>
+#include <linux/interrupt.h>	/* tasklet */
+#include <asm/arch_timer.h> /* Timer */
+#include <linux/avtimer_kernel.h> /* Timer */
+
+/*
+ * General defines
+ */
+#define TSPP_TSIF_INSTANCES            2
+#define TSPP_GPIOS_PER_TSIF            4
+#define TSPP_FILTER_TABLES             3
+#define TSPP_MAX_DEVICES               1
+#define TSPP_NUM_CHANNELS              16
+#define TSPP_NUM_PRIORITIES            16
+#define TSPP_NUM_KEYS                  8
+#define INVALID_CHANNEL                0xFFFFFFFF
+#define TSPP_BAM_DEFAULT_IPC_LOGLVL    2
+/*
+ * BAM descriptor FIFO size (in number of descriptors).
+ * This is the maximum number of descriptors allowed by SPS, which is 8K-1.
+ */
+#define TSPP_SPS_DESCRIPTOR_COUNT      (8 * 1024 - 1)
+#define TSPP_PACKET_LENGTH             188
+#define TSPP_MIN_BUFFER_SIZE           (TSPP_PACKET_LENGTH)
+
+/* Max descriptor buffer size allowed by SPS */
+#define TSPP_MAX_BUFFER_SIZE           (32 * 1024 - 1)
+
+/*
+ * Returns whether to use DMA pool for TSPP output buffers.
+ * For buffers smaller than page size, using DMA pool
+ * provides better memory utilization as dma_alloc_coherent
+ * allocates minimum of page size.
+ */
+#define TSPP_USE_DMA_POOL(buff_size)   ((buff_size) < PAGE_SIZE)
+
+/*
+ * Max allowed TSPP buffers/descriptors.
+ * If SPS desc FIFO holds X descriptors, we can queue up to X-1 descriptors.
+ */
+#define TSPP_NUM_BUFFERS               (TSPP_SPS_DESCRIPTOR_COUNT - 1)
+#define TSPP_TSIF_DEFAULT_TIME_LIMIT   60
+#define SPS_DESCRIPTOR_SIZE            8
+#define MIN_ACCEPTABLE_BUFFER_COUNT    2
+#define TSPP_DEBUG(msg...)
+
+/*
+ * TSIF register offsets
+ */
+#define TSIF_STS_CTL_OFF               (0x0)
+#define TSIF_TIME_LIMIT_OFF            (0x4)
+#define TSIF_CLK_REF_OFF               (0x8)
+#define TSIF_LPBK_FLAGS_OFF            (0xc)
+#define TSIF_LPBK_DATA_OFF            (0x10)
+#define TSIF_TEST_CTL_OFF             (0x14)
+#define TSIF_TEST_MODE_OFF            (0x18)
+#define TSIF_TEST_RESET_OFF           (0x1c)
+#define TSIF_TEST_EXPORT_OFF          (0x20)
+#define TSIF_TEST_CURRENT_OFF         (0x24)
+#define TSIF_TTS_CTL_OFF	      (0x38)
+
+#define TSIF_DATA_PORT_OFF            (0x100)
+
+/* bits for TSIF_STS_CTL register */
+#define TSIF_STS_CTL_EN_IRQ       BIT(28)
+#define TSIF_STS_CTL_PACK_AVAIL   BIT(27)
+#define TSIF_STS_CTL_1ST_PACKET   BIT(26)
+#define TSIF_STS_CTL_OVERFLOW     BIT(25)
+#define TSIF_STS_CTL_LOST_SYNC    BIT(24)
+#define TSIF_STS_CTL_TIMEOUT      BIT(23)
+#define TSIF_STS_CTL_INV_SYNC     BIT(21)
+#define TSIF_STS_CTL_INV_NULL     BIT(20)
+#define TSIF_STS_CTL_INV_ERROR    BIT(19)
+#define TSIF_STS_CTL_INV_ENABLE   BIT(18)
+#define TSIF_STS_CTL_INV_DATA     BIT(17)
+#define TSIF_STS_CTL_INV_CLOCK    BIT(16)
+#define TSIF_STS_CTL_SPARE        BIT(15)
+#define TSIF_STS_CTL_EN_NULL      BIT(11)
+#define TSIF_STS_CTL_EN_ERROR     BIT(10)
+#define TSIF_STS_CTL_LAST_BIT     BIT(9)
+#define TSIF_STS_CTL_EN_TIME_LIM  BIT(8)
+#define TSIF_STS_CTL_EN_TCR       BIT(7)
+#define TSIF_STS_CTL_TEST_MODE    BIT(6)
+#define TSIF_STS_CTL_MODE_2       BIT(5)
+#define TSIF_STS_CTL_EN_DM        BIT(4)
+#define TSIF_STS_CTL_STOP         BIT(3)
+#define TSIF_STS_CTL_START        BIT(0)
+
+/* bits for TSIF_TTS_CTRL register */
+#define TSIF_TTS_CTL_TTS_ENDIANNESS	BIT(4)
+#define TSIF_TTS_CTL_TTS_SOURCE		BIT(3)
+#define TSIF_TTS_CTL_TTS_LENGTH_1	BIT(1)
+#define TSIF_TTS_CTL_TTS_LENGTH_0	BIT(0)
+
+/*
+ * TSPP register offsets
+ */
+#define TSPP_RST			0x00
+#define TSPP_CLK_CONTROL		0x04
+#define TSPP_CONFIG			0x08
+#define TSPP_CONTROL			0x0C
+#define TSPP_PS_DISABLE			0x10
+#define TSPP_MSG_IRQ_STATUS		0x14
+#define TSPP_MSG_IRQ_MASK		0x18
+#define TSPP_IRQ_STATUS			0x1C
+#define TSPP_IRQ_MASK			0x20
+#define TSPP_IRQ_CLEAR			0x24
+#define TSPP_PIPE_ERROR_STATUS(_n)	(0x28 + (_n << 2))
+#define TSPP_STATUS			0x68
+#define TSPP_CURR_TSP_HEADER		0x6C
+#define TSPP_CURR_PID_FILTER		0x70
+#define TSPP_SYSTEM_KEY(_n)		(0x74 + (_n << 2))
+#define TSPP_CBC_INIT_VAL(_n)		(0x94 + (_n << 2))
+#define TSPP_DATA_KEY_RESET		0x9C
+#define TSPP_KEY_VALID			0xA0
+#define TSPP_KEY_ERROR			0xA4
+#define TSPP_TEST_CTRL			0xA8
+#define TSPP_VERSION			0xAC
+#define TSPP_GENERICS			0xB0
+#define TSPP_NOP			0xB4
+
+/*
+ * Register bit definitions
+ */
+/* TSPP_RST */
+#define TSPP_RST_RESET                    BIT(0)
+
+/* TSPP_CLK_CONTROL	*/
+#define TSPP_CLK_CONTROL_FORCE_CRYPTO     BIT(9)
+#define TSPP_CLK_CONTROL_FORCE_PES_PL     BIT(8)
+#define TSPP_CLK_CONTROL_FORCE_PES_AF     BIT(7)
+#define TSPP_CLK_CONTROL_FORCE_RAW_CTRL   BIT(6)
+#define TSPP_CLK_CONTROL_FORCE_PERF_CNT   BIT(5)
+#define TSPP_CLK_CONTROL_FORCE_CTX_SEARCH BIT(4)
+#define TSPP_CLK_CONTROL_FORCE_TSP_PROC   BIT(3)
+#define TSPP_CLK_CONTROL_FORCE_CONS_AHB2MEM BIT(2)
+#define TSPP_CLK_CONTROL_FORCE_TS_AHB2MEM BIT(1)
+#define TSPP_CLK_CONTROL_SET_CLKON        BIT(0)
+
+/* TSPP_CONFIG	*/
+#define TSPP_CONFIG_SET_PACKET_LENGTH(_a, _b) (_a = (_a & 0xF0) | \
+((_b & 0xF) << 8))
+#define TSPP_CONFIG_GET_PACKET_LENGTH(_a) ((_a >> 8) & 0xF)
+#define TSPP_CONFIG_DUP_WITH_DISC_EN		BIT(7)
+#define TSPP_CONFIG_PES_SYNC_ERROR_MASK   BIT(6)
+#define TSPP_CONFIG_PS_LEN_ERR_MASK       BIT(5)
+#define TSPP_CONFIG_PS_CONT_ERR_UNSP_MASK BIT(4)
+#define TSPP_CONFIG_PS_CONT_ERR_MASK      BIT(3)
+#define TSPP_CONFIG_PS_DUP_TSP_MASK       BIT(2)
+#define TSPP_CONFIG_TSP_ERR_IND_MASK      BIT(1)
+#define TSPP_CONFIG_TSP_SYNC_ERR_MASK     BIT(0)
+
+/* TSPP_CONTROL */
+#define TSPP_CONTROL_PID_FILTER_LOCK      BIT(5)
+#define TSPP_CONTROL_FORCE_KEY_CALC       BIT(4)
+#define TSPP_CONTROL_TSP_CONS_SRC_DIS     BIT(3)
+#define TSPP_CONTROL_TSP_TSIF1_SRC_DIS    BIT(2)
+#define TSPP_CONTROL_TSP_TSIF0_SRC_DIS    BIT(1)
+#define TSPP_CONTROL_PERF_COUNT_INIT      BIT(0)
+
+/* TSPP_MSG_IRQ_STATUS + TSPP_MSG_IRQ_MASK */
+#define TSPP_MSG_TSPP_IRQ                 BIT(2)
+#define TSPP_MSG_TSIF_1_IRQ               BIT(1)
+#define TSPP_MSG_TSIF_0_IRQ               BIT(0)
+
+/* TSPP_IRQ_STATUS + TSPP_IRQ_MASK + TSPP_IRQ_CLEAR */
+#define TSPP_IRQ_STATUS_TSP_RD_CMPL		BIT(19)
+#define TSPP_IRQ_STATUS_KEY_ERROR		BIT(18)
+#define TSPP_IRQ_STATUS_KEY_SWITCHED_BAD	BIT(17)
+#define TSPP_IRQ_STATUS_KEY_SWITCHED		BIT(16)
+#define TSPP_IRQ_STATUS_PS_BROKEN(_n)		BIT((_n))
+
+/* TSPP_PIPE_ERROR_STATUS */
+#define TSPP_PIPE_PES_SYNC_ERROR		BIT(3)
+#define TSPP_PIPE_PS_LENGTH_ERROR		BIT(2)
+#define TSPP_PIPE_PS_CONTINUITY_ERROR		BIT(1)
+#define TSPP_PIP_PS_LOST_START			BIT(0)
+
+/* TSPP_STATUS			*/
+#define TSPP_STATUS_TSP_PKT_AVAIL		BIT(10)
+#define TSPP_STATUS_TSIF1_DM_REQ		BIT(6)
+#define TSPP_STATUS_TSIF0_DM_REQ		BIT(2)
+#define TSPP_CURR_FILTER_TABLE			BIT(0)
+
+/* TSPP_GENERICS		*/
+#define TSPP_GENERICS_CRYPTO_GEN		BIT(12)
+#define TSPP_GENERICS_MAX_CONS_PIPES		BIT(7)
+#define TSPP_GENERICS_MAX_PIPES			BIT(2)
+#define TSPP_GENERICS_TSIF_1_GEN		BIT(1)
+#define TSPP_GENERICS_TSIF_0_GEN		BIT(0)
+
+/*
+ * TSPP memory regions
+ */
+#define TSPP_PID_FILTER_TABLE0      0x800
+#define TSPP_PID_FILTER_TABLE1      0x880
+#define TSPP_PID_FILTER_TABLE2      0x900
+#define TSPP_GLOBAL_PERFORMANCE     0x980 /* see tspp_global_performance */
+#define TSPP_PIPE_CONTEXT           0x990 /* see tspp_pipe_context */
+#define TSPP_PIPE_PERFORMANCE       0x998 /* see tspp_pipe_performance */
+#define TSPP_TSP_BUFF_WORD(_n)      (0xC10 + (_n << 2))
+#define TSPP_DATA_KEY               0xCD0
+
+struct debugfs_entry {
+	const char *name;
+	mode_t mode;
+	int offset;
+};
+
+static const struct debugfs_entry debugfs_tsif_regs[] = {
+	{"sts_ctl",             0644, TSIF_STS_CTL_OFF},
+	{"time_limit",          0644, TSIF_TIME_LIMIT_OFF},
+	{"clk_ref",             0644, TSIF_CLK_REF_OFF},
+	{"lpbk_flags",          0644, TSIF_LPBK_FLAGS_OFF},
+	{"lpbk_data",           0644, TSIF_LPBK_DATA_OFF},
+	{"test_ctl",            0644, TSIF_TEST_CTL_OFF},
+	{"test_mode",           0644, TSIF_TEST_MODE_OFF},
+	{"test_reset",          0200, TSIF_TEST_RESET_OFF},
+	{"test_export",         0644, TSIF_TEST_EXPORT_OFF},
+	{"test_current",        0444, TSIF_TEST_CURRENT_OFF},
+	{"data_port",           0400, TSIF_DATA_PORT_OFF},
+	{"tts_source",          0600, TSIF_TTS_CTL_OFF},
+};
+
+static const struct debugfs_entry debugfs_tspp_regs[] = {
+	{"rst",                 0644, TSPP_RST},
+	{"clk_control",         0644, TSPP_CLK_CONTROL},
+	{"config",              0644, TSPP_CONFIG},
+	{"control",             0644, TSPP_CONTROL},
+	{"ps_disable",          0644, TSPP_PS_DISABLE},
+	{"msg_irq_status",      0644, TSPP_MSG_IRQ_STATUS},
+	{"msg_irq_mask",        0644, TSPP_MSG_IRQ_MASK},
+	{"irq_status",          0644, TSPP_IRQ_STATUS},
+	{"irq_mask",            0644, TSPP_IRQ_MASK},
+	{"irq_clear",           0644, TSPP_IRQ_CLEAR},
+	/* {"pipe_error_status",S_IRUGO | S_IWUSR, TSPP_PIPE_ERROR_STATUS}, */
+	{"status",              0644, TSPP_STATUS},
+	{"curr_tsp_header",     0644, TSPP_CURR_TSP_HEADER},
+	{"curr_pid_filter",     0644, TSPP_CURR_PID_FILTER},
+	/* {"system_key",       S_IRUGO | S_IWUSR, TSPP_SYSTEM_KEY}, */
+	/* {"cbc_init_val",     S_IRUGO | S_IWUSR, TSPP_CBC_INIT_VAL}, */
+	{"data_key_reset",      0644, TSPP_DATA_KEY_RESET},
+	{"key_valid",           0644, TSPP_KEY_VALID},
+	{"key_error",           0644, TSPP_KEY_ERROR},
+	{"test_ctrl",           0644, TSPP_TEST_CTRL},
+	{"version",             0644, TSPP_VERSION},
+	{"generics",            0644, TSPP_GENERICS},
+	{"pid_filter_table0",   0644, TSPP_PID_FILTER_TABLE0},
+	{"pid_filter_table1",   0644, TSPP_PID_FILTER_TABLE1},
+	{"pid_filter_table2",   0644, TSPP_PID_FILTER_TABLE2},
+	{"tsp_total_num",       0644, TSPP_GLOBAL_PERFORMANCE},
+	{"tsp_ignored_num",     0644, TSPP_GLOBAL_PERFORMANCE + 4},
+	{"tsp_err_ind_num",     0644, TSPP_GLOBAL_PERFORMANCE + 8},
+	{"tsp_sync_err_num",    0644, TSPP_GLOBAL_PERFORMANCE + 16},
+	{"pipe_context",        0644, TSPP_PIPE_CONTEXT},
+	{"pipe_performance",    0644, TSPP_PIPE_PERFORMANCE},
+	{"data_key",            0644, TSPP_DATA_KEY}
+};
+
+struct tspp_pid_filter {
+	u32 filter;			/* see FILTER_ macros */
+	u32 config;			/* see FILTER_ macros */
+};
+
+/* tsp_info */
+#define FILTER_HEADER_ERROR_MASK          BIT(7)
+#define FILTER_TRANS_END_DISABLE          BIT(6)
+#define FILTER_DEC_ON_ERROR_EN            BIT(5)
+#define FILTER_DECRYPT                    BIT(4)
+#define FILTER_HAS_ENCRYPTION(_p)         (_p->config & FILTER_DECRYPT)
+#define FILTER_GET_PIPE_NUMBER0(_p)       (_p->config & 0xF)
+#define FILTER_SET_PIPE_NUMBER0(_p, _b)   (_p->config = \
+			(_p->config & ~0xF) | (_b & 0xF))
+#define FILTER_GET_PIPE_PROCESS0(_p)      ((_p->filter >> 30) & 0x3)
+#define FILTER_SET_PIPE_PROCESS0(_p, _b)  (_p->filter = \
+			(_p->filter & ~(0x3<<30)) | ((_b & 0x3) << 30))
+#define FILTER_GET_PIPE_PID(_p)           ((_p->filter >> 13) & 0x1FFF)
+#define FILTER_SET_PIPE_PID(_p, _b)       (_p->filter = \
+			(_p->filter & ~(0x1FFF<<13)) | ((_b & 0x1FFF) << 13))
+#define FILTER_GET_PID_MASK(_p)           (_p->filter & 0x1FFF)
+#define FILTER_SET_PID_MASK(_p, _b)       (_p->filter = \
+			(_p->filter & ~0x1FFF) | (_b & 0x1FFF))
+#define FILTER_GET_PIPE_PROCESS1(_p)      ((_p->config >> 30) & 0x3)
+#define FILTER_SET_PIPE_PROCESS1(_p, _b)  (_p->config = \
+			(_p->config & ~(0x3<<30)) | ((_b & 0x3) << 30))
+#define FILTER_GET_KEY_NUMBER(_p)         ((_p->config >> 8) & 0x7)
+#define FILTER_SET_KEY_NUMBER(_p, _b)     (_p->config = \
+			(_p->config & ~(0x7<<8)) | ((_b & 0x7) << 8))
+
+struct tspp_global_performance_regs {
+	u32 tsp_total;
+	u32 tsp_ignored;
+	u32 tsp_error;
+	u32 tsp_sync;
+};
+
+struct tspp_pipe_context_regs {
+	u16 pes_bytes_left;
+	u16 count;
+	u32 tsif_suffix;
+} __packed;
+#define CONTEXT_GET_STATE(_a)					(_a & 0x3)
+#define CONTEXT_UNSPEC_LENGTH					BIT(11)
+#define CONTEXT_GET_CONT_COUNT(_a)			((_a >> 12) & 0xF)
+
+#define MSEC_TO_JIFFIES(msec)			((msec) * HZ / 1000)
+
+struct tspp_pipe_performance_regs {
+	u32 tsp_total;
+	u32 ps_duplicate_tsp;
+	u32 tsp_no_payload;
+	u32 tsp_broken_ps;
+	u32 ps_total_num;
+	u32 ps_continuity_error;
+	u32 ps_length_error;
+	u32 pes_sync_error;
+};
+
+struct tspp_tsif_device {
+	void __iomem *base;
+	u32 time_limit;
+	u32 ref_count;
+	enum tspp_tsif_mode mode;
+	int clock_inverse;
+	int data_inverse;
+	int sync_inverse;
+	int enable_inverse;
+	u32 tsif_irq;
+
+	/* debugfs */
+	struct dentry *dent_tsif;
+	struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
+	u32 stat_rx;
+	u32 stat_overflow;
+	u32 stat_lost_sync;
+	u32 stat_timeout;
+	enum tsif_tts_source tts_source;
+	u32 lpass_timer_enable;
+};
+
+enum tspp_buf_state {
+	TSPP_BUF_STATE_EMPTY,	/* buffer has been allocated, but not waiting */
+	TSPP_BUF_STATE_WAITING, /* buffer is waiting to be filled */
+	TSPP_BUF_STATE_DATA,    /* buffer is not empty and can be read */
+	TSPP_BUF_STATE_LOCKED   /* buffer is being read by a client */
+};
+
+struct tspp_mem_buffer {
+	struct tspp_mem_buffer *next;
+	struct sps_mem_buffer sps;
+	struct tspp_data_descriptor desc; /* buffer descriptor for kernel api */
+	enum tspp_buf_state state;
+	size_t filled;          /* how much data this buffer is holding */
+	int read_index;         /* where to start reading data from */
+};
+
+/* this represents each char device 'channel' */
+struct tspp_channel {
+	struct tspp_device *pdev; /* can use container_of instead? */
+	struct sps_pipe *pipe;
+	struct sps_connect config;
+	struct sps_register_event event;
+	struct tspp_mem_buffer *data;    /* list of buffers */
+	struct tspp_mem_buffer *read;    /* first buffer ready to be read */
+	struct tspp_mem_buffer *waiting; /* first outstanding transfer */
+	struct tspp_mem_buffer *locked;  /* buffer currently being read */
+	wait_queue_head_t in_queue; /* set when data is received */
+	u32 id;           /* channel id (0-15) */
+	int used;         /* is this channel in use? */
+	int key;          /* which encryption key index is used */
+	u32 buffer_size;  /* size of the sps transfer buffers */
+	u32 max_buffers;  /* how many buffers should be allocated */
+	u32 buffer_count; /* how many buffers are actually allocated */
+	u32 filter_count; /* how many filters have been added to this channel */
+	u32 int_freq;     /* generate interrupts every x descriptors */
+	enum tspp_source src;
+	enum tspp_mode mode;
+	tspp_notifier *notifier; /* used only with kernel api */
+	void *notify_data;       /* data to be passed with the notifier */
+	u32 expiration_period_ms; /* notification on partially filled buffers */
+	struct timer_list expiration_timer;
+	struct dma_pool *dma_pool;
+	tspp_memfree *memfree;   /* user defined memory free function */
+	void *user_info; /* user cookie passed to memory alloc/free function */
+};
+
+struct tspp_pid_filter_table {
+	struct tspp_pid_filter filter[TSPP_NUM_PRIORITIES];
+};
+
+struct tspp_key_entry {
+	u32 even_lsb;
+	u32 even_msb;
+	u32 odd_lsb;
+	u32 odd_msb;
+};
+
+struct tspp_key_table {
+	struct tspp_key_entry entry[TSPP_NUM_KEYS];
+};
+
+struct tspp_pinctrl {
+	struct pinctrl *pinctrl;
+
+	struct pinctrl_state *disabled;
+	struct pinctrl_state *tsif0_mode1;
+	struct pinctrl_state *tsif0_mode2;
+	struct pinctrl_state *tsif1_mode1;
+	struct pinctrl_state *tsif1_mode2;
+	struct pinctrl_state *dual_mode1;
+	struct pinctrl_state *dual_mode2;
+
+	bool tsif0_active;
+	bool tsif1_active;
+};
+
+/* this represents the actual hardware device */
+struct tspp_device {
+	struct list_head devlist; /* list of all devices */
+	struct platform_device *pdev;
+	void __iomem *base;
+	uint32_t tsif_bus_client;
+	unsigned int tspp_irq;
+	unsigned int bam_irq;
+	unsigned long bam_handle;
+	struct sps_bam_props bam_props;
+	struct wakeup_source ws;
+	spinlock_t spinlock;
+	struct tasklet_struct tlet;
+	struct tspp_tsif_device tsif[TSPP_TSIF_INSTANCES];
+	/* clocks */
+	struct clk *tsif_pclk;
+	struct clk *tsif_ref_clk;
+	/* regulators */
+	struct regulator *tsif_vreg;
+	/* data */
+	struct tspp_pid_filter_table *filters[TSPP_FILTER_TABLES];
+	struct tspp_channel channels[TSPP_NUM_CHANNELS];
+	struct tspp_key_table *tspp_key_table;
+	struct tspp_global_performance_regs *tspp_global_performance;
+	struct tspp_pipe_context_regs *tspp_pipe_context;
+	struct tspp_pipe_performance_regs *tspp_pipe_performance;
+	bool req_irqs;
+	/* pinctrl */
+	struct mutex mutex;
+	struct tspp_pinctrl pinctrl;
+	unsigned int tts_source; /* Time stamp source type LPASS timer/TCR */
+
+	struct dentry *dent;
+	struct dentry *debugfs_regs[ARRAY_SIZE(debugfs_tspp_regs)];
+};
+
+static int tspp_key_entry;
+static u32 channel_id;  /* next channel id number to assign */
+
+static LIST_HEAD(tspp_devices);
+
+/*** IRQ ***/
+static irqreturn_t tspp_isr(int irq, void *dev)
+{
+	struct tspp_device *device = dev;
+	u32 status, mask;
+	u32 data;
+
+	status = readl_relaxed(device->base + TSPP_IRQ_STATUS);
+	mask = readl_relaxed(device->base + TSPP_IRQ_MASK);
+	status &= mask;
+
+	if (!status) {
+		dev_warn(&device->pdev->dev, "Spurious interrupt");
+		return IRQ_NONE;
+	}
+
+	/* if (status & TSPP_IRQ_STATUS_TSP_RD_CMPL) */
+
+	if (status & TSPP_IRQ_STATUS_KEY_ERROR) {
+		/* read the key error info */
+		data = readl_relaxed(device->base + TSPP_KEY_ERROR);
+		dev_info(&device->pdev->dev, "key error 0x%x", data);
+	}
+	if (status & TSPP_IRQ_STATUS_KEY_SWITCHED_BAD) {
+		data = readl_relaxed(device->base + TSPP_KEY_VALID);
+		dev_info(&device->pdev->dev, "key invalidated: 0x%x", data);
+	}
+	if (status & TSPP_IRQ_STATUS_KEY_SWITCHED)
+		dev_info(&device->pdev->dev, "key switched");
+
+	if (status & 0xffff)
+		dev_info(&device->pdev->dev, "broken pipe %i", status & 0xffff);
+
+	writel_relaxed(status, device->base + TSPP_IRQ_CLEAR);
+
+	/*
+	 * Before returning IRQ_HANDLED to the generic interrupt handling
+	 * framework, we need to make sure that all operations, including
+	 * clearing of the interrupt status registers in the hardware, have
+	 * been performed.
+	 * Thus a barrier after clearing the interrupt status register
+	 * is required to guarantee that the interrupt status register has
+	 * really been cleared by the time we return from this handler.
+	 */
+	wmb();
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t tsif_isr(int irq, void *dev)
+{
+	struct tspp_tsif_device *tsif_device = dev;
+	u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
+
+	if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
+			 TSIF_STS_CTL_OVERFLOW |
+			 TSIF_STS_CTL_LOST_SYNC |
+			 TSIF_STS_CTL_TIMEOUT)))
+		return IRQ_NONE;
+
+	if (sts_ctl & TSIF_STS_CTL_OVERFLOW)
+		tsif_device->stat_overflow++;
+
+	if (sts_ctl & TSIF_STS_CTL_LOST_SYNC)
+		tsif_device->stat_lost_sync++;
+
+	if (sts_ctl & TSIF_STS_CTL_TIMEOUT)
+		tsif_device->stat_timeout++;
+
+	iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+
+	/*
+	 * Before returning IRQ_HANDLED to the generic interrupt handling
+	 * framework, we need to make sure that all operations, including
+	 * clearing of the interrupt status registers in the hardware, have
+	 * been performed.
+	 * Thus a barrier after clearing the interrupt status register
+	 * is required to guarantee that the interrupt status register has
+	 * really been cleared by the time we return from this handler.
+	 */
+	wmb();
+	return IRQ_HANDLED;
+}
+
+/*** callbacks ***/
+static void tspp_sps_complete_cb(struct sps_event_notify *notify)
+{
+	struct tspp_device *pdev;
+
+	if (!notify || !notify->user)
+		return;
+
+	pdev = notify->user;
+	tasklet_schedule(&pdev->tlet);
+}
+
+static void tspp_expiration_timer(unsigned long data)
+{
+	struct tspp_device *pdev = (struct tspp_device *)data;
+
+	if (pdev)
+		tasklet_schedule(&pdev->tlet);
+}
+
+/*** tasklet ***/
+static void tspp_sps_complete_tlet(unsigned long data)
+{
+	int i;
+	int complete;
+	unsigned long flags;
+	struct sps_iovec iovec;
+	struct tspp_channel *channel;
+	struct tspp_device *device = (struct tspp_device *)data;
+
+	spin_lock_irqsave(&device->spinlock, flags);
+
+	for (i = 0; i < TSPP_NUM_CHANNELS; i++) {
+		complete = 0;
+		channel = &device->channels[i];
+
+		if (!channel->used || !channel->waiting)
+			continue;
+
+		/* stop the expiration timer */
+		if (channel->expiration_period_ms)
+			del_timer(&channel->expiration_timer);
+
+		/* get completions */
+		while (channel->waiting->state == TSPP_BUF_STATE_WAITING) {
+			if (sps_get_iovec(channel->pipe, &iovec) != 0) {
+				pr_err("tspp: Error in iovec on channel %i",
+					channel->id);
+				break;
+			}
+			if (iovec.size == 0)
+				break;
+
+			if (DESC_FULL_ADDR(iovec.flags, iovec.addr)
+			    != channel->waiting->sps.phys_base)
+				pr_err("tspp: buffer mismatch %pa",
+					&channel->waiting->sps.phys_base);
+
+			complete = 1;
+			channel->waiting->state = TSPP_BUF_STATE_DATA;
+			channel->waiting->filled = iovec.size;
+			channel->waiting->read_index = 0;
+
+			if (channel->src == TSPP_SOURCE_TSIF0)
+				device->tsif[0].stat_rx++;
+			else if (channel->src == TSPP_SOURCE_TSIF1)
+				device->tsif[1].stat_rx++;
+
+			/* update the pointers */
+			channel->waiting = channel->waiting->next;
+		}
+
+		/* wake any waiting processes */
+		if (complete) {
+			wake_up_interruptible(&channel->in_queue);
+
+			/* call notifiers */
+			if (channel->notifier)
+				channel->notifier(channel->id,
+					channel->notify_data);
+		}
+
+		/* restart expiration timer */
+		if (channel->expiration_period_ms)
+			mod_timer(&channel->expiration_timer,
+				jiffies +
+				MSEC_TO_JIFFIES(
+					channel->expiration_period_ms));
+	}
+
+	spin_unlock_irqrestore(&device->spinlock, flags);
+}
+
+static int tspp_config_gpios(struct tspp_device *device,
+				enum tspp_source source,
+				int enable)
+{
+	int ret;
+	struct pinctrl_state *s;
+	struct tspp_pinctrl *p = &device->pinctrl;
+	bool mode2;
+
+	/*
+	 * TSIF devices are handled separately; however, changing the pinctrl
+	 * state must be protected against race conditions.
+	 */
+	if (mutex_lock_interruptible(&device->mutex))
+		return -ERESTARTSYS;
+
+	switch (source) {
+	case TSPP_SOURCE_TSIF0:
+		mode2 = device->tsif[0].mode == TSPP_TSIF_MODE_2;
+		if (enable == p->tsif1_active) {
+			if (enable)
+				/* Both tsif enabled */
+				s = mode2 ? p->dual_mode2 : p->dual_mode1;
+			else
+				/* Both tsif disabled */
+				s = p->disabled;
+		} else if (enable) {
+			/* Only tsif0 is enabled */
+			s = mode2 ? p->tsif0_mode2 : p->tsif0_mode1;
+		} else {
+			/* Only tsif1 is enabled */
+			s = mode2 ? p->tsif1_mode2 : p->tsif1_mode1;
+		}
+
+		ret = pinctrl_select_state(p->pinctrl, s);
+		if (!ret)
+			p->tsif0_active = enable;
+		break;
+	case TSPP_SOURCE_TSIF1:
+		mode2 = device->tsif[1].mode == TSPP_TSIF_MODE_2;
+		if (enable == p->tsif0_active) {
+			if (enable)
+				/* Both tsif enabled */
+				s = mode2 ? p->dual_mode2 : p->dual_mode1;
+			else
+				/* Both tsif disabled */
+				s = p->disabled;
+		} else if (enable) {
+			/* Only tsif1 is enabled */
+			s = mode2 ? p->tsif1_mode2 : p->tsif1_mode1;
+		} else {
+			/* Only tsif0 is enabled */
+			s = mode2 ? p->tsif0_mode2 : p->tsif0_mode1;
+		}
+
+		ret = pinctrl_select_state(p->pinctrl, s);
+		if (!ret)
+			p->tsif1_active = enable;
+		break;
+	default:
+		pr_err("%s: invalid source %d\n", __func__, source);
+		mutex_unlock(&device->mutex);
+		return -EINVAL;
+	}
+
+	if (ret)
+		pr_err("%s: failed to change pinctrl state, ret=%d\n",
+			__func__, ret);
+
+	mutex_unlock(&device->mutex);
+	return ret;
+}
+
+static int tspp_get_pinctrl(struct tspp_device *device)
+{
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *state;
+
+	pinctrl = devm_pinctrl_get(&device->pdev->dev);
+	if (IS_ERR_OR_NULL(pinctrl)) {
+		pr_err("%s: Unable to get pinctrl handle\n", __func__);
+		return -EINVAL;
+	}
+	device->pinctrl.pinctrl = pinctrl;
+
+	state = pinctrl_lookup_state(pinctrl, "disabled");
+	if (IS_ERR_OR_NULL(state)) {
+		pr_err("%s: Unable to find state %s\n",
+			__func__, "disabled");
+		return -EINVAL;
+	}
+	device->pinctrl.disabled = state;
+
+	state = pinctrl_lookup_state(pinctrl, "tsif0-mode1");
+	if (IS_ERR_OR_NULL(state)) {
+		pr_err("%s: Unable to find state %s\n",
+			__func__, "tsif0-mode1");
+		return -EINVAL;
+	}
+	device->pinctrl.tsif0_mode1 = state;
+
+	state = pinctrl_lookup_state(pinctrl, "tsif0-mode2");
+	if (IS_ERR_OR_NULL(state)) {
+		pr_err("%s: Unable to find state %s\n",
+			__func__, "tsif0-mode2");
+		return -EINVAL;
+	}
+	device->pinctrl.tsif0_mode2 = state;
+
+	state = pinctrl_lookup_state(pinctrl, "tsif1-mode1");
+	if (IS_ERR_OR_NULL(state)) {
+		pr_err("%s: Unable to find state %s\n",
+			__func__, "tsif1-mode1");
+		return -EINVAL;
+	}
+	device->pinctrl.tsif1_mode1 = state;
+
+	state = pinctrl_lookup_state(pinctrl, "tsif1-mode2");
+	if (IS_ERR_OR_NULL(state)) {
+		pr_err("%s: Unable to find state %s\n",
+			__func__, "tsif1-mode2");
+		return -EINVAL;
+	}
+	device->pinctrl.tsif1_mode2 = state;
+
+	state = pinctrl_lookup_state(pinctrl, "dual-tsif-mode1");
+	if (IS_ERR_OR_NULL(state)) {
+		pr_err("%s: Unable to find state %s\n",
+			__func__, "dual-tsif-mode1");
+		return -EINVAL;
+	}
+	device->pinctrl.dual_mode1 = state;
+
+	state = pinctrl_lookup_state(pinctrl, "dual-tsif-mode2");
+	if (IS_ERR_OR_NULL(state)) {
+		pr_err("%s: Unable to find state %s\n",
+			__func__, "dual-tsif-mode2");
+		return -EINVAL;
+	}
+	device->pinctrl.dual_mode2 = state;
+
+	device->pinctrl.tsif0_active = false;
+	device->pinctrl.tsif1_active = false;
+
+	return 0;
+}
+
+
+/*** Clock functions ***/
+static int tspp_clock_start(struct tspp_device *device)
+{
+	int rc;
+
+	if (device == NULL) {
+		pr_err("tspp: Can't start clocks, invalid device\n");
+		return -EINVAL;
+	}
+
+	if (device->tsif_bus_client) {
+		rc = msm_bus_scale_client_update_request(
+					device->tsif_bus_client, 1);
+		if (rc) {
+			pr_err("tspp: Can't enable bus\n");
+			return -EBUSY;
+		}
+	}
+
+	if (device->tsif_vreg) {
+		rc = regulator_set_voltage(device->tsif_vreg,
+					RPMH_REGULATOR_LEVEL_OFF,
+					RPMH_REGULATOR_LEVEL_MAX);
+		if (rc) {
+			pr_err("Unable to set CX voltage.\n");
+			if (device->tsif_bus_client)
+				msm_bus_scale_client_update_request(
+					device->tsif_bus_client, 0);
+			return rc;
+		}
+	}
+
+	if (device->tsif_pclk && clk_prepare_enable(device->tsif_pclk) != 0) {
+		pr_err("tspp: Can't start pclk");
+
+		if (device->tsif_vreg) {
+			regulator_set_voltage(device->tsif_vreg,
+					RPMH_REGULATOR_LEVEL_OFF,
+					RPMH_REGULATOR_LEVEL_MAX);
+		}
+
+		if (device->tsif_bus_client)
+			msm_bus_scale_client_update_request(
+				device->tsif_bus_client, 0);
+		return -EBUSY;
+	}
+
+	if (device->tsif_ref_clk &&
+		clk_prepare_enable(device->tsif_ref_clk) != 0) {
+		pr_err("tspp: Can't start ref clk");
+		clk_disable_unprepare(device->tsif_pclk);
+		if (device->tsif_vreg) {
+			regulator_set_voltage(device->tsif_vreg,
+					RPMH_REGULATOR_LEVEL_OFF,
+					RPMH_REGULATOR_LEVEL_MAX);
+		}
+
+		if (device->tsif_bus_client)
+			msm_bus_scale_client_update_request(
+				device->tsif_bus_client, 0);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void tspp_clock_stop(struct tspp_device *device)
+{
+	int rc;
+
+	if (device == NULL) {
+		pr_err("tspp: Can't stop clocks, invalid device\n");
+		return;
+	}
+
+	if (device->tsif_pclk)
+		clk_disable_unprepare(device->tsif_pclk);
+
+	if (device->tsif_ref_clk)
+		clk_disable_unprepare(device->tsif_ref_clk);
+
+	if (device->tsif_vreg) {
+		rc = regulator_set_voltage(device->tsif_vreg,
+					RPMH_REGULATOR_LEVEL_OFF,
+					RPMH_REGULATOR_LEVEL_MAX);
+		if (rc)
+			pr_err("Unable to set CX voltage.\n");
+	}
+
+	if (device->tsif_bus_client) {
+		rc = msm_bus_scale_client_update_request(
+					device->tsif_bus_client, 0);
+		if (rc)
+			pr_err("tspp: Can't disable bus\n");
+	}
+}
+
+/*** TSIF functions ***/
+static int tspp_start_tsif(struct tspp_tsif_device *tsif_device)
+{
+	int start_hardware = 0;
+	u32 ctl;
+	u32 tts_ctl;
+	int retval;
+
+	if (tsif_device->ref_count == 0) {
+		start_hardware = 1;
+	} else if (tsif_device->ref_count > 0) {
+		ctl = readl_relaxed(tsif_device->base + TSIF_STS_CTL_OFF);
+		if (!(ctl & TSIF_STS_CTL_START)) {
+			/* this hardware should already be running */
+			pr_warn("tspp: tsif hw not started but ref count > 0");
+			start_hardware = 1;
+		}
+	}
+
+	if (start_hardware) {
+		ctl = TSIF_STS_CTL_EN_IRQ |
+				TSIF_STS_CTL_EN_DM |
+				TSIF_STS_CTL_PACK_AVAIL |
+				TSIF_STS_CTL_OVERFLOW |
+				TSIF_STS_CTL_LOST_SYNC;
+
+		if (tsif_device->clock_inverse)
+			ctl |= TSIF_STS_CTL_INV_CLOCK;
+
+		if (tsif_device->data_inverse)
+			ctl |= TSIF_STS_CTL_INV_DATA;
+
+		if (tsif_device->sync_inverse)
+			ctl |= TSIF_STS_CTL_INV_SYNC;
+
+		if (tsif_device->enable_inverse)
+			ctl |= TSIF_STS_CTL_INV_ENABLE;
+
+		switch (tsif_device->mode) {
+		case TSPP_TSIF_MODE_LOOPBACK:
+			ctl |= TSIF_STS_CTL_EN_NULL |
+					TSIF_STS_CTL_EN_ERROR |
+					TSIF_STS_CTL_TEST_MODE;
+			break;
+		case TSPP_TSIF_MODE_1:
+			ctl |= TSIF_STS_CTL_EN_TIME_LIM;
+			if (tsif_device->tts_source != TSIF_TTS_LPASS_TIMER)
+				ctl |= TSIF_STS_CTL_EN_TCR;
+			break;
+		case TSPP_TSIF_MODE_2:
+			ctl |= TSIF_STS_CTL_EN_TIME_LIM |
+					TSIF_STS_CTL_MODE_2;
+			if (tsif_device->tts_source != TSIF_TTS_LPASS_TIMER)
+				ctl |= TSIF_STS_CTL_EN_TCR;
+			break;
+		default:
+			pr_warn("tspp: unknown tsif mode 0x%x",
+				tsif_device->mode);
+		}
+		/* Set 4bytes Time Stamp for TCR */
+		if (tsif_device->tts_source == TSIF_TTS_LPASS_TIMER) {
+			if (tsif_device->lpass_timer_enable == 0) {
+				retval = avcs_core_open();
+				if (retval < 0) {
+					pr_warn("tspp: avcs open fail:%d\n",
+						retval);
+					return retval;
+				}
+				retval = avcs_core_disable_power_collapse(1);
+				if (retval  < 0) {
+					pr_warn("tspp: avcs power enable:%d\n",
+						retval);
+					return retval;
+				}
+				tsif_device->lpass_timer_enable = 1;
+			}
+
+			tts_ctl	= readl_relaxed(tsif_device->base +
+						TSIF_TTS_CTL_OFF);
+			tts_ctl = 0;
+			/* Set LPASS Timer TTS source */
+			tts_ctl |= TSIF_TTS_CTL_TTS_SOURCE;
+			 /* Set 4 byte TTS */
+			tts_ctl |= TSIF_TTS_CTL_TTS_LENGTH_0;
+
+			writel_relaxed(tts_ctl, tsif_device->base +
+				       TSIF_TTS_CTL_OFF);
+			/* write TTS control register */
+			wmb();
+			tts_ctl	= readl_relaxed(tsif_device->base +
+						TSIF_TTS_CTL_OFF);
+		}
+
+		writel_relaxed(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+		/* write Status control register */
+		wmb();
+		writel_relaxed(tsif_device->time_limit,
+			  tsif_device->base + TSIF_TIME_LIMIT_OFF);
+		/* assure register configuration is done before starting TSIF */
+		wmb();
+		writel_relaxed(ctl | TSIF_STS_CTL_START,
+			  tsif_device->base + TSIF_STS_CTL_OFF);
+		/* assure TSIF start configuration */
+		wmb();
+	}
+
+	ctl = readl_relaxed(tsif_device->base + TSIF_STS_CTL_OFF);
+	if (!(ctl & TSIF_STS_CTL_START))
+		return -EBUSY;
+
+	tsif_device->ref_count++;
+	return 0;
+}
+
+static void tspp_stop_tsif(struct tspp_tsif_device *tsif_device)
+{
+	if (tsif_device->ref_count == 0) {
+		if (tsif_device->lpass_timer_enable == 1) {
+			if (avcs_core_disable_power_collapse(0) == 0)
+				tsif_device->lpass_timer_enable = 0;
+		}
+		return;
+	}
+
+	tsif_device->ref_count--;
+
+	if (tsif_device->ref_count == 0) {
+		writel_relaxed(TSIF_STS_CTL_STOP,
+			tsif_device->base + TSIF_STS_CTL_OFF);
+		/* assure TSIF stop configuration */
+		wmb();
+	}
+}
+
+/*** local TSPP functions ***/
+static int tspp_channels_in_use(struct tspp_device *pdev)
+{
+	int i;
+	int count = 0;
+
+	for (i = 0; i < TSPP_NUM_CHANNELS; i++)
+		count += (pdev->channels[i].used ? 1 : 0);
+
+	return count;
+}
+
+static struct tspp_device *tspp_find_by_id(int id)
+{
+	struct tspp_device *dev;
+
+	list_for_each_entry(dev, &tspp_devices, devlist) {
+		if (dev->pdev->id == id)
+			return dev;
+	}
+	return NULL;
+}
+
+static int tspp_get_key_entry(void)
+{
+	int i;
+
+	for (i = 0; i < TSPP_NUM_KEYS; i++) {
+		if (!(tspp_key_entry & (1 << i))) {
+			tspp_key_entry |= (1 << i);
+			return i;
+		}
+	}
+	return -1; /* no free key entries; callers check for -1 */
+}
+
+static void tspp_free_key_entry(int entry)
+{
+	if (entry >= TSPP_NUM_KEYS) {
+		pr_err("tspp_free_key_entry: index out of bounds");
+		return;
+	}
+
+	tspp_key_entry &= ~(1 << entry);
+}
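+
+/*
+ * Illustrative sketch (not part of the driver flow): tspp_key_entry is a
+ * bitmask in which bit i marks key slot i as in use.  For example, with
+ * tspp_key_entry == 0x5 (slots 0 and 2 taken), tspp_get_key_entry()
+ * returns 1 and the mask becomes 0x7; tspp_free_key_entry(2) then clears
+ * bit 2, leaving 0x3.
+ */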
+
+static int tspp_alloc_buffer(u32 channel_id, struct tspp_data_descriptor *desc,
+	u32 size, struct dma_pool *dma_pool, tspp_allocator *alloc, void *user)
+{
+	if (size < TSPP_MIN_BUFFER_SIZE ||
+		size > TSPP_MAX_BUFFER_SIZE) {
+		pr_err("tspp: bad buffer size %i", size);
+		return -ENOMEM;
+	}
+
+	if (alloc) {
+		TSPP_DEBUG("tspp using alloc function");
+		desc->virt_base = alloc(channel_id, size,
+			&desc->phys_base, user);
+	} else {
+		if (!dma_pool)
+			desc->virt_base = dma_alloc_coherent(NULL, size,
+				&desc->phys_base, GFP_KERNEL);
+		else
+			desc->virt_base = dma_pool_alloc(dma_pool, GFP_KERNEL,
+				&desc->phys_base);
+
+		if (desc->virt_base == 0) {
+			pr_err("tspp: dma buffer allocation failed %i\n", size);
+			return -ENOMEM;
+		}
+	}
+
+	desc->size = size;
+	return 0;
+}
+
+static int tspp_queue_buffer(struct tspp_channel *channel,
+	struct tspp_mem_buffer *buffer)
+{
+	int rc;
+	u32 flags = 0;
+
+	/* make sure the interrupt frequency is valid */
+	if (channel->int_freq < 1)
+		channel->int_freq = 1;
+
+	/* generate interrupt according to requested frequency */
+	if (buffer->desc.id % channel->int_freq == channel->int_freq-1)
+		flags = SPS_IOVEC_FLAG_INT;
+
+	/* start the transfer */
+	rc = sps_transfer_one(channel->pipe,
+		buffer->sps.phys_base,
+		buffer->sps.size,
+		flags ? channel->pdev : NULL,
+		flags);
+	if (rc < 0)
+		return rc;
+
+	buffer->state = TSPP_BUF_STATE_WAITING;
+
+	return 0;
+}
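+
+/*
+ * Illustrative note: with int_freq == 4, buffers whose descriptor id is
+ * 3, 7, 11, ... are queued with SPS_IOVEC_FLAG_INT set, so the BAM raises
+ * one interrupt per four completed descriptors instead of one per
+ * descriptor.
+ */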
+
+static int tspp_global_reset(struct tspp_device *pdev)
+{
+	u32 i, val;
+
+	/* stop all TSIFs */
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+		pdev->tsif[i].ref_count = 1; /* allows stopping hw */
+		tspp_stop_tsif(&pdev->tsif[i]); /* will reset ref_count to 0 */
+		pdev->tsif[i].time_limit = TSPP_TSIF_DEFAULT_TIME_LIMIT;
+		pdev->tsif[i].clock_inverse = 0;
+		pdev->tsif[i].data_inverse = 0;
+		pdev->tsif[i].sync_inverse = 0;
+		pdev->tsif[i].enable_inverse = 0;
+		pdev->tsif[i].lpass_timer_enable = 0;
+	}
+	writel_relaxed(TSPP_RST_RESET, pdev->base + TSPP_RST);
+	/* assure state is reset before continuing with configuration */
+	wmb();
+
+	/* TSPP tables */
+	for (i = 0; i < TSPP_FILTER_TABLES; i++)
+		memset_io(pdev->filters[i],
+			0, sizeof(struct tspp_pid_filter_table));
+
+	/* disable all filters */
+	val = (2 << TSPP_NUM_CHANNELS) - 1;
+	writel_relaxed(val, pdev->base + TSPP_PS_DISABLE);
+
+	/* TSPP registers */
+	val = readl_relaxed(pdev->base + TSPP_CONTROL);
+	writel_relaxed(val | TSPP_CLK_CONTROL_FORCE_PERF_CNT,
+		pdev->base + TSPP_CONTROL);
+	/* assure tspp performance count clock is set to 0 */
+	wmb();
+	memset_io(pdev->tspp_global_performance, 0,
+		sizeof(struct tspp_global_performance_regs));
+	memset_io(pdev->tspp_pipe_context, 0,
+		sizeof(struct tspp_pipe_context_regs));
+	memset_io(pdev->tspp_pipe_performance, 0,
+		sizeof(struct tspp_pipe_performance_regs));
+	/* assure tspp pipe context registers are set to 0 */
+	wmb();
+	writel_relaxed(val & ~TSPP_CLK_CONTROL_FORCE_PERF_CNT,
+		pdev->base + TSPP_CONTROL);
+	/* assure tspp performance count clock  is reset */
+	wmb();
+
+	val = readl_relaxed(pdev->base + TSPP_CONFIG);
+	val &= ~(TSPP_CONFIG_PS_LEN_ERR_MASK |
+			TSPP_CONFIG_PS_CONT_ERR_UNSP_MASK |
+			TSPP_CONFIG_PS_CONT_ERR_MASK);
+	TSPP_CONFIG_SET_PACKET_LENGTH(val, TSPP_PACKET_LENGTH);
+	writel_relaxed(val, pdev->base + TSPP_CONFIG);
+	writel_relaxed(0x0007ffff, pdev->base + TSPP_IRQ_MASK);
+	writel_relaxed(0x000fffff, pdev->base + TSPP_IRQ_CLEAR);
+	writel_relaxed(0, pdev->base + TSPP_RST);
+	/* assure tspp reset clear */
+	wmb();
+
+	tspp_key_entry = 0;
+
+	return 0;
+}
+
+static void tspp_channel_init(struct tspp_channel *channel,
+	struct tspp_device *pdev)
+{
+	channel->pdev = pdev;
+	channel->data = NULL;
+	channel->read = NULL;
+	channel->waiting = NULL;
+	channel->locked = NULL;
+	channel->id = channel_id++;
+	channel->used = 0;
+	channel->buffer_size = TSPP_MIN_BUFFER_SIZE;
+	channel->max_buffers = TSPP_NUM_BUFFERS;
+	channel->buffer_count = 0;
+	channel->filter_count = 0;
+	channel->int_freq = 1;
+	channel->src = TSPP_SOURCE_NONE;
+	channel->mode = TSPP_MODE_DISABLED;
+	channel->notifier = NULL;
+	channel->notify_data = NULL;
+	channel->expiration_period_ms = 0;
+	channel->memfree = NULL;
+	channel->user_info = NULL;
+	init_waitqueue_head(&channel->in_queue);
+}
+
+static void tspp_set_tsif_mode(struct tspp_channel *channel,
+	enum tspp_tsif_mode mode)
+{
+	int index;
+
+	switch (channel->src) {
+	case TSPP_SOURCE_TSIF0:
+		index = 0;
+		break;
+	case TSPP_SOURCE_TSIF1:
+		index = 1;
+		break;
+	default:
+		pr_warn("tspp: can't set mode for non-tsif source %d",
+			channel->src);
+		return;
+	}
+	channel->pdev->tsif[index].mode = mode;
+}
+
+static void tspp_set_signal_inversion(struct tspp_channel *channel,
+					int clock_inverse, int data_inverse,
+					int sync_inverse, int enable_inverse)
+{
+	int index;
+
+	switch (channel->src) {
+	case TSPP_SOURCE_TSIF0:
+		index = 0;
+		break;
+	case TSPP_SOURCE_TSIF1:
+		index = 1;
+		break;
+	default:
+		return;
+	}
+	channel->pdev->tsif[index].clock_inverse = clock_inverse;
+	channel->pdev->tsif[index].data_inverse = data_inverse;
+	channel->pdev->tsif[index].sync_inverse = sync_inverse;
+	channel->pdev->tsif[index].enable_inverse = enable_inverse;
+}
+
+static int tspp_is_buffer_size_aligned(u32 size, enum tspp_mode mode)
+{
+	u32 alignment;
+
+	switch (mode) {
+	case TSPP_MODE_RAW:
+		/* must be a multiple of 192 */
+		alignment = (TSPP_PACKET_LENGTH + 4);
+		if (size % alignment)
+			return 0;
+		return 1;
+
+	case TSPP_MODE_RAW_NO_SUFFIX:
+		/* must be a multiple of 188 */
+		alignment = TSPP_PACKET_LENGTH;
+		if (size % alignment)
+			return 0;
+		return 1;
+
+	case TSPP_MODE_DISABLED:
+	case TSPP_MODE_PES:
+	default:
+		/* no alignment requirement */
+		return 1;
+	}
+
+}
+
+static u32 tspp_align_buffer_size_by_mode(u32 size, enum tspp_mode mode)
+{
+	u32 new_size;
+	u32 alignment;
+
+	switch (mode) {
+	case TSPP_MODE_RAW:
+		/* must be a multiple of 192 */
+		alignment = (TSPP_PACKET_LENGTH + 4);
+		break;
+
+	case TSPP_MODE_RAW_NO_SUFFIX:
+		/* must be a multiple of 188 */
+		alignment = TSPP_PACKET_LENGTH;
+		break;
+
+	case TSPP_MODE_DISABLED:
+	case TSPP_MODE_PES:
+	default:
+		/* no alignment requirement - give the user what he asks for */
+		alignment = 1;
+		break;
+	}
+	/* align up */
+	new_size = (((size + alignment - 1) / alignment) * alignment);
+	return new_size;
+}
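+
+/*
+ * Illustrative example: in TSPP_MODE_RAW the alignment unit is
+ * TSPP_PACKET_LENGTH + 4 = 192 bytes, so a requested size of 4000 is
+ * rounded up to 4032 (21 * 192); in TSPP_MODE_RAW_NO_SUFFIX the same
+ * request becomes 4136 (22 * 188).
+ */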
+
+static void tspp_destroy_buffers(u32 channel_id, struct tspp_channel *channel)
+{
+	int i;
+	struct tspp_mem_buffer *pbuf, *temp;
+
+	pbuf = channel->data;
+	for (i = 0; i < channel->buffer_count; i++) {
+		if (pbuf->desc.phys_base) {
+			if (channel->memfree) {
+				channel->memfree(channel_id,
+					pbuf->desc.size,
+					pbuf->desc.virt_base,
+					pbuf->desc.phys_base,
+					channel->user_info);
+			} else {
+				if (!channel->dma_pool)
+					dma_free_coherent(
+						&channel->pdev->pdev->dev,
+						pbuf->desc.size,
+						pbuf->desc.virt_base,
+						pbuf->desc.phys_base);
+				else
+					dma_pool_free(channel->dma_pool,
+						pbuf->desc.virt_base,
+						pbuf->desc.phys_base);
+			}
+			pbuf->desc.phys_base = 0;
+		}
+		pbuf->desc.virt_base = 0;
+		pbuf->state = TSPP_BUF_STATE_EMPTY;
+		temp = pbuf;
+		pbuf = pbuf->next;
+		kfree(temp);
+	}
+}
+
+static int msm_tspp_req_irqs(struct tspp_device *device)
+{
+	int rc;
+	int i;
+	int j;
+
+	rc = request_irq(device->tspp_irq, tspp_isr, IRQF_SHARED,
+		dev_name(&device->pdev->dev), device);
+	if (rc) {
+		dev_err(&device->pdev->dev,
+			"failed to request TSPP IRQ %d : %d",
+			device->tspp_irq, rc);
+		return rc;
+	}
+
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+		rc = request_irq(device->tsif[i].tsif_irq,
+			tsif_isr, IRQF_SHARED, dev_name(&device->pdev->dev),
+			&device->tsif[i]);
+		if (rc) {
+			dev_err(&device->pdev->dev,
+				"failed to request TSIF%d IRQ: %d",
+				i, rc);
+			goto failed;
+		}
+	}
+	device->req_irqs = true;
+	return 0;
+
+failed:
+	free_irq(device->tspp_irq, device);
+	for (j = 0; j < i; j++)
+		free_irq(device->tsif[j].tsif_irq, &device->tsif[j]);
+
+	return rc;
+}
+
+static inline void msm_tspp_free_irqs(struct tspp_device *device)
+{
+	int i;
+
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+		if (device->tsif[i].tsif_irq)
+			free_irq(device->tsif[i].tsif_irq,  &device->tsif[i]);
+	}
+
+	if (device->tspp_irq)
+		free_irq(device->tspp_irq, device);
+	device->req_irqs = false;
+}
+
+/*** TSPP API functions ***/
+
+/**
+ * tspp_open_stream - open a TSPP stream for use.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @source: stream source parameters.
+ *
+ * Return  error status
+ *
+ */
+int tspp_open_stream(u32 dev, u32 channel_id,
+			struct tspp_select_source *source)
+{
+	u32 val;
+	int rc;
+	struct tspp_device *pdev;
+	struct tspp_channel *channel;
+	bool req_irqs = false;
+
+	TSPP_DEBUG("tspp_open_stream %i %i %i %i",
+		dev, channel_id, source->source, source->mode);
+
+	if (dev >= TSPP_MAX_DEVICES) {
+		pr_err("tspp: device id out of range");
+		return -ENODEV;
+	}
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_str: can't find device %i", dev);
+		return -ENODEV;
+	}
+	channel = &pdev->channels[channel_id];
+	channel->src = source->source;
+	tspp_set_tsif_mode(channel, source->mode);
+	tspp_set_signal_inversion(channel, source->clk_inverse,
+			source->data_inverse, source->sync_inverse,
+			source->enable_inverse);
+
+	/* Request IRQ resources on first open */
+	if (!pdev->req_irqs && (source->source == TSPP_SOURCE_TSIF0 ||
+		source->source == TSPP_SOURCE_TSIF1)) {
+		rc = msm_tspp_req_irqs(pdev);
+		if (rc) {
+			pr_err("tspp: error requesting irqs\n");
+			return rc;
+		}
+		req_irqs = true;
+	}
+
+	switch (source->source) {
+	case TSPP_SOURCE_TSIF0:
+		if (tspp_config_gpios(pdev, channel->src, 1) != 0) {
+			rc = -EBUSY;
+			pr_err("tspp: error enabling tsif0 GPIOs\n");
+			goto free_irq;
+		}
+		/* make sure TSIF0 is running & enabled */
+		if (tspp_start_tsif(&pdev->tsif[0]) != 0) {
+			rc = -EBUSY;
+			pr_err("tspp: error starting tsif0");
+			goto free_irq;
+		}
+		if (pdev->tsif[0].ref_count == 1) {
+			val = readl_relaxed(pdev->base + TSPP_CONTROL);
+			writel_relaxed(val & ~TSPP_CONTROL_TSP_TSIF0_SRC_DIS,
+				pdev->base + TSPP_CONTROL);
+			/* Assure BAM TS PKT packet processing is enabled */
+			wmb();
+		}
+		break;
+	case TSPP_SOURCE_TSIF1:
+		if (tspp_config_gpios(pdev, channel->src, 1) != 0) {
+			rc = -EBUSY;
+			pr_err("tspp: error enabling tsif1 GPIOs\n");
+			goto free_irq;
+		}
+		/* make sure TSIF1 is running & enabled */
+		if (tspp_start_tsif(&pdev->tsif[1]) != 0) {
+			rc = -EBUSY;
+			pr_err("tspp: error starting tsif1");
+			goto free_irq;
+		}
+		if (pdev->tsif[1].ref_count == 1) {
+			val = readl_relaxed(pdev->base + TSPP_CONTROL);
+			writel_relaxed(val & ~TSPP_CONTROL_TSP_TSIF1_SRC_DIS,
+				pdev->base + TSPP_CONTROL);
+			/* Assure BAM TS PKT packet processing is enabled */
+			wmb();
+		}
+		break;
+	case TSPP_SOURCE_MEM:
+		break;
+	default:
+		pr_err("tspp: channel %i invalid source %i",
+			channel->id, source->source);
+		return -EBUSY;
+	}
+
+	return 0;
+
+free_irq:
+	/* Free irqs only if were requested during opening of this stream */
+	if (req_irqs)
+		msm_tspp_free_irqs(pdev);
+	return rc;
+}
+EXPORT_SYMBOL(tspp_open_stream);
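+
+/*
+ * Usage sketch: a client selecting TSIF0 in mode 2 on device 0, channel 0
+ * (all values below are illustrative, not mandated by the API):
+ *
+ *	struct tspp_select_source src = {
+ *		.source = TSPP_SOURCE_TSIF0,
+ *		.mode = TSPP_TSIF_MODE_2,
+ *		.clk_inverse = 0,
+ *		.data_inverse = 0,
+ *		.sync_inverse = 0,
+ *		.enable_inverse = 0,
+ *	};
+ *
+ *	if (tspp_open_stream(0, 0, &src))
+ *		return -EIO;
+ *	...
+ *	tspp_close_stream(0, 0);
+ */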
+
+/**
+ * tspp_close_stream - close a TSPP stream.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return  error status
+ *
+ */
+int tspp_close_stream(u32 dev, u32 channel_id)
+{
+	u32 val;
+	u32 prev_ref_count = 0;
+	struct tspp_device *pdev;
+	struct tspp_channel *channel;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_cs: can't find device %i", dev);
+		return -EBUSY;
+	}
+	channel = &pdev->channels[channel_id];
+
+	switch (channel->src) {
+	case TSPP_SOURCE_TSIF0:
+		prev_ref_count = pdev->tsif[0].ref_count;
+		tspp_stop_tsif(&pdev->tsif[0]);
+		if (tspp_config_gpios(pdev, channel->src, 0) != 0)
+			pr_err("tspp: error disabling tsif0 GPIOs\n");
+
+		if (prev_ref_count == 1) {
+			val = readl_relaxed(pdev->base + TSPP_CONTROL);
+			writel_relaxed(val | TSPP_CONTROL_TSP_TSIF0_SRC_DIS,
+				pdev->base + TSPP_CONTROL);
+			/* Assure BAM TS PKT packet processing is disabled */
+			wmb();
+		}
+		break;
+	case TSPP_SOURCE_TSIF1:
+		prev_ref_count = pdev->tsif[1].ref_count;
+		tspp_stop_tsif(&pdev->tsif[1]);
+		if (tspp_config_gpios(pdev, channel->src, 0) != 0)
+			pr_err("tspp: error disabling tsif1 GPIOs\n");
+
+		if (prev_ref_count == 1) {
+			val = readl_relaxed(pdev->base + TSPP_CONTROL);
+			writel_relaxed(val | TSPP_CONTROL_TSP_TSIF1_SRC_DIS,
+				pdev->base + TSPP_CONTROL);
+			/* Assure BAM TS PKT packet processing is disabled */
+			wmb();
+		}
+		break;
+	case TSPP_SOURCE_MEM:
+		break;
+	case TSPP_SOURCE_NONE:
+		break;
+	}
+
+	channel->src = TSPP_SOURCE_NONE;
+
+	/* Free requested interrupts to save power */
+	if ((pdev->tsif[0].ref_count + pdev->tsif[1].ref_count) == 0 &&
+		prev_ref_count)
+		msm_tspp_free_irqs(pdev);
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_close_stream);
+
+static int tspp_init_sps_device(struct tspp_device *dev)
+{
+	int ret;
+
+	ret = sps_register_bam_device(&dev->bam_props, &dev->bam_handle);
+	if (ret) {
+		pr_err("tspp: failed to register bam device, err-%d\n", ret);
+		return ret;
+	}
+
+	ret = sps_device_reset(dev->bam_handle);
+	if (ret) {
+		sps_deregister_bam_device(dev->bam_handle);
+		pr_err("tspp: error resetting bam device, err=%d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * tspp_open_channel - open a TSPP channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return  error status
+ *
+ */
+int tspp_open_channel(u32 dev, u32 channel_id)
+{
+	int rc = 0;
+	struct sps_connect *config;
+	struct sps_register_event *event;
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_oc: can't find device %i", dev);
+		return -ENODEV;
+	}
+	channel = &pdev->channels[channel_id];
+
+	if (channel->used) {
+		pr_err("tspp channel already in use");
+		return -EBUSY;
+	}
+
+	config = &channel->config;
+	event = &channel->event;
+
+	/* start the clocks if needed */
+	if (tspp_channels_in_use(pdev) == 0) {
+		rc = tspp_clock_start(pdev);
+		if (rc)
+			return rc;
+
+		if (pdev->bam_handle == SPS_DEV_HANDLE_INVALID) {
+			rc = tspp_init_sps_device(pdev);
+			if (rc) {
+				pr_err("tspp: failed to init sps device, err=%d\n",
+					rc);
+				tspp_clock_stop(pdev);
+				return rc;
+			}
+		}
+
+		__pm_stay_awake(&pdev->ws);
+	}
+
+	/* mark it as used */
+	channel->used = 1;
+
+	/* start the bam  */
+	channel->pipe = sps_alloc_endpoint();
+	if (channel->pipe == 0) {
+		pr_err("tspp: error allocating endpoint");
+		rc = -ENOMEM;
+		goto err_sps_alloc;
+	}
+
+	/* get default configuration */
+	sps_get_config(channel->pipe, config);
+
+	config->source = pdev->bam_handle;
+	config->destination = SPS_DEV_HANDLE_MEM;
+	config->mode = SPS_MODE_SRC;
+	config->options =
+		SPS_O_AUTO_ENABLE | /* connection is auto-enabled */
+		SPS_O_STREAMING | /* streaming mode */
+		SPS_O_DESC_DONE | /* interrupt on end of descriptor */
+		SPS_O_ACK_TRANSFERS | /* must use sps_get_iovec() */
+		SPS_O_HYBRID; /* Read actual descriptors in sps_get_iovec() */
+	config->src_pipe_index = channel->id;
+	config->desc.size =
+		TSPP_SPS_DESCRIPTOR_COUNT * SPS_DESCRIPTOR_SIZE;
+	config->desc.base = dma_alloc_coherent(&pdev->pdev->dev,
+						config->desc.size,
+						&config->desc.phys_base,
+						GFP_KERNEL);
+	if (config->desc.base == 0) {
+		pr_err("tspp: error allocating sps descriptors");
+		rc = -ENOMEM;
+		goto err_desc_alloc;
+	}
+
+	memset(config->desc.base, 0, config->desc.size);
+
+	rc = sps_connect(channel->pipe, config);
+	if (rc) {
+		pr_err("tspp: error connecting bam");
+		goto err_connect;
+	}
+
+	event->mode = SPS_TRIGGER_CALLBACK;
+	event->options = SPS_O_DESC_DONE;
+	event->callback = tspp_sps_complete_cb;
+	event->xfer_done = NULL;
+	event->user = pdev;
+
+	rc = sps_register_event(channel->pipe, event);
+	if (rc) {
+		pr_err("tspp: error registering event");
+		goto err_event;
+	}
+
+	init_timer(&channel->expiration_timer);
+	channel->expiration_timer.function = tspp_expiration_timer;
+	channel->expiration_timer.data = (unsigned long)pdev;
+	channel->expiration_timer.expires = 0xffffffffL;
+
+	rc = pm_runtime_get(&pdev->pdev->dev);
+	if (rc < 0) {
+		dev_err(&pdev->pdev->dev,
+			"Runtime PM: Unable to wake up tspp device, rc = %d",
+			rc);
+	}
+	return 0;
+
+err_event:
+	sps_disconnect(channel->pipe);
+err_connect:
+	dma_free_coherent(&pdev->pdev->dev, config->desc.size,
+		config->desc.base, config->desc.phys_base);
+err_desc_alloc:
+	sps_free_endpoint(channel->pipe);
+err_sps_alloc:
+	channel->used = 0;
+	return rc;
+}
+EXPORT_SYMBOL(tspp_open_channel);
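+
+/*
+ * Usage sketch of one possible call ordering (device 0 and channel 0 are
+ * illustrative): the channel is opened first, then its stream; cleanup
+ * runs in reverse order:
+ *
+ *	if (tspp_open_channel(0, 0))
+ *		return -EIO;
+ *	if (tspp_open_stream(0, 0, &src)) {
+ *		tspp_close_channel(0, 0);
+ *		return -EIO;
+ *	}
+ */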
+
+/**
+ * tspp_close_channel - close a TSPP channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return  error status
+ *
+ */
+int tspp_close_channel(u32 dev, u32 channel_id)
+{
+	int i;
+	int id;
+	int table_idx;
+	u32 val;
+	unsigned long flags;
+
+	struct sps_connect *config;
+	struct tspp_device *pdev;
+	struct tspp_channel *channel;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_close: can't find device %i", dev);
+		return -ENODEV;
+	}
+	channel = &pdev->channels[channel_id];
+
+	/* if the channel is not used, we are done */
+	if (!channel->used)
+		return 0;
+
+	/*
+	 * Need to protect access to used and waiting fields, as they are
+	 * used by the tasklet which is invoked from interrupt context
+	 */
+	spin_lock_irqsave(&pdev->spinlock, flags);
+	channel->used = 0;
+	channel->waiting = NULL;
+	spin_unlock_irqrestore(&pdev->spinlock, flags);
+
+	if (channel->expiration_period_ms)
+		del_timer(&channel->expiration_timer);
+
+	channel->notifier = NULL;
+	channel->notify_data = NULL;
+	channel->expiration_period_ms = 0;
+
+	config = &channel->config;
+	pdev = channel->pdev;
+
+	/* disable pipe (channel) */
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+	writel_relaxed(val | (1 << channel->id), pdev->base + TSPP_PS_DISABLE);
+	/* Assure PS_DISABLE register is set */
+	wmb();
+
+	/* unregister all filters for this channel */
+	for (table_idx = 0; table_idx < TSPP_FILTER_TABLES; table_idx++) {
+		for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+			struct tspp_pid_filter *filter =
+				&pdev->filters[table_idx]->filter[i];
+			id = FILTER_GET_PIPE_NUMBER0(filter);
+			if (id == channel->id) {
+				if (FILTER_HAS_ENCRYPTION(filter))
+					tspp_free_key_entry(
+						FILTER_GET_KEY_NUMBER(filter));
+				filter->config = 0;
+				filter->filter = 0;
+			}
+		}
+	}
+	channel->filter_count = 0;
+
+	/* disconnect the bam */
+	if (sps_disconnect(channel->pipe) != 0)
+		pr_warn("tspp: Error freeing sps endpoint (%i)", channel->id);
+
+	/* destroy the buffers */
+	dma_free_coherent(&pdev->pdev->dev, config->desc.size,
+		config->desc.base, config->desc.phys_base);
+
+	sps_free_endpoint(channel->pipe);
+
+	tspp_destroy_buffers(channel_id, channel);
+
+	dma_pool_destroy(channel->dma_pool);
+	channel->dma_pool = NULL;
+
+	channel->src = TSPP_SOURCE_NONE;
+	channel->mode = TSPP_MODE_DISABLED;
+	channel->memfree = NULL;
+	channel->user_info = NULL;
+	channel->buffer_count = 0;
+	channel->data = NULL;
+	channel->read = NULL;
+	channel->locked = NULL;
+
+	if (tspp_channels_in_use(pdev) == 0) {
+		sps_deregister_bam_device(pdev->bam_handle);
+		pdev->bam_handle = SPS_DEV_HANDLE_INVALID;
+
+		__pm_relax(&pdev->ws);
+		tspp_clock_stop(pdev);
+	}
+
+	pm_runtime_put(&pdev->pdev->dev);
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_close_channel);
+
+/**
+ * tspp_get_ref_clk_counter - return the TSIF clock reference (TCR) counter.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @source: The TSIF source from which the counter should be read
+ * @tcr_counter: the value of TCR counter
+ *
+ * Return  error status
+ *
+ * TCR increments at a rate equal to 27 MHz/256 = 105.47 kHz.
+ * If the source is neither TSIF0 nor TSIF1, a counter value of 0 is returned.
+ */
+int tspp_get_ref_clk_counter(u32 dev, enum tspp_source source, u32 *tcr_counter)
+{
+	struct tspp_device *pdev;
+	struct tspp_tsif_device *tsif_device;
+
+	if (!tcr_counter)
+		return -EINVAL;
+
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_get_ref_clk_counter: can't find device %i\n", dev);
+		return -ENODEV;
+	}
+
+	switch (source) {
+	case TSPP_SOURCE_TSIF0:
+		tsif_device = &pdev->tsif[0];
+		break;
+
+	case TSPP_SOURCE_TSIF1:
+		tsif_device = &pdev->tsif[1];
+		break;
+
+	default:
+		tsif_device = NULL;
+		break;
+	}
+
+	if (tsif_device && tsif_device->ref_count)
+		*tcr_counter = ioread32(tsif_device->base + TSIF_CLK_REF_OFF);
+	else
+		*tcr_counter = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_get_ref_clk_counter);
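+
+/*
+ * Illustrative conversion: since the TCR ticks at 27 MHz / 256, a counter
+ * delta can be turned into microseconds with
+ *
+ *	u64 usec = (u64)tcr_delta * 256 * USEC_PER_SEC / 27000000;
+ *
+ * e.g. a delta of 105469 ticks corresponds to roughly one second.
+ */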
+
+/**
+ * tspp_get_lpass_time_counter - return the LPASS timer counter value.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @source: The TSIF source from which the counter should be read
+ * @lpass_time_counter: the value of the LPASS timer counter
+ *
+ * Return  error status
+ *
+ * If the source is neither TSIF0 nor TSIF1, a counter value of 0 is returned.
+ */
+int tspp_get_lpass_time_counter(u32 dev, enum tspp_source source,
+			u64 *lpass_time_counter)
+{
+	struct tspp_device *pdev;
+	struct tspp_tsif_device *tsif_device;
+
+	if (!lpass_time_counter)
+		return -EINVAL;
+
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_get_lpass_time_counter: can't find device %i\n",
+		       dev);
+		return -ENODEV;
+	}
+
+	switch (source) {
+	case TSPP_SOURCE_TSIF0:
+		tsif_device = &pdev->tsif[0];
+		break;
+
+	case TSPP_SOURCE_TSIF1:
+		tsif_device = &pdev->tsif[1];
+		break;
+
+	default:
+		tsif_device = NULL;
+		break;
+	}
+
+	if (tsif_device && tsif_device->ref_count) {
+		if (avcs_core_query_timer(lpass_time_counter) < 0) {
+			pr_err("tspp_get_lpass_time_counter: read error\n");
+			*lpass_time_counter = 0;
+			return -ENETRESET;
+		}
+	} else {
+		*lpass_time_counter = 0;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_get_lpass_time_counter);
+
+/**
+ * tspp_get_tts_source - Return the TTS source value.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @tts_source: updated TTS source type
+ *
+ * Return  error status
+ *
+ */
+int tspp_get_tts_source(u32 dev, int *tts_source)
+{
+	struct tspp_device *pdev;
+
+	if (tts_source == NULL)
+		return -EINVAL;
+
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_get_tts_source: can't find device %i\n",
+		       dev);
+		return -ENODEV;
+	}
+
+	*tts_source = pdev->tts_source;
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_get_tts_source);
+
+/**
+ * tspp_add_filter - add a TSPP filter to a channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @filter: TSPP filter parameters
+ *
+ * Return  error status
+ *
+ */
+int tspp_add_filter(u32 dev, u32 channel_id,
+	struct tspp_filter *filter)
+{
+	int i, rc;
+	int other_channel;
+	int entry;
+	u32 val, pid, enabled;
+	struct tspp_device *pdev;
+	struct tspp_pid_filter p;
+	struct tspp_channel *channel;
+
+	TSPP_DEBUG("tspp: add filter");
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_add: can't find device %i", dev);
+		return -ENODEV;
+	}
+
+	channel = &pdev->channels[channel_id];
+
+	if (filter->source > TSPP_SOURCE_MEM) {
+		pr_err("tspp invalid source");
+		return -ENOSR;
+	}
+
+	if (filter->priority >= TSPP_NUM_PRIORITIES) {
+		pr_err("tspp invalid filter priority");
+		return -ENOSR;
+	}
+
+	channel->mode = filter->mode;
+	/*
+	 * if buffers are already allocated, verify they fulfil
+	 * the alignment requirements.
+	 */
+	if ((channel->buffer_count > 0) &&
+	   (!tspp_is_buffer_size_aligned(channel->buffer_size, channel->mode)))
+		pr_warn("tspp: buffers allocated with incorrect alignment\n");
+
+	if (filter->mode == TSPP_MODE_PES) {
+		for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+			struct tspp_pid_filter *tspp_filter =
+				&pdev->filters[channel->src]->filter[i];
+			pid = FILTER_GET_PIPE_PID((tspp_filter));
+			enabled = FILTER_GET_PIPE_PROCESS0(tspp_filter);
+			if (enabled && (pid == filter->pid)) {
+				other_channel =
+					FILTER_GET_PIPE_NUMBER0(tspp_filter);
+				pr_err("tspp: pid 0x%x already in use by channel %i",
+					filter->pid, other_channel);
+				return -EBADSLT;
+			}
+		}
+	}
+
+	/* make sure this priority is not already in use */
+	enabled = FILTER_GET_PIPE_PROCESS0(
+		(&(pdev->filters[channel->src]->filter[filter->priority])));
+	if (enabled) {
+		pr_err("tspp: filter priority %i source %i is already enabled\n",
+			filter->priority, channel->src);
+		return -ENOSR;
+	}
+
+	if (channel->mode == TSPP_MODE_PES) {
+		/*
+		 * if we are already processing in PES mode, disable pipe
+		 * (channel) and filter to be updated
+		 */
+		val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+		writel_relaxed(val | (1 << channel->id),
+			pdev->base + TSPP_PS_DISABLE);
+		/* Assure PS_DISABLE register is set */
+		wmb();
+	}
+
+	/* update entry */
+	p.filter = 0;
+	p.config = FILTER_TRANS_END_DISABLE;
+	FILTER_SET_PIPE_PROCESS0((&p), filter->mode);
+	FILTER_SET_PIPE_PID((&p), filter->pid);
+	FILTER_SET_PID_MASK((&p), filter->mask);
+	FILTER_SET_PIPE_NUMBER0((&p), channel->id);
+	FILTER_SET_PIPE_PROCESS1((&p), TSPP_MODE_DISABLED);
+	if (filter->decrypt) {
+		entry = tspp_get_key_entry();
+		if (entry == -1) {
+			pr_err("tspp: no more keys available!");
+		} else {
+			p.config |= FILTER_DECRYPT;
+			FILTER_SET_KEY_NUMBER((&p), entry);
+		}
+	}
+
+	pdev->filters[channel->src]->
+		filter[filter->priority].config = p.config;
+	pdev->filters[channel->src]->
+		filter[filter->priority].filter = p.filter;
+
+	/*
+	 * allocate buffers if needed (i.e. if the user has not already called
+	 * tspp_allocate_buffers() explicitly).
+	 */
+	if (channel->buffer_count == 0) {
+		channel->buffer_size =
+		tspp_align_buffer_size_by_mode(channel->buffer_size,
+							channel->mode);
+		rc = tspp_allocate_buffers(dev, channel->id,
+					channel->max_buffers,
+					channel->buffer_size,
+					channel->int_freq, NULL, NULL, NULL);
+		if (rc != 0) {
+			pr_err("tspp: tspp_allocate_buffers failed\n");
+			return rc;
+		}
+	}
+
+	/* reenable pipe */
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+	writel_relaxed(val & ~(1 << channel->id), pdev->base + TSPP_PS_DISABLE);
+	/* Assure PS_DISABLE register is reset */
+	wmb();
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+
+	channel->filter_count++;
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_add_filter);
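+
+/*
+ * Usage sketch: install a raw-mode filter for PID 0x100 at priority 0 on
+ * device 0, channel 0, matching all 13 PID bits (values are illustrative):
+ *
+ *	struct tspp_filter flt = {
+ *		.pid = 0x100,
+ *		.mask = 0x1fff,
+ *		.mode = TSPP_MODE_RAW,
+ *		.priority = 0,
+ *		.decrypt = 0,
+ *		.source = TSPP_SOURCE_TSIF0,
+ *	};
+ *
+ *	if (tspp_add_filter(0, 0, &flt))
+ *		pr_err("tspp client: tspp_add_filter failed\n");
+ */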
+
+/**
+ * tspp_remove_filter - remove a TSPP filter from a channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @filter: TSPP filter parameters
+ *
+ * Return  error status
+ *
+ */
+int tspp_remove_filter(u32 dev, u32 channel_id,
+	struct tspp_filter *filter)
+{
+	int entry;
+	u32 val;
+	struct tspp_device *pdev;
+	int src;
+	struct tspp_pid_filter *tspp_filter;
+	struct tspp_channel *channel;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	if (!filter) {
+		pr_err("tspp: NULL filter pointer");
+		return -EINVAL;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_remove: can't find device %i", dev);
+		return -ENODEV;
+	}
+	if (filter->priority >= TSPP_NUM_PRIORITIES) {
+		pr_err("tspp invalid filter priority");
+		return -ENOSR;
+	}
+	channel = &pdev->channels[channel_id];
+
+	src = channel->src;
+	if ((src == TSPP_SOURCE_TSIF0) || (src == TSPP_SOURCE_TSIF1))
+		tspp_filter = &(pdev->filters[src]->filter[filter->priority]);
+	else {
+		pr_err("tspp_remove: wrong source type %d", src);
+		return -EINVAL;
+	}
+
+
+	/* disable pipe (channel) */
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+	writel_relaxed(val | (1 << channel->id), pdev->base + TSPP_PS_DISABLE);
+	/* Assure PS_DISABLE register is set */
+	wmb();
+
+	/* update data keys */
+	if (tspp_filter->config & FILTER_DECRYPT) {
+		entry = FILTER_GET_KEY_NUMBER(tspp_filter);
+		tspp_free_key_entry(entry);
+	}
+
+	/* update pid table */
+	tspp_filter->config = 0;
+	tspp_filter->filter = 0;
+
+	channel->filter_count--;
+
+	/* reenable pipe */
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+	writel_relaxed(val & ~(1 << channel->id),
+		pdev->base + TSPP_PS_DISABLE);
+	/* Assure PS_DISABLE register is reset */
+	wmb();
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_remove_filter);
+
+/**
+ * tspp_set_key - set TSPP key in key table.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @key: TSPP key parameters
+ *
+ * Return  error status
+ *
+ */
+int tspp_set_key(u32 dev, u32 channel_id, struct tspp_key *key)
+{
+	int i;
+	int id;
+	int key_index;
+	int data;
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_set: can't find device %i", dev);
+		return -ENODEV;
+	}
+	channel = &pdev->channels[channel_id];
+
+	/* read the key index used by this channel */
+	for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+		struct tspp_pid_filter *tspp_filter =
+			&(pdev->filters[channel->src]->filter[i]);
+		id = FILTER_GET_PIPE_NUMBER0(tspp_filter);
+		if (id == channel->id) {
+			if (FILTER_HAS_ENCRYPTION(tspp_filter)) {
+				key_index = FILTER_GET_KEY_NUMBER(tspp_filter);
+				break;
+			}
+		}
+	}
+	if (i == TSPP_NUM_PRIORITIES) {
+		pr_err("tspp: no encryption on this channel");
+		return -ENOKEY;
+	}
+
+	if (key->parity == TSPP_KEY_PARITY_EVEN) {
+		pdev->tspp_key_table->entry[key_index].even_lsb = key->lsb;
+		pdev->tspp_key_table->entry[key_index].even_msb = key->msb;
+	} else {
+		pdev->tspp_key_table->entry[key_index].odd_lsb = key->lsb;
+		pdev->tspp_key_table->entry[key_index].odd_msb = key->msb;
+	}
+	data = readl_relaxed(channel->pdev->base + TSPP_KEY_VALID);
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_set_key);
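+
+/*
+ * Usage sketch: after adding a filter with .decrypt set, the client
+ * programs the even key and then repeats the call with the odd parity
+ * value (key words below are placeholders):
+ *
+ *	struct tspp_key key = {
+ *		.parity = TSPP_KEY_PARITY_EVEN,
+ *		.lsb = 0x01234567,
+ *		.msb = 0x89abcdef,
+ *	};
+ *
+ *	tspp_set_key(0, 0, &key);
+ */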
+
+/**
+ * tspp_register_notification - register TSPP channel notification function.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @notify: notification function
+ * @userdata: user data to pass to notification function
+ * @timer_ms: notification for partially filled buffers
+ *
+ * Return  error status
+ *
+ */
+int tspp_register_notification(u32 dev, u32 channel_id,
+	tspp_notifier *notify, void *userdata, u32 timer_ms)
+{
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_reg: can't find device %i", dev);
+		return -ENODEV;
+	}
+	channel = &pdev->channels[channel_id];
+	channel->notifier = notify;
+	channel->notify_data = userdata;
+	channel->expiration_period_ms = timer_ms;
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_register_notification);
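+
+/*
+ * Usage sketch: the notifier is invoked from the completion tasklet with
+ * the device spinlock held, so it must not sleep; a typical client just
+ * wakes its own reader.  demux_notify() and demux_ctx are hypothetical
+ * client names, and the exact tspp_notifier prototype is defined in the
+ * tspp header:
+ *
+ *	static void demux_notify(int channel_id, void *user)
+ *	{
+ *		struct demux_ctx *ctx = user;
+ *
+ *		wake_up_interruptible(&ctx->wait);
+ *	}
+ *
+ *	tspp_register_notification(0, 0, demux_notify, ctx, 100);
+ */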
+
+/**
+ * tspp_unregister_notification - unregister TSPP channel notification function.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return  error status
+ *
+ */
+int tspp_unregister_notification(u32 dev, u32 channel_id)
+{
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_unreg: can't find device %i", dev);
+		return -ENODEV;
+	}
+	channel = &pdev->channels[channel_id];
+	channel->notifier = NULL;
+	channel->notify_data = NULL;
+	return 0;
+}
+EXPORT_SYMBOL(tspp_unregister_notification);
+
+/**
+ * tspp_get_buffer - get TSPP data buffer.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return  error status
+ *
+ */
+const struct tspp_data_descriptor *tspp_get_buffer(u32 dev, u32 channel_id)
+{
+	struct tspp_mem_buffer *buffer;
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+	unsigned long flags;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return NULL;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_get: can't find device %i", dev);
+		return NULL;
+	}
+
+	spin_lock_irqsave(&pdev->spinlock, flags);
+
+	channel = &pdev->channels[channel_id];
+
+	if (!channel->read) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
+		pr_warn("tspp: no buffer to get on channel %i!",
+			channel->id);
+		return NULL;
+	}
+
+	buffer = channel->read;
+	/* see if we have any buffers ready to read */
+	if (buffer->state != TSPP_BUF_STATE_DATA) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
+		return NULL;
+	}
+
+	/* mark the buffer as busy */
+	buffer->state = TSPP_BUF_STATE_LOCKED;
+
+	/* increment the pointer along the list */
+	channel->read = channel->read->next;
+
+	spin_unlock_irqrestore(&pdev->spinlock, flags);
+
+	return &buffer->desc;
+}
+EXPORT_SYMBOL(tspp_get_buffer);
+
+/**
+ * tspp_release_buffer - release TSPP data buffer back to TSPP.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @descriptor_id: buffer descriptor ID
+ *
+ * Return  error status
+ *
+ */
+int tspp_release_buffer(u32 dev, u32 channel_id, u32 descriptor_id)
+{
+	int i, found = 0;
+	struct tspp_mem_buffer *buffer;
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+	unsigned long flags;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp: can't find device %i", dev);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&pdev->spinlock, flags);
+
+	channel = &pdev->channels[channel_id];
+
+	if (descriptor_id >= channel->buffer_count)
+		pr_warn("tspp: desc id looks weird 0x%08x", descriptor_id);
+
+	/* find the correct descriptor */
+	buffer = channel->locked;
+	for (i = 0; i < channel->buffer_count; i++) {
+		if (buffer->desc.id == descriptor_id) {
+			found = 1;
+			break;
+		}
+		buffer = buffer->next;
+	}
+	channel->locked = channel->locked->next;
+
+	if (!found) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
+		pr_err("tspp: cant find desc %i", descriptor_id);
+		return -EINVAL;
+	}
+
+	/* make sure the buffer is in the expected state */
+	if (buffer->state != TSPP_BUF_STATE_LOCKED) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
+		pr_err("tspp: buffer %i not locked", descriptor_id);
+		return -EINVAL;
+	}
+	/* unlock the buffer and requeue it */
+	buffer->state = TSPP_BUF_STATE_WAITING;
+
+	if (tspp_queue_buffer(channel, buffer))
+		pr_warn("tspp: can't requeue buffer");
+
+	spin_unlock_irqrestore(&pdev->spinlock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_release_buffer);
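+
+/*
+ * Usage sketch of the consume/release cycle (device 0 and channel 0 are
+ * illustrative; process() stands for a hypothetical consumer of the data):
+ *
+ *	const struct tspp_data_descriptor *desc;
+ *
+ *	while ((desc = tspp_get_buffer(0, 0)) != NULL) {
+ *		process(desc->virt_base, desc->size);
+ *		tspp_release_buffer(0, 0, desc->id);
+ *	}
+ */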
+
+/**
+ * tspp_allocate_buffers - allocate TSPP data buffers.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @count: number of buffers to allocate
+ * @size: size of each buffer to allocate
+ * @int_freq: interrupt frequency
+ * @alloc: user defined memory allocator function. Pass NULL for default.
+ * @memfree: user defined memory free function. Pass NULL for default.
+ * @user: user data to pass to the memory allocator/free function
+ *
+ * Return  error status
+ *
+ * The user can optionally call this function explicitly to allocate the TSPP
+ * data buffers. Alternatively, if the user did not call this function, it
+ * is called implicitly by tspp_add_filter().
+ */
+int tspp_allocate_buffers(u32 dev, u32 channel_id, u32 count, u32 size,
+			u32 int_freq, tspp_allocator *alloc,
+			tspp_memfree *memfree, void *user)
+{
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+	struct tspp_mem_buffer *last = NULL;
+
+	TSPP_DEBUG("tspp_allocate_buffers");
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("%s: channel id out of range", __func__);
+		return -ECHRNG;
+	}
+
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("%s: can't find device %i", __func__, dev);
+		return -ENODEV;
+	}
+
+	if (count < MIN_ACCEPTABLE_BUFFER_COUNT) {
+		pr_err("%s: tspp requires a minimum of %i buffers\n",
+			__func__, MIN_ACCEPTABLE_BUFFER_COUNT);
+		return -EINVAL;
+	}
+
+	if (count > TSPP_NUM_BUFFERS) {
+		pr_err("%s: tspp requires a maximum of %i buffers\n",
+			__func__, TSPP_NUM_BUFFERS);
+		return -EINVAL;
+	}
+
+	channel = &pdev->channels[channel_id];
+
+	/* allow buffer allocation only if there was no previous buffer
+	 * allocation for this channel.
+	 */
+	if (channel->buffer_count > 0) {
+		pr_err("%s: buffers already allocated for channel %u",
+			__func__, channel_id);
+		return -EINVAL;
+	}
+
+	channel->max_buffers = count;
+
+	/* set up interrupt frequency */
+	if (int_freq > channel->max_buffers) {
+		int_freq = channel->max_buffers;
+		pr_warn("%s: setting interrupt frequency to %u\n",
+			__func__, int_freq);
+	}
+	channel->int_freq = int_freq;
+	/*
+	 * it is the responsibility of the caller to tspp_allocate_buffers(),
+	 * whether it's the user or the driver, to make sure the size parameter
+	 * is compatible to the channel mode.
+	 */
+	channel->buffer_size = size;
+
+	/* save user defined memory free function for later use */
+	channel->memfree = memfree;
+	channel->user_info = user;
+
+	/*
+	 * For small buffers, create a DMA pool so that memory
+	 * is not wasted through dma_alloc_coherent.
+	 */
+	if (TSPP_USE_DMA_POOL(channel->buffer_size)) {
+		channel->dma_pool = dma_pool_create("tspp",
+			&pdev->pdev->dev, channel->buffer_size, 0, 0);
+		if (!channel->dma_pool) {
+			pr_err("%s: Can't allocate memory pool\n", __func__);
+			return -ENOMEM;
+		}
+	} else {
+		channel->dma_pool = NULL;
+	}
+
+
+	for (channel->buffer_count = 0;
+		channel->buffer_count < channel->max_buffers;
+		channel->buffer_count++) {
+
+		/* allocate the descriptor */
+		struct tspp_mem_buffer *desc = (struct tspp_mem_buffer *)
+			kmalloc(sizeof(struct tspp_mem_buffer), GFP_KERNEL);
+		if (!desc) {
+			pr_warn("%s: Can't allocate desc %i",
+				__func__, channel->buffer_count);
+			break;
+		}
+
+		desc->desc.id = channel->buffer_count;
+		/* allocate the buffer */
+		if (tspp_alloc_buffer(channel_id, &desc->desc,
+			channel->buffer_size, channel->dma_pool,
+			alloc, user) != 0) {
+			kfree(desc);
+			pr_warn("%s: Can't allocate buffer %i",
+				__func__, channel->buffer_count);
+			break;
+		}
+
+		/* add the descriptor to the list */
+		desc->filled = 0;
+		desc->read_index = 0;
+		if (!channel->data) {
+			channel->data = desc;
+			desc->next = channel->data;
+		} else {
+			if (last != NULL)
+				last->next = desc;
+		}
+		last = desc;
+		desc->next = channel->data;
+
+		/* prepare the sps descriptor */
+		desc->sps.phys_base = desc->desc.phys_base;
+		desc->sps.base = desc->desc.virt_base;
+		desc->sps.size = desc->desc.size;
+
+		/* start the transfer */
+		if (tspp_queue_buffer(channel, desc))
+			pr_err("%s: can't queue buffer %i",
+				__func__, desc->desc.id);
+	}
+
+	if (channel->buffer_count < channel->max_buffers) {
+		/*
+		 * we failed to allocate the requested number of buffers.
+		 * we don't allow a partial success, so need to clean up here.
+		 */
+		tspp_destroy_buffers(channel_id, channel);
+		channel->buffer_count = 0;
+
+		dma_pool_destroy(channel->dma_pool);
+		channel->dma_pool = NULL;
+		return -ENOMEM;
+	}
+
+	channel->waiting = channel->data;
+	channel->read = channel->data;
+	channel->locked = channel->data;
+
+	/* Now that buffers are scheduled to HW, kick data expiration timer */
+	if (channel->expiration_period_ms)
+		mod_timer(&channel->expiration_timer,
+			jiffies +
+			MSEC_TO_JIFFIES(
+				channel->expiration_period_ms));
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_allocate_buffers);
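+
+/*
+ * Usage sketch: a client that manages its own DMA memory can pass
+ * allocator and free callbacks; my_alloc(), my_free() and my_ctx below are
+ * hypothetical, and the buffer count, size and interrupt frequency are
+ * illustrative only:
+ *
+ *	rc = tspp_allocate_buffers(0, 0, 16, 32 * 1024, 4,
+ *				   my_alloc, my_free, my_ctx);
+ */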
+
+/*** debugfs ***/
+static int debugfs_iomem_x32_set(void *data, u64 val)
+{
+	int rc;
+	int clock_started = 0;
+	struct tspp_device *pdev;
+
+	pdev = tspp_find_by_id(0);
+	if (!pdev) {
+		pr_err("%s: can't find device 0\n", __func__);
+		return 0;
+	}
+
+	if (tspp_channels_in_use(pdev) == 0) {
+		rc = tspp_clock_start(pdev);
+		if (rc) {
+			pr_err("%s: tspp_clock_start failed %d\n",
+				__func__, rc);
+			return 0;
+		}
+		clock_started = 1;
+	}
+
+	writel_relaxed(val, data);
+	/* Assure register write */
+	wmb();
+
+	if (clock_started)
+		tspp_clock_stop(pdev);
+	return 0;
+}
+
+static int debugfs_iomem_x32_get(void *data, u64 *val)
+{
+	int rc;
+	int clock_started = 0;
+	struct tspp_device *pdev;
+
+	pdev = tspp_find_by_id(0);
+	if (!pdev) {
+		pr_err("%s: can't find device 0\n", __func__);
+		*val = 0;
+		return 0;
+	}
+
+	if (tspp_channels_in_use(pdev) == 0) {
+		rc = tspp_clock_start(pdev);
+		if (rc) {
+			pr_err("%s: tspp_clock_start failed %d\n",
+				__func__, rc);
+			*val = 0;
+			return 0;
+		}
+		clock_started = 1;
+	}
+
+	*val = readl_relaxed(data);
+
+	if (clock_started)
+		tspp_clock_stop(pdev);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
+			debugfs_iomem_x32_set, "0x%08llx");
+
+static void tsif_debugfs_init(struct tspp_tsif_device *tsif_device,
+	int instance)
+{
+	char name[10];
+
+	snprintf(name, sizeof(name), "tsif%i", instance);
+	tsif_device->dent_tsif = debugfs_create_dir(name, NULL);
+	if (tsif_device->dent_tsif) {
+		int i;
+		void __iomem *base = tsif_device->base;
+
+		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
+			tsif_device->debugfs_tsif_regs[i] =
+			   debugfs_create_file(
+				debugfs_tsif_regs[i].name,
+				debugfs_tsif_regs[i].mode,
+				tsif_device->dent_tsif,
+				base + debugfs_tsif_regs[i].offset,
+				&fops_iomem_x32);
+		}
+
+		debugfs_create_u32(
+			"stat_rx_chunks", 0664,
+			tsif_device->dent_tsif,
+			&tsif_device->stat_rx);
+
+		debugfs_create_u32(
+			"stat_overflow", 0664,
+			tsif_device->dent_tsif,
+			&tsif_device->stat_overflow);
+
+		debugfs_create_u32(
+			"stat_lost_sync", 0664,
+			tsif_device->dent_tsif,
+			&tsif_device->stat_lost_sync);
+
+		debugfs_create_u32(
+			"stat_timeout", 0664,
+			tsif_device->dent_tsif,
+			&tsif_device->stat_timeout);
+	}
+}
+
+static void tsif_debugfs_exit(struct tspp_tsif_device *tsif_device)
+{
+	int i;
+
+	debugfs_remove_recursive(tsif_device->dent_tsif);
+	tsif_device->dent_tsif = NULL;
+	for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++)
+		tsif_device->debugfs_tsif_regs[i] = NULL;
+}
+
+static void tspp_debugfs_init(struct tspp_device *device, int instance)
+{
+	char name[10];
+
+	snprintf(name, sizeof(name), "tspp%i", instance);
+	device->dent = debugfs_create_dir(name, NULL);
+	if (device->dent) {
+		int i;
+		void __iomem *base = device->base;
+
+		for (i = 0; i < ARRAY_SIZE(debugfs_tspp_regs); i++)
+			device->debugfs_regs[i] =
+			   debugfs_create_file(
+				debugfs_tspp_regs[i].name,
+				debugfs_tspp_regs[i].mode,
+				device->dent,
+				base + debugfs_tspp_regs[i].offset,
+				&fops_iomem_x32);
+	}
+}
+
+static void tspp_debugfs_exit(struct tspp_device *device)
+{
+	int i;
+
+	debugfs_remove_recursive(device->dent);
+	for (i = 0; i < ARRAY_SIZE(debugfs_tspp_regs); i++)
+		device->debugfs_regs[i] = NULL;
+}
+
+static int msm_tspp_map_irqs(struct platform_device *pdev,
+				struct tspp_device *device)
+{
+	int rc;
+
+	/* get IRQ numbers from platform information */
+
+	/* map TSPP IRQ */
+	rc = platform_get_irq_byname(pdev, "TSIF_TSPP_IRQ");
+	if (rc > 0) {
+		device->tspp_irq = rc;
+	} else {
+		dev_err(&pdev->dev, "failed to get TSPP IRQ");
+		return -EINVAL;
+	}
+
+	/* map TSIF IRQs */
+	rc = platform_get_irq_byname(pdev, "TSIF0_IRQ");
+	if (rc > 0) {
+		device->tsif[0].tsif_irq = rc;
+	} else {
+		dev_err(&pdev->dev, "failed to get TSIF0 IRQ");
+		return -EINVAL;
+	}
+
+	rc = platform_get_irq_byname(pdev, "TSIF1_IRQ");
+	if (rc > 0) {
+		device->tsif[1].tsif_irq = rc;
+	} else {
+		dev_err(&pdev->dev, "failed to get TSIF1 IRQ");
+		return -EINVAL;
+	}
+
+	/* map BAM IRQ */
+	rc = platform_get_irq_byname(pdev, "TSIF_BAM_IRQ");
+	if (rc > 0) {
+		device->bam_irq = rc;
+	} else {
+		dev_err(&pdev->dev, "failed to get TSPP BAM IRQ");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int msm_tspp_probe(struct platform_device *pdev)
+{
+	int rc = -ENODEV;
+	u32 version;
+	u32 i;
+	struct tspp_device *device;
+	struct resource *mem_tsif0;
+	struct resource *mem_tsif1;
+	struct resource *mem_tspp;
+	struct resource *mem_bam;
+	struct msm_bus_scale_pdata *tspp_bus_pdata = NULL;
+	unsigned long rate;
+
+	if (pdev->dev.of_node) {
+		/* ID is always 0 since there is only 1 instance of TSPP */
+		pdev->id = 0;
+		tspp_bus_pdata = msm_bus_cl_get_pdata(pdev);
+	} else {
+		/* must have device tree data */
+		pr_err("tspp: Device tree data not available\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* OK, we will use this device */
+	device = kzalloc(sizeof(struct tspp_device), GFP_KERNEL);
+	if (!device) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* set up references */
+	device->pdev = pdev;
+	platform_set_drvdata(pdev, device);
+
+	/* setup pin control */
+	rc = tspp_get_pinctrl(device);
+	if (rc) {
+		pr_err("tspp: failed to get pin control data, rc=%d\n", rc);
+		goto err_pinctrl;
+	}
+
+	/* register bus client */
+	if (tspp_bus_pdata) {
+		device->tsif_bus_client =
+			msm_bus_scale_register_client(tspp_bus_pdata);
+		if (!device->tsif_bus_client)
+			pr_err("tspp: Unable to register bus client\n");
+	} else {
+		device->tsif_bus_client = 0;
+	}
+
+	/* map regulators */
+	device->tsif_vreg = devm_regulator_get_optional(&pdev->dev, "vdd_cx");
+	if (IS_ERR_OR_NULL(device->tsif_vreg)) {
+		rc = PTR_ERR(device->tsif_vreg);
+		device->tsif_vreg = NULL;
+		if (rc == -ENODEV) {
+			pr_notice("%s: vdd_cx regulator will not be used\n",
+				__func__);
+		} else {
+			dev_err(&pdev->dev,
+				"failed to get CX regulator, err=%d\n", rc);
+			goto err_regulator;
+		}
+	} else {
+		/* Set an initial voltage and enable the regulator */
+		rc = regulator_set_voltage(device->tsif_vreg,
+					RPMH_REGULATOR_LEVEL_OFF,
+					RPMH_REGULATOR_LEVEL_MAX);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to set CX voltage.\n");
+			goto err_regulator;
+		}
+
+		rc = regulator_enable(device->tsif_vreg);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to enable CX regulator.\n");
+			goto err_regulator;
+		}
+	}
+
+	/* map clocks */
+	device->tsif_pclk = clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR_OR_NULL(device->tsif_pclk)) {
+		rc = PTR_ERR(device->tsif_pclk);
+		device->tsif_pclk = NULL;
+		goto err_pclock;
+	}
+
+	device->tsif_ref_clk = clk_get(&pdev->dev, "ref_clk");
+	if (IS_ERR_OR_NULL(device->tsif_ref_clk)) {
+		rc = PTR_ERR(device->tsif_ref_clk);
+		device->tsif_ref_clk = NULL;
+		goto err_refclock;
+	}
+	rate = clk_round_rate(device->tsif_ref_clk, 1);
+	rc = clk_set_rate(device->tsif_ref_clk, rate);
+	if (rc)
+		goto err_res_tsif0;
+
+	/* map I/O memory */
+	mem_tsif0 = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "MSM_TSIF0_PHYS");
+	if (!mem_tsif0) {
+		pr_err("tspp: Missing tsif0 MEM resource\n");
+		rc = -ENXIO;
+		goto err_res_tsif0;
+	}
+	device->tsif[0].base = ioremap(mem_tsif0->start,
+		resource_size(mem_tsif0));
+	if (!device->tsif[0].base) {
+		pr_err("tspp: ioremap failed\n");
+		rc = -ENOMEM;
+		goto err_map_tsif0;
+	}
+
+	mem_tsif1 = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "MSM_TSIF1_PHYS");
+	if (!mem_tsif1) {
+		dev_err(&pdev->dev, "Missing tsif1 MEM resource\n");
+		rc = -ENXIO;
+		goto err_res_tsif1;
+	}
+	device->tsif[1].base = ioremap(mem_tsif1->start,
+		resource_size(mem_tsif1));
+	if (!device->tsif[1].base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		rc = -ENOMEM;
+		goto err_map_tsif1;
+	}
+
+	mem_tspp = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "MSM_TSPP_PHYS");
+	if (!mem_tspp) {
+		dev_err(&pdev->dev, "Missing MEM resource");
+		rc = -ENXIO;
+		goto err_res_dev;
+	}
+	device->base = ioremap(mem_tspp->start, resource_size(mem_tspp));
+	device->base = ioremap(mem_tspp->start, resource_size(mem_tspp));
+	if (!device->base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		rc = -ENOMEM;
+		goto err_map_dev;
+	}
+
+	mem_bam = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "MSM_TSPP_BAM_PHYS");
+	if (!mem_bam) {
+		pr_err("tspp: Missing bam MEM resource");
+		rc = -ENXIO;
+		goto err_res_bam;
+	}
+	memset(&device->bam_props, 0, sizeof(device->bam_props));
+	device->bam_props.phys_addr = mem_bam->start;
+	device->bam_props.virt_addr = ioremap(mem_bam->start,
+		resource_size(mem_bam));
+	if (!device->bam_props.virt_addr) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		rc = -ENOMEM;
+		goto err_map_bam;
+	}
+
+	rc = msm_tspp_map_irqs(pdev, device);
+	if (rc)
+		goto err_irq;
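+	/* only the IRQ numbers are resolved here; the IRQs themselves are requested later, tracked via req_irqs */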
+	device->req_irqs = false;
+
+	/* Check whether AV timer time stamps are enabled */
+	if (!of_property_read_u32(pdev->dev.of_node, "qcom,lpass-timer-tts",
+				  &device->tts_source)) {
+		if (device->tts_source == 1)
+			device->tts_source = TSIF_TTS_LPASS_TIMER;
+		else
+			device->tts_source = TSIF_TTS_TCR;
+	} else {
+		device->tts_source = TSIF_TTS_TCR;
+	}
+
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		device->tsif[i].tts_source = device->tts_source;
+
+	/* power management */
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	tspp_debugfs_init(device, 0);
+
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		tsif_debugfs_init(&device->tsif[i], i);
+
+	wakeup_source_init(&device->ws, dev_name(&pdev->dev));
+
+	/* set up pointers to ram-based 'registers' */
+	device->filters[0] = device->base + TSPP_PID_FILTER_TABLE0;
+	device->filters[1] = device->base + TSPP_PID_FILTER_TABLE1;
+	device->filters[2] = device->base + TSPP_PID_FILTER_TABLE2;
+	device->tspp_key_table = device->base + TSPP_DATA_KEY;
+	device->tspp_global_performance =
+		device->base + TSPP_GLOBAL_PERFORMANCE;
+	device->tspp_pipe_context =
+		device->base + TSPP_PIPE_CONTEXT;
+	device->tspp_pipe_performance =
+		device->base + TSPP_PIPE_PERFORMANCE;
+
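+	/* fill in the SPS BAM properties for this device */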
+	device->bam_props.summing_threshold = 0x10;
+	device->bam_props.irq = device->bam_irq;
+	device->bam_props.manage = SPS_BAM_MGR_LOCAL;
+	/* add SPS BAM log level */
+	device->bam_props.ipc_loglevel = TSPP_BAM_DEFAULT_IPC_LOGLVL;
+
+	rc = tspp_clock_start(device);
+	if (rc) {
+		dev_err(&pdev->dev, "Can't start clocks\n");
+		goto err_clock;
+	}
+
+	device->bam_handle = SPS_DEV_HANDLE_INVALID;
+
+	spin_lock_init(&device->spinlock);
+	mutex_init(&device->mutex);
+	tasklet_init(&device->tlet, tspp_sps_complete_tlet,
+			(unsigned long)device);
+
+	/* initialize everything to a known state */
+	tspp_global_reset(device);
+
+	version = readl_relaxed(device->base + TSPP_VERSION);
+	/*
+	 * TSPP version can be bits [7:0] or alternatively,
+	 * TSPP major version is bits [31:28].
+	 */
+	if ((version != 0x1) && (((version >> 28) & 0xF) != 0x1))
+		pr_warn("tspp: unrecognized hw version=0x%x\n", version);
+
+	/* initialize the channels */
+	for (i = 0; i < TSPP_NUM_CHANNELS; i++)
+		tspp_channel_init(&(device->channels[i]), device);
+
+	/* stop the clocks for power savings */
+	tspp_clock_stop(device);
+
+	/* everything is ok, so add the device to the list */
+	list_add_tail(&(device->devlist), &tspp_devices);
+	return 0;
+
+err_clock:
+	tspp_debugfs_exit(device);
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		tsif_debugfs_exit(&device->tsif[i]);
+err_irq:
+	iounmap(device->bam_props.virt_addr);
+err_map_bam:
+err_res_bam:
+	iounmap(device->base);
+err_map_dev:
+err_res_dev:
+	iounmap(device->tsif[1].base);
+err_map_tsif1:
+err_res_tsif1:
+	iounmap(device->tsif[0].base);
+err_map_tsif0:
+err_res_tsif0:
+	if (device->tsif_ref_clk)
+		clk_put(device->tsif_ref_clk);
+err_refclock:
+	if (device->tsif_pclk)
+		clk_put(device->tsif_pclk);
+err_pclock:
+	if (device->tsif_vreg)
+		regulator_disable(device->tsif_vreg);
+err_regulator:
+	if (device->tsif_bus_client)
+		msm_bus_scale_unregister_client(device->tsif_bus_client);
+err_pinctrl:
+	kfree(device);
+
+out:
+	return rc;
+}
+
+static int msm_tspp_remove(struct platform_device *pdev)
+{
+	struct tspp_channel *channel;
+	u32 i;
+
+	struct tspp_device *device = platform_get_drvdata(pdev);
+
+	/* free the buffers, and delete the channels */
+	for (i = 0; i < TSPP_NUM_CHANNELS; i++) {
+		channel = &device->channels[i];
+		tspp_close_channel(device->pdev->id, i);
+	}
+
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		tsif_debugfs_exit(&device->tsif[i]);
+
+	mutex_destroy(&device->mutex);
+
+	if (device->tsif_bus_client)
+		msm_bus_scale_unregister_client(device->tsif_bus_client);
+
+	wakeup_source_trash(&device->ws);
+	if (device->req_irqs)
+		msm_tspp_free_irqs(device);
+
+	iounmap(device->bam_props.virt_addr);
+	iounmap(device->base);
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		iounmap(device->tsif[i].base);
+
+	if (device->tsif_ref_clk)
+		clk_put(device->tsif_ref_clk);
+
+	if (device->tsif_pclk)
+		clk_put(device->tsif_pclk);
+
+	if (device->tsif_vreg)
+		regulator_disable(device->tsif_vreg);
+
+	pm_runtime_disable(&pdev->dev);
+
+	kfree(device);
+
+	return 0;
+}
+
+/*** power management ***/
+
+static int tspp_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...");
+	return 0;
+}
+
+static int tspp_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...");
+	return 0;
+}
+
+static const struct dev_pm_ops tspp_dev_pm_ops = {
+	.runtime_suspend = tspp_runtime_suspend,
+	.runtime_resume = tspp_runtime_resume,
+};
+
+static const struct of_device_id msm_match_table[] = {
+	{.compatible = "qcom,msm_tspp"},
+	{}
+};
+
+static struct platform_driver msm_tspp_driver = {
+	.probe          = msm_tspp_probe,
+	.remove         = msm_tspp_remove,
+	.driver         = {
+		.name   = "msm_tspp",
+		.pm     = &tspp_dev_pm_ops,
+		.of_match_table = msm_match_table,
+	},
+};
+
+static int __init mod_init(void)
+{
+	int rc;
+
+	/* register the driver, and check hardware */
+	rc = platform_driver_register(&msm_tspp_driver);
+	if (rc)
+		pr_err("tspp: platform_driver_register failed: %d", rc);
+
+	return rc;
+}
+
+static void __exit mod_exit(void)
+{
+	/* delete low level driver */
+	platform_driver_unregister(&msm_tspp_driver);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("TSPP platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index 2e71d05..99bd263 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -7,4 +7,4 @@
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_module/
-obj-$(CONFIG_SPECTRA_CAMERA) += icp/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp/
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
index 1105d2c..6009c25 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
@@ -669,7 +669,8 @@
 	soc_info = &cdm_hw->soc_info;
 	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
 
-	rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, true);
 	if (rc) {
 		pr_err("Enable platform failed\n");
 		goto end;
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index 17b3c7c..fac8900 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -34,7 +34,7 @@
 	return rc;
 }
 
-int cam_context_handle_get_dev_info(struct cam_context *ctx,
+int cam_context_handle_crm_get_dev_info(struct cam_context *ctx,
 	struct cam_req_mgr_device_info *info)
 {
 	int rc;
@@ -63,7 +63,7 @@
 	return rc;
 }
 
-int cam_context_handle_link(struct cam_context *ctx,
+int cam_context_handle_crm_link(struct cam_context *ctx,
 	struct cam_req_mgr_core_dev_link_setup *link)
 {
 	int rc;
@@ -91,7 +91,7 @@
 	return rc;
 }
 
-int cam_context_handle_unlink(struct cam_context *ctx,
+int cam_context_handle_crm_unlink(struct cam_context *ctx,
 	struct cam_req_mgr_core_dev_link_setup *unlink)
 {
 	int rc;
@@ -120,7 +120,7 @@
 	return rc;
 }
 
-int cam_context_handle_apply_req(struct cam_context *ctx,
+int cam_context_handle_crm_apply_req(struct cam_context *ctx,
 	struct cam_req_mgr_apply_request *apply)
 {
 	int rc;
@@ -149,6 +149,29 @@
 	return rc;
 }
 
+int cam_context_handle_crm_flush_req(struct cam_context *ctx,
+	struct cam_req_mgr_flush_request *flush)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.flush_req) {
+		rc = ctx->state_machine[ctx->state].crm_ops.flush_req(ctx,
+			flush);
+	} else {
+		pr_err("%s: No crm flush req in dev %d, state %d\n",
+			__func__, ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
 
 int cam_context_handle_acquire_dev(struct cam_context *ctx,
 	struct cam_acquire_dev_cmd *cmd)
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
index 37a5c03..7f0fb7f 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -100,6 +100,7 @@
  * @link:                  Link the context
  * @unlink:                Unlink the context
  * @apply_req:             Apply setting for the context
+ * @flush_req:             Flush request to remove request ids
  *
  */
 struct cam_ctx_crm_ops {
@@ -111,6 +112,8 @@
 			struct cam_req_mgr_core_dev_link_setup *unlink);
 	int (*apply_req)(struct cam_context *ctx,
 			struct cam_req_mgr_apply_request *apply);
+	int (*flush_req)(struct cam_context *ctx,
+			struct cam_req_mgr_flush_request *flush);
 };
 
 
@@ -182,7 +185,7 @@
 };
 
 /**
- * cam_context_handle_get_dev_info()
+ * cam_context_handle_crm_get_dev_info()
  *
  * @brief:        Handle get device information command
  *
@@ -190,11 +193,11 @@
  * @info:                  Device information returned
  *
  */
-int cam_context_handle_get_dev_info(struct cam_context *ctx,
+int cam_context_handle_crm_get_dev_info(struct cam_context *ctx,
 		struct cam_req_mgr_device_info *info);
 
 /**
- * cam_context_handle_link()
+ * cam_context_handle_crm_link()
  *
  * @brief:        Handle link command
  *
@@ -202,11 +205,11 @@
  * @link:                  Link command payload
  *
  */
-int cam_context_handle_link(struct cam_context *ctx,
+int cam_context_handle_crm_link(struct cam_context *ctx,
 		struct cam_req_mgr_core_dev_link_setup *link);
 
 /**
- * cam_context_handle_unlink()
+ * cam_context_handle_crm_unlink()
  *
  * @brief:        Handle unlink command
  *
@@ -214,11 +217,11 @@
  * @unlink:                Unlink command payload
  *
  */
-int cam_context_handle_unlink(struct cam_context *ctx,
+int cam_context_handle_crm_unlink(struct cam_context *ctx,
 		struct cam_req_mgr_core_dev_link_setup *unlink);
 
 /**
- * cam_context_handle_apply_req()
+ * cam_context_handle_crm_apply_req()
  *
  * @brief:        Handle apply request command
  *
@@ -226,9 +229,20 @@
  * @apply:                 Apply request command payload
  *
  */
-int cam_context_handle_apply_req(struct cam_context *ctx,
+int cam_context_handle_crm_apply_req(struct cam_context *ctx,
 		struct cam_req_mgr_apply_request *apply);
 
+/**
+ * cam_context_handle_crm_flush_req()
+ *
+ * @brief:        Handle flush request command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @flush:                 Flush request command payload
+ *
+ */
+int cam_context_handle_crm_flush_req(struct cam_context *ctx,
+		struct cam_req_mgr_flush_request *flush);
 
 /**
  * cam_context_handle_acquire_dev()
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index 2a0c4a7..edd2e11 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -28,48 +28,45 @@
 int cam_context_buf_done_from_hw(struct cam_context *ctx,
 	void *done_event_data, uint32_t bubble_state)
 {
-	int rc = 0;
-	int i, j;
+	int j;
+	int result;
 	struct cam_ctx_request *req;
 	struct cam_hw_done_event_data *done =
 		(struct cam_hw_done_event_data *)done_event_data;
 
 	if (list_empty(&ctx->active_req_list)) {
 		pr_err("Buf done with no active request\n");
-		rc = -EINVAL;
-		goto end;
+		return -EIO;
 	}
 
 	req = list_first_entry(&ctx->active_req_list,
 		struct cam_ctx_request, list);
 
-	for (i = 0; i < done->num_handles; i++) {
-		for (j = 0; j < req->num_out_map_entries; j++) {
-			if (done->resource_handle[i] ==
-				req->out_map_entries[j].resource_handle)
-				break;
-		}
+	if (done->request_id != req->request_id) {
+		pr_err("mismatch: done request [%lld], active request [%lld]\n",
+			done->request_id, req->request_id);
+		return -EIO;
+	}
 
-		if (j == req->num_out_map_entries) {
-			pr_err("Can not find matching lane handle 0x%x\n",
-				done->resource_handle[i]);
-			rc = -EINVAL;
-			continue;
-		}
+	if (!req->num_out_map_entries) {
+		pr_err("active request with no output fence objects to signal\n");
+		return -EIO;
+	}
 
-		cam_sync_signal(req->out_map_entries[j].sync_id,
-			CAM_SYNC_STATE_SIGNALED_SUCCESS);
-		req->num_out_acked++;
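+	/* remove the request from the active list and signal all of its output fences */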
+	list_del_init(&req->list);
+	if (!bubble_state)
+		result = CAM_SYNC_STATE_SIGNALED_SUCCESS;
+	else
+		result = CAM_SYNC_STATE_SIGNALED_ERROR;
+
+	for (j = 0; j < req->num_out_map_entries; j++) {
+		cam_sync_signal(req->out_map_entries[j].sync_id, result);
 		req->out_map_entries[j].sync_id = -1;
 	}
 
-	if (req->num_out_acked == req->num_out_map_entries) {
-		list_del_init(&req->list);
-		list_add_tail(&req->list, &ctx->free_req_list);
-	}
+	list_add_tail(&req->list, &ctx->free_req_list);
 
-end:
-	return rc;
+	return 0;
 }
 
 int cam_context_apply_req_to_hw(struct cam_context *ctx,
@@ -114,64 +111,7 @@
 	return rc;
 }
 
-int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
-	struct cam_release_dev_cmd *cmd)
-{
-	int rc = 0;
-	int i;
-	struct cam_hw_release_args arg;
-	struct cam_ctx_request *req;
-
-	if (!ctx->hw_mgr_intf) {
-		pr_err("HW interface is not ready\n");
-		rc = -EFAULT;
-		goto end;
-	}
-
-	if (ctx->ctxt_to_hw_map) {
-		arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
-		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
-			&arg);
-		ctx->ctxt_to_hw_map = NULL;
-	}
-
-	ctx->session_hdl = 0;
-	ctx->dev_hdl = 0;
-	ctx->link_hdl = 0;
-
-	while (!list_empty(&ctx->active_req_list)) {
-		req = list_first_entry(&ctx->active_req_list,
-			struct cam_ctx_request, list);
-		list_del_init(&req->list);
-		pr_warn("signal fence in active list. fence num %d\n",
-			req->num_out_map_entries);
-		for (i = 0; i < req->num_out_map_entries; i++) {
-			if (req->out_map_entries[i].sync_id != -1)
-				cam_sync_signal(req->out_map_entries[i].sync_id,
-					CAM_SYNC_STATE_SIGNALED_ERROR);
-		}
-		list_add_tail(&req->list, &ctx->free_req_list);
-	}
-
-	/* flush the pending queue */
-	while (!list_empty(&ctx->pending_req_list)) {
-		req = list_first_entry(&ctx->pending_req_list,
-			struct cam_ctx_request, list);
-		list_del_init(&req->list);
-		pr_debug("signal fence in pending list. fence num %d\n",
-			req->num_out_map_entries);
-		for (i = 0; i < req->num_out_map_entries; i++)
-			if (req->out_map_entries[i].sync_id != -1)
-				cam_sync_signal(req->out_map_entries[i].sync_id,
-					CAM_SYNC_STATE_SIGNALED_ERROR);
-		list_add_tail(&req->list, &ctx->free_req_list);
-	}
-
-end:
-	return rc;
-}
-
-void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
+static void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
 {
 	struct cam_context *ctx = data;
 	struct cam_ctx_request *req = NULL;
@@ -195,6 +135,67 @@
 	}
 }
 
+int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int i;
+	struct cam_hw_release_args arg;
+	struct cam_ctx_request *req;
+
+	if ((!ctx->hw_mgr_intf) || (!ctx->hw_mgr_intf->hw_release)) {
+		pr_err("HW interface is not ready\n");
+		return -EINVAL;
+	}
+
+	arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
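+	/* indicate whether any requests are still outstanding on this context */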
+	if ((list_empty(&ctx->active_req_list)) &&
+		(list_empty(&ctx->pending_req_list)))
+		arg.active_req = false;
+	else
+		arg.active_req = true;
+
+	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
+	ctx->ctxt_to_hw_map = NULL;
+
+	ctx->session_hdl = 0;
+	ctx->dev_hdl = 0;
+	ctx->link_hdl = 0;
+
+	while (!list_empty(&ctx->active_req_list)) {
+		req = list_first_entry(&ctx->active_req_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		pr_debug("signal fence in active list. fence num %d\n",
+			req->num_out_map_entries);
+		for (i = 0; i < req->num_out_map_entries; i++) {
+			if (req->out_map_entries[i].sync_id > 0)
+				cam_sync_signal(req->out_map_entries[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+		}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	while (!list_empty(&ctx->pending_req_list)) {
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		for (i = 0; i < req->num_in_map_entries; i++)
+			if (req->in_map_entries[i].sync_id > 0)
+				cam_sync_deregister_callback(
+					cam_context_sync_callback, ctx,
+					req->in_map_entries[i].sync_id);
+		pr_debug("signal out fence in pending list. fence num %d\n",
+			req->num_out_map_entries);
+		for (i = 0; i < req->num_out_map_entries; i++)
+			if (req->out_map_entries[i].sync_id > 0)
+				cam_sync_signal(req->out_map_entries[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	return 0;
+}
+
 int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
 	struct cam_config_dev_cmd *cmd)
 {
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_core_defs.h b/drivers/media/platform/msm/camera/cam_core/cam_core_defs.h
index 3498836..45d989f 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_core_defs.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_core_defs.h
@@ -9,8 +9,9 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-#ifndef _CAM_REQ_MGR_CORE_DEFS_H_
-#define _CAM_REQ_MGR_CORE_DEFS_H_
+
+#ifndef _CAM_CORE_DEFS_H_
+#define _CAM_CORE_DEFS_H_
 
 #define CAM_CORE_TRACE_ENABLE 0
 
@@ -40,5 +41,5 @@
 	__func__, __LINE__, ##args)
 #endif
 
-#endif /* _CAM_REQ_MGR_CORE_DEFS_H_ */
+#endif /* _CAM_CORE_DEFS_H_ */
 
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
index f72a1d7..aab75d5 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
@@ -51,7 +51,7 @@
  * struct cam_hw_fence_map_entry - Entry for the resource to sync id map
  *
  * @resrouce_handle:       Resource port id for the buffer
- * @sync_id:               Synce id
+ * @sync_id:               Sync id
  *
  */
 struct cam_hw_fence_map_entry {
@@ -65,12 +65,14 @@
  * @num_handles:           number of handles in the event
  * @resrouce_handle:       list of the resource handle
  * @timestamp:             time stamp
+ * @request_id:            request identifier
  *
  */
 struct cam_hw_done_event_data {
 	uint32_t           num_handles;
 	uint32_t           resource_handle[CAM_NUM_OUT_PER_COMP_IRQ_MAX];
 	struct timeval     timestamp;
+	uint64_t           request_id;
 };
 
 /**
@@ -95,10 +97,12 @@
  * struct cam_hw_release_args - Payload for release command
  *
  * @ctxt_to_hw_map:        HW context from the acquire
+ * @active_req:            Active request flag
  *
  */
 struct cam_hw_release_args {
 	void              *ctxt_to_hw_map;
+	bool               active_req;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index 74a94b2..ab4c25d 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -201,7 +201,7 @@
 	return rc;
 }
 
-static int __cam_node_get_dev_info(struct cam_req_mgr_device_info *info)
+static int __cam_node_crm_get_dev_info(struct cam_req_mgr_device_info *info)
 {
 	struct cam_context *ctx = NULL;
 
@@ -214,10 +214,11 @@
 			__func__, info->dev_hdl);
 		return -EINVAL;
 	}
-	return cam_context_handle_get_dev_info(ctx, info);
+	return cam_context_handle_crm_get_dev_info(ctx, info);
 }
 
-static int __cam_node_link_setup(struct cam_req_mgr_core_dev_link_setup *setup)
+static int __cam_node_crm_link_setup(
+	struct cam_req_mgr_core_dev_link_setup *setup)
 {
 	int rc;
 	struct cam_context *ctx = NULL;
@@ -233,14 +234,14 @@
 	}
 
 	if (setup->link_enable)
-		rc = cam_context_handle_link(ctx, setup);
+		rc = cam_context_handle_crm_link(ctx, setup);
 	else
-		rc = cam_context_handle_unlink(ctx, setup);
+		rc = cam_context_handle_crm_unlink(ctx, setup);
 
 	return rc;
 }
 
-static int __cam_node_apply_req(struct cam_req_mgr_apply_request *apply)
+static int __cam_node_crm_apply_req(struct cam_req_mgr_apply_request *apply)
 {
 	struct cam_context *ctx = NULL;
 
@@ -254,7 +255,26 @@
 		return -EINVAL;
 	}
 
-	return cam_context_handle_apply_req(ctx, apply);
+	return cam_context_handle_crm_apply_req(ctx, apply);
+}
+
+static int __cam_node_crm_flush_req(struct cam_req_mgr_flush_request *flush)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!flush) {
+		pr_err("%s: Invalid flush request payload\n", __func__);
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *) cam_get_device_priv(flush->dev_hdl);
+	if (!ctx) {
+		pr_err("%s: Can not get context for handle %d\n",
+			__func__, flush->dev_hdl);
+		return -EINVAL;
+	}
+
+	return cam_context_handle_crm_flush_req(ctx, flush);
 }
 
 int cam_node_deinit(struct cam_node *node)
@@ -283,9 +303,10 @@
 	strlcpy(node->name, name, sizeof(node->name));
 
 	memcpy(&node->hw_mgr_intf, hw_mgr_intf, sizeof(node->hw_mgr_intf));
-	node->crm_node_intf.apply_req = __cam_node_apply_req;
-	node->crm_node_intf.get_dev_info = __cam_node_get_dev_info;
-	node->crm_node_intf.link_setup = __cam_node_link_setup;
+	node->crm_node_intf.apply_req = __cam_node_crm_apply_req;
+	node->crm_node_intf.get_dev_info = __cam_node_crm_get_dev_info;
+	node->crm_node_intf.link_setup = __cam_node_crm_link_setup;
+	node->crm_node_intf.flush_req = __cam_node_crm_flush_req;
 
 	mutex_init(&node->list_mutex);
 	INIT_LIST_HEAD(&node->free_ctx_list);
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
index a89981d..8664ce8 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
@@ -129,16 +129,12 @@
 	int rc;
 	struct cam_node *node = NULL;
 
-	if (!sd || !pdev || !name) {
-		rc = -EINVAL;
-		goto err;
-	}
+	if (!sd || !pdev || !name)
+		return -EINVAL;
 
 	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (!node) {
-		rc = -ENOMEM;
-		goto err;
-	}
+	if (!node)
+		return -ENOMEM;
 
 	/* Setup camera v4l2 subdevice */
 	sd->pdev = pdev;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 9a30d64..813f392 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -665,7 +665,8 @@
 }
 
 static int cam_cpas_util_apply_client_ahb_vote(struct cam_hw_info *cpas_hw,
-	struct cam_cpas_client *cpas_client, struct cam_ahb_vote *ahb_vote)
+	struct cam_cpas_client *cpas_client, struct cam_ahb_vote *ahb_vote,
+	enum cam_vote_level *applied_level)
 {
 	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
 	struct cam_cpas_bus_client *ahb_bus_client = &cpas_core->ahb_bus_client;
@@ -710,9 +711,21 @@
 
 	rc = cam_cpas_util_vote_bus_client_level(ahb_bus_client,
 		highest_level);
-	if (rc)
+	if (rc) {
 		pr_err("Failed in ahb vote, level=%d, rc=%d\n",
 			highest_level, rc);
+		goto unlock_bus_client;
+	}
+
+	rc = cam_soc_util_set_clk_rate_level(&cpas_hw->soc_info, highest_level);
+	if (rc) {
+		pr_err("Failed in scaling clock rate level %d for AHB\n",
+			highest_level);
+		goto unlock_bus_client;
+	}
+
+	if (applied_level)
+		*applied_level = highest_level;
 
 unlock_bus_client:
 	mutex_unlock(&ahb_bus_client->lock);
@@ -748,7 +761,7 @@
 		cpas_core->cpas_client[client_indx]->ahb_level);
 
 	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw,
-		cpas_core->cpas_client[client_indx], ahb_vote);
+		cpas_core->cpas_client[client_indx], ahb_vote, NULL);
 
 unlock_client:
 	mutex_unlock(&cpas_core->client_mutex[client_indx]);
@@ -765,6 +778,7 @@
 	struct cam_cpas_client *cpas_client;
 	struct cam_ahb_vote *ahb_vote;
 	struct cam_axi_vote *axi_vote;
+	enum cam_vote_level applied_level = CAM_SVS_VOTE;
 	int rc;
 
 	if (!hw_priv || !start_args) {
@@ -820,7 +834,7 @@
 		client_indx, ahb_vote->type, ahb_vote->vote.level,
 		cpas_client->ahb_level);
 	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
-		ahb_vote);
+		ahb_vote, &applied_level);
 	if (rc)
 		goto done;
 
@@ -833,7 +847,8 @@
 		goto done;
 
 	if (cpas_core->streamon_clients == 0) {
-		rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info);
+		rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info,
+			applied_level);
 		if (rc) {
 			pr_err("enable_resorce failed, rc=%d\n", rc);
 			goto done;
@@ -932,7 +947,7 @@
 	ahb_vote.type = CAM_VOTE_ABSOLUTE;
 	ahb_vote.vote.level = CAM_SUSPEND_VOTE;
 	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
-		&ahb_vote);
+		&ahb_vote, NULL);
 	if (rc)
 		goto done;
 
@@ -1383,7 +1398,7 @@
 	if (rc)
 		goto axi_cleanup;
 
-	rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info);
+	rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info, CAM_SVS_VOTE);
 	if (rc) {
 		pr_err("failed in soc_enable_resources, rc=%d\n", rc);
 		goto remove_default_vote;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
index 0c71ece..09c2ae5 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -22,26 +22,6 @@
 #include "cam_cpas_hw.h"
 #include "cam_cpas_soc.h"
 
-static int cam_cpas_get_vote_level_from_string(const char *string,
-	enum cam_vote_level *vote_level)
-{
-	if (!vote_level || !string)
-		return -EINVAL;
-
-	if (strnstr("suspend", string, strlen(string)))
-		*vote_level = CAM_SUSPEND_VOTE;
-	else if (strnstr("svs", string, strlen(string)))
-		*vote_level = CAM_SVS_VOTE;
-	else if (strnstr("nominal", string, strlen(string)))
-		*vote_level = CAM_NOMINAL_VOTE;
-	else if (strnstr("turbo", string, strlen(string)))
-		*vote_level = CAM_TURBO_VOTE;
-	else
-		*vote_level = CAM_SVS_VOTE;
-
-	return 0;
-}
-
 int cam_cpas_get_custom_dt_info(struct platform_device *pdev,
 	struct cam_cpas_private_soc *soc_private)
 {
@@ -130,7 +110,7 @@
 				return -ENODEV;
 			}
 
-			rc = cam_cpas_get_vote_level_from_string(ahb_string,
+			rc = cam_soc_util_get_level_from_string(ahb_string,
 				&soc_private->vdd_ahb[i].ahb_level);
 			if (rc) {
 				pr_err("invalid ahb-string at index=%d\n", i);
@@ -207,11 +187,13 @@
 	return rc;
 }
 
-int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info)
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level default_level)
 {
 	int rc = 0;
 
-	rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		default_level, true);
 	if (rc)
 		pr_err("enable platform resource failed, rc=%d\n", rc);
 
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
index d3dfbbd..b2ad513 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -59,6 +59,7 @@
 int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
 	irq_handler_t vfe_irq_handler, void *irq_data);
 int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
-int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info);
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level default_level);
 int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info);
 #endif /* _CAM_CPAS_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
index 27b8504..801d09d 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -17,6 +17,7 @@
 #include <linux/platform_device.h>
 
 #include <media/cam_cpas.h>
+#include "cam_soc_util.h"
 
 #define CAM_HW_IDENTIFIER_LENGTH 128
 
@@ -101,7 +102,7 @@
 };
 
 /**
- * enum cam_vote_level - Enum for voting type
+ * enum cam_vote_type - Enum for voting type
  *
  * @CAM_VOTE_ABSOLUTE : Absolute vote
  * @CAM_VOTE_DYNAMIC  : Dynamic vote
@@ -112,21 +113,6 @@
 };
 
 /**
- * enum cam_vote_level - Enum for voting level
- *
- * @CAM_SUSPEND_VOTE : Suspend vote
- * @CAM_SVS_VOTE     : SVS vote
- * @CAM_NOMINAL_VOTE : Nominal vote
- * @CAM_TURBO_VOTE   : Turbo vote
- */
-enum cam_vote_level {
-	CAM_SUSPEND_VOTE,
-	CAM_SVS_VOTE,
-	CAM_NOMINAL_VOTE,
-	CAM_TURBO_VOTE,
-};
-
-/**
  * struct cam_ahb_vote : AHB vote
  *
  * @type  : AHB voting type.
diff --git a/drivers/media/platform/msm/camera/icp/Makefile b/drivers/media/platform/msm/camera/cam_icp/Makefile
similarity index 63%
rename from drivers/media/platform/msm/camera/icp/Makefile
rename to drivers/media/platform/msm/camera/cam_icp/Makefile
index c42b162..b35e4e4 100644
--- a/drivers/media/platform/msm/camera/icp/Makefile
+++ b/drivers/media/platform/msm/camera/cam_icp/Makefile
@@ -2,13 +2,12 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
-ccflags-y += -Idrivers/media/platform/msm/camera/icp
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
 ccflags-y += -Idrivers/media/platform/msm/camera
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
 
-
 obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_subdev.o cam_icp_context.o hfi.o
diff --git a/drivers/media/platform/msm/camera/icp/cam_icp_context.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
similarity index 98%
rename from drivers/media/platform/msm/camera/icp/cam_icp_context.c
rename to drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
index 41290f4..2311f66 100644
--- a/drivers/media/platform/msm/camera/icp/cam_icp_context.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
@@ -107,7 +107,7 @@
 static int __cam_icp_handle_buf_done_in_ready(void *ctx,
 	uint32_t evt_id, void *done)
 {
-	return cam_context_buf_done_from_hw(ctx, done, 0);
+	return cam_context_buf_done_from_hw(ctx, done, evt_id);
 }
 
 static struct cam_ctx_ops
diff --git a/drivers/media/platform/msm/camera/icp/cam_icp_context.h b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/cam_icp_context.h
rename to drivers/media/platform/msm/camera/cam_icp/cam_icp_context.h
diff --git a/drivers/media/platform/msm/camera/icp/cam_icp_subdev.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
similarity index 98%
rename from drivers/media/platform/msm/camera/icp/cam_icp_subdev.c
rename to drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
index 703561d..69c2e03 100644
--- a/drivers/media/platform/msm/camera/icp/cam_icp_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
@@ -142,8 +142,6 @@
 		return -EINVAL;
 	}
 
-	memset(&g_icp_dev, 0, sizeof(g_icp_dev));
-
 	g_icp_dev.sd.pdev = pdev;
 	g_icp_dev.sd.internal_ops = &cam_icp_subdev_internal_ops;
 	rc = cam_subdev_probe(&g_icp_dev.sd, pdev, CAM_ICP_DEV_NAME,
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
similarity index 95%
rename from drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
index 0ffea5b..9150795 100644
--- a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
@@ -98,6 +98,13 @@
  * @icp_base: icp base address
  */
 void cam_hfi_enable_cpu(void __iomem *icp_base);
+
+/**
+ * cam_hfi_disable_cpu() - disable A5 CPU
+ * @icp_base: icp base address
+ */
+void cam_hfi_disable_cpu(void __iomem *icp_base);
+
 /**
  * cam_hfi_deinit() - cleanup HFI
  */
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_reg.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
similarity index 99%
rename from drivers/media/platform/msm/camera/icp/fw_inc/hfi_reg.h
rename to drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
index ff6b72a..04e3c85 100644
--- a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_reg.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
@@ -49,6 +49,7 @@
 #define ICP_CSR_EN_CLKGATE_WFI                  (1 << 12)
 #define ICP_CSR_EDBGRQ                          (1 << 14)
 #define ICP_CSR_DBGSWENABLE                     (1 << 22)
+#define ICP_CSR_A5_STATUS_WFI                   (1 << 7)
 
 /* start of Queue table and queues */
 #define MAX_ICP_HFI_QUEUES                      4
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_session_defs.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/fw_inc/hfi_session_defs.h
rename to drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_sys_defs.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/fw_inc/hfi_sys_defs.h
rename to drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
diff --git a/drivers/media/platform/msm/camera/icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
similarity index 97%
rename from drivers/media/platform/msm/camera/icp/hfi.c
rename to drivers/media/platform/msm/camera/cam_icp/hfi.c
index 170c8cf..b763a39 100644
--- a/drivers/media/platform/msm/camera/icp/hfi.c
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -302,6 +302,19 @@
 	return 0;
 }
 
+void cam_hfi_disable_cpu(void __iomem *icp_base)
+{
+	uint32_t data;
+	uint32_t val;
+
+	data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
+	/* Add waiting logic in case it is not idle */
+	if (data & ICP_CSR_A5_STATUS_WFI) {
+		val = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+		val &= ~(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN);
+		cam_io_w(val, icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	}
+}
 
 void cam_hfi_enable_cpu(void __iomem *icp_base)
 {
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/Makefile
similarity index 60%
rename from drivers/media/platform/msm/camera/icp/icp_hw/Makefile
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/Makefile
index 8e95286..5276340 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/Makefile
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/Makefile
@@ -1,9 +1,9 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
-ccflags-y += -Idrivers/media/platform/msm/camera/icp
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
 
 obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw_mgr/ a5_hw/ ipe_hw/ bps_hw/
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/Makefile
new file mode 100644
index 0000000..5f4f9fa
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += a5_dev.o a5_core.o a5_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
similarity index 99%
rename from drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
index 39eacd8..9f6f940 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
@@ -272,7 +272,7 @@
 	}
 
 	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
-	cpas_vote.ahb_vote.vote.level = CAM_TURBO_VOTE;
+	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
 	cpas_vote.axi_vote.compressed_bw = ICP_TURBO_VOTE;
 	cpas_vote.axi_vote.uncompressed_bw = ICP_TURBO_VOTE;
 
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_dev.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
similarity index 96%
rename from drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
index d12b3b6..a98f01f 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
@@ -82,7 +82,8 @@
 {
 	int rc = 0;
 
-	rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_TURBO_VOTE, true);
 	if (rc)
 		pr_err("%s: enable platform failed\n", __func__);
 
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/Makefile
new file mode 100644
index 0000000..114e4a1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += bps_dev.o bps_core.o bps_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
similarity index 98%
rename from drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
index 91652d7..cabdc8a 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
@@ -74,7 +74,7 @@
 	}
 
 	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
-	cpas_vote.ahb_vote.vote.level = CAM_TURBO_VOTE;
+	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
 	cpas_vote.axi_vote.compressed_bw = ICP_TURBO_VOTE;
 	cpas_vote.axi_vote.uncompressed_bw = ICP_TURBO_VOTE;
 
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_dev.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
similarity index 95%
rename from drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
index 76884bf..8a3c7ac 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
@@ -66,7 +66,8 @@
 {
 	int rc = 0;
 
-	rc = cam_soc_util_enable_platform_resource(soc_info, true, false);
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_TURBO_VOTE, false);
 	if (rc)
 		pr_err("%s: enable platform failed\n", __func__);
 
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/Makefile
similarity index 63%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/Makefile
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/Makefile
index 4a6c3c0..71afea4 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/Makefile
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/Makefile
@@ -1,16 +1,16 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
 ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/hw_utils/include
 ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/isp_hw_mgr/
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc/
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/a5_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
 
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
similarity index 71%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 677c24e..fe719c7 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -26,7 +26,6 @@
 #include <linux/debugfs.h>
 #include <media/cam_defs.h>
 #include <media/cam_icp.h>
-#include <linux/debugfs.h>
 
 #include "cam_sync_api.h"
 #include "cam_packet_util.h"
@@ -89,10 +88,8 @@
 	task_data = (struct hfi_cmd_work_data *)data;
 
 	rc = hfi_write_cmd(task_data->data);
-	if (rc < 0)
-		pr_err("unable to write\n");
-
 	ICP_DBG("task type : %u, rc : %d\n", task_data->type, rc);
+
 	return rc;
 }
 
@@ -100,12 +97,12 @@
 {
 	int i;
 	uint32_t idx;
-	uint32_t request_id;
+	uint64_t request_id;
 	struct cam_icp_hw_ctx_data *ctx_data = NULL;
 	struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
 	struct hfi_msg_frame_process_done *frame_done;
 	struct hfi_frame_process_info *hfi_frame_process;
-	struct cam_hw_done_event_data   buf_data;
+	struct cam_hw_done_event_data buf_data;
 
 	ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
 	if (ioconfig_ack->err_type != HFI_ERR_SYS_NONE) {
@@ -119,12 +116,9 @@
 		pr_err("result : %u\n", frame_done->result);
 		return -EIO;
 	}
-	ICP_DBG("result : %u\n", frame_done->result);
 
 	ctx_data = (struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
 	request_id = ioconfig_ack->user_data2;
-	ICP_DBG("ctx : %pK, request_id :%d\n",
-		(void *)ctx_data->context_priv, request_id);
 
 	hfi_frame_process = &ctx_data->hfi_frame_process;
 	for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
@@ -132,22 +126,17 @@
 			break;
 
 	if (i >= CAM_FRAME_CMD_MAX) {
-		pr_err("unable to find pkt in ctx data for req_id =%d\n",
+		pr_err("unable to find pkt in ctx data for req_id =%lld\n",
 			request_id);
 		return -EINVAL;
 	}
 	idx = i;
 
-	/* send event to ctx this needs to be done in msg handler */
-	buf_data.num_handles = hfi_frame_process->num_out_resources[idx];
-	for (i = 0; i < buf_data.num_handles; i++)
-		buf_data.resource_handle[i] =
-			hfi_frame_process->out_resource[idx][i];
-
-	ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
+	buf_data.request_id = hfi_frame_process->request_id[idx];
+	ctx_data->ctxt_event_cb(ctx_data->context_priv, false, &buf_data);
 
 	/* now release memory for hfi frame process command */
-	ICP_DBG("matching request id: %d\n",
+	ICP_DBG("matching request id: %lld\n",
 			hfi_frame_process->request_id[idx]);
 	mutex_lock(&ctx_data->hfi_frame_process.lock);
 	hfi_frame_process->request_id[idx] = 0;
@@ -176,6 +165,11 @@
 		}
 		ctx_data =
 			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+		if (!ctx_data) {
+			pr_err("wrong ctx data from IPE response\n");
+			return -EINVAL;
+		}
+
 		mutex_lock(&ctx_data->ctx_mutex);
 		ctx_data->scratch_mem_size = ipe_config_ack->scratch_mem_size;
 		mutex_unlock(&ctx_data->ctx_mutex);
@@ -191,6 +185,10 @@
 		}
 		ctx_data =
 			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+		if (!ctx_data) {
+			pr_err("wrong ctx data from BPS response\n");
+			return -EINVAL;
+		}
 	}
 	complete(&ctx_data->wait_complete);
 
@@ -253,29 +251,25 @@
 {
 	int rc;
 
-	switch (msg_ptr[ICP_PACKET_IPCODE]) {
+	switch (msg_ptr[ICP_PACKET_OPCODE]) {
 	case HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO:
 	case HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO:
 		ICP_DBG("received HFI_IPEBPS_CMD_OPCODE_IPE/BPS_CONFIG_IO:\n");
 		rc = cam_icp_mgr_process_msg_config_io(msg_ptr);
-		if (rc < 0) {
-			pr_err("error in process_msg_config_io\n");
+		if (rc)
 			return rc;
-		}
 		break;
 
 	case HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS:
 	case HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS:
 		ICP_DBG("received OPCODE_IPE/BPS_FRAME_PROCESS:\n");
 		rc = cam_icp_mgr_process_msg_frame_process(msg_ptr);
-		if (rc < 0) {
-			pr_err("error in msg_frame_process\n");
+		if (rc)
 			return rc;
-		}
 		break;
 	default:
 		pr_err("Invalid opcode : %u\n",
-			msg_ptr[ICP_PACKET_IPCODE]);
+			msg_ptr[ICP_PACKET_OPCODE]);
 		break;
 	}
 
@@ -286,23 +280,27 @@
 {
 	struct cam_icp_hw_ctx_data *ctx_data = NULL;
 	struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+	int rc = 0;
 
-	if (msg_ptr[ICP_PACKET_IPCODE] ==
-		HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY ||
-		msg_ptr[ICP_PACKET_IPCODE] ==
-		HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY) {
-		ICP_DBG("received HFI_IPEBPS_CMD_OPCODE_IPE/BPS_DESTROY:\n");
+	switch (msg_ptr[ICP_PACKET_OPCODE]) {
+	case HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY:
+	case HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY:
+	case HFI_IPEBPS_CMD_OPCODE_IPE_ABORT:
+	case HFI_IPEBPS_CMD_OPCODE_BPS_ABORT:
+		ICP_DBG("received IPE/BPS_DESTROY/ABORT:\n");
 		ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
 		ctx_data =
 			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
 		complete(&ctx_data->wait_complete);
-
-	} else {
-		pr_err("Invalid opcode : %u\n", msg_ptr[ICP_PACKET_IPCODE]);
-		return -EINVAL;
+		break;
+	default:
+		pr_err("Invalid opcode : %u\n",
+			msg_ptr[ICP_PACKET_OPCODE]);
+		rc = -EINVAL;
+		break;
 	}
 
-	return 0;
+	return rc;
 }
 
 static int32_t cam_icp_mgr_process_msg(void *priv, void *data)
@@ -341,27 +339,19 @@
 	case HFI_MSG_SYS_PING_ACK:
 		ICP_DBG("received HFI_MSG_SYS_PING_ACK\n");
 		rc = cam_icp_mgr_process_msg_ping_ack(msg_ptr);
-		if (rc)
-			pr_err("fail process PING_ACK\n");
 		break;
 
 	case HFI_MSG_IPEBPS_CREATE_HANDLE_ACK:
 		ICP_DBG("received HFI_MSG_IPEBPS_CREATE_HANDLE_ACK\n");
 		rc = cam_icp_mgr_process_msg_create_handle(msg_ptr);
-		if (rc)
-			pr_err("fail process CREATE_HANDLE_ACK\n");
 		break;
 
 	case HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK:
 		rc = cam_icp_mgr_process_indirect_ack_msg(msg_ptr);
-		if (rc)
-			pr_err("fail process INDIRECT_ACK\n");
 		break;
 
 	case  HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK:
 		rc = cam_icp_mgr_process_direct_ack_msg(msg_ptr);
-		if (rc)
-			pr_err("fail process DIRECT_ACK\n");
 		break;
 
 	case HFI_MSG_EVENT_NOTIFY:
@@ -416,27 +406,44 @@
 	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.sec_heap);
 }
 
-static int cam_icp_allocate_hfi_mem(void)
+static int cam_icp_alloc_shared_mem(struct cam_mem_mgr_memory_desc *qtbl)
 {
 	int rc;
 	struct cam_mem_mgr_request_desc alloc;
 	struct cam_mem_mgr_memory_desc out;
-	dma_addr_t iova;
+
+	memset(&alloc, 0, sizeof(alloc));
+	memset(&out, 0, sizeof(out));
+	alloc.size = SZ_1M;
+	alloc.align = 0;
+	alloc.region = CAM_MEM_MGR_REGION_SHARED;
+	alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
+	rc = cam_mem_mgr_request_mem(&alloc, &out);
+	if (rc)
+		return rc;
+
+	*qtbl = out;
+	ICP_DBG("kva = %llX\n", out.kva);
+	ICP_DBG("qtbl IOVA = %X\n", out.iova);
+	ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
+	ICP_DBG("MEM HDL = %X\n", out.mem_handle);
+	ICP_DBG("length = %lld\n", out.len);
+	ICP_DBG("region = %d\n", out.region);
+
+	return rc;
+}
+
+static int cam_icp_allocate_fw_mem(void)
+{
+	int rc;
 	uint64_t kvaddr;
 	size_t len;
-
-	rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
-		CAM_MEM_MGR_REGION_SHARED,
-		&icp_hw_mgr.hfi_mem.shmem);
-	if (rc)
-		return -ENOMEM;
+	dma_addr_t iova;
 
 	rc = cam_smmu_alloc_firmware(icp_hw_mgr.iommu_hdl,
 		&iova, &kvaddr, &len);
-	if (rc < 0) {
-		pr_err("Unable to allocate FW memory\n");
+	if (rc)
 		return -ENOMEM;
-	}
 
 	icp_hw_mgr.hfi_mem.fw_buf.len = len;
 	icp_hw_mgr.hfi_mem.fw_buf.kva = kvaddr;
@@ -447,112 +454,58 @@
 	ICP_DBG("IOVA = %llX\n", iova);
 	ICP_DBG("length = %zu\n", len);
 
-	memset(&alloc, 0, sizeof(alloc));
-	memset(&out, 0, sizeof(out));
-	alloc.size = SZ_1M;
-	alloc.align = 0;
-	alloc.region = CAM_MEM_MGR_REGION_SHARED;
-	alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
-	rc = cam_mem_mgr_request_mem(&alloc, &out);
-	if (rc < 0) {
+	return rc;
+}
+
+static int cam_icp_allocate_hfi_mem(void)
+{
+	int rc;
+
+	rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
+		CAM_MEM_MGR_REGION_SHARED,
+		&icp_hw_mgr.hfi_mem.shmem);
+	if (rc) {
+		pr_err("Unable to get shared memory info\n");
+		return rc;
+	}
+
+	rc = cam_icp_allocate_fw_mem();
+	if (rc) {
+		pr_err("Unable to allocate FW memory\n");
+		return rc;
+	}
+
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.qtbl);
+	if (rc) {
 		pr_err("Unable to allocate qtbl memory\n");
 		goto qtbl_alloc_failed;
 	}
-	icp_hw_mgr.hfi_mem.qtbl = out;
 
-	ICP_DBG("kva = %llX\n", out.kva);
-	ICP_DBG("qtbl IOVA = %X\n", out.iova);
-	ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
-	ICP_DBG("MEM HDL = %X\n", out.mem_handle);
-	ICP_DBG("length = %lld\n", out.len);
-	ICP_DBG("region = %d\n", out.region);
-
-	/* Allocate memory for cmd queue */
-	memset(&alloc, 0, sizeof(alloc));
-	memset(&out, 0, sizeof(out));
-	alloc.size = SZ_1M;
-	alloc.align = 0;
-	alloc.region = CAM_MEM_MGR_REGION_SHARED;
-	alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
-	rc = cam_mem_mgr_request_mem(&alloc, &out);
-	if (rc < 0) {
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+	if (rc) {
 		pr_err("Unable to allocate cmd q memory\n");
 		goto cmd_q_alloc_failed;
 	}
-	icp_hw_mgr.hfi_mem.cmd_q = out;
 
-	ICP_DBG("kva = %llX\n", out.kva);
-	ICP_DBG("cmd_q IOVA = %X\n", out.iova);
-	ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
-	ICP_DBG("MEM HDL = %X\n", out.mem_handle);
-	ICP_DBG("length = %lld\n", out.len);
-	ICP_DBG("region = %d\n", out.region);
-
-	/* Allocate memory for msg queue */
-	memset(&alloc, 0, sizeof(alloc));
-	memset(&out, 0, sizeof(out));
-	alloc.size = SZ_1M;
-	alloc.align = 0;
-	alloc.region = CAM_MEM_MGR_REGION_SHARED;
-	alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
-	rc = cam_mem_mgr_request_mem(&alloc, &out);
-	if (rc < 0) {
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.msg_q);
+	if (rc) {
 		pr_err("Unable to allocate msg q memory\n");
 		goto msg_q_alloc_failed;
 	}
-	icp_hw_mgr.hfi_mem.msg_q = out;
 
-	ICP_DBG("kva = %llX\n", out.kva);
-	ICP_DBG("msg_q IOVA = %X\n", out.iova);
-	ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
-	ICP_DBG("MEM HDL = %X\n", out.mem_handle);
-	ICP_DBG("length = %lld\n", out.len);
-	ICP_DBG("region = %d\n", out.region);
-
-	/* Allocate memory for dbg queue */
-	memset(&alloc, 0, sizeof(alloc));
-	memset(&out, 0, sizeof(out));
-	alloc.size = SZ_1M;
-	alloc.align = 0;
-	alloc.region = CAM_MEM_MGR_REGION_SHARED;
-	alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
-	rc = cam_mem_mgr_request_mem(&alloc, &out);
-	if (rc < 0) {
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+	if (rc) {
 		pr_err("Unable to allocate dbg q memory\n");
 		goto dbg_q_alloc_failed;
 	}
-	icp_hw_mgr.hfi_mem.dbg_q = out;
 
-	ICP_DBG("kva = %llX\n", out.kva);
-	ICP_DBG("dbg_q IOVA = %X\n", out.iova);
-	ICP_DBG("SMMU HDL = %X\n",  out.smmu_hdl);
-	ICP_DBG("MEM HDL = %X\n", out.mem_handle);
-	ICP_DBG("length = %lld\n", out.len);
-	ICP_DBG("region = %d\n", out.region);
-
-	/* Allocate memory for sec heap queue */
-	memset(&alloc, 0, sizeof(alloc));
-	memset(&out, 0, sizeof(out));
-	alloc.size = SZ_1M;
-	alloc.align = 0;
-	alloc.region = CAM_MEM_MGR_REGION_SHARED;
-	alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
-	rc = cam_mem_mgr_request_mem(&alloc, &out);
-	if (rc < 0) {
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.sec_heap);
+	if (rc) {
 		pr_err("Unable to allocate sec heap q memory\n");
 		goto sec_heap_alloc_failed;
 	}
-	icp_hw_mgr.hfi_mem.sec_heap = out;
-
-	ICP_DBG("kva = %llX\n", out.kva);
-	ICP_DBG("sec_heap IOVA = %X\n", out.iova);
-	ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
-	ICP_DBG("MEM HDL = %X\n", out.mem_handle);
-	ICP_DBG("length = %lld\n", out.len);
-	ICP_DBG("region = %d\n", out.region);
 
 	return rc;
-
 sec_heap_alloc_failed:
 	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.dbg_q);
 dbg_q_alloc_failed:
@@ -563,20 +516,17 @@
 	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.qtbl);
 qtbl_alloc_failed:
 	cam_smmu_dealloc_firmware(icp_hw_mgr.iommu_hdl);
-	pr_err("returned with error : %d\n", rc);
-
 	return rc;
 }
 
 static int cam_icp_mgr_get_free_ctx(struct cam_icp_hw_mgr *hw_mgr)
 {
 	int i = 0;
-	int num_ctx = CAM_ICP_CTX_MAX;
 
-	for (i = 0; i < num_ctx; i++) {
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
 		mutex_lock(&hw_mgr->ctx_data[i].ctx_mutex);
-		if (hw_mgr->ctx_data[i].in_use == 0) {
-			hw_mgr->ctx_data[i].in_use = 1;
+		if (hw_mgr->ctx_data[i].in_use == false) {
+			hw_mgr->ctx_data[i].in_use = true;
 			mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
 			break;
 		}
@@ -586,22 +536,87 @@
 	return i;
 }
 
+static void cam_icp_mgr_put_ctx(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	mutex_lock(&ctx_data->ctx_mutex);
+	ctx_data->in_use = false;
+	mutex_unlock(&ctx_data->ctx_mutex);
+}
+
+static int cam_icp_mgr_abort_handle(
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int rc = 0;
+	int timeout = 5000;
+	struct hfi_cmd_work_data *task_data;
+	struct hfi_cmd_ipebps_async abort_cmd;
+	struct crm_workq_task *task;
+	unsigned long rem_jiffies;
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task)
+		return -ENOMEM;
+
+	abort_cmd.size =
+		sizeof(struct hfi_cmd_ipebps_async) +
+		sizeof(struct hfi_cmd_abort_destroy) -
+		sizeof(abort_cmd.payload.direct);
+	abort_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		abort_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_ABORT;
+	else
+		abort_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_ABORT;
+
+	reinit_completion(&ctx_data->wait_complete);
+	abort_cmd.num_fw_handles = 1;
+	abort_cmd.fw_handles[0] = ctx_data->fw_handle;
+	abort_cmd.user_data1 = (uint64_t)ctx_data;
+	abort_cmd.user_data2 = (uint64_t)0x0;
+	memcpy(abort_cmd.payload.direct, &ctx_data->temp_payload,
+		sizeof(uint32_t));
+
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)&abort_cmd;
+	task_data->request_id = 0;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc)
+		return rc;
+
+	ICP_DBG("fw_handle = %x ctx_data = %pK\n",
+		ctx_data->fw_handle, ctx_data);
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		ICP_DBG("FW timeout/err in abort handle command\n");
+	}
+
+	return rc;
+}
+
 static int cam_icp_mgr_destroy_handle(
-		struct cam_icp_hw_ctx_data *ctx_data,
-		struct crm_workq_task *task)
+	struct cam_icp_hw_ctx_data *ctx_data)
 {
 	int rc = 0;
 	int timeout = 5000;
 	struct hfi_cmd_work_data *task_data;
 	struct hfi_cmd_ipebps_async destroy_cmd;
+	struct crm_workq_task *task;
 	unsigned long rem_jiffies;
 
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task)
+		return -ENOMEM;
+
 	destroy_cmd.size =
 		sizeof(struct hfi_cmd_ipebps_async) +
 		sizeof(struct ipe_bps_destroy) -
 		sizeof(destroy_cmd.payload.direct);
 	destroy_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
-	if (ctx_data->icp_dev_acquire_info.dev_type == CAM_ICP_RES_TYPE_BPS)
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
 		destroy_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY;
 	else
 		destroy_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY;
@@ -619,14 +634,18 @@
 	task_data->request_id = 0;
 	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
 	task->process_cb = cam_icp_mgr_process_cmd;
-	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc)
+		return rc;
+
 	ICP_DBG("fw_handle = %x ctx_data = %pK\n",
 		ctx_data->fw_handle, ctx_data);
 	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
 			msecs_to_jiffies((timeout)));
 	if (!rem_jiffies) {
 		rc = -ETIMEDOUT;
-		pr_err("FW response timeout: %d\n", rc);
+		ICP_DBG("FW response timeout: %d\n", rc);
 	}
 
 	return rc;
@@ -634,7 +653,6 @@
 
 static int cam_icp_mgr_release_ctx(struct cam_icp_hw_mgr *hw_mgr, int ctx_id)
 {
-	struct crm_workq_task *task;
 	int i = 0;
 
 	if (ctx_id >= CAM_ICP_CTX_MAX) {
@@ -650,13 +668,11 @@
 	}
 	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
 
-	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
-	if (task)
-		cam_icp_mgr_destroy_handle(&hw_mgr->ctx_data[ctx_id], task);
+	cam_icp_mgr_destroy_handle(&hw_mgr->ctx_data[ctx_id]);
 
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
-	hw_mgr->ctx_data[ctx_id].in_use = 0;
+	hw_mgr->ctx_data[ctx_id].in_use = false;
 	hw_mgr->ctx_data[ctx_id].fw_handle = 0;
 	hw_mgr->ctx_data[ctx_id].scratch_mem_size = 0;
 	mutex_lock(&hw_mgr->ctx_data[ctx_id].hfi_frame_process.lock);
@@ -664,52 +680,22 @@
 		clear_bit(i, hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
 	mutex_unlock(&hw_mgr->ctx_data[ctx_id].hfi_frame_process.lock);
 	mutex_destroy(&hw_mgr->ctx_data[ctx_id].hfi_frame_process.lock);
-	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
 	kfree(hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
+	hw_mgr->ctxt_cnt--;
+	kfree(hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info);
+	hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
+	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	return 0;
 }
 
-static int cam_icp_mgr_get_ctx_from_fw_handle(struct cam_icp_hw_mgr *hw_mgr,
-							uint32_t fw_handle)
+static void cam_icp_mgr_device_deinit(struct cam_icp_hw_mgr *hw_mgr)
 {
-	int ctx_id;
-
-	for (ctx_id = 0; ctx_id < CAM_ICP_CTX_MAX; ctx_id++) {
-		mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
-		if (hw_mgr->ctx_data[ctx_id].in_use) {
-			if (hw_mgr->ctx_data[ctx_id].fw_handle == fw_handle) {
-				mutex_unlock(
-					&hw_mgr->ctx_data[ctx_id].ctx_mutex);
-				return ctx_id;
-			}
-		}
-		mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
-	}
-	ICP_DBG("Invalid fw handle to get ctx\n");
-
-	return -EINVAL;
-}
-
-static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
-{
-	struct cam_icp_hw_mgr *hw_mgr = hw_priv;
 	struct cam_hw_intf *a5_dev_intf = NULL;
 	struct cam_hw_intf *ipe0_dev_intf = NULL;
 	struct cam_hw_intf *ipe1_dev_intf = NULL;
 	struct cam_hw_intf *bps_dev_intf = NULL;
-	struct cam_icp_a5_set_irq_cb irq_cb;
-	struct cam_icp_a5_set_fw_buf_info fw_buf_info;
-	struct cam_icp_hw_ctx_data *ctx_data = NULL;
-	int i;
-
-	mutex_lock(&hw_mgr->hw_mgr_mutex);
-	if (hw_mgr->fw_download ==  false) {
-		ICP_DBG("hw mgr is already closed\n");
-		mutex_unlock(&hw_mgr->hw_mgr_mutex);
-		return 0;
-	}
 
 	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
 	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
@@ -718,84 +704,78 @@
 
 	if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
 		pr_err("dev intfs are wrong, failed to close\n");
+		return;
+	}
+
+	if (ipe1_dev_intf)
+		ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv, NULL, 0);
+	ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+	bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+	a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+}
+
+static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
+{
+	struct cam_icp_hw_mgr *hw_mgr = hw_priv;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_icp_a5_set_irq_cb irq_cb;
+	struct cam_icp_a5_set_fw_buf_info fw_buf_info;
+	int i, rc = 0;
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if ((hw_mgr->fw_download ==  false) && (!hw_mgr->ctxt_cnt)) {
+		ICP_DBG("hw mgr is already closed\n");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return 0;
+	}
+
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	if (!a5_dev_intf) {
+		pr_err("a5_dev_intf is NULL\n");
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		return -EINVAL;
 	}
 
 	irq_cb.icp_hw_mgr_cb = NULL;
 	irq_cb.data = NULL;
-	a5_dev_intf->hw_ops.process_cmd(
+	rc = a5_dev_intf->hw_ops.process_cmd(
 		a5_dev_intf->hw_priv,
 		CAM_ICP_A5_SET_IRQ_CB,
 		&irq_cb, sizeof(irq_cb));
+	if (rc)
+		pr_err("deregister irq call back failed\n");
 
 	fw_buf_info.kva = 0;
 	fw_buf_info.iova = 0;
 	fw_buf_info.len = 0;
-	a5_dev_intf->hw_ops.process_cmd(
+	rc = a5_dev_intf->hw_ops.process_cmd(
 		a5_dev_intf->hw_priv,
 		CAM_ICP_A5_CMD_SET_FW_BUF,
 		&fw_buf_info,
 		sizeof(fw_buf_info));
+	if (rc)
+		pr_err("nullify the fw buf failed\n");
 
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
-
-	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
-		ctx_data = &hw_mgr->ctx_data[i];
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
 		cam_icp_mgr_release_ctx(hw_mgr, i);
-	}
 
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
-	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
-	if (ipe1_dev_intf)
-		ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
-			NULL, 0);
-
-	ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
-	bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
-	a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
 	cam_hfi_deinit();
+	cam_icp_mgr_device_deinit(hw_mgr);
 	cam_icp_free_hfi_mem();
 	hw_mgr->fw_download = false;
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
-
 	return 0;
 }
 
-static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
+static int cam_icp_mgr_device_init(struct cam_icp_hw_mgr *hw_mgr)
 {
+	int rc = 0;
 	struct cam_hw_intf *a5_dev_intf = NULL;
 	struct cam_hw_intf *ipe0_dev_intf = NULL;
 	struct cam_hw_intf *ipe1_dev_intf = NULL;
 	struct cam_hw_intf *bps_dev_intf = NULL;
-	struct cam_hw_info *a5_dev = NULL;
-	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
-	struct cam_icp_a5_set_irq_cb irq_cb;
-	struct cam_icp_a5_set_fw_buf_info fw_buf_info;
-	struct hfi_mem_info hfi_mem;
-	unsigned long rem_jiffies;
-	int timeout = 5000;
-	int rc = 0;
-
-	if (!hw_mgr) {
-		pr_err("hw_mgr is NULL\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&hw_mgr->hw_mgr_mutex);
-	if (hw_mgr->fw_download) {
-		ICP_DBG("FW already downloaded\n");
-		mutex_unlock(&hw_mgr->hw_mgr_mutex);
-		return rc;
-	}
-
-	/* Allocate memory for FW and shared memory */
-	rc = cam_icp_allocate_hfi_mem();
-	if (rc < 0) {
-		pr_err("hfi mem alloc failed\n");
-		mutex_unlock(&hw_mgr->hw_mgr_mutex);
-		return rc;
-	}
 
 	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
 	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
@@ -804,47 +784,58 @@
 
 	if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
 		pr_err("dev intfs are wrong\n");
-		goto dev_intf_fail;
+		return -EINVAL;
 	}
 
-	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
-
 	rc = a5_dev_intf->hw_ops.init(a5_dev_intf->hw_priv, NULL, 0);
-	if (rc < 0) {
-		pr_err("a5 dev init failed\n");
+	if (rc)
 		goto a5_dev_init_failed;
-	}
+
 	rc = bps_dev_intf->hw_ops.init(bps_dev_intf->hw_priv, NULL, 0);
-	if (rc < 0) {
-		pr_err("bps dev init failed\n");
+	if (rc)
 		goto bps_dev_init_failed;
-	}
+
 	rc = ipe0_dev_intf->hw_ops.init(ipe0_dev_intf->hw_priv, NULL, 0);
-	if (rc < 0) {
-		pr_err("ipe0 dev init failed\n");
+	if (rc)
 		goto ipe0_dev_init_failed;
-	}
 
 	if (ipe1_dev_intf) {
 		rc = ipe1_dev_intf->hw_ops.init(ipe1_dev_intf->hw_priv,
 						NULL, 0);
-		if (rc < 0) {
-			pr_err("ipe1 dev init failed\n");
+		if (rc)
 			goto ipe1_dev_init_failed;
-		}
 	}
-	/* Set IRQ callback */
+
+	return rc;
+ipe1_dev_init_failed:
+	ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+ipe0_dev_init_failed:
+	bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+bps_dev_init_failed:
+	a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+a5_dev_init_failed:
+	return rc;
+}
+
+static int cam_icp_mgr_fw_download(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	struct cam_icp_a5_set_irq_cb irq_cb;
+	struct cam_icp_a5_set_fw_buf_info fw_buf_info;
+
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
 	irq_cb.icp_hw_mgr_cb = cam_icp_hw_mgr_cb;
-	irq_cb.data = hw_mgr_priv;
+	irq_cb.data = hw_mgr;
 	rc = a5_dev_intf->hw_ops.process_cmd(
 		a5_dev_intf->hw_priv,
 		CAM_ICP_A5_SET_IRQ_CB,
 		&irq_cb, sizeof(irq_cb));
-	if (rc < 0) {
-		pr_err("CAM_ICP_A5_SET_IRQ_CB failed\n");
-		rc = -EINVAL;
+	if (rc)
 		goto set_irq_failed;
-	}
 
 	fw_buf_info.kva = icp_hw_mgr.hfi_mem.fw_buf.kva;
 	fw_buf_info.iova = icp_hw_mgr.hfi_mem.fw_buf.iova;
@@ -853,12 +844,9 @@
 	rc = a5_dev_intf->hw_ops.process_cmd(
 		a5_dev_intf->hw_priv,
 		CAM_ICP_A5_CMD_SET_FW_BUF,
-		&fw_buf_info,
-		sizeof(fw_buf_info));
-	if (rc < 0) {
-		pr_err("CAM_ICP_A5_CMD_SET_FW_BUF failed\n");
+		&fw_buf_info, sizeof(fw_buf_info));
+	if (rc)
 		goto set_irq_failed;
-	}
 
 	cam_hfi_enable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
 
@@ -866,10 +854,24 @@
 		a5_dev_intf->hw_priv,
 		CAM_ICP_A5_CMD_FW_DOWNLOAD,
 		NULL, 0);
-	if (rc < 0) {
-		pr_err("FW download is failed\n");
-		goto set_irq_failed;
-	}
+	if (rc)
+		goto fw_download_failed;
+
+	return rc;
+fw_download_failed:
+	cam_hfi_disable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+set_irq_failed:
+	return rc;
+}
+
+static int cam_icp_mgr_hfi_init(struct cam_icp_hw_mgr *hw_mgr)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	struct hfi_mem_info hfi_mem;
+
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
 
 	hfi_mem.qtbl.kva = icp_hw_mgr.hfi_mem.qtbl.kva;
 	hfi_mem.qtbl.iova = icp_hw_mgr.hfi_mem.qtbl.iova;
@@ -905,65 +907,157 @@
 
 	hfi_mem.shmem.iova = icp_hw_mgr.hfi_mem.shmem.iova_start;
 	hfi_mem.shmem.len = icp_hw_mgr.hfi_mem.shmem.iova_len;
-
-	rc = cam_hfi_init(0, &hfi_mem,
+	return cam_hfi_init(0, &hfi_mem,
 		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
 		hw_mgr->a5_debug);
-	if (rc < 0) {
-		pr_err("hfi_init is failed\n");
-		goto set_irq_failed;
-	}
+}
 
-	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+static int cam_icp_mgr_send_fw_init(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
 
-	ICP_DBG("Sending HFI init command\n");
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
 	reinit_completion(&hw_mgr->a5_complete);
-
+	ICP_DBG("Sending HFI init command\n");
 	rc = a5_dev_intf->hw_ops.process_cmd(
 		a5_dev_intf->hw_priv,
 		CAM_ICP_A5_SEND_INIT,
 		NULL, 0);
+	if (rc)
+		return rc;
 
 	ICP_DBG("Wait for INIT DONE Message\n");
 	rem_jiffies = wait_for_completion_timeout(&icp_hw_mgr.a5_complete,
-			msecs_to_jiffies((timeout)));
+		msecs_to_jiffies((timeout)));
 	if (!rem_jiffies) {
 		rc = -ETIMEDOUT;
-		pr_err("FW response timed out %d\n", rc);
-		goto set_irq_failed;
+		ICP_DBG("FW response timed out %d\n", rc);
+	}
+	ICP_DBG("Done Waiting for INIT DONE Message\n");
+
+	return rc;
+}
+
+static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	int rc = 0;
+
+	if (!hw_mgr) {
+		pr_err("hw_mgr is NULL\n");
+		return -EINVAL;
 	}
 
-	ICP_DBG("Done Waiting for INIT DONE Message\n");
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (hw_mgr->fw_download) {
+		ICP_DBG("FW already downloaded\n");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return rc;
+	}
+
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+	rc = cam_icp_allocate_hfi_mem();
+	if (rc) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		goto alloc_hfi_mem_failed;
+	}
+
+	rc = cam_icp_mgr_device_init(hw_mgr);
+	if (rc) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		goto dev_init_fail;
+	}
+
+	rc = cam_icp_mgr_fw_download(hw_mgr);
+	if (rc) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		goto fw_download_failed;
+	}
+
+	rc = cam_icp_mgr_hfi_init(hw_mgr);
+	if (rc) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		goto hfi_init_failed;
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	rc = cam_icp_mgr_send_fw_init(hw_mgr);
+	if (rc)
+		goto fw_init_failed;
 
 	rc = a5_dev_intf->hw_ops.process_cmd(
 		a5_dev_intf->hw_priv,
 		CAM_ICP_A5_CMD_POWER_COLLAPSE,
 		NULL, 0);
-	if (rc) {
-		pr_err("icp power collapse failed\n");
-		goto set_irq_failed;
-	}
-
 	hw_mgr->fw_download = true;
 	hw_mgr->ctxt_cnt = 0;
 	ICP_DBG("FW download done successfully\n");
-
+	if (!download_fw_args)
+		cam_icp_mgr_hw_close(hw_mgr, NULL);
 	return rc;
 
-set_irq_failed:
-	if (ipe1_dev_intf)
-		rc = ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
-			NULL, 0);
-ipe1_dev_init_failed:
-	rc = ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
-ipe0_dev_init_failed:
-	rc = bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
-bps_dev_init_failed:
-	rc = a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
-a5_dev_init_failed:
-dev_intf_fail:
+fw_init_failed:
+	cam_hfi_deinit();
+hfi_init_failed:
+	cam_hfi_disable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+fw_download_failed:
+	cam_icp_mgr_device_deinit(hw_mgr);
+dev_init_fail:
 	cam_icp_free_hfi_mem();
-	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+alloc_hfi_mem_failed:
+	return rc;
+}
+
+static int cam_icp_mgr_handle_config_err(
+	struct cam_hw_config_args *config_args,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	struct cam_hw_done_event_data buf_data;
+
+	buf_data.num_handles = config_args->num_out_map_entries;
+	buf_data.request_id = *(uint64_t *)config_args->priv;
+	ctx_data->ctxt_event_cb(ctx_data->context_priv, true, &buf_data);
+
+	return 0;
+}
+
+static int cam_icp_mgr_enqueue_config(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_hw_config_args *config_args)
+{
+	int rc = 0;
+	uint64_t request_id = 0;
+	struct crm_workq_task *task;
+	struct hfi_cmd_work_data *task_data;
+	struct hfi_cmd_ipebps_async *hfi_cmd;
+	struct cam_hw_update_entry *hw_update_entries;
+
+	request_id = *(uint64_t *)config_args->priv;
+	hw_update_entries = config_args->hw_update_entries;
+	ICP_DBG("req_id = %lld %pK\n", request_id, config_args->priv);
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task) {
+		pr_err("no empty task\n");
+		return -ENOMEM;
+	}
+
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)hw_update_entries->addr;
+	hfi_cmd = (struct hfi_cmd_ipebps_async *)hw_update_entries->addr;
+	ICP_DBG("request from hfi_cmd :%llu, hfi_cmd: %pK\n",
+		hfi_cmd->user_data2, hfi_cmd);
+	task_data->request_id = request_id;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+
 	return rc;
 }
 
@@ -972,18 +1066,10 @@
 	int rc = 0;
 	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
 	struct cam_hw_config_args *config_args = config_hw_args;
-	uint32_t fw_handle;
-	int ctx_id = 0;
 	struct cam_icp_hw_ctx_data *ctx_data = NULL;
-	int32_t request_id = 0;
-	struct cam_hw_update_entry *hw_update_entries;
-	struct crm_workq_task *task;
-	struct hfi_cmd_work_data *task_data;
-	struct hfi_cmd_ipebps_async *hfi_cmd;
 
 	if (!hw_mgr || !config_args) {
-		pr_err("Invalid arguments %pK %pK\n",
-			hw_mgr, config_args);
+		pr_err("Invalid arguments %pK %pK\n", hw_mgr, config_args);
 		return -EINVAL;
 	}
 
@@ -993,62 +1079,34 @@
 	}
 
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
-	fw_handle = *(uint32_t *)config_args->ctxt_to_hw_map;
-	ctx_id = cam_icp_mgr_get_ctx_from_fw_handle(hw_mgr, fw_handle);
-	if (ctx_id < 0) {
-		pr_err("Fw handle to ctx mapping is failed\n");
-		mutex_unlock(&hw_mgr->hw_mgr_mutex);
-		return -EINVAL;
-	}
-
-	ctx_data = &hw_mgr->ctx_data[ctx_id];
+	ctx_data = config_args->ctxt_to_hw_map;
 	if (!ctx_data->in_use) {
 		pr_err("ctx is not in use\n");
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto config_err;
 	}
-
-	request_id = *(uint32_t *)config_args->priv;
-	hw_update_entries = config_args->hw_update_entries;
-	ICP_DBG("req_id = %d\n", request_id);
-	ICP_DBG("fw_handle = %x req_id = %d %pK\n",
-		fw_handle, request_id, config_args->priv);
-	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
-	if (!task) {
-		pr_err("no empty task\n");
-		mutex_unlock(&hw_mgr->hw_mgr_mutex);
-		return -ENOMEM;
-	}
-
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
-	task_data = (struct hfi_cmd_work_data *)task->payload;
-	if (!task_data) {
-		pr_err("task_data is NULL\n");
-		return -EINVAL;
-	}
+	rc = cam_icp_mgr_enqueue_config(hw_mgr, config_args);
+	if (rc)
+		goto config_err;
 
-	task_data->data = (void *)hw_update_entries->addr;
-	hfi_cmd = (struct hfi_cmd_ipebps_async *)hw_update_entries->addr;
-	ICP_DBG("request from hfi_cmd :%llu, hfi_cmd: %pK\n",
-		hfi_cmd->user_data2, hfi_cmd);
-	task_data->request_id = request_id;
-	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
-	task->process_cb = cam_icp_mgr_process_cmd;
-	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
-			CRM_TASK_PRIORITY_0);
+	return 0;
+config_err:
+	cam_icp_mgr_handle_config_err(config_args, ctx_data);
 	return rc;
 }
 
 static int cam_icp_mgr_prepare_frame_process_cmd(
-			struct cam_icp_hw_ctx_data *ctx_data,
-			struct hfi_cmd_ipebps_async *hfi_cmd,
-			uint32_t request_id,
-			uint32_t fw_cmd_buf_iova_addr)
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct hfi_cmd_ipebps_async *hfi_cmd,
+	uint64_t request_id,
+	uint32_t fw_cmd_buf_iova_addr)
 {
 	hfi_cmd->size = sizeof(struct hfi_cmd_ipebps_async);
 	hfi_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
-	if (ctx_data->icp_dev_acquire_info.dev_type == CAM_ICP_RES_TYPE_BPS)
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
 		hfi_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS;
 	else
 		hfi_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS;
@@ -1058,80 +1116,28 @@
 	hfi_cmd->user_data1 = (uint64_t)ctx_data;
 	hfi_cmd->user_data2 = request_id;
 
-	ICP_DBG("ctx_data : %pK, request_id :%d cmd_buf %x\n",
-		(void *)ctx_data->context_priv,
-		request_id, fw_cmd_buf_iova_addr);
+	ICP_DBG("ctx_data : %pK, request_id :%lld cmd_buf %x\n",
+		(void *)ctx_data->context_priv, request_id,
+		fw_cmd_buf_iova_addr);
 
 	return 0;
 }
 
-static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
-				void *prepare_hw_update_args)
+static int cam_icp_mgr_pkt_validation(struct cam_packet *packet)
 {
-	int        rc = 0, i, j;
-	int        ctx_id = 0;
-	uint32_t   fw_handle;
-	int32_t    idx;
-	uint64_t   iova_addr;
-	uint32_t   fw_cmd_buf_iova_addr;
-	size_t     fw_cmd_buf_len;
-	int32_t    sync_in_obj[CAM_ICP_IPE_IMAGE_MAX];
-	int32_t    merged_sync_in_obj;
-
-
-	struct cam_hw_prepare_update_args *prepare_args =
-		prepare_hw_update_args;
-	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
-	struct cam_icp_hw_ctx_data *ctx_data = NULL;
-	struct cam_packet *packet = NULL;
-	struct cam_cmd_buf_desc *cmd_desc = NULL;
-	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
-	struct hfi_cmd_ipebps_async *hfi_cmd = NULL;
-
-	if ((!prepare_args) || (!hw_mgr)) {
-		pr_err("Invalid args\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&hw_mgr->hw_mgr_mutex);
-	fw_handle = *(uint32_t *)prepare_args->ctxt_to_hw_map;
-	ctx_id = cam_icp_mgr_get_ctx_from_fw_handle(hw_mgr, fw_handle);
-	if (ctx_id < 0) {
-		pr_err("Fw handle to ctx mapping is failed\n");
-		mutex_unlock(&hw_mgr->hw_mgr_mutex);
-		return -EINVAL;
-	}
-	mutex_unlock(&hw_mgr->hw_mgr_mutex);
-
-	ctx_data = &hw_mgr->ctx_data[ctx_id];
-	if (!ctx_data->in_use) {
-		pr_err("ctx is not in use\n");
-		return -EINVAL;
-	}
-
-	packet = prepare_args->packet;
-	if (!packet) {
-		pr_err("received packet is NULL\n");
-		return -EINVAL;
-	}
-
 	ICP_DBG("packet header : opcode = %x size = %x",
-			packet->header.op_code,
-			packet->header.size);
+		packet->header.op_code,	packet->header.size);
 
 	ICP_DBG(" req_id = %x flags = %x\n",
-			(uint32_t)packet->header.request_id,
-			packet->header.flags);
+		(uint32_t)packet->header.request_id, packet->header.flags);
 
 	ICP_DBG("packet data : c_off = %x c_num = %x\n",
-			packet->cmd_buf_offset,
-			packet->num_cmd_buf);
+		packet->cmd_buf_offset,	packet->num_cmd_buf);
 
 	ICP_DBG("io_off = %x io_num = %x p_off = %x p_num = %x %x %x\n",
-			packet->io_configs_offset,
-			packet->num_io_configs, packet->patch_offset,
-			packet->num_patches, packet->kmd_cmd_buf_index,
-			packet->kmd_cmd_buf_offset);
+		packet->io_configs_offset, packet->num_io_configs,
+		packet->patch_offset, packet->num_patches,
+		packet->kmd_cmd_buf_index, packet->kmd_cmd_buf_offset);
 
 	if (((packet->header.op_code & 0xff) !=
 		CAM_ICP_OPCODE_IPE_UPDATE) &&
@@ -1143,66 +1149,78 @@
 	}
 
 	if ((packet->num_cmd_buf > 1) || (!packet->num_patches) ||
-					(!packet->num_io_configs)) {
+		(!packet->num_io_configs)) {
 		pr_err("wrong number of cmd/patch info: %u %u\n",
-				packet->num_cmd_buf,
-				packet->num_patches);
+			packet->num_cmd_buf, packet->num_patches);
 		return -EINVAL;
 	}
 
-	/* process command buffer descriptors */
+	return 0;
+}
+
+static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_packet *packet,
+	uint32_t *fw_cmd_buf_iova_addr)
+{
+	int rc = 0;
+	uint64_t iova_addr;
+	size_t   fw_cmd_buf_len;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+
 	cmd_desc = (struct cam_cmd_buf_desc *)
-			((uint32_t *) &packet->payload +
-				packet->cmd_buf_offset/4);
+		((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
 	ICP_DBG("packet = %pK cmd_desc = %pK size = %lu\n",
-			(void *)packet, (void *)cmd_desc,
-			sizeof(struct cam_cmd_buf_desc));
+		(void *)packet, (void *)cmd_desc,
+		sizeof(struct cam_cmd_buf_desc));
 
 	rc = cam_mem_get_io_buf(cmd_desc->mem_handle,
 		hw_mgr->iommu_hdl, &iova_addr, &fw_cmd_buf_len);
-	if (rc < 0) {
+	if (rc) {
 		pr_err("unable to get src buf info for cmd buf: %x\n",
-						hw_mgr->iommu_hdl);
+			hw_mgr->iommu_hdl);
 		return rc;
 	}
 	ICP_DBG("cmd_buf desc cpu and iova address: %pK %zu\n",
-				(void *)iova_addr, fw_cmd_buf_len);
-	fw_cmd_buf_iova_addr = iova_addr;
-	fw_cmd_buf_iova_addr = (fw_cmd_buf_iova_addr + cmd_desc->offset);
+		(void *)iova_addr, fw_cmd_buf_len);
 
-	/* Update Buffer Address from handles and patch information */
-	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl);
-	if (rc) {
-		pr_err("Patch processing failed\n");
-		return rc;
-	}
+	*fw_cmd_buf_iova_addr = iova_addr;
+	*fw_cmd_buf_iova_addr = (*fw_cmd_buf_iova_addr + cmd_desc->offset);
 
-	/* process io config out descriptors */
+	return rc;
+}
+
+static int cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_packet *packet,
+	struct cam_hw_prepare_update_args *prepare_args)
+{
+	int        rc = 0, i, j;
+	int32_t    sync_in_obj[CAM_ICP_IPE_IMAGE_MAX];
+	int32_t    merged_sync_in_obj;
+	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+
 	io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
 				packet->io_configs_offset/4);
 	ICP_DBG("packet = %pK io_cfg_ptr = %pK size = %lu\n",
-			(void *)packet, (void *)io_cfg_ptr,
-			sizeof(struct cam_buf_io_cfg));
+		(void *)packet, (void *)io_cfg_ptr,
+		sizeof(struct cam_buf_io_cfg));
 
 	prepare_args->num_out_map_entries = 0;
 	for (i = 0, j = 0; i < packet->num_io_configs; i++) {
 		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
 			ICP_DBG("direction is i : %d :%u\n",
-					i, io_cfg_ptr[i].direction);
+				i, io_cfg_ptr[i].direction);
 			ICP_DBG("fence is i : %d :%d\n",
-					i, io_cfg_ptr[i].fence);
+				i, io_cfg_ptr[i].fence);
 			continue;
 		}
 
-		prepare_args->out_map_entries[j].sync_id = io_cfg_ptr[i].fence;
-		prepare_args->out_map_entries[j++].resource_handle =
-							io_cfg_ptr[i].fence;
+		prepare_args->out_map_entries[j++].sync_id =
+			io_cfg_ptr[i].fence;
 		prepare_args->num_out_map_entries++;
 		ICP_DBG(" out fence = %x index = %d\n", io_cfg_ptr[i].fence, i);
 	}
-	ICP_DBG("out buf entries processing is done\n");
 
-	/* process io config in descriptors */
 	for (i = 0, j = 0; i < packet->num_io_configs; i++) {
 		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
 			sync_in_obj[j++] = io_cfg_ptr[i].fence;
@@ -1211,13 +1229,12 @@
 		}
 	}
 
-	if (j == 1)
+	if (j == 1) {
 		merged_sync_in_obj = sync_in_obj[j - 1];
-	else if (j > 1) {
+	} else if (j > 1) {
 		rc = cam_sync_merge(&sync_in_obj[0], j, &merged_sync_in_obj);
-		if (rc < 0) {
-			pr_err("unable to create in merged object: %d\n",
-								rc);
+		if (rc) {
+			pr_err("unable to create in merged object: %d\n", rc);
 			return rc;
 		}
 	} else {
@@ -1227,100 +1244,186 @@
 
 	prepare_args->in_map_entries[0].sync_id = merged_sync_in_obj;
 	prepare_args->in_map_entries[0].resource_handle =
-			ctx_data->icp_dev_acquire_info.dev_type;
+		ctx_data->icp_dev_acquire_info->dev_type;
 	prepare_args->num_in_map_entries = 1;
-	ICP_DBG("out buf entries processing is done\n");
+
+	return rc;
+}
+
+static int cam_icp_mgr_update_hfi_frame_process(
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_packet *packet,
+	struct cam_hw_prepare_update_args *prepare_args,
+	int32_t *idx)
+{
+	int32_t index;
 
 	mutex_lock(&ctx_data->hfi_frame_process.lock);
-	idx = find_first_zero_bit(ctx_data->hfi_frame_process.bitmap,
-			ctx_data->hfi_frame_process.bits);
-	if (idx < 0 || idx >= CAM_FRAME_CMD_MAX) {
-		pr_err("request idx is wrong: %d\n", idx);
+	index = find_first_zero_bit(ctx_data->hfi_frame_process.bitmap,
+		ctx_data->hfi_frame_process.bits);
+	if (index < 0 || index >= CAM_FRAME_CMD_MAX) {
+		pr_err("request idx is wrong: %d\n", index);
 		mutex_unlock(&ctx_data->hfi_frame_process.lock);
 		return -EINVAL;
 	}
-	set_bit(idx, ctx_data->hfi_frame_process.bitmap);
+	set_bit(index, ctx_data->hfi_frame_process.bitmap);
 	mutex_unlock(&ctx_data->hfi_frame_process.lock);
 
-	ctx_data->hfi_frame_process.request_id[idx] = packet->header.request_id;
-	ICP_DBG("slot[%d]: %d\n", idx,
-		ctx_data->hfi_frame_process.request_id[idx]);
-	ctx_data->hfi_frame_process.num_out_resources[idx] =
-				prepare_args->num_out_map_entries;
-	for (i = 0; i < prepare_args->num_out_map_entries; i++)
-		ctx_data->hfi_frame_process.out_resource[idx][i] =
-			prepare_args->out_map_entries[i].resource_handle;
+	ctx_data->hfi_frame_process.request_id[index] =
+		packet->header.request_id;
+	ICP_DBG("slot[%d]: %lld\n", index,
+		ctx_data->hfi_frame_process.request_id[index]);
+	*idx = index;
+
+	return 0;
+}
+
+static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
+	void *prepare_hw_update_args)
+{
+	int        rc = 0;
+	int32_t    idx;
+	uint32_t   fw_cmd_buf_iova_addr;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct cam_packet *packet = NULL;
+	struct hfi_cmd_ipebps_async *hfi_cmd = NULL;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_prepare_update_args *prepare_args =
+		prepare_hw_update_args;
+
+	if ((!prepare_args) || (!hw_mgr) || (!prepare_args->packet)) {
+		pr_err("Invalid args\n");
+		return -EINVAL;
+	}
+
+	ctx_data = prepare_args->ctxt_to_hw_map;
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (!ctx_data->in_use) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		pr_err("ctx is not in use\n");
+		return -EINVAL;
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	packet = prepare_args->packet;
+
+	rc = cam_icp_mgr_pkt_validation(packet);
+	if (rc)
+		return rc;
+
+	rc = cam_icp_mgr_process_cmd_desc(hw_mgr, packet,
+		&fw_cmd_buf_iova_addr);
+	if (rc)
+		return rc;
+
+	/* Update Buffer Address from handles and patch information */
+	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl);
+	if (rc)
+		return rc;
+
+	rc = cam_icp_mgr_process_io_cfg(hw_mgr, ctx_data,
+		packet, prepare_args);
+	if (rc)
+		return rc;
+
+	rc = cam_icp_mgr_update_hfi_frame_process(ctx_data, packet,
+		prepare_args, &idx);
+	if (rc) {
+		if (prepare_args->in_map_entries[0].sync_id > 0)
+			cam_sync_destroy(
+				prepare_args->in_map_entries[0].sync_id);
+		return rc;
+	}
 
 	hfi_cmd = (struct hfi_cmd_ipebps_async *)
 			&ctx_data->hfi_frame_process.hfi_frame_cmd[idx];
 
 	cam_icp_mgr_prepare_frame_process_cmd(
-			ctx_data, hfi_cmd, packet->header.request_id,
-			fw_cmd_buf_iova_addr);
+		ctx_data, hfi_cmd, packet->header.request_id,
+		fw_cmd_buf_iova_addr);
 
 	prepare_args->num_hw_update_entries = 1;
 	prepare_args->hw_update_entries[0].addr = (uint64_t)hfi_cmd;
-
 	prepare_args->priv = &ctx_data->hfi_frame_process.request_id[idx];
-
-	ICP_DBG("slot : %d, hfi_cmd : %pK, request : %d\n",	idx,
+	ICP_DBG("slot : %d, hfi_cmd : %pK, request : %lld\n", idx,
 		(void *)hfi_cmd,
 		ctx_data->hfi_frame_process.request_id[idx]);
 
 	return rc;
 }
 
+static int cam_icp_mgr_send_abort_status(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	struct hfi_frame_process_info *hfi_frame_process;
+	int idx;
+
+	mutex_lock(&ctx_data->hfi_frame_process.lock);
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+		if (!hfi_frame_process->request_id[idx])
+			continue;
+
+		ctx_data->ctxt_event_cb(ctx_data->context_priv, true,
+			&hfi_frame_process->request_id[idx]);
+
+		/* now release memory for hfi frame process command */
+		hfi_frame_process->request_id[idx] = 0;
+		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+	}
+	mutex_unlock(&ctx_data->hfi_frame_process.lock);
+
+	return 0;
+}
+
 static int cam_icp_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
 {
 	int rc = 0;
 	int ctx_id = 0;
-	int i;
-	uint32_t fw_handle;
 	struct cam_hw_release_args *release_hw = release_hw_args;
 	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
 	struct cam_icp_hw_ctx_data *ctx_data = NULL;
 
 	if (!release_hw || !hw_mgr) {
-		pr_err("Invalid args\n");
+		pr_err("Invalid args: %pK %pK\n", release_hw, hw_mgr);
 		return -EINVAL;
 	}
 
-	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
-		ctx_data = &hw_mgr->ctx_data[i];
-		ICP_DBG("i = %d in_use = %u fw_handle = %u\n", i,
-				ctx_data->in_use, ctx_data->fw_handle);
-	}
-
-	mutex_lock(&hw_mgr->hw_mgr_mutex);
-	fw_handle = *(uint32_t *)release_hw->ctxt_to_hw_map;
-	ctx_id = cam_icp_mgr_get_ctx_from_fw_handle(hw_mgr, fw_handle);
-	if (ctx_id < 0) {
-		pr_err("Invalid ctx id\n");
-		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	ctx_data = release_hw->ctxt_to_hw_map;
+	ctx_id = ctx_data->ctx_id;
+	if (ctx_id < 0 || ctx_id >= CAM_ICP_CTX_MAX) {
+		pr_err("Invalid ctx id: %d\n", ctx_id);
 		return -EINVAL;
 	}
-	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	if (release_hw->active_req) {
+		cam_icp_mgr_abort_handle(ctx_data);
+		cam_icp_mgr_send_abort_status(ctx_data);
+	}
 
 	rc = cam_icp_mgr_release_ctx(hw_mgr, ctx_id);
-	if (rc)
-		return -EINVAL;
+	if (!hw_mgr->ctxt_cnt)
+		cam_icp_mgr_hw_close(hw_mgr, NULL);
 
-	ICP_DBG("fw handle %d\n", fw_handle);
 	return rc;
 }
 
 static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data,
-			struct crm_workq_task *task, uint32_t io_buf_addr)
+	uint32_t io_buf_addr)
 {
 	int rc = 0;
 	struct hfi_cmd_work_data *task_data;
 	struct hfi_cmd_ipebps_async ioconfig_cmd;
 	unsigned long rem_jiffies;
 	int timeout = 5000;
+	struct crm_workq_task *task;
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task)
+		return -ENOMEM;
 
 	ioconfig_cmd.size = sizeof(struct hfi_cmd_ipebps_async);
 	ioconfig_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
-	if (ctx_data->icp_dev_acquire_info.dev_type == CAM_ICP_RES_TYPE_BPS)
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
 		ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO;
 	else
 		ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO;
@@ -1338,29 +1441,37 @@
 	task_data->request_id = 0;
 	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
 	task->process_cb = cam_icp_mgr_process_cmd;
-	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc)
+		return rc;
+
 	ICP_DBG("fw_hdl = %x ctx_data = %pK\n", ctx_data->fw_handle, ctx_data);
 
 	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
 			msecs_to_jiffies((timeout)));
 	if (!rem_jiffies) {
 		rc = -ETIMEDOUT;
-		pr_err("FW response timed out %d\n", rc);
+		ICP_DBG("FW response timed out %d\n", rc);
 	}
 
 	return rc;
 }
 
 static int cam_icp_mgr_create_handle(uint32_t dev_type,
-	struct cam_icp_hw_ctx_data *ctx_data,
-	struct crm_workq_task *task)
+	struct cam_icp_hw_ctx_data *ctx_data)
 {
 	struct hfi_cmd_create_handle create_handle;
 	struct hfi_cmd_work_data *task_data;
 	unsigned long rem_jiffies;
 	int timeout = 5000;
+	struct crm_workq_task *task;
 	int rc = 0;
 
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task)
+		return -ENOMEM;
+
 	create_handle.size = sizeof(struct hfi_cmd_create_handle);
 	create_handle.pkt_type = HFI_CMD_IPEBPS_CREATE_HANDLE;
 	create_handle.handle_type = dev_type;
@@ -1375,27 +1486,36 @@
 	task_data->request_id = 0;
 	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
 	task->process_cb = cam_icp_mgr_process_cmd;
-	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc)
+		return rc;
 
 	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
 			msecs_to_jiffies((timeout)));
 	if (!rem_jiffies) {
 		rc = -ETIMEDOUT;
-		pr_err("FW response timed out %d\n", rc);
+		ICP_DBG("FW response timed out %d\n", rc);
 	}
 
 	return rc;
 }
 
-static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data,
-	struct crm_workq_task *task)
+static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data)
 {
 	struct hfi_cmd_ping_pkt ping_pkt;
 	struct hfi_cmd_work_data *task_data;
 	unsigned long rem_jiffies;
 	int timeout = 5000;
+	struct crm_workq_task *task;
 	int rc = 0;
 
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task) {
+		pr_err("No free task to send ping command\n");
+		return -ENOMEM;
+	}
+
 	ping_pkt.size = sizeof(struct hfi_cmd_ping_pkt);
 	ping_pkt.pkt_type = HFI_CMD_SYS_PING;
 	ping_pkt.user_data = (uint64_t)ctx_data;
@@ -1409,32 +1529,86 @@
 	task_data->request_id = 0;
 	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
 	task->process_cb = cam_icp_mgr_process_cmd;
-	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
+
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc)
+		return rc;
 
 	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
 			msecs_to_jiffies((timeout)));
 	if (!rem_jiffies) {
 		rc = -ETIMEDOUT;
-		pr_err("FW response timed out %d\n", rc);
+		ICP_DBG("FW response timed out %d\n", rc);
 	}
 
-
 	return rc;
 }
 
+static int cam_icp_get_acquire_info(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_hw_acquire_args *args,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int i;
+	int acquire_size;
+	struct cam_icp_acquire_dev_info icp_dev_acquire_info;
+	struct cam_icp_res_info *p_icp_out = NULL;
+
+	if (copy_from_user(&icp_dev_acquire_info,
+		(void __user *)args->acquire_info,
+		sizeof(struct cam_icp_acquire_dev_info)))
+		return -EFAULT;
+
+	if (icp_dev_acquire_info.num_out_res > ICP_MAX_OUTPUT_SUPPORTED) {
+		pr_err("num of out resources exceeding : %u\n",
+			icp_dev_acquire_info.num_out_res);
+		return -EINVAL;
+	}
+
+	acquire_size = sizeof(struct cam_icp_acquire_dev_info) +
+		(icp_dev_acquire_info.num_out_res *
+		sizeof(struct cam_icp_res_info));
+	ctx_data->icp_dev_acquire_info = kzalloc(acquire_size, GFP_KERNEL);
+	if (!ctx_data->icp_dev_acquire_info)
+		return -ENOMEM;
+
+	if (copy_from_user(ctx_data->icp_dev_acquire_info,
+		(void __user *)args->acquire_info, acquire_size)) {
+		kfree(ctx_data->icp_dev_acquire_info);
+		ctx_data->icp_dev_acquire_info = NULL;
+		return -EFAULT;
+	}
+
+	ICP_DBG("%x %x %x %x %x %x %x\n",
+		ctx_data->icp_dev_acquire_info->dev_type,
+		ctx_data->icp_dev_acquire_info->in_res.format,
+		ctx_data->icp_dev_acquire_info->in_res.width,
+		ctx_data->icp_dev_acquire_info->in_res.height,
+		ctx_data->icp_dev_acquire_info->in_res.fps,
+		ctx_data->icp_dev_acquire_info->num_out_res,
+		ctx_data->icp_dev_acquire_info->scratch_mem_size);
+
+	p_icp_out = ctx_data->icp_dev_acquire_info->out_res;
+	for (i = 0; i < ctx_data->icp_dev_acquire_info->num_out_res; i++)
+		ICP_DBG("out[i] %x %x %x %x\n",
+			p_icp_out[i].format,
+			p_icp_out[i].width,
+			p_icp_out[i].height,
+			p_icp_out[i].fps);
+
+	return 0;
+}
+
 static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
 {
-	int rc = 0, i, bitmap_size = 0, tmp_size;
+	int rc = 0, bitmap_size = 0;
 	uint32_t ctx_id = 0;
 	uint64_t io_buf_addr;
 	size_t io_buf_size;
 	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
 	struct cam_icp_hw_ctx_data *ctx_data = NULL;
 	struct cam_hw_acquire_args *args = acquire_hw_args;
-	struct cam_icp_acquire_dev_info icp_dev_acquire_info;
-	struct cam_icp_res_info *p_icp_out = NULL;
-	struct crm_workq_task *task;
-	uint8_t *tmp_acquire;
+	struct cam_icp_acquire_dev_info *icp_dev_acquire_info;
 
 	if ((!hw_mgr_priv) || (!acquire_hw_args)) {
 		pr_err("Invalid params: %pK %pK\n", hw_mgr_priv,
@@ -1447,140 +1621,71 @@
 		return -EINVAL;
 	}
 
-	if (copy_from_user(&icp_dev_acquire_info,
-			(void __user *)args->acquire_info,
-			sizeof(icp_dev_acquire_info)))
-		return -EFAULT;
-
-	if (icp_dev_acquire_info.num_out_res > ICP_IPE_MAX_OUTPUT_SUPPORTED) {
-		pr_err("num of out resources exceeding : %u\n",
-			icp_dev_acquire_info.num_out_res);
-		return -EINVAL;
-	}
-
-	ICP_DBG("%x %x %x %x %x %x %x\n",
-		icp_dev_acquire_info.dev_type,
-		icp_dev_acquire_info.in_res.format,
-		icp_dev_acquire_info.in_res.width,
-		icp_dev_acquire_info.in_res.height,
-		icp_dev_acquire_info.in_res.fps,
-		icp_dev_acquire_info.num_out_res,
-		icp_dev_acquire_info.scratch_mem_size);
-
-	tmp_size = sizeof(icp_dev_acquire_info) +
-			icp_dev_acquire_info.num_out_res *
-			sizeof(struct cam_icp_res_info);
-
-	tmp_acquire = kzalloc(tmp_size, GFP_KERNEL);
-	if (!tmp_acquire)
-		return -EINVAL;
-
-	if (copy_from_user(tmp_acquire,
-			(void __user *)args->acquire_info,
-			tmp_size)) {
-		kfree(tmp_acquire);
-		return -EFAULT;
-	}
-
-	p_icp_out =
-		(struct cam_icp_res_info *)(tmp_acquire +
-		sizeof(icp_dev_acquire_info)-
-		sizeof(struct cam_icp_res_info));
-	ICP_DBG("out[0] %x %x %x %x\n",
-		p_icp_out[0].format,
-		p_icp_out[0].width,
-		p_icp_out[0].height,
-		p_icp_out[0].fps);
-
-	ICP_DBG("out[1] %x %x %x %x\n",
-		p_icp_out[1].format,
-		p_icp_out[1].width,
-		p_icp_out[1].height,
-		p_icp_out[1].fps);
-
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	ctx_id = cam_icp_mgr_get_free_ctx(hw_mgr);
 	if (ctx_id >= CAM_ICP_CTX_MAX) {
 		pr_err("No free ctx space in hw_mgr\n");
-		kfree(tmp_acquire);
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
-		return -EFAULT;
+		return -ENOSPC;
 	}
-
-	/* Fill ctx with acquire info */
 	ctx_data = &hw_mgr->ctx_data[ctx_id];
+	ctx_data->ctx_id = ctx_id;
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
-	/* Fill ctx with acquire info */
 	mutex_lock(&ctx_data->ctx_mutex);
-	ctx_data->icp_dev_acquire_info = icp_dev_acquire_info;
-	for (i = 0; i < icp_dev_acquire_info.num_out_res; i++)
-		ctx_data->icp_out_acquire_info[i] = p_icp_out[i];
+	rc = cam_icp_get_acquire_info(hw_mgr, args, ctx_data);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		goto acquire_info_failed;
+	}
+	icp_dev_acquire_info = ctx_data->icp_dev_acquire_info;
 	mutex_unlock(&ctx_data->ctx_mutex);
 
 	/* Get IOCONFIG command info */
-	if (ctx_data->icp_dev_acquire_info.secure_mode)
+	if (icp_dev_acquire_info->secure_mode)
 		rc = cam_mem_get_io_buf(
-			icp_dev_acquire_info.io_config_cmd_handle,
+			icp_dev_acquire_info->io_config_cmd_handle,
 			hw_mgr->iommu_sec_hdl,
 			&io_buf_addr, &io_buf_size);
 	else
 		rc = cam_mem_get_io_buf(
-			icp_dev_acquire_info.io_config_cmd_handle,
+			icp_dev_acquire_info->io_config_cmd_handle,
 			hw_mgr->iommu_hdl,
 			&io_buf_addr, &io_buf_size);
 
+	if (rc) {
+		pr_err("unable to get src buf info from io desc\n");
+		goto get_io_buf_failed;
+	}
 	ICP_DBG("io_config_cmd_handle : %d\n",
-		icp_dev_acquire_info.io_config_cmd_handle);
+		icp_dev_acquire_info->io_config_cmd_handle);
 	ICP_DBG("io_buf_addr : %pK\n", (void *)io_buf_addr);
 	ICP_DBG("io_buf_size : %zu\n", io_buf_size);
-	if (rc < 0) {
-		pr_err("unable to get src buf info from io desc\n");
-		goto cmd_cpu_buf_failed;
-	}
 
-	mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
-	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
-	if (!task) {
-		pr_err("no free task\n");
-		mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
-		goto get_create_task_failed;
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (!hw_mgr->ctxt_cnt) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		rc = cam_icp_mgr_download_fw(hw_mgr, ctx_data);
+		if (rc)
+			goto get_io_buf_failed;
+		mutex_lock(&hw_mgr->hw_mgr_mutex);
 	}
-	mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
-	rc = cam_icp_mgr_send_ping(ctx_data, task);
+	rc = cam_icp_mgr_send_ping(ctx_data);
 	if (rc) {
 		pr_err("ping ack not received\n");
-		goto create_handle_failed;
+		goto send_ping_failed;
 	}
-	mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
-	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
-	if (!task) {
-		pr_err("no free task\n");
-		mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
-		goto get_create_task_failed;
-	}
-	mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
 
-	/* Send create fw handle command */
-	rc = cam_icp_mgr_create_handle(icp_dev_acquire_info.dev_type,
-			ctx_data, task);
+	rc = cam_icp_mgr_create_handle(icp_dev_acquire_info->dev_type,
+		ctx_data);
 	if (rc) {
 		pr_err("create handle failed\n");
 		goto create_handle_failed;
 	}
 
-	/* Send IOCONFIG command */
-	mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
-	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
-	if (!task) {
-		pr_err("no empty task\n");
-		mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
-		goto get_ioconfig_task_failed;
-	}
-	mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
-
-	rc = cam_icp_mgr_send_config_io(ctx_data, task, io_buf_addr);
+	rc = cam_icp_mgr_send_config_io(ctx_data, io_buf_addr);
 	if (rc) {
 		pr_err("IO Config command failed\n");
 		goto ioconfig_failed;
@@ -1588,42 +1693,46 @@
 
 	mutex_lock(&ctx_data->ctx_mutex);
 	ctx_data->context_priv = args->context_data;
-	args->ctxt_to_hw_map = &ctx_data->fw_handle;
+	args->ctxt_to_hw_map = ctx_data;
 
 	bitmap_size = BITS_TO_LONGS(CAM_FRAME_CMD_MAX) * sizeof(long);
 	ctx_data->hfi_frame_process.bitmap =
-			kzalloc(sizeof(bitmap_size), GFP_KERNEL);
+			kzalloc(bitmap_size, GFP_KERNEL);
+	if (!ctx_data->hfi_frame_process.bitmap)
+		goto ioconfig_failed;
 	ctx_data->hfi_frame_process.bits = bitmap_size * BITS_PER_BYTE;
 	mutex_init(&ctx_data->hfi_frame_process.lock);
+	hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb;
+	icp_dev_acquire_info->scratch_mem_size = ctx_data->scratch_mem_size;
 	mutex_unlock(&ctx_data->ctx_mutex);
 
-	hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb;
-
-	icp_dev_acquire_info.scratch_mem_size = ctx_data->scratch_mem_size;
 	if (copy_to_user((void __user *)args->acquire_info,
-				&icp_dev_acquire_info,
-			sizeof(icp_dev_acquire_info)))
+		icp_dev_acquire_info, sizeof(struct cam_icp_acquire_dev_info)))
 		goto copy_to_user_failed;
 
 	ICP_DBG("scratch mem size = %x fw_handle = %x\n",
-			(unsigned int)icp_dev_acquire_info.scratch_mem_size,
+			(unsigned int)icp_dev_acquire_info->scratch_mem_size,
 			(unsigned int)ctx_data->fw_handle);
-	kfree(tmp_acquire);
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	hw_mgr->ctxt_cnt++;
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
 	return 0;
 
 copy_to_user_failed:
+	kfree(ctx_data->hfi_frame_process.bitmap);
+	ctx_data->hfi_frame_process.bitmap = NULL;
 ioconfig_failed:
-get_ioconfig_task_failed:
-	mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
-	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
-	mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
-	if (task)
-		cam_icp_mgr_destroy_handle(ctx_data, task);
+	cam_icp_mgr_destroy_handle(ctx_data);
+send_ping_failed:
 create_handle_failed:
-get_create_task_failed:
-cmd_cpu_buf_failed:
-	cam_icp_mgr_release_ctx(hw_mgr, ctx_id);
-	kfree(tmp_acquire);
+	if (!hw_mgr->ctxt_cnt)
+		cam_icp_mgr_hw_close(hw_mgr, NULL);
+get_io_buf_failed:
+	kfree(hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info);
+	hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
+acquire_info_failed:
+	cam_icp_mgr_put_ctx(ctx_data);
 	return rc;
 }
 
@@ -1639,25 +1748,22 @@
 	}
 
 	if (copy_from_user(&icp_hw_mgr.icp_caps,
-			(void __user *)query_cap->caps_handle,
-			sizeof(struct cam_icp_query_cap_cmd))) {
+		(void __user *)query_cap->caps_handle,
+		sizeof(struct cam_icp_query_cap_cmd))) {
 		pr_err("copy_from_user failed\n");
 		return -EFAULT;
 	}
 
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	rc = hfi_get_hw_caps(&icp_hw_mgr.icp_caps);
-	if (rc < 0) {
-		pr_err("Unable to get caps from HFI: %d\n", rc);
+	if (rc)
 		goto hfi_get_caps_fail;
-	}
 
 	icp_hw_mgr.icp_caps.dev_iommu_handle.non_secure = hw_mgr->iommu_hdl;
 	icp_hw_mgr.icp_caps.dev_iommu_handle.secure = hw_mgr->iommu_sec_hdl;
 
 	if (copy_to_user((void __user *)query_cap->caps_handle,
-			&icp_hw_mgr.icp_caps,
-			sizeof(struct cam_icp_query_cap_cmd))) {
+		&icp_hw_mgr.icp_caps, sizeof(struct cam_icp_query_cap_cmd))) {
 		pr_err("copy_to_user failed\n");
 		rc = -EFAULT;
 		goto hfi_get_caps_fail;
@@ -1672,7 +1778,6 @@
 {
 	int count, i, rc = 0;
 	uint32_t num_dev;
-	uint32_t num_ipe_dev;
 	const char *name = NULL;
 	struct device_node *child_node = NULL;
 	struct platform_device *child_pdev = NULL;
@@ -1712,7 +1817,7 @@
 
 	/* Get number of a5 device nodes and a5 mem allocation */
 	rc = of_property_read_u32(of_node, "num-a5", &num_dev);
-	if (rc < 0) {
+	if (rc) {
 		pr_err("getting num of a5 failed\n");
 		goto num_dev_failed;
 	}
@@ -1725,14 +1830,14 @@
 	}
 
 	/* Get number of ipe device nodes and ipe mem allocation */
-	rc = of_property_read_u32(of_node, "num-ipe", &num_ipe_dev);
-	if (rc < 0) {
+	rc = of_property_read_u32(of_node, "num-ipe", &num_dev);
+	if (rc) {
 		pr_err("getting number of ipe dev nodes failed\n");
 		goto num_ipe_failed;
 	}
 
 	icp_hw_mgr.devices[CAM_ICP_DEV_IPE] = kzalloc(
-		sizeof(struct cam_hw_intf *) * num_ipe_dev, GFP_KERNEL);
+		sizeof(struct cam_hw_intf *) * num_dev, GFP_KERNEL);
 	if (!icp_hw_mgr.devices[CAM_ICP_DEV_IPE]) {
 		rc = -ENOMEM;
 		goto num_ipe_failed;
@@ -1740,7 +1845,7 @@
 
 	/* Get number of bps device nodes and bps mem allocation */
 	rc = of_property_read_u32(of_node, "num-bps", &num_dev);
-	if (rc < 0) {
+	if (rc) {
 		pr_err("read num bps devices failed\n");
 		goto num_bps_failed;
 	}
@@ -1753,8 +1858,8 @@
 
 	for (i = 0; i < count; i++) {
 		rc = of_property_read_string_index(of_node, "compat-hw-name",
-								i, &name);
-		if (rc < 0) {
+			i, &name);
+		if (rc) {
 			pr_err("getting dev object name failed\n");
 			goto compat_hw_name_failed;
 		}
@@ -1776,7 +1881,7 @@
 		}
 
 		child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata(
-								child_pdev);
+			child_pdev);
 		if (!child_dev_intf) {
 			pr_err("no child device\n");
 			of_node_put(child_node);
@@ -1793,28 +1898,27 @@
 	}
 
 	rc = cam_smmu_get_handle("icp", &icp_hw_mgr.iommu_hdl);
-	if (rc < 0) {
-		pr_err("icp get iommu handle failed\n");
+	if (rc) {
+		pr_err("icp get iommu handle failed: %d\n", rc);
 		goto compat_hw_name_failed;
 	}
 
-	pr_err("mmu handle :%d\n", icp_hw_mgr.iommu_hdl);
 	rc = cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_ATTACH);
-	if (rc < 0) {
+	if (rc) {
 		pr_err("icp attach failed: %d\n", rc);
 		goto icp_attach_failed;
 	}
 
 	rc = cam_req_mgr_workq_create("icp_command_queue", ICP_WORKQ_NUM_TASK,
 		&icp_hw_mgr.cmd_work, CRM_WORKQ_USAGE_NON_IRQ);
-	if (rc < 0) {
+	if (rc) {
 		pr_err("unable to create a worker\n");
 		goto cmd_work_failed;
 	}
 
 	rc = cam_req_mgr_workq_create("icp_message_queue", ICP_WORKQ_NUM_TASK,
 		&icp_hw_mgr.msg_work, CRM_WORKQ_USAGE_IRQ);
-	if (rc < 0) {
+	if (rc) {
 		pr_err("unable to create a worker\n");
 		goto msg_work_failed;
 	}
@@ -1856,6 +1960,7 @@
 cmd_work_failed:
 	cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_DETACH);
 icp_attach_failed:
+	cam_smmu_destroy_handle(icp_hw_mgr.iommu_hdl);
 	icp_hw_mgr.iommu_hdl = 0;
 compat_hw_name_failed:
 	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_BPS]);
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
similarity index 93%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index 32d796a..6fa32fa 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -39,8 +39,8 @@
 
 #define ICP_PACKET_SIZE         0
 #define ICP_PACKET_TYPE         1
-#define ICP_PACKET_IPCODE       2
-#define ICP_IPE_MAX_OUTPUT_SUPPORTED 6
+#define ICP_PACKET_OPCODE       2
+#define ICP_MAX_OUTPUT_SUPPORTED 6
 
 /**
  * struct icp_hfi_mem_info
@@ -100,7 +100,7 @@
 	void *bitmap;
 	size_t bits;
 	struct mutex lock;
-	int32_t request_id[CAM_FRAME_CMD_MAX];
+	uint64_t request_id[CAM_FRAME_CMD_MAX];
 	uint32_t num_out_resources[CAM_FRAME_CMD_MAX];
 	uint32_t out_resource[CAM_FRAME_CMD_MAX][CAM_MAX_OUT_RES];
 };
@@ -113,13 +113,13 @@
  * @scratch_mem_size: Scratch memory size
  * @acquire_dev_cmd: Acquire command
  * @icp_dev_acquire_info: Acquire device info
- * @icp_out_acquire_info: Acquire out resource info
  * @ctxt_event_cb: Context callback function
  * @in_use: Flag for context usage
  * @role: Role of a context in case of chaining
  * @chain_ctx: Peer context
  * @hfi_frame_process: Frame process command
  * @wait_complete: Completion info
+ * @ctx_id: Context Id
  * @temp_payload: Payload for destroy handle data
  */
 struct cam_icp_hw_ctx_data {
@@ -128,15 +128,15 @@
 	uint32_t fw_handle;
 	uint32_t scratch_mem_size;
 	struct cam_acquire_dev_cmd acquire_dev_cmd;
-	struct cam_icp_acquire_dev_info icp_dev_acquire_info;
-	struct cam_icp_res_info icp_out_acquire_info[CAM_MAX_OUT_RES];
+	struct cam_icp_acquire_dev_info *icp_dev_acquire_info;
 	cam_hw_event_cb_func ctxt_event_cb;
-	uint32_t in_use;
+	bool in_use;
 	uint32_t role;
 	struct cam_icp_hw_ctx_data *chain_ctx;
 	struct hfi_frame_process_info hfi_frame_process;
 	struct completion wait_complete;
 	struct ipe_bps_destroy temp_payload;
+	uint32_t ctx_id;
 };
 
 /**
@@ -183,4 +183,6 @@
 	bool a5_debug;
 };
 
+static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args);
+static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args);
 #endif /* CAM_ICP_HW_MGR_H */
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/include/cam_icp_hw_mgr_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/Makefile
new file mode 100644
index 0000000..f904ea4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += ipe_dev.o ipe_core.o ipe_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
similarity index 98%
rename from drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
index 07f63d2..99b45aa 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
@@ -72,7 +72,7 @@
 	}
 
 	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
-	cpas_vote.ahb_vote.vote.level = CAM_TURBO_VOTE;
+	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
 	cpas_vote.axi_vote.compressed_bw = ICP_TURBO_VOTE;
 	cpas_vote.axi_vote.uncompressed_bw = ICP_TURBO_VOTE;
 
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_dev.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
similarity index 95%
rename from drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
index 527e716..e691dad 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
@@ -66,7 +66,8 @@
 {
 	int rc = 0;
 
-	rc = cam_soc_util_enable_platform_resource(soc_info, true, false);
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_TURBO_VOTE, false);
 	if (rc) {
 		pr_err("%s: enable platform failed\n", __func__);
 		return rc;
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index a9064fa..83009d2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -14,11 +14,13 @@
 #include <linux/videodev2.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/ratelimit.h>
 
 #include "cam_isp_context.h"
 #include "cam_isp_log.h"
 #include "cam_mem_mgr.h"
 #include "cam_sync_api.h"
+#include "cam_req_mgr_dev.h"
 
 #undef CDBG
 #define CDBG(fmt, args...) pr_debug(fmt, ##args)
@@ -537,7 +539,7 @@
 	 */
 	list_for_each_entry(req, &ctx->active_req_list, list) {
 		if (++cnt > 2) {
-			pr_err("%s: Apply failed due to pipeline congestion\n",
+			pr_err_ratelimited("%s: Apply failed: congestion\n",
 				__func__);
 			rc = -EFAULT;
 			goto end;
@@ -627,6 +629,94 @@
 	return rc;
 }
 
+static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
+	struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req)
+{
+	int i, rc;
+	uint32_t cancel_req_id_found = 0;
+	struct cam_ctx_request           *req;
+	struct cam_ctx_request           *req_temp;
+	struct cam_isp_ctx_req           *req_isp;
+
+	spin_lock(&ctx->lock);
+	if (list_empty(req_list)) {
+		spin_unlock(&ctx->lock);
+		CDBG("%s: request list is empty\n", __func__);
+		return 0;
+	}
+
+	list_for_each_entry_safe(req, req_temp, req_list, list) {
+		if ((flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ)
+				&& (req->request_id != flush_req->req_id))
+			continue;
+
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		for (i = 0; i < req_isp->num_fence_map_out; i++) {
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				CDBG("%s: Flush req 0x%llx, fence %d\n",
+					__func__, req->request_id,
+					req_isp->fence_map_out[i].sync_id);
+				rc = cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+				if (rc)
+					pr_err_ratelimited("%s: signal fence failed\n",
+						__func__);
+				req_isp->fence_map_out[i].sync_id = -1;
+			}
+		}
+		list_add_tail(&req->list, &ctx->free_req_list);
+
+		/* If flush request id found, exit the loop */
+		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+			cancel_req_id_found = 1;
+			break;
+		}
+	}
+	spin_unlock(&ctx->lock);
+
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
+		!cancel_req_id_found)
+		CDBG("%s: Flush request id %lld not found in the list\n",
+			__func__, flush_req->req_id);
+
+	return 0;
+}
+
+static int __cam_isp_ctx_flush_req_in_top_state(
+	struct cam_context *ctx,
+	struct cam_req_mgr_flush_request *flush_req)
+{
+	int rc = 0;
+
+	CDBG("%s: try to flush pending list\n", __func__);
+	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+	CDBG("%s: Flush request in top state %d\n",
+		__func__, ctx->state);
+	return rc;
+}
+
+static int __cam_isp_ctx_flush_req_in_ready(
+	struct cam_context *ctx,
+	struct cam_req_mgr_flush_request *flush_req)
+{
+	int rc = 0;
+
+	CDBG("%s: try to flush pending list\n", __func__);
+	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+
+	/* if nothing is in pending req list, change state to acquire*/
+	spin_lock(&ctx->lock);
+	if (list_empty(&ctx->pending_req_list))
+		ctx->state = CAM_CTX_ACQUIRED;
+	spin_unlock(&ctx->lock);
+
+	CDBG("%s: Flush request in ready state. next state %d\n",
+		__func__, ctx->state);
+	return rc;
+}
+
 static struct cam_ctx_ops
 	cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
 	/* SOF */
@@ -679,12 +769,10 @@
 	struct cam_release_dev_cmd *cmd)
 {
 	int rc = 0;
-	int i;
 	struct cam_hw_release_args       rel_arg;
-	struct cam_ctx_request	        *req;
-	struct cam_isp_ctx_req	        *req_isp;
 	struct cam_isp_context *ctx_isp =
 		(struct cam_isp_context *) ctx->ctx_priv;
+	struct cam_req_mgr_flush_request flush_req;
 
 	if (ctx_isp->hw_ctx) {
 		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
@@ -704,25 +792,16 @@
 	 * But we still add some sanity check code here to help the debug
 	 */
 	if (!list_empty(&ctx->active_req_list))
-		pr_err("%s: Active list is empty.\n", __func__);
+		pr_err("%s: Active list is not empty\n", __func__);
 
-	/* flush the pending list */
-	while (!list_empty(&ctx->pending_req_list)) {
-		req = list_first_entry(&ctx->pending_req_list,
-			struct cam_ctx_request, list);
-		list_del_init(&req->list);
-		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
-		pr_err("%s: signal fence in pending list. fence num %d\n",
-			__func__, req_isp->num_fence_map_out);
-		for (i = 0; i < req_isp->num_fence_map_out; i++) {
-			if (req_isp->fence_map_out[i].sync_id != -1) {
-				cam_sync_signal(
-					req_isp->fence_map_out[i].sync_id,
-					CAM_SYNC_STATE_SIGNALED_ERROR);
-			}
-		}
-		list_add_tail(&req->list, &ctx->free_req_list);
-	}
+	/* Flush all the pending request list  */
+	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
+	flush_req.link_hdl = ctx->link_hdl;
+	flush_req.dev_hdl = ctx->dev_hdl;
+
+	CDBG("%s: try to flush pending list\n", __func__);
+	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
+
 	ctx->state = CAM_CTX_AVAILABLE;
 	CDBG("%s: next state %d\n", __func__, ctx->state);
 	return rc;
@@ -1252,6 +1331,7 @@
 			.link = __cam_isp_ctx_link_in_acquired,
 			.unlink = __cam_isp_ctx_unlink_in_acquired,
 			.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
+			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
 		},
 		.irq_ops = NULL,
 	},
@@ -1264,6 +1344,7 @@
 		},
 		.crm_ops = {
 			.unlink = __cam_isp_ctx_unlink_in_ready,
+			.flush_req = __cam_isp_ctx_flush_req_in_ready,
 		},
 		.irq_ops = NULL,
 	},
@@ -1276,6 +1357,7 @@
 		},
 		.crm_ops = {
 			.apply_req = __cam_isp_ctx_apply_req,
+			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
 		},
 		.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
 	},
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
index f07c45e..c718bba 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
@@ -72,7 +72,8 @@
 {
 	int rc = 0;
 
-	rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_TURBO_VOTE, true);
 	if (rc) {
 		pr_err("%s: enable platform failed\n", __func__);
 		return rc;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
index 3670ca9..9f8f8c5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
@@ -112,8 +112,8 @@
 	ahb_vote.type       = CAM_VOTE_ABSOLUTE;
 	ahb_vote.vote.level = CAM_SVS_VOTE;
 
-	axi_vote.compressed_bw   = 640000000;
-	axi_vote.uncompressed_bw = 640000000;
+	axi_vote.compressed_bw   = 10640000000L;
+	axi_vote.uncompressed_bw = 10640000000L;
 
 	rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
 	if (rc) {
@@ -122,7 +122,8 @@
 		goto end;
 	}
 
-	rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_TURBO_VOTE, true);
 	if (rc) {
 		pr_err("Error! enable platform failed\n");
 		goto stop_cpas;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 92a17d8..c4fae99 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -28,7 +28,8 @@
 
 #define FRAME_BASED_EN 0
 
-#define MAX_BUF_UPDATE_REG_NUM   20
+#define MAX_BUF_UPDATE_REG_NUM   \
+	(sizeof(struct cam_vfe_bus_ver2_reg_offset_bus_client)/4)
 #define MAX_REG_VAL_PAIR_SIZE    \
 		(MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES)
 
@@ -370,6 +371,7 @@
 	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS:
 		switch (format) {
 		case CAM_FORMAT_PLAIN64:
+		case CAM_FORMAT_PLAIN128:
 			return 1;
 		default:
 			break;
@@ -628,7 +630,8 @@
 		rsrc_data->height = 1;
 		rsrc_data->pack_fmt = 0x0;
 		rsrc_data->en_cfg = 0x3;
-	} else if (rsrc_data->index < 5) {
+	} else if (rsrc_data->index < 5 ||
+		rsrc_data->index == 7 || rsrc_data->index == 8) {
 		switch (plane) {
 		case PLANE_Y:
 			switch (rsrc_data->format) {
@@ -663,6 +666,12 @@
 		}
 		rsrc_data->pack_fmt = 0xE;
 		rsrc_data->en_cfg = 0x1;
+	} else if (rsrc_data->index >= 11) {
+		rsrc_data->width = 0;
+		rsrc_data->height = 0;
+		rsrc_data->pack_fmt = 0x0;
+		rsrc_data->stride = 1;
+		rsrc_data->en_cfg = 0x3;
 	} else {
 		rsrc_data->width = rsrc_data->width * 4;
 		rsrc_data->height = rsrc_data->height / 2;
@@ -1652,6 +1661,7 @@
 	struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
 	uint32_t *reg_val_pair;
 	uint32_t  i, j, size = 0;
+	uint32_t  frame_inc = 0;
 
 	/*
 	 * Need the entire buf io config so we can get the stride info
@@ -1672,13 +1682,19 @@
 	if (update_buf->num_buf != vfe_out_data->num_wm) {
 		pr_err("Failed! Invalid number buffers:%d required:%d\n",
 			update_buf->num_buf, vfe_out_data->num_wm);
-		return -ENOMEM;
+		return -EINVAL;
 	}
 
 	reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
 	io_cfg = update_buf->io_cfg;
 
 	for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
+		if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
+			pr_err("reg_val_pair %d exceeds the array limit %zu\n",
+				j, MAX_REG_VAL_PAIR_SIZE);
+			return -ENOMEM;
+		}
+
 		wm_data = vfe_out_data->wm_res[i]->res_priv;
 
 		/* For initial configuration program all bus registers */
@@ -1833,6 +1849,11 @@
 
 		CDBG("image address 0x%x\n", reg_val_pair[j-1]);
 
+		frame_inc = io_cfg->planes[i].plane_stride *
+			io_cfg->planes[i].slice_height;
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->frame_inc, frame_inc);
+
 		/* enable the WM */
 		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
 			wm_data->hw_regs->cfg,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index 3f3c2a3..6dd67df 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -119,10 +119,6 @@
 			rsrc_data->camif_reg->line_skip_pattern);
 	cam_io_w_mb(0x1, rsrc_data->mem_base +
 			rsrc_data->camif_reg->pixel_skip_pattern);
-	cam_io_w_mb(0x0, rsrc_data->mem_base +
-			rsrc_data->camif_reg->skip_period);
-	cam_io_w_mb(0x1, rsrc_data->mem_base +
-			rsrc_data->camif_reg->irq_subsample_pattern);
 
 	/* epoch config with 20 line */
 	cam_io_w_mb(0x00140014,
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index f47b1dc..edfc245 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -722,7 +722,7 @@
 static int cam_mem_util_unmap(int32_t idx)
 {
 	int rc = 0;
-	enum cam_smmu_region_id region;
+	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
 
 	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
 		pr_err("Incorrect index\n");
@@ -735,14 +735,17 @@
 		if (tbl.bufq[idx].i_hdl && tbl.bufq[idx].kmdvaddr)
 			ion_unmap_kernel(tbl.client, tbl.bufq[idx].i_hdl);
 
-	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
-		region = CAM_SMMU_REGION_IO;
+	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE ||
+		tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
 
-	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
-		region = CAM_SMMU_REGION_SHARED;
+		if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
+			region = CAM_SMMU_REGION_IO;
 
-	rc = cam_mem_util_unmap_hw_va(idx,
-		region);
+		if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+			region = CAM_SMMU_REGION_SHARED;
+
+		rc = cam_mem_util_unmap_hw_va(idx, region);
+	}
 
 	mutex_lock(&tbl.bufq[idx].q_lock);
 	tbl.bufq[idx].flags = 0;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
index 7bc26ec..38048d5 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
@@ -58,7 +58,7 @@
 	spin_unlock_bh(&hdl_tbl_lock);
 
 	bitmap_size = BITS_TO_LONGS(CAM_REQ_MGR_MAX_HANDLES) * sizeof(long);
-	hdl_tbl->bitmap = kzalloc(sizeof(bitmap_size), GFP_KERNEL);
+	hdl_tbl->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 	if (!hdl_tbl->bitmap) {
 		rc = -ENOMEM;
 		goto bitmap_alloc_fail;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index 91b68cf..0a96f18 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -330,40 +330,31 @@
 	return rc;
 }
 
-static int32_t cam_actuator_vreg_control(struct cam_actuator_ctrl_t *a_ctrl,
+static int32_t cam_actuator_vreg_control(
+	struct cam_actuator_ctrl_t *a_ctrl,
 	int config)
 {
-	int rc = 0, i, cnt;
-	struct cam_actuator_vreg *vreg_cfg;
+	int rc = 0, cnt;
+	struct cam_hw_soc_info  *soc_info;
 
-	vreg_cfg = &a_ctrl->vreg_cfg;
-	cnt = vreg_cfg->num_vreg;
+	soc_info = &a_ctrl->soc_info;
+	cnt = soc_info->num_rgltr;
+
 	if (!cnt)
 		return 0;
 
-	if (cnt >= MSM_ACTUATOR_MAX_VREGS) {
+	if (cnt >= CAM_SOC_MAX_REGULATOR) {
 		pr_err("%s:%d Regulators more than supported %d\n",
 			__func__, __LINE__, cnt);
 		return -EINVAL;
 	}
 
-	for (i = 0; i < cnt; i++) {
-		if (a_ctrl->io_master_info.master_type ==
-			CCI_MASTER) {
-			rc = msm_camera_config_single_vreg(
-				&(a_ctrl->v4l2_dev_str.pdev->dev),
-				&vreg_cfg->cam_vreg[i],
-				(struct regulator **)&vreg_cfg->data[i],
-				config);
-		} else if (a_ctrl->io_master_info.master_type ==
-			I2C_MASTER) {
-			rc = msm_camera_config_single_vreg(
-				&(a_ctrl->io_master_info.client->dev),
-				&vreg_cfg->cam_vreg[i],
-				(struct regulator **)&vreg_cfg->data[i],
-				config);
-		}
-	}
+	if (config)
+		rc = cam_soc_util_enable_platform_resource(soc_info, false, 0,
+			false);
+	else
+		rc = cam_soc_util_disable_platform_resource(soc_info, false,
+			false);
 
 	return rc;
 }
@@ -371,6 +362,9 @@
 static int32_t cam_actuator_power_up(struct cam_actuator_ctrl_t *a_ctrl)
 {
 	int rc = 0;
+	struct cam_hw_soc_info  *soc_info =
+		&a_ctrl->soc_info;
+	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
 
 	rc = cam_actuator_vreg_control(a_ctrl, 1);
 	if (rc < 0) {
@@ -379,28 +373,23 @@
 		return rc;
 	}
 
-	if (a_ctrl->gconf &&
-		a_ctrl->gconf->gpio_num_info &&
-		a_ctrl->gconf->gpio_num_info->valid[SENSOR_VAF] == 1) {
-		rc = msm_camera_request_gpio_table(
-			a_ctrl->gconf->cam_gpio_req_tbl,
-			a_ctrl->gconf->cam_gpio_req_tbl_size, 1);
+	gpio_num_info = a_ctrl->gpio_num_info;
+
+	if (soc_info->gpio_data &&
+		gpio_num_info &&
+		gpio_num_info->valid[SENSOR_VAF] == 1) {
+		rc = cam_soc_util_request_platform_resource(&a_ctrl->soc_info,
+			NULL, NULL);
+		rc = cam_soc_util_enable_platform_resource(&a_ctrl->soc_info,
+			false, 0, false);
 		if (rc < 0) {
 			pr_err("%s:%d :Error: Failed in req gpio: %d\n",
 				__func__, __LINE__, rc);
 			return rc;
 		}
-		if (a_ctrl->cam_pinctrl_status) {
-			rc = pinctrl_select_state(
-				a_ctrl->pinctrl_info.pinctrl,
-				a_ctrl->pinctrl_info.gpio_state_active);
-			if (rc < 0)
-				pr_err("%s:%d :Error: cannot set pin to active state: %d",
-					__func__, __LINE__, rc);
-		}
 
 		gpio_set_value_cansleep(
-			a_ctrl->gconf->gpio_num_info->gpio_num[SENSOR_VAF],
+			gpio_num_info->gpio_num[SENSOR_VAF],
 			1);
 	}
 
@@ -413,6 +402,9 @@
 static int32_t cam_actuator_power_down(struct cam_actuator_ctrl_t *a_ctrl)
 {
 	int32_t rc = 0;
+	struct cam_hw_soc_info *soc_info =
+		&a_ctrl->soc_info;
+	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
 
 	rc = cam_actuator_vreg_control(a_ctrl, 0);
 	if (rc < 0) {
@@ -420,35 +412,21 @@
 		return rc;
 	}
 
-	if (a_ctrl->gconf &&
-		a_ctrl->gconf->gpio_num_info &&
-		a_ctrl->gconf->gpio_num_info->
-			valid[SENSOR_VAF] == 1) {
+	gpio_num_info = a_ctrl->gpio_num_info;
+
+	if (soc_info->gpio_data &&
+		gpio_num_info &&
+		gpio_num_info->valid[SENSOR_VAF] == 1) {
 
 		gpio_set_value_cansleep(
-			a_ctrl->gconf->gpio_num_info->
-				gpio_num[SENSOR_VAF],
+			gpio_num_info->gpio_num[SENSOR_VAF],
 			GPIOF_OUT_INIT_LOW);
 
-		if (a_ctrl->cam_pinctrl_status) {
-			rc = pinctrl_select_state(
-				a_ctrl->pinctrl_info.pinctrl,
-				a_ctrl->pinctrl_info.
-					gpio_state_suspend);
-			if (rc < 0)
-				pr_err("%s:%d cannot set pin to suspend state: %d",
-					__func__, __LINE__, rc);
-
-			devm_pinctrl_put(
-				a_ctrl->pinctrl_info.pinctrl);
-		}
-		a_ctrl->cam_pinctrl_status = 0;
-		rc = msm_camera_request_gpio_table(
-			a_ctrl->gconf->cam_gpio_req_tbl,
-			a_ctrl->gconf->cam_gpio_req_tbl_size,
-			0);
+		rc = cam_soc_util_release_platform_resource(&a_ctrl->soc_info);
+		rc |= cam_soc_util_disable_platform_resource(&a_ctrl->soc_info,
+					0, 0);
 		if (rc < 0)
-			pr_err("%s:%d Failed in selecting state: %d\n",
+			pr_err("%s:%d Failed to disable platform resources: %d\n",
 				__func__, __LINE__, rc);
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
index 3835680..48e3c2e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
@@ -76,7 +76,6 @@
 		INIT_LIST_HEAD(&(a_ctrl->i2c_data.per_frame[i].list_head));
 
 	/* Initialize sensor device type */
-	a_ctrl->of_node = client->dev.of_node;
 	a_ctrl->io_master_info.master_type = I2C_MASTER;
 
 	rc = cam_actuator_parse_dt(a_ctrl, &client->dev);
@@ -203,11 +202,9 @@
 	if (!a_ctrl)
 		return -ENOMEM;
 
-	/* Initialize actuator device type */
-	a_ctrl->of_node = pdev->dev.of_node;
-
 	/*fill in platform device*/
 	a_ctrl->v4l2_dev_str.pdev = pdev;
+	a_ctrl->soc_info.pdev = pdev;
 
 	a_ctrl->io_master_info.master_type = CCI_MASTER;
 
@@ -259,6 +256,14 @@
 		goto free_mem;
 	}
 
+	rc = cam_soc_util_request_platform_resource(&a_ctrl->soc_info,
+			NULL, NULL);
+	if (rc < 0) {
+		pr_err("%s:%d :Error: Requesting Platform Resources failed rc %d\n",
+			__func__, __LINE__, rc);
+		goto free_ctrl;
+	}
+
 	a_ctrl->bridge_intf.device_hdl = -1;
 	a_ctrl->bridge_intf.ops.get_dev_info =
 		cam_actuator_publish_dev_info;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
index 22ef29e..19fe4af 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
@@ -32,7 +32,7 @@
 #include <cam_sensor_cmn_header.h>
 #include <cam_subdev.h>
 #include "cam_sensor_util.h"
-#include "cam_sensor_soc_api.h"
+#include "cam_soc_util.h"
 
 #define NUM_MASTERS 2
 #define NUM_QUEUES 2
@@ -60,18 +60,6 @@
 };
 
 /**
- * struct cam_actuator_vreg
- * @cam_vreg: Regulator structure
- * @data: Regulator data
- * @num_vreg: Number of regulators
- */
-struct cam_actuator_vreg {
-	struct camera_vreg_t *cam_vreg;
-	void *data[MSM_ACTUATOR_MAX_VREGS];
-	int num_vreg;
-};
-
-/**
  * struct intf_params
  * @device_hdl: Device Handle
  * @session_hdl: Session Handle
@@ -107,18 +95,16 @@
 	struct i2c_driver *i2c_driver;
 	enum cci_i2c_master_t cci_i2c_master;
 	struct camera_io_master io_master_info;
+	struct cam_hw_soc_info soc_info;
 	struct mutex actuator_mutex;
 	uint32_t id;
 	enum msm_actuator_state_t act_apply_state;
-	struct cam_actuator_vreg vreg_cfg;
-	struct msm_camera_gpio_conf *gconf;
-	struct msm_pinctrl_info pinctrl_info;
+	struct msm_camera_gpio_num_info *gpio_num_info;
 	uint8_t cam_pinctrl_status;
 	struct cam_subdev v4l2_dev_str;
 	struct i2c_data_settings i2c_data;
 	struct cam_actuator_query_cap act_info;
 	struct intf_params bridge_intf;
-	struct device_node *of_node;
 	char device_name[20];
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
index 767f3b0..584e4d2 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
@@ -10,68 +10,68 @@
  * GNU General Public License for more details.
  */
 
-#include "cam_actuator_soc.h"
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 #include <cam_sensor_cmn_header.h>
 #include <cam_sensor_util.h>
 #include <cam_sensor_io.h>
 #include <cam_req_mgr_util.h>
+#include "cam_actuator_soc.h"
+#include "cam_soc_util.h"
 
 int32_t cam_actuator_parse_dt(struct cam_actuator_ctrl_t *a_ctrl,
 	struct device *dev)
 {
 	int32_t                   rc = 0;
-	struct cam_actuator_vreg *vreg_cfg;
+	struct cam_hw_soc_info *soc_info = &a_ctrl->soc_info;
+	struct device_node *of_node = NULL;
+	struct platform_device *pdev = NULL;
+
+	if (!soc_info->pdev) {
+		pr_err("%s:%d :Error: soc_info is not initialized\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	pdev = soc_info->pdev;
+	of_node = pdev->dev.of_node;
 
 	/* Initialize mutex */
 	mutex_init(&(a_ctrl->actuator_mutex));
 
-	rc = of_property_read_u32(a_ctrl->of_node, "cell-index",
-		&(a_ctrl->id));
-	CDBG("cell-index %d, rc %d\n", a_ctrl->id, rc);
+	rc = cam_soc_util_get_dt_properties(soc_info);
 	if (rc < 0) {
-		pr_err("%s:%d :Error: parsing dt for cellindex rc %d\n",
+		pr_err("%s:%d :Error: parsing common soc dt failed (rc %d)\n",
 			__func__, __LINE__, rc);
 		return rc;
 	}
-
-	rc = of_property_read_u32(a_ctrl->of_node, "qcom,cci-master",
+	rc = of_property_read_u32(of_node, "cci-master",
 		&(a_ctrl->cci_i2c_master));
-	CDBG("qcom,cci-master %d, rc %d\n", a_ctrl->cci_i2c_master, rc);
+	CDBG("cci-master %d, rc %d\n", a_ctrl->cci_i2c_master, rc);
 	if (rc < 0 || a_ctrl->cci_i2c_master >= MASTER_MAX) {
 		pr_err("%s:%d :Error: Wrong info from dt CCI master as : %d\n",
 			__func__, __LINE__, a_ctrl->cci_i2c_master);
 		return rc;
 	}
 
-	if (of_find_property(a_ctrl->of_node,
-			"qcom,cam-vreg-name", NULL)) {
-		vreg_cfg = &(a_ctrl->vreg_cfg);
-		rc = cam_sensor_get_dt_vreg_data(dev->of_node,
-			&vreg_cfg->cam_vreg, &vreg_cfg->num_vreg);
-		if (rc < 0) {
-			pr_err("%s:%d :Error: parsing regulator dt: %d\n",
-				__func__, __LINE__, rc);
-			return rc;
-		}
-	}
-	rc = msm_sensor_driver_get_gpio_data(&(a_ctrl->gconf),
-		a_ctrl->of_node);
-	if (rc < 0) {
-		pr_err("%s:%d No/Error Actuator GPIOs\n",
-			__func__, __LINE__);
-	} else {
-		a_ctrl->cam_pinctrl_status = 1;
-		rc = msm_camera_pinctrl_init(
-			&(a_ctrl->pinctrl_info), dev);
-		if (rc < 0) {
-			pr_err("ERR:%s: Error in reading actuator pinctrl\n",
-				__func__);
-			a_ctrl->cam_pinctrl_status = 0;
-			rc = 0;
-		}
+	if (!soc_info->gpio_data) {
+		pr_info("%s:%d No GPIO found\n", __func__, __LINE__);
+		rc = 0;
+		return rc;
 	}
 
+	if (!soc_info->gpio_data->cam_gpio_common_tbl_size) {
+		pr_info("%s:%d Empty GPIO table\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
+		&a_ctrl->gpio_num_info);
+
+	if ((rc < 0) || (!a_ctrl->gpio_num_info)) {
+		pr_err("%s:%d No/Error Actuator GPIOs\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
index 746b786..83e0c19 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
@@ -44,8 +44,11 @@
 	enum cci_i2c_master_t master)
 {
 	int32_t rc = 0;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
 
-	cam_io_w_mb(1 << master, cci_dev->base + CCI_HALT_REQ_ADDR);
+	cam_io_w_mb(1 << master, base + CCI_HALT_REQ_ADDR);
 	rc = wait_for_completion_timeout(
 		&cci_dev->cci_master_info[master].reset_complete, CCI_TIMEOUT);
 	if (rc < 0) {
@@ -59,10 +62,10 @@
 		/* Set proper mask to RESET CMD address based on MASTER */
 		if (master == MASTER_0)
 			cam_io_w_mb(CCI_M0_RESET_RMSK,
-				cci_dev->base + CCI_RESET_CMD_ADDR);
+				base + CCI_RESET_CMD_ADDR);
 		else
 			cam_io_w_mb(CCI_M1_RESET_RMSK,
-				cci_dev->base + CCI_RESET_CMD_ADDR);
+				base + CCI_RESET_CMD_ADDR);
 
 		/* wait for reset done irq */
 		rc = wait_for_completion_timeout(
@@ -82,8 +85,11 @@
 	int32_t rc = 0;
 	uint32_t read_val = 0;
 	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
 
-	read_val = cam_io_r_mb(cci_dev->base +
+	read_val = cam_io_r_mb(base +
 		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
 	CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d\n",
 		__func__, __LINE__, read_val, len,
@@ -95,18 +101,18 @@
 
 		CDBG("%s:%d CCI_I2C_REPORT_CMD\n", __func__, __LINE__);
 		cam_io_w_mb(report_val,
-			cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+			base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 			reg_offset);
 		read_val++;
 		CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d, queue: %d\n",
 			__func__, __LINE__, read_val, queue);
-		cam_io_w_mb(read_val, cci_dev->base +
+		cam_io_w_mb(read_val, base +
 			CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
 		reg_val = 1 << ((master * 2) + queue);
 		CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__);
 		atomic_set(&cci_dev->cci_master_info[master].
 						done_pending[queue], 1);
-		cam_io_w_mb(reg_val, cci_dev->base +
+		cam_io_w_mb(reg_val, base +
 			CCI_QUEUE_START_ADDR);
 		CDBG("%s line %d wait_for_completion_timeout\n",
 			__func__, __LINE__);
@@ -136,6 +142,9 @@
 {
 	int32_t rc = 0;
 	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
 
 	if (!cci_dev) {
 		pr_err("%s: failed %d", __func__, __LINE__);
@@ -150,7 +159,7 @@
 	CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x\n",
 		__func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 		reg_offset, val);
-	cam_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+	cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 		reg_offset);
 	return rc;
 }
@@ -260,21 +269,25 @@
 	enum cci_i2c_master_t master,
 	enum cci_i2c_queue_t queue)
 {
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+
 	uint32_t reg_offset = master * 0x200 + queue * 0x100;
-	uint32_t read_val = cam_io_r_mb(cci_dev->base +
+	uint32_t read_val = cam_io_r_mb(base +
 		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
 	uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
 
 	CDBG("%s:%d CCI_I2C_REPORT_CMD curr_w_cnt: %d\n",
 		__func__, __LINE__, read_val);
 	cam_io_w_mb(report_val,
-		cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 		reg_offset);
 	read_val++;
 
 	CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d\n",
 		__func__, __LINE__, read_val);
-	cam_io_w_mb(read_val, cci_dev->base +
+	cam_io_w_mb(read_val, base +
 		CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
 }
 
@@ -282,12 +295,16 @@
 	enum cci_i2c_master_t master,
 	enum cci_i2c_queue_t queue)
 {
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+
 	uint32_t reg_val = 1 << ((master * 2) + queue);
 
 	cam_cci_load_report_cmd(cci_dev, master, queue);
 	atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
 	atomic_set(&cci_dev->cci_master_info[master].done_pending[queue], 1);
-	cam_io_w_mb(reg_val, cci_dev->base +
+	cam_io_w_mb(reg_val, base +
 		CCI_QUEUE_START_ADDR);
 
 	return cam_cci_wait(cci_dev, master, queue);
@@ -339,8 +356,11 @@
 {
 	uint32_t read_val = 0;
 	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
 
-	read_val = cam_io_r_mb(cci_dev->base +
+	read_val = cam_io_r_mb(base +
 		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
 	CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d max %d\n",
 		__func__, __LINE__, read_val,
@@ -354,12 +374,15 @@
 	enum cci_i2c_master_t master,
 	enum cci_i2c_queue_t queue)
 {
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
 	uint32_t reg_val = 1 << ((master * 2) + queue);
 
 	if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) {
 		cam_cci_load_report_cmd(cci_dev, master, queue);
 		atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
-		cam_io_w_mb(reg_val, cci_dev->base +
+		cam_io_w_mb(reg_val, base +
 			CCI_QUEUE_START_ADDR);
 	}
 }
@@ -461,52 +484,53 @@
 	return cycles_per_us;
 }
 
-uint32_t *cam_cci_get_clk_rates(struct cci_device *cci_dev,
+void cam_cci_get_clk_rates(struct cci_device *cci_dev,
 	struct cam_cci_ctrl *c_ctrl)
+
 {
-	uint32_t j;
-	int32_t idx;
+	int32_t src_clk_idx, j;
 	uint32_t cci_clk_src;
 	unsigned long clk;
 	struct cam_cci_clk_params_t *clk_params = NULL;
-	struct device_node *of_node = cci_dev->v4l2_dev_str.pdev->dev.of_node;
+
 	enum i2c_freq_mode i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode;
+	struct cam_hw_soc_info *soc_info = &cci_dev->soc_info;
 
 	if (i2c_freq_mode >= I2C_MAX_MODES ||
 		i2c_freq_mode < I2C_STANDARD_MODE) {
 		pr_err("%s:%d Invalid frequency mode: %d\n",
 			__func__, __LINE__, (int32_t)i2c_freq_mode);
-		return NULL;
+		cci_dev->clk_level_index = -1;
+		return;
 	}
 
 	clk_params = &cci_dev->cci_clk_params[i2c_freq_mode];
 	cci_clk_src = clk_params->cci_clk_src;
 
-	idx = of_property_match_string(of_node,
-		"clock-names", CCI_CLK_SRC_NAME);
-	if (idx < 0) {
+	src_clk_idx = soc_info->src_clk_idx;
+
+	if (src_clk_idx < 0) {
 		cci_dev->cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT;
-		return cci_dev->cci_clk_rates[0];
+		cci_dev->clk_level_index = 0;
+		return;
 	}
 
 	if (cci_clk_src == 0) {
-		clk = cci_dev->cci_clk_rates[0][idx];
+		clk = soc_info->clk_rate[0][src_clk_idx];
 		cci_dev->cycles_per_us = cam_cci_cycles_per_ms(clk);
-		return cci_dev->cci_clk_rates[0];
+		cci_dev->clk_level_index = 0;
+		return;
 	}
 
-	CDBG("%s:%d CCI: 3 cases:%d idx: %d\n", __func__,
-		__LINE__, (int32_t)cci_dev->num_clk_cases, idx);
-	for (j = 0; j < cci_dev->num_clk_cases; j++) {
-		clk = cci_dev->cci_clk_rates[j][idx];
+	for (j = 0; j < CAM_MAX_VOTE; j++) {
+		clk = soc_info->clk_rate[j][src_clk_idx];
 		if (clk == cci_clk_src) {
 			cci_dev->cycles_per_us = cam_cci_cycles_per_ms(clk);
-			cci_dev->cci_clk_src = cci_clk_src;
-			return cci_dev->cci_clk_rates[j];
+			cci_dev->clk_level_index = j;
+			return;
 		}
 	}
-
-	return NULL;
+	return;
 }
 
 static int32_t cam_cci_set_clk_param(struct cci_device *cci_dev,
@@ -515,6 +539,9 @@
 	struct cam_cci_clk_params_t *clk_params = NULL;
 	enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
 	enum i2c_freq_mode i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
 
 	if ((i2c_freq_mode >= I2C_MAX_MODES) || (i2c_freq_mode < 0)) {
 		pr_err("%s:%d invalid i2c_freq_mode = %d",
@@ -529,33 +556,33 @@
 	if (master == MASTER_0) {
 		cam_io_w_mb(clk_params->hw_thigh << 16 |
 			clk_params->hw_tlow,
-			cci_dev->base + CCI_I2C_M0_SCL_CTL_ADDR);
+			base + CCI_I2C_M0_SCL_CTL_ADDR);
 		cam_io_w_mb(clk_params->hw_tsu_sto << 16 |
 			clk_params->hw_tsu_sta,
-			cci_dev->base + CCI_I2C_M0_SDA_CTL_0_ADDR);
+			base + CCI_I2C_M0_SDA_CTL_0_ADDR);
 		cam_io_w_mb(clk_params->hw_thd_dat << 16 |
 			clk_params->hw_thd_sta,
-			cci_dev->base + CCI_I2C_M0_SDA_CTL_1_ADDR);
+			base + CCI_I2C_M0_SDA_CTL_1_ADDR);
 		cam_io_w_mb(clk_params->hw_tbuf,
-			cci_dev->base + CCI_I2C_M0_SDA_CTL_2_ADDR);
+			base + CCI_I2C_M0_SDA_CTL_2_ADDR);
 		cam_io_w_mb(clk_params->hw_scl_stretch_en << 8 |
 			clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
-			cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR);
+			base + CCI_I2C_M0_MISC_CTL_ADDR);
 	} else if (master == MASTER_1) {
 		cam_io_w_mb(clk_params->hw_thigh << 16 |
 			clk_params->hw_tlow,
-			cci_dev->base + CCI_I2C_M1_SCL_CTL_ADDR);
+			base + CCI_I2C_M1_SCL_CTL_ADDR);
 		cam_io_w_mb(clk_params->hw_tsu_sto << 16 |
 			clk_params->hw_tsu_sta,
-			cci_dev->base + CCI_I2C_M1_SDA_CTL_0_ADDR);
+			base + CCI_I2C_M1_SDA_CTL_0_ADDR);
 		cam_io_w_mb(clk_params->hw_thd_dat << 16 |
 			clk_params->hw_thd_sta,
-			cci_dev->base + CCI_I2C_M1_SDA_CTL_1_ADDR);
+			base + CCI_I2C_M1_SDA_CTL_1_ADDR);
 		cam_io_w_mb(clk_params->hw_tbuf,
-			cci_dev->base + CCI_I2C_M1_SDA_CTL_2_ADDR);
+			base + CCI_I2C_M1_SDA_CTL_2_ADDR);
 		cam_io_w_mb(clk_params->hw_scl_stretch_en << 8 |
 			clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
-			cci_dev->base + CCI_I2C_M1_MISC_CTL_ADDR);
+			base + CCI_I2C_M1_MISC_CTL_ADDR);
 	}
 	cci_dev->i2c_freq_mode[master] = i2c_freq_mode;
 
@@ -576,6 +603,9 @@
 	uint16_t reg_addr = 0, cmd_size = i2c_msg->size;
 	uint32_t read_val = 0, reg_offset, val, delay = 0;
 	uint32_t max_queue_size, queue_size = 0, cmd = 0;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
 
 	if (i2c_cmd == NULL) {
 		pr_err("%s:%d Failed line\n", __func__,
@@ -605,7 +635,7 @@
 	reg_offset = master * 0x200 + queue * 0x100;
 
 	cam_io_w_mb(cci_dev->cci_wait_sync_cfg.cid,
-		cci_dev->base + CCI_SET_CID_SYNC_TIMER_ADDR +
+		base + CCI_SET_CID_SYNC_TIMER_ADDR +
 		cci_dev->cci_wait_sync_cfg.csid *
 		CCI_SET_CID_SYNC_TIMER_OFFSET);
 
@@ -616,7 +646,7 @@
 	CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x\n",
 		__func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 		reg_offset, val);
-	cam_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+	cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 		reg_offset);
 
 	atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 0);
@@ -635,7 +665,7 @@
 		val = CCI_I2C_WAIT_SYNC_CMD |
 			((cci_dev->cci_wait_sync_cfg.line) << 4);
 		cam_io_w_mb(val,
-			cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+			base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 			reg_offset);
 	}
 
@@ -655,7 +685,7 @@
 			return -EINVAL;
 		}
 
-		read_val = cam_io_r_mb(cci_dev->base +
+		read_val = cam_io_r_mb(base +
 			CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
 		CDBG("%s line %d CUR_WORD_CNT_ADDR %d len %d max %d\n",
 			__func__, __LINE__, read_val, len, max_queue_size);
@@ -736,7 +766,7 @@
 		}
 		len = ((i-1)/4) + 1;
 
-		read_val = cam_io_r_mb(cci_dev->base +
+		read_val = cam_io_r_mb(base +
 			CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
 		for (h = 0, k = 0; h < len; h++) {
 			cmd = 0;
@@ -744,12 +774,12 @@
 				cmd |= (data[k++] << (j * 8));
 			CDBG("%s LOAD_DATA_ADDR 0x%x, q: %d, len:%d, cnt: %d\n",
 				__func__, cmd, queue, len, read_val);
-			cam_io_w_mb(cmd, cci_dev->base +
+			cam_io_w_mb(cmd, base +
 				CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 				master * 0x200 + queue * 0x100);
 
 			read_val += 1;
-			cam_io_w_mb(read_val, cci_dev->base +
+			cam_io_w_mb(read_val, base +
 				CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
 		}
 
@@ -761,11 +791,11 @@
 			cmd |= CCI_I2C_WAIT_CMD;
 			CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x\n",
 				__func__, cmd);
-			cam_io_w_mb(cmd, cci_dev->base +
+			cam_io_w_mb(cmd, base +
 				CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 				master * 0x200 + queue * 0x100);
 			read_val += 1;
-			cam_io_w_mb(read_val, cci_dev->base +
+			cam_io_w_mb(read_val, base +
 				CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
 		}
 	}
@@ -791,6 +821,8 @@
 	enum cci_i2c_queue_t queue = QUEUE_1;
 	struct cci_device *cci_dev = NULL;
 	struct cam_cci_read_cfg *read_cfg = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+	void __iomem *base = NULL;
 
 	cci_dev = v4l2_get_subdevdata(sd);
 	master = c_ctrl->cci_info->cci_i2c_master;
@@ -801,6 +833,10 @@
 		pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
 		return -EINVAL;
 	}
+
+	soc_info = &cci_dev->soc_info;
+	base = soc_info->reg_map[0].mem_base;
+
 	mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]);
 
 	/*
@@ -894,14 +930,14 @@
 		goto rel_mutex;
 	}
 
-	val = cam_io_r_mb(cci_dev->base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
+	val = cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
 			+ master * 0x200 + queue * 0x100);
 	CDBG("%s cur word cnt 0x%x\n", __func__, val);
-	cam_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
+	cam_io_w_mb(val, base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
 			+ master * 0x200 + queue * 0x100);
 
 	val = 1 << ((master * 2) + queue);
-	cam_io_w_mb(val, cci_dev->base + CCI_QUEUE_START_ADDR);
+	cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR);
 	CDBG("%s:%d E wait_for_completion_timeout\n", __func__,
 		__LINE__);
 
@@ -921,7 +957,7 @@
 		rc = 0;
 	}
 
-	read_words = cam_io_r_mb(cci_dev->base +
+	read_words = cam_io_r_mb(base +
 		CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
 	exp_words = ((read_cfg->num_byte / 4) + 1);
 	if (read_words != exp_words) {
@@ -936,7 +972,7 @@
 		read_cfg->num_byte);
 	first_byte = 0;
 	do {
-		val = cam_io_r_mb(cci_dev->base +
+		val = cam_io_r_mb(base +
 			CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
 		CDBG("%s read val 0x%x\n", __func__, val);
 		for (i = 0; (i < 4) && (index < read_cfg->num_byte); i++) {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h
index f6e82dc..a28d5d8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h
@@ -23,7 +23,7 @@
  *
  * This API gets CCI clk rates
  */
-uint32_t *cam_cci_get_clk_rates(struct cci_device *cci_dev,
+void cam_cci_get_clk_rates(struct cci_device *cci_dev,
 	struct cam_cci_ctrl *c_ctrl);
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
index 6764b8a..63655a4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
@@ -49,10 +49,13 @@
 {
 	uint32_t irq;
 	struct cci_device *cci_dev = data;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
 
-	irq = cam_io_r_mb(cci_dev->base + CCI_IRQ_STATUS_0_ADDR);
-	cam_io_w_mb(irq, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
-	cam_io_w_mb(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+	irq = cam_io_r_mb(base + CCI_IRQ_STATUS_0_ADDR);
+	cam_io_w_mb(irq, base + CCI_IRQ_CLEAR_0_ADDR);
+	cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
 
 	if (irq & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
 		if (cci_dev->cci_master_info[MASTER_0].reset_pending == TRUE) {
@@ -123,24 +126,24 @@
 	if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
 		cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
 		cam_io_w_mb(CCI_M0_RESET_RMSK,
-			cci_dev->base + CCI_RESET_CMD_ADDR);
+			base + CCI_RESET_CMD_ADDR);
 	}
 	if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
 		cci_dev->cci_master_info[MASTER_1].reset_pending = TRUE;
 		cam_io_w_mb(CCI_M1_RESET_RMSK,
-			cci_dev->base + CCI_RESET_CMD_ADDR);
+			base + CCI_RESET_CMD_ADDR);
 	}
 	if (irq & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
 		pr_err("%s:%d MASTER_0 error 0x%x\n", __func__, __LINE__, irq);
 		cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
 		cam_io_w_mb(CCI_M0_HALT_REQ_RMSK,
-			cci_dev->base + CCI_HALT_REQ_ADDR);
+			base + CCI_HALT_REQ_ADDR);
 	}
 	if (irq & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
 		pr_err("%s:%d MASTER_1 error 0x%x\n", __func__, __LINE__, irq);
 		cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
 		cam_io_w_mb(CCI_M1_HALT_REQ_RMSK,
-			cci_dev->base + CCI_HALT_REQ_ADDR);
+			base + CCI_HALT_REQ_ADDR);
 	}
 	return IRQ_HANDLED;
 }
@@ -150,8 +153,10 @@
 {
 	struct cci_device *cci_dev = v4l2_get_subdevdata(sd);
 	irqreturn_t ret;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
 
-	ret = cam_cci_irq(cci_dev->irq->start, cci_dev);
+	ret = cam_cci_irq(soc_info->irq_line->start, cci_dev);
 	*handled = TRUE;
 	return 0;
 }
@@ -171,6 +176,7 @@
 {
 	struct cam_cpas_register_params cpas_parms;
 	struct cci_device *new_cci_dev;
+	struct cam_hw_soc_info *soc_info = NULL;
 	int rc = 0;
 
 	new_cci_dev = kzalloc(sizeof(struct cci_device),
@@ -178,8 +184,12 @@
 	if (!new_cci_dev)
 		return -ENOMEM;
 
+	soc_info = &new_cci_dev->soc_info;
+
 	new_cci_dev->v4l2_dev_str.pdev = pdev;
 
+	soc_info->pdev = pdev;
+
 	rc = cam_cci_parse_dt_info(pdev, new_cci_dev);
 	if (rc < 0) {
 		pr_err("%s: %d Resource get Failed: %d\n",
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
index 996fc62..6268a1b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
@@ -31,12 +31,12 @@
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-subdev.h>
 #include <cam_sensor_cmn_header.h>
-#include <cam_sensor_soc_api.h>
 #include <cam_io_util.h>
 #include <cam_sensor_util.h>
 #include <cam_subdev.h>
 #include <cam_cpas_api.h>
 #include "cam_cci_hwreg.h"
+#include "cam_soc_util.h"
 
 #define V4L2_IDENT_CCI 50005
 #define CCI_I2C_QUEUE_0_SIZE 128
@@ -206,36 +206,25 @@
  */
 struct cci_device {
 	struct v4l2_subdev subdev;
-	struct resource *irq;
-	void __iomem *base;
+	struct cam_hw_soc_info soc_info;
 	uint32_t hw_version;
 	uint8_t ref_count;
 	enum cam_cci_state_t cci_state;
-	size_t num_clk;
-	struct clk **cci_clk;
-	struct msm_cam_clk_info *cci_clk_info;
 	struct cam_cci_i2c_queue_info
 		cci_i2c_queue_info[NUM_MASTERS][NUM_QUEUES];
 	struct cam_cci_master_info cci_master_info[NUM_MASTERS];
 	enum i2c_freq_mode i2c_freq_mode[NUM_MASTERS];
 	struct cam_cci_clk_params_t cci_clk_params[I2C_MAX_MODES];
-	struct gpio *cci_gpio_tbl;
-	uint8_t cci_gpio_tbl_size;
 	struct msm_pinctrl_info cci_pinctrl;
 	uint8_t cci_pinctrl_status;
-	uint32_t cci_clk_src;
-	struct camera_vreg_t *cci_vreg;
-	struct regulator *cci_reg_ptr[MAX_REGULATOR];
-	int32_t regulator_count;
 	uint8_t support_seq_write;
 	struct workqueue_struct *write_wq[MASTER_MAX];
 	struct cam_cci_wait_sync_cfg cci_wait_sync_cfg;
 	uint8_t valid_sync;
 	struct cam_subdev v4l2_dev_str;
 	uint32_t cycles_per_us;
+	int32_t clk_level_index;
 	uint8_t payload_size;
-	size_t num_clk_cases;
-	uint32_t **cci_clk_rates;
 	char device_name[20];
 	uint32_t cpas_handle;
 };
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
index 59cdfaa..d976788 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
@@ -13,46 +13,17 @@
 #include "cam_cci_dev.h"
 #include "cam_cci_core.h"
 
-static int32_t cam_cci_pinctrl_init(struct cci_device *cci_dev)
-{
-	struct msm_pinctrl_info *cci_pctrl = NULL;
-
-	cci_pctrl = &cci_dev->cci_pinctrl;
-	cci_pctrl->pinctrl = devm_pinctrl_get(&cci_dev->v4l2_dev_str.pdev->dev);
-	if (IS_ERR_OR_NULL(cci_pctrl->pinctrl)) {
-		pr_err("%s:%d devm_pinctrl_get cci_pinctrl failed\n",
-			__func__, __LINE__);
-		return -EINVAL;
-	}
-	cci_pctrl->gpio_state_active = pinctrl_lookup_state(
-						cci_pctrl->pinctrl,
-						CCI_PINCTRL_STATE_DEFAULT);
-	if (IS_ERR_OR_NULL(cci_pctrl->gpio_state_active)) {
-		pr_err("%s:%d look up state  for active state failed\n",
-			__func__, __LINE__);
-		return -EINVAL;
-	}
-	cci_pctrl->gpio_state_suspend = pinctrl_lookup_state(
-						cci_pctrl->pinctrl,
-						CCI_PINCTRL_STATE_SLEEP);
-	if (IS_ERR_OR_NULL(cci_pctrl->gpio_state_suspend)) {
-		pr_err("%s:%d look up state for suspend state failed\n",
-			__func__, __LINE__);
-		return -EINVAL;
-	}
-	return 0;
-}
-
 int cam_cci_init(struct v4l2_subdev *sd,
 	struct cam_cci_ctrl *c_ctrl)
 {
 	uint8_t i = 0, j = 0;
-	int32_t rc = 0, ret = 0;
+	int32_t rc = 0;
 	struct cci_device *cci_dev;
 	enum cci_i2c_master_t master = MASTER_0;
-	uint32_t *clk_rates  = NULL;
 	struct cam_ahb_vote ahb_vote;
 	struct cam_axi_vote axi_vote;
+	struct cam_hw_soc_info *soc_info = NULL;
+	void __iomem *base = NULL;
 
 	cci_dev = v4l2_get_subdevdata(sd);
 	if (!cci_dev || !c_ctrl) {
@@ -62,8 +33,21 @@
 		return rc;
 	}
 
+	soc_info = &cci_dev->soc_info;
+	base = soc_info->reg_map[0].mem_base;
+
+	if (!soc_info || !base) {
+		pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
+			__LINE__, soc_info, base);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	CDBG("%s:%d Base address %pK\n", __func__, __LINE__, base);
+
 	if (cci_dev->ref_count++) {
-		CDBG("%s ref_count %d\n", __func__, cci_dev->ref_count);
+		CDBG("%s:%d ref_count %d\n", __func__, __LINE__,
+			cci_dev->ref_count);
 		master = c_ctrl->cci_info->cci_i2c_master;
 		CDBG("%s:%d master %d\n", __func__, __LINE__, master);
 		if (master < MASTER_MAX && master >= 0) {
@@ -80,10 +64,10 @@
 			/* Set proper mask to RESET CMD address */
 			if (master == MASTER_0)
 				cam_io_w_mb(CCI_M0_RESET_RMSK,
-					cci_dev->base + CCI_RESET_CMD_ADDR);
+					base + CCI_RESET_CMD_ADDR);
 			else
 				cam_io_w_mb(CCI_M1_RESET_RMSK,
-					cci_dev->base + CCI_RESET_CMD_ADDR);
+					base + CCI_RESET_CMD_ADDR);
 			/* wait for reset done irq */
 			rc = wait_for_completion_timeout(
 				&cci_dev->cci_master_info[master].
@@ -108,74 +92,24 @@
 		pr_err("%s:%d CPAS start failed\n",
 			__func__, __LINE__);
 	}
-
-	ret = cam_cci_pinctrl_init(cci_dev);
-	if (ret < 0) {
-		pr_err("%s:%d Initialization of pinctrl failed\n",
-				__func__, __LINE__);
-		cci_dev->cci_pinctrl_status = 0;
-	} else {
-		cci_dev->cci_pinctrl_status = 1;
-	}
-	rc = msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
-		cci_dev->cci_gpio_tbl_size, 1);
-	if (cci_dev->cci_pinctrl_status) {
-		ret = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
-				cci_dev->cci_pinctrl.gpio_state_active);
-		if (ret)
-			pr_err("%s:%d cannot set pin to active state\n",
-				__func__, __LINE__);
-	}
-	if (rc < 0) {
-		CDBG("%s: request gpio failed\n", __func__);
-		goto request_gpio_failed;
-	}
-
-	rc = msm_camera_config_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
-		cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
-		&cci_dev->cci_reg_ptr[0], 1);
-	if (rc < 0) {
-		pr_err("%s:%d cci config_vreg failed\n", __func__, __LINE__);
-		goto clk_enable_failed;
-	}
-
-	rc = msm_camera_enable_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
-		cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
-		&cci_dev->cci_reg_ptr[0], 1);
-	if (rc < 0) {
-		pr_err("%s:%d cci enable_vreg failed\n", __func__, __LINE__);
-		goto reg_enable_failed;
-	}
-
-	clk_rates = cam_cci_get_clk_rates(cci_dev, c_ctrl);
-	if (!clk_rates) {
-		pr_err("%s: clk enable failed\n", __func__);
-		goto reg_enable_failed;
-	}
-
-	for (i = 0; i < cci_dev->num_clk; i++) {
-		cci_dev->cci_clk_info[i].clk_rate =
-			clk_rates[i];
-	}
-	rc = msm_camera_clk_enable(&cci_dev->v4l2_dev_str.pdev->dev,
-		cci_dev->cci_clk_info, cci_dev->cci_clk,
-		cci_dev->num_clk, true);
-	if (rc < 0) {
-		pr_err("%s: clk enable failed\n", __func__);
-		goto reg_enable_failed;
-	}
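+	/* Pick CCI clock rates for the requested I2C frequency mode */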
+	cam_cci_get_clk_rates(cci_dev, c_ctrl);
 
 	/* Re-initialize the completion */
 	reinit_completion(&cci_dev->cci_master_info[master].reset_complete);
 	for (i = 0; i < NUM_QUEUES; i++)
 		reinit_completion(&cci_dev->cci_master_info[master].
 			report_q[i]);
-	rc = msm_camera_enable_irq(cci_dev->irq, true);
+
+	/* Enable regulators, clocks and IRQ */
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_TURBO_VOTE, true);
 	if (rc < 0) {
-		pr_err("%s: irq enable failed\n", __func__);
-		return -EINVAL;
+		CDBG("%s:%d request platform resources failed\n", __func__,
+			__LINE__);
+		goto platform_enable_failed;
 	}
-	cci_dev->hw_version = cam_io_r_mb(cci_dev->base +
+
+	cci_dev->hw_version = cam_io_r_mb(base +
 		CCI_HW_VERSION_ADDR);
 	CDBG("%s:%d: hw_version = 0x%x\n", __func__, __LINE__,
 		cci_dev->hw_version);
@@ -195,7 +129,8 @@
 					max_queue_size =
 						CCI_I2C_QUEUE_1_SIZE;
 
-			CDBG("CCI Master[%d] :: Q0 size: %d Q1 size: %d\n", i,
+			CDBG("%s:%d : CCI Master[%d] :: Q0 : %d Q1 : %d\n",
+				__func__, __LINE__, i,
 				cci_dev->cci_i2c_queue_info[i][j].
 				max_queue_size,
 				cci_dev->cci_i2c_queue_info[i][j].
@@ -204,14 +139,14 @@
 	}
 
 	cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
-	cam_io_w_mb(CCI_RESET_CMD_RMSK, cci_dev->base +
+	cam_io_w_mb(CCI_RESET_CMD_RMSK, base +
 			CCI_RESET_CMD_ADDR);
-	cam_io_w_mb(0x1, cci_dev->base + CCI_RESET_CMD_ADDR);
+	cam_io_w_mb(0x1, base + CCI_RESET_CMD_ADDR);
 	rc = wait_for_completion_timeout(
 		&cci_dev->cci_master_info[MASTER_0].reset_complete,
 		CCI_TIMEOUT);
 	if (rc <= 0) {
-		pr_err("%s: wait_for_completion_timeout %d\n",
+		pr_err("%s:%d wait_for_completion_timeout\n",
 			 __func__, __LINE__);
 		if (rc == 0)
 			rc = -ETIMEDOUT;
@@ -220,14 +155,15 @@
 	for (i = 0; i < MASTER_MAX; i++)
 		cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES;
 	cam_io_w_mb(CCI_IRQ_MASK_0_RMSK,
-		cci_dev->base + CCI_IRQ_MASK_0_ADDR);
+		base + CCI_IRQ_MASK_0_ADDR);
 	cam_io_w_mb(CCI_IRQ_MASK_0_RMSK,
-		cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
-	cam_io_w_mb(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+		base + CCI_IRQ_CLEAR_0_ADDR);
+	cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
 
 	for (i = 0; i < MASTER_MAX; i++) {
 		if (!cci_dev->write_wq[i]) {
-			pr_err("Failed to flush write wq\n");
+			pr_err("%s:%d Failed to flush write wq\n",
+				__func__, __LINE__);
 			rc = -ENOMEM;
 			goto reset_complete_failed;
 		} else {
@@ -239,25 +175,9 @@
 	return 0;
 
 reset_complete_failed:
-	msm_camera_enable_irq(cci_dev->irq, false);
-	msm_camera_clk_enable(&cci_dev->v4l2_dev_str.pdev->dev,
-		cci_dev->cci_clk_info, cci_dev->cci_clk,
-		cci_dev->num_clk, false);
-reg_enable_failed:
-	msm_camera_config_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
-		cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
-		&cci_dev->cci_reg_ptr[0], 0);
-clk_enable_failed:
-	if (cci_dev->cci_pinctrl_status) {
-		ret = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
-				cci_dev->cci_pinctrl.gpio_state_suspend);
-		if (ret)
-			pr_err("%s:%d cannot set pin to suspend state\n",
-				__func__, __LINE__);
-	}
-	msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
-		cci_dev->cci_gpio_tbl_size, 0);
-request_gpio_failed:
+	cam_soc_util_disable_platform_resource(soc_info, 1, 1);
+
+platform_enable_failed:
 	cci_dev->ref_count--;
 	cam_cpas_stop(cci_dev->cpas_handle);
 
@@ -267,12 +187,9 @@
 void cam_cci_soc_remove(struct platform_device *pdev,
 	struct cci_device *cci_dev)
 {
-	msm_camera_put_clk_info_and_rates(pdev,
-		&cci_dev->cci_clk_info, &cci_dev->cci_clk,
-		&cci_dev->cci_clk_rates, cci_dev->num_clk_cases,
-		cci_dev->num_clk);
+	struct cam_hw_soc_info *soc_info = &cci_dev->soc_info;
 
-	msm_camera_put_reg_base(pdev, cci_dev->base, "cci", true);
+	cam_soc_util_release_platform_resource(soc_info);
 }
 
 static void cam_cci_init_cci_params(struct cci_device *new_cci_dev)
@@ -293,76 +210,6 @@
 	}
 }
 
-static int32_t cam_cci_init_gpio_params(struct cci_device *cci_dev)
-{
-	int32_t rc = 0, i = 0;
-	uint32_t *val_array = NULL;
-	uint8_t tbl_size = 0;
-	struct device_node *of_node = cci_dev->v4l2_dev_str.pdev->dev.of_node;
-	struct gpio *gpio_tbl = NULL;
-
-	cci_dev->cci_gpio_tbl_size = tbl_size = of_gpio_count(of_node);
-	CDBG("%s gpio count %d\n", __func__, tbl_size);
-	if (!tbl_size) {
-		pr_err("%s:%d gpio count 0\n", __func__, __LINE__);
-		return -EINVAL;
-	}
-
-	gpio_tbl = cci_dev->cci_gpio_tbl =
-		kzalloc(sizeof(struct gpio) * tbl_size, GFP_KERNEL);
-	if (!gpio_tbl) {
-		pr_err("%s failed %d\n", __func__, __LINE__);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < tbl_size; i++) {
-		gpio_tbl[i].gpio = of_get_gpio(of_node, i);
-		CDBG("%s gpio_tbl[%d].gpio = %d\n", __func__, i,
-			gpio_tbl[i].gpio);
-	}
-
-	val_array = kcalloc(tbl_size, sizeof(uint32_t),
-		GFP_KERNEL);
-	if (!val_array) {
-		rc = -ENOMEM;
-		goto free_gpio_tbl;
-	}
-
-	rc = of_property_read_u32_array(of_node, "qcom,gpio-tbl-flags",
-		val_array, tbl_size);
-	if (rc < 0) {
-		pr_err("%s failed %d\n", __func__, __LINE__);
-		goto free_val_array;
-	}
-	for (i = 0; i < tbl_size; i++) {
-		gpio_tbl[i].flags = val_array[i];
-		CDBG("%s gpio_tbl[%d].flags = %ld\n", __func__, i,
-			gpio_tbl[i].flags);
-	}
-
-	for (i = 0; i < tbl_size; i++) {
-		rc = of_property_read_string_index(of_node,
-			"qcom,gpio-tbl-label", i, &gpio_tbl[i].label);
-		CDBG("%s gpio_tbl[%d].label = %s\n", __func__, i,
-			gpio_tbl[i].label);
-		if (rc < 0) {
-			pr_err("%s failed %d\n", __func__, __LINE__);
-			goto free_val_array;
-		}
-	}
-
-	kfree(val_array);
-	return rc;
-
-free_val_array:
-	kfree(val_array);
-free_gpio_tbl:
-	kfree(cci_dev->cci_gpio_tbl);
-	cci_dev->cci_gpio_tbl = NULL;
-	cci_dev->cci_gpio_tbl_size = 0;
-	return rc;
-}
-
 static void cam_cci_init_default_clk_params(struct cci_device *cci_dev,
 	uint8_t index)
 {
@@ -403,75 +250,78 @@
 			src_node = of_find_node_by_name(of_node,
 				"qcom,i2c_custom_mode");
 
-		rc = of_property_read_u32(src_node, "qcom,hw-thigh", &val);
-		CDBG("%s qcom,hw-thigh %d, rc %d\n", __func__, val, rc);
+		rc = of_property_read_u32(src_node, "hw-thigh", &val);
+		CDBG("%s:%d hw-thigh %d, rc %d\n", __func__, __LINE__, val, rc);
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_thigh = val;
-			rc = of_property_read_u32(src_node, "qcom,hw-tlow",
+			rc = of_property_read_u32(src_node, "hw-tlow",
 				&val);
-			CDBG("%s qcom,hw-tlow %d, rc %d\n", __func__, val, rc);
+			CDBG("%s:%d hw-tlow %d, rc %d\n", __func__, __LINE__,
+				val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_tlow = val;
-			rc = of_property_read_u32(src_node, "qcom,hw-tsu-sto",
+			rc = of_property_read_u32(src_node, "hw-tsu-sto",
 				&val);
-			CDBG("%s qcom,hw-tsu-sto %d, rc %d\n",
-				__func__, val, rc);
+			CDBG("%s:%d hw-tsu-sto %d, rc %d\n",
+				__func__, __LINE__, val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_tsu_sto = val;
-			rc = of_property_read_u32(src_node, "qcom,hw-tsu-sta",
+			rc = of_property_read_u32(src_node, "hw-tsu-sta",
 				&val);
-			CDBG("%s qcom,hw-tsu-sta %d, rc %d\n",
-				__func__, val, rc);
+			CDBG("%s:%d hw-tsu-sta %d, rc %d\n",
+				__func__, __LINE__, val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_tsu_sta = val;
-			rc = of_property_read_u32(src_node, "qcom,hw-thd-dat",
+			rc = of_property_read_u32(src_node, "hw-thd-dat",
 				&val);
-			CDBG("%s qcom,hw-thd-dat %d, rc %d\n",
-				__func__, val, rc);
+			CDBG("%s:%d hw-thd-dat %d, rc %d\n",
+				__func__, __LINE__, val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_thd_dat = val;
-			rc = of_property_read_u32(src_node, "qcom,hw-thd-sta",
+			rc = of_property_read_u32(src_node, "hw-thd-sta",
 				&val);
-			CDBG("%s qcom,hw-thd-sta %d, rc %d\n", __func__,
+			CDBG("%s:%d hw-thd-sta %d, rc %d\n", __func__, __LINE__,
 				val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_thd_sta = val;
-			rc = of_property_read_u32(src_node, "qcom,hw-tbuf",
+			rc = of_property_read_u32(src_node, "hw-tbuf",
 				&val);
-			CDBG("%s qcom,hw-tbuf %d, rc %d\n", __func__, val, rc);
+			CDBG("%s:%d hw-tbuf %d, rc %d\n", __func__, __LINE__,
+				val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_tbuf = val;
 			rc = of_property_read_u32(src_node,
-				"qcom,hw-scl-stretch-en", &val);
-			CDBG("%s qcom,hw-scl-stretch-en %d, rc %d\n",
-				__func__, val, rc);
+				"hw-scl-stretch-en", &val);
+			CDBG("%s:%d hw-scl-stretch-en %d, rc %d\n",
+				__func__, __LINE__, val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_scl_stretch_en = val;
-			rc = of_property_read_u32(src_node, "qcom,hw-trdhld",
+			rc = of_property_read_u32(src_node, "hw-trdhld",
 				&val);
-			CDBG("%s qcom,hw-trdhld %d, rc %d\n",
-				__func__, val, rc);
+			CDBG("%s:%d hw-trdhld %d, rc %d\n",
+				__func__, __LINE__, val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_trdhld = val;
-			rc = of_property_read_u32(src_node, "qcom,hw-tsp",
+			rc = of_property_read_u32(src_node, "hw-tsp",
 				&val);
-			CDBG("%s qcom,hw-tsp %d, rc %d\n", __func__, val, rc);
+			CDBG("%s:%d hw-tsp %d, rc %d\n", __func__, __LINE__,
+				val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_tsp = val;
 			val = 0;
-			rc = of_property_read_u32(src_node, "qcom,cci-clk-src",
+			rc = of_property_read_u32(src_node, "cci-clk-src",
 				&val);
-			CDBG("%s qcom,cci-clk-src %d, rc %d\n",
-				__func__, val, rc);
+			CDBG("%s:%d cci-clk-src %d, rc %d\n",
+				__func__, __LINE__, val, rc);
 			cci_dev->cci_clk_params[count].cci_clk_src = val;
 		} else
 			cam_cci_init_default_clk_params(cci_dev, count);
@@ -484,141 +334,78 @@
 	struct cci_device *new_cci_dev)
 {
 	int rc = 0, i = 0;
+	struct cam_hw_soc_info *soc_info =
+		&new_cci_dev->soc_info;
 
-	/* Get Clock Info*/
-	rc = msm_camera_get_clk_info_and_rates(pdev,
-		&new_cci_dev->cci_clk_info, &new_cci_dev->cci_clk,
-		&new_cci_dev->cci_clk_rates, &new_cci_dev->num_clk_cases,
-		&new_cci_dev->num_clk);
+	rc = cam_soc_util_get_dt_properties(soc_info);
 	if (rc < 0) {
-		pr_err("%s: cam_cci_get_clk_info() failed", __func__);
-		kfree(new_cci_dev);
-		new_cci_dev = NULL;
-		return -EFAULT;
+		pr_err("%s:%d :Error: Parsing DT data failed:%d\n",
+			__func__, __LINE__, rc);
+		return -EINVAL;
 	}
 
 	new_cci_dev->ref_count = 0;
-	new_cci_dev->base = msm_camera_get_reg_base(pdev, "cci", true);
-	if (!new_cci_dev->base) {
-		pr_err("%s: no mem resource?\n", __func__);
-		return -ENODEV;
-	}
-	new_cci_dev->irq = msm_camera_get_irq(pdev, "cci");
-	if (!new_cci_dev->irq) {
-		pr_err("%s: no irq resource?\n", __func__);
-		return -ENODEV;
-	}
-	CDBG("%s line %d cci irq start %d end %d\n", __func__,
-		__LINE__,
-		(int) new_cci_dev->irq->start,
-		(int) new_cci_dev->irq->end);
-	rc = msm_camera_register_irq(pdev, new_cci_dev->irq,
-		cam_cci_irq, IRQF_TRIGGER_RISING, "cci", new_cci_dev);
-	if (rc < 0) {
-		pr_err("%s: irq request fail\n", __func__);
-		rc = -EBUSY;
-		goto cci_release_mem;
-	}
 
-	msm_camera_enable_irq(new_cci_dev->irq, false);
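+	/* Acquire platform resources and register the CCI IRQ handler */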
+	rc = cam_soc_util_request_platform_resource(soc_info,
+		cam_cci_irq, new_cci_dev);
+	if (rc < 0) {
+		pr_err("%s:%d :Error: requesting platform resources failed:%d\n",
+			__func__, __LINE__, rc);
+		return -EINVAL;
+	}
 	new_cci_dev->v4l2_dev_str.pdev = pdev;
 	cam_cci_init_cci_params(new_cci_dev);
 	cam_cci_init_clk_params(new_cci_dev);
-	rc = cam_cci_init_gpio_params(new_cci_dev);
-	if (rc < 0) {
-		pr_err("%s:%d :Error: In Initializing GPIO params:%d\n",
-			__func__, __LINE__, rc);
-		goto cci_release_mem;
-	}
-
-	rc = cam_sensor_get_dt_vreg_data(new_cci_dev->
-		v4l2_dev_str.pdev->dev.of_node,
-		&(new_cci_dev->cci_vreg), &(new_cci_dev->regulator_count));
-	if (rc < 0) {
-		pr_err("%s: cam_sensor_get_dt_vreg_data fail\n", __func__);
-		rc = -EFAULT;
-		goto cci_release_mem;
-	}
-
-	/* Parse VREG data */
-	if ((new_cci_dev->regulator_count < 0) ||
-		(new_cci_dev->regulator_count > MAX_REGULATOR)) {
-		pr_err("%s: invalid reg count = %d, max is %d\n", __func__,
-			new_cci_dev->regulator_count, MAX_REGULATOR);
-		rc = -EFAULT;
-		goto cci_invalid_vreg_data;
-	}
 
 	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
 	if (rc)
-		pr_err("%s: failed to add child nodes, rc=%d\n", __func__, rc);
+		pr_err("%s:%d failed to add child nodes, rc=%d\n",
+			__func__, __LINE__, rc);
+
 	for (i = 0; i < MASTER_MAX; i++) {
 		new_cci_dev->write_wq[i] = create_singlethread_workqueue(
 			"cam_cci_wq");
 		if (!new_cci_dev->write_wq[i])
-			pr_err("Failed to create write wq\n");
+			pr_err("%s:%d Failed to create write wq\n",
+				__func__, __LINE__);
 	}
 	CDBG("%s line %d\n", __func__, __LINE__);
 	return 0;
-
-cci_invalid_vreg_data:
-	kfree(new_cci_dev->cci_vreg);
-	new_cci_dev->cci_vreg = NULL;
-cci_release_mem:
-	msm_camera_put_reg_base(pdev, new_cci_dev->base, "cci", true);
-
-	return rc;
 }
 
 int cam_cci_soc_release(struct cci_device *cci_dev)
 {
 	uint8_t i = 0, rc = 0;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
 
 	if (!cci_dev->ref_count || cci_dev->cci_state != CCI_STATE_ENABLED) {
-		pr_err("%s invalid ref count %d / cci state %d\n",
-			__func__, cci_dev->ref_count, cci_dev->cci_state);
+		pr_err("%s:%d invalid ref count %d / cci state %d\n", __func__,
+			__LINE__, cci_dev->ref_count, cci_dev->cci_state);
 		return -EINVAL;
 	}
 	if (--cci_dev->ref_count) {
-		CDBG("%s ref_count Exit %d\n", __func__, cci_dev->ref_count);
+		CDBG("%s:%d ref_count Exit %d\n", __func__, __LINE__,
+			cci_dev->ref_count);
 		return 0;
 	}
 	for (i = 0; i < MASTER_MAX; i++)
 		if (cci_dev->write_wq[i])
 			flush_workqueue(cci_dev->write_wq[i]);
 
-	msm_camera_enable_irq(cci_dev->irq, false);
-	msm_camera_clk_enable(&cci_dev->v4l2_dev_str.pdev->dev,
-		cci_dev->cci_clk_info, cci_dev->cci_clk,
-		cci_dev->num_clk, false);
-
-	rc = msm_camera_enable_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
-		cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
-		&cci_dev->cci_reg_ptr[0], 0);
-	if (rc < 0)
-		pr_err("%s:%d cci disable_vreg failed\n", __func__, __LINE__);
-
-	rc = msm_camera_config_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
-		cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
-		&cci_dev->cci_reg_ptr[0], 0);
-	if (rc < 0)
-		pr_err("%s:%d cci unconfig_vreg failed\n", __func__, __LINE__);
-
-	if (cci_dev->cci_pinctrl_status) {
-		rc = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
-				cci_dev->cci_pinctrl.gpio_state_suspend);
-		if (rc)
-			pr_err("%s:%d cannot set pin to active state\n",
-				__func__, __LINE__);
-	}
-	cci_dev->cci_pinctrl_status = 0;
-	msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
-		cci_dev->cci_gpio_tbl_size, 0);
 	for (i = 0; i < MASTER_MAX; i++)
 		cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc) {
+		pr_err("%s:%d: platform resources disable failed, rc=%d\n",
+			__func__, __LINE__, rc);
+		return rc;
+	}
+
 	cci_dev->cci_state = CCI_STATE_DISABLED;
 	cci_dev->cycles_per_us = 0;
-	cci_dev->cci_clk_src = 0;
+	soc_info->src_clk_idx = 0;
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h
index ca4bbe0..331227b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h
@@ -14,6 +14,7 @@
 #define _CAM_CCI_SOC_H_
 
 #include "cam_cci_core.h"
+#include "cam_soc_util.h"
 
 /**
  * @sd: V4L2 sub device
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index 6751fdd..71a88bf 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -19,7 +19,9 @@
 void cam_csiphy_query_cap(struct csiphy_device *csiphy_dev,
 	struct cam_csiphy_query_cap *csiphy_cap)
 {
-	csiphy_cap->slot_info = csiphy_dev->v4l2_dev_str.pdev->id;
+	struct cam_hw_soc_info *soc_info = &csiphy_dev->soc_info;
+
+	csiphy_cap->slot_info = soc_info->index;
 	csiphy_cap->version = csiphy_dev->hw_version;
 	csiphy_cap->clk_lane = csiphy_dev->clk_lane;
 }
@@ -27,14 +29,18 @@
 void cam_csiphy_reset(struct csiphy_device *csiphy_dev)
 {
 	int32_t  i;
+	void __iomem *base = NULL;
 	uint32_t size =
 		csiphy_dev->ctrl_reg->csiphy_reg.csiphy_reset_array_size;
+	struct cam_hw_soc_info *soc_info = &csiphy_dev->soc_info;
+
+	base = soc_info->reg_map[0].mem_base;
 
 	for (i = 0; i < size; i++) {
 		cam_io_w(
 			csiphy_dev->ctrl_reg->
 			csiphy_reset_reg[i].reg_data,
-			csiphy_dev->base +
+			base +
 			csiphy_dev->ctrl_reg->
 			csiphy_reset_reg[i].reg_addr);
 
@@ -118,11 +124,13 @@
 void cam_csiphy_cphy_irq_config(struct csiphy_device *csiphy_dev)
 {
 	int32_t i;
+	void __iomem *csiphybase =
+		csiphy_dev->soc_info.reg_map[0].mem_base;
 
 	for (i = 0; i < csiphy_dev->num_irq_registers; i++)
 		cam_io_w(csiphy_dev->ctrl_reg->
 			csiphy_irq_reg[i].reg_data,
-			csiphy_dev->base +
+			csiphybase +
 			csiphy_dev->ctrl_reg->
 			csiphy_irq_reg[i].reg_addr);
 }
@@ -130,10 +138,12 @@
 void cam_csiphy_cphy_irq_disable(struct csiphy_device *csiphy_dev)
 {
 	int32_t i;
+	void __iomem *csiphybase =
+		csiphy_dev->soc_info.reg_map[0].mem_base;
 
 	for (i = 0; i < csiphy_dev->num_irq_registers; i++)
 		cam_io_w(0x0,
-			csiphy_dev->base +
+			csiphybase +
 			csiphy_dev->ctrl_reg->
 			csiphy_irq_reg[i].reg_addr);
 }
@@ -144,6 +154,8 @@
 	uint8_t i;
 	struct csiphy_device *csiphy_dev =
 		(struct csiphy_device *)data;
+	struct cam_hw_soc_info *soc_info = NULL;
+	void __iomem *base = NULL;
 
 	if (!csiphy_dev) {
 		pr_err("%s:%d Invalid Args\n",
@@ -151,27 +163,30 @@
 		return -EINVAL;
 	}
 
+	soc_info = &csiphy_dev->soc_info;
+	base = csiphy_dev->soc_info.reg_map[0].mem_base;
+
 	for (i = 0; i < csiphy_dev->num_irq_registers; i++) {
 		irq = cam_io_r(
-			csiphy_dev->base +
+			base +
 			csiphy_dev->ctrl_reg->csiphy_reg.
 			mipi_csiphy_interrupt_status0_addr + 0x4*i);
 		cam_io_w(irq,
-			csiphy_dev->base +
+			base +
 			csiphy_dev->ctrl_reg->csiphy_reg.
 			mipi_csiphy_interrupt_clear0_addr + 0x4*i);
 		pr_err_ratelimited(
 			"%s CSIPHY%d_IRQ_STATUS_ADDR%d = 0x%x\n",
-			__func__, csiphy_dev->v4l2_dev_str.pdev->id, i, irq);
+			__func__, soc_info->index, i, irq);
 		cam_io_w(0x0,
-			csiphy_dev->base +
+			base +
 			csiphy_dev->ctrl_reg->csiphy_reg.
 			mipi_csiphy_interrupt_clear0_addr + 0x4*i);
 	}
-	cam_io_w(0x1, csiphy_dev->base +
+	cam_io_w(0x1, base +
 		csiphy_dev->ctrl_reg->
 		csiphy_reg.mipi_csiphy_glbl_irq_cmd_addr);
-	cam_io_w(0x0, csiphy_dev->base +
+	cam_io_w(0x0, base +
 		csiphy_dev->ctrl_reg->
 		csiphy_reg.mipi_csiphy_glbl_irq_cmd_addr);
 
@@ -196,7 +211,7 @@
 	lane_cnt = csiphy_dev->csiphy_info->lane_cnt;
 	lane_mask = csiphy_dev->csiphy_info->lane_mask & 0x1f;
 	settle_cnt = (csiphy_dev->csiphy_info->settle_time / 200000000);
-	csiphybase = csiphy_dev->base;
+	csiphybase = csiphy_dev->soc_info.reg_map[0].mem_base;
 
 	if (!csiphybase) {
 		pr_err("%s: csiphybase NULL\n", __func__);
@@ -243,14 +258,14 @@
 			csiphy_common_reg[i].csiphy_param_type) {
 			case CSIPHY_LANE_ENABLE:
 				cam_io_w(lane_enable,
-					csiphy_dev->base +
+					csiphybase +
 					csiphy_dev->ctrl_reg->
 					csiphy_common_reg[i].reg_addr);
 			break;
 			case CSIPHY_DEFAULT_PARAMS:
 				cam_io_w(csiphy_dev->ctrl_reg->
 					csiphy_common_reg[i].reg_data,
-					csiphy_dev->base +
+					csiphybase +
 					csiphy_dev->ctrl_reg->
 					csiphy_common_reg[i].reg_addr);
 			break;
@@ -270,22 +285,22 @@
 			switch (reg_array[lane_pos][i].csiphy_param_type) {
 			case CSIPHY_LANE_ENABLE:
 				cam_io_w(lane_enable,
-					csiphy_dev->base +
+					csiphybase +
 					reg_array[lane_pos][i].reg_addr);
 			break;
 			case CSIPHY_DEFAULT_PARAMS:
 				cam_io_w(reg_array[lane_pos][i].reg_data,
-					csiphy_dev->base +
+					csiphybase +
 					reg_array[lane_pos][i].reg_addr);
 			break;
 			case CSIPHY_SETTLE_CNT_LOWER_BYTE:
 				cam_io_w(settle_cnt & 0xFF,
-					csiphy_dev->base +
+					csiphybase +
 					reg_array[lane_pos][i].reg_addr);
 			break;
 			case CSIPHY_SETTLE_CNT_HIGHER_BYTE:
 				cam_io_w((settle_cnt >> 8) & 0xFF,
-					csiphy_dev->base +
+					csiphybase +
 					reg_array[lane_pos][i].reg_addr);
 			break;
 			default:
@@ -388,7 +403,7 @@
 	}
 		break;
 	case CAM_STOP_DEV: {
-		rc = cam_csiphy_soc_release(csiphy_dev);
+		rc = cam_csiphy_disable_hw(csiphy_dev);
 		if (rc < 0) {
 			pr_err("%s:%d Failed in csiphy release\n",
 				__func__, __LINE__);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
index f2ece9d..7783b2e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
@@ -120,11 +120,13 @@
 	mutex_init(&new_csiphy_dev->mutex);
 	new_csiphy_dev->v4l2_dev_str.pdev = pdev;
 
+	new_csiphy_dev->soc_info.pdev = pdev;
+
 	new_csiphy_dev->ref_count = 0;
 
 	rc = cam_csiphy_parse_dt_info(pdev, new_csiphy_dev);
 	if (rc < 0) {
-		pr_err("%s:%d :ERROR: dt paring failed: %d\n",
+		pr_err("%s:%d :ERROR: dt parsing failed: %d\n",
 			__func__, __LINE__, rc);
 		goto csiphy_no_resource;
 	}
@@ -167,7 +169,7 @@
 	new_csiphy_dev->is_acquired_dev_combo_mode = 0;
 
 	cpas_parms.cam_cpas_client_cb = NULL;
-	cpas_parms.cell_index = pdev->id;
+	cpas_parms.cell_index = new_csiphy_dev->soc_info.index;
 	cpas_parms.dev = &pdev->dev;
 	cpas_parms.userdata = new_csiphy_dev;
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
index 9049e4e..c4258bd 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
@@ -31,9 +31,9 @@
 #include <cam_sensor_cmn_header.h>
 #include <cam_req_mgr_interface.h>
 #include <cam_subdev.h>
-#include <cam_sensor_soc_api.h>
 #include <cam_io_util.h>
 #include <cam_cpas_api.h>
+#include "cam_soc_util.h"
 
 #define MAX_CSIPHY                  3
 #define MAX_DPHY_DATA_LN            4
@@ -175,19 +175,11 @@
  *   device is for combo mode
  */
 struct csiphy_device {
-	struct resource *irq;
-	void __iomem *base;
 	struct mutex mutex;
 	uint32_t hw_version;
 	uint32_t csiphy_state;
 	struct csiphy_ctrl_t *ctrl_reg;
-	size_t num_clk;
 	uint32_t csiphy_max_clk;
-	int32_t num_vreg;
-	struct clk **csiphy_clk;
-	struct msm_cam_clk_info *csiphy_clk_info;
-	struct camera_vreg_t *csiphy_vreg;
-	struct regulator *csiphy_reg_ptr[MAX_REGULATOR];
 	struct msm_cam_clk_info csiphy_3p_clk_info[2];
 	struct clk *csiphy_3p_clk[2];
 	uint32_t csiphy_clk_index;
@@ -203,6 +195,7 @@
 	uint32_t acquire_count;
 	char device_name[20];
 	uint32_t is_acquired_dev_combo_mode;
+	struct cam_hw_soc_info   soc_info;
 	uint32_t cpas_handle;
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
index 540ec76..6b5aba9 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
@@ -13,12 +13,13 @@
 #include "cam_csiphy_soc.h"
 #include "cam_csiphy_core.h"
 #include "include/cam_csiphy_1_0_hwreg.h"
-#include "cam_sensor_util.h"
 
 int32_t cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev)
 {
 	int32_t rc = 0;
-	long clk_rate = 0;
+	struct cam_hw_soc_info   *soc_info;
+
+	soc_info = &csiphy_dev->soc_info;
 
 	if (csiphy_dev->ref_count++) {
 		pr_err("%s:%d csiphy refcount = %d\n", __func__,
@@ -26,92 +27,58 @@
 		return rc;
 	}
 
-	rc = msm_camera_config_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
-		csiphy_dev->csiphy_vreg,
-		csiphy_dev->num_vreg, NULL, 0,
-		&csiphy_dev->csiphy_reg_ptr[0], 1);
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_TURBO_VOTE, ENABLE_IRQ);
 	if (rc < 0) {
-		pr_err("%s:%d failed regulator get\n", __func__, __LINE__);
-		goto csiphy_config_regulator_fail;
+		pr_err("%s:%d failed to enable platform resources %d\n",
+			__func__, __LINE__, rc);
+		return rc;
 	}
 
-	rc = msm_camera_enable_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
-		csiphy_dev->csiphy_vreg,
-		csiphy_dev->num_vreg, NULL, 0,
-		&csiphy_dev->csiphy_reg_ptr[0], 1);
-	if (rc < 0) {
-		pr_err("%s:%d failed to enable regulators\n", __func__, rc);
-		goto csiphy_regulator_fail;
-	}
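+	/* Set the csiphy timer src clock to the rate parsed from DT */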
+	rc = cam_soc_util_set_clk_rate(
+		soc_info->clk[csiphy_dev->csiphy_clk_index],
+		soc_info->clk_name[csiphy_dev->csiphy_clk_index],
+		soc_info->clk_rate[0][csiphy_dev->csiphy_clk_index]);
 
-	/*Enable clocks*/
-	rc = msm_camera_clk_enable(&csiphy_dev->v4l2_dev_str.pdev->dev,
-		csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
-		csiphy_dev->num_clk, true);
 	if (rc < 0) {
-		pr_err("%s: csiphy clk enable failed\n", __func__);
-		csiphy_dev->ref_count--;
-		goto csiphy_regulator_fail;
-	}
-
-	clk_rate = msm_camera_clk_set_rate(&csiphy_dev->v4l2_dev_str.pdev->dev,
-		csiphy_dev->csiphy_clk[csiphy_dev->csiphy_clk_index],
-		clk_rate);
-	if (clk_rate < 0) {
-		pr_err("csiphy_clk_set_rate failed\n");
-		goto csiphy_clk_enable_fail;
-	}
-
-	rc = msm_camera_enable_irq(csiphy_dev->irq, ENABLE_IRQ);
-	if (rc < 0) {
-		pr_err("%s:%d :ERROR: irq enable failed\n",
+		pr_err("%s:%d csiphy_clk_set_rate failed\n",
 			__func__, __LINE__);
-		goto csiphy_clk_enable_fail;
-		return -EINVAL;
+		goto csiphy_disable_platform_resource;
 	}
 
 	cam_csiphy_reset(csiphy_dev);
 
 	return rc;
-csiphy_clk_enable_fail:
-	msm_camera_clk_enable(&csiphy_dev->v4l2_dev_str.pdev->dev,
-		csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
-		csiphy_dev->num_clk, false);
-csiphy_regulator_fail:
-	msm_camera_enable_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
-		csiphy_dev->csiphy_vreg,
-		csiphy_dev->num_vreg, NULL, 0,
-		&csiphy_dev->csiphy_reg_ptr[0], 0);
-csiphy_config_regulator_fail:
-	msm_camera_config_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
-		csiphy_dev->csiphy_vreg,
-		csiphy_dev->num_vreg, NULL, 0,
-		&csiphy_dev->csiphy_reg_ptr[0], 0);
+
+csiphy_disable_platform_resource:
+	cam_soc_util_disable_platform_resource(soc_info, true, true);
 
 	return rc;
 }
 
-int32_t cam_csiphy_disable_hw(struct platform_device *pdev)
+int32_t cam_csiphy_disable_hw(struct csiphy_device *csiphy_dev)
 {
-	struct csiphy_device *csiphy_dev =
-		platform_get_drvdata(pdev);
+	struct cam_hw_soc_info   *soc_info;
 
-	/*Disable regulators*/
-	msm_camera_enable_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
-		csiphy_dev->csiphy_vreg,
-		csiphy_dev->num_vreg, NULL, 0,
-		&csiphy_dev->csiphy_reg_ptr[0], 0);
+	if (!csiphy_dev || !csiphy_dev->ref_count) {
+		pr_err("%s:%d csiphy dev NULL / ref_count ZERO\n", __func__,
+			__LINE__);
+		return 0;
+	}
+	soc_info = &csiphy_dev->soc_info;
 
-	/*Disable clocks*/
-	msm_camera_clk_enable(&csiphy_dev->v4l2_dev_str.pdev->dev,
-		csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
-		csiphy_dev->num_clk, false);
+	if (--csiphy_dev->ref_count) {
+		pr_err("%s:%d csiphy refcount = %d\n", __func__,
+			__LINE__, csiphy_dev->ref_count);
+		return 0;
+	}
 
-	/*Disable IRQ*/
-	msm_camera_enable_irq(csiphy_dev->irq, false);
+	cam_csiphy_reset(csiphy_dev);
+
+	cam_soc_util_disable_platform_resource(soc_info, true, true);
 
 	return 0;
-
 }
 
 int32_t cam_csiphy_parse_dt_info(struct platform_device *pdev,
@@ -121,11 +88,16 @@
 	uint32_t  clk_cnt = 0;
 	char      *csi_3p_clk_name = "csi_phy_3p_clk";
 	char      *csi_3p_clk_src_name = "csiphy_3p_clk_src";
+	struct cam_hw_soc_info   *soc_info;
 
-	if (pdev->dev.of_node) {
-		of_property_read_u32((&pdev->dev)->of_node,
-			"cell-index", &pdev->id);
-		CDBG("%s: device id = %d\n", __func__, pdev->id);
+	csiphy_dev->is_csiphy_3phase_hw = 0;
+	soc_info = &csiphy_dev->soc_info;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		pr_err("%s:%d :Error: parsing common soc dt(rc %d)\n",
+			 __func__, __LINE__, rc);
+		return rc;
 	}
 
 	csiphy_dev->is_csiphy_3phase_hw = 0;
@@ -151,124 +123,56 @@
 		return rc;
 	}
 
-	rc = msm_camera_get_clk_info(csiphy_dev->v4l2_dev_str.pdev,
-		&csiphy_dev->csiphy_clk_info,
-		&csiphy_dev->csiphy_clk,
-		&csiphy_dev->num_clk);
-	if (rc < 0) {
-		pr_err("%s:%d failed clock get\n", __func__, __LINE__);
-		return rc;
+	if (soc_info->num_clk > CSIPHY_NUM_CLK_MAX) {
+		pr_err("%s:%d invalid clk count=%d, max is %d\n", __func__,
+			__LINE__, soc_info->num_clk, CSIPHY_NUM_CLK_MAX);
+		return -EINVAL;
 	}
-
-	if (csiphy_dev->num_clk > CSIPHY_NUM_CLK_MAX) {
-		pr_err("%s: invalid clk count=%zu, max is %d\n", __func__,
-			csiphy_dev->num_clk, CSIPHY_NUM_CLK_MAX);
-		goto clk_mem_ovf_err;
-	}
-
-	for (i = 0; i < csiphy_dev->num_clk; i++) {
-		if (!strcmp(csiphy_dev->csiphy_clk_info[i].clk_name,
+	for (i = 0; i < soc_info->num_clk; i++) {
+		if (!strcmp(soc_info->clk_name[i],
 			csi_3p_clk_src_name)) {
 			csiphy_dev->csiphy_3p_clk_info[0].clk_name =
-				csiphy_dev->csiphy_clk_info[i].clk_name;
+				soc_info->clk_name[i];
 			csiphy_dev->csiphy_3p_clk_info[0].clk_rate =
-				csiphy_dev->csiphy_clk_info[i].clk_rate;
+				soc_info->clk_rate[0][i];
 			csiphy_dev->csiphy_3p_clk[0] =
-				csiphy_dev->csiphy_clk[i];
+				soc_info->clk[i];
 			continue;
-		} else if (!strcmp(csiphy_dev->csiphy_clk_info[i].clk_name,
-					csi_3p_clk_name)) {
+		} else if (!strcmp(soc_info->clk_name[i],
+				csi_3p_clk_name)) {
 			csiphy_dev->csiphy_3p_clk_info[1].clk_name =
-				csiphy_dev->csiphy_clk_info[i].clk_name;
+				soc_info->clk_name[i];
 			csiphy_dev->csiphy_3p_clk_info[1].clk_rate =
-				csiphy_dev->csiphy_clk_info[i].clk_rate;
+				soc_info->clk_rate[0][i];
 			csiphy_dev->csiphy_3p_clk[1] =
-				csiphy_dev->csiphy_clk[i];
+				soc_info->clk[i];
 			continue;
 		}
 
-		if (!strcmp(csiphy_dev->csiphy_clk_info[clk_cnt].clk_name,
+		if (!strcmp(soc_info->clk_name[i],
 			"csiphy_timer_src_clk")) {
 			csiphy_dev->csiphy_max_clk =
-				csiphy_dev->csiphy_clk_info[clk_cnt].clk_rate;
+			soc_info->clk_rate[0][clk_cnt];
 			csiphy_dev->csiphy_clk_index = clk_cnt;
 		}
-		CDBG("%s: clk_rate[%d] = %ld\n", __func__, clk_cnt,
-			csiphy_dev->csiphy_clk_info[clk_cnt].clk_rate);
+		CDBG("%s:%d clk_rate[%d] = %d\n", __func__, __LINE__, clk_cnt,
+			soc_info->clk_rate[0][clk_cnt]);
 		clk_cnt++;
 	}
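+	/* Acquire platform resources and register the CSIPHY IRQ handler */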
+	rc = cam_soc_util_request_platform_resource(&csiphy_dev->soc_info,
+		cam_csiphy_irq, csiphy_dev);
 
-	rc = cam_sensor_get_dt_vreg_data(pdev->dev.of_node,
-		&(csiphy_dev->csiphy_vreg), &(csiphy_dev->num_vreg));
-	if (rc < 0) {
-		pr_err("%s:%d Reg get failed\n", __func__, __LINE__);
-		csiphy_dev->num_vreg = 0;
-	}
-
-	csiphy_dev->base = msm_camera_get_reg_base(pdev, "csiphy", true);
-	if (!csiphy_dev->base) {
-		pr_err("%s: no mem resource?\n", __func__);
-		rc = -ENODEV;
-		goto csiphy_no_resource;
-	}
-
-	csiphy_dev->irq = msm_camera_get_irq(pdev, "csiphy");
-	if (!csiphy_dev->irq) {
-		pr_err("%s: no irq resource?\n", __func__);
-		rc = -ENODEV;
-		goto csiphy_no_resource;
-	}
-
-	rc = msm_camera_register_irq(pdev, csiphy_dev->irq,
-		cam_csiphy_irq, IRQF_TRIGGER_RISING, "csiphy", csiphy_dev);
-	if (rc < 0) {
-		pr_err("%s: irq request fail\n", __func__);
-		rc = -EBUSY;
-		goto csiphy_no_resource;
-	}
-	msm_camera_enable_irq(csiphy_dev->irq, false);
-	return rc;
-
-csiphy_no_resource:
-	msm_camera_put_reg_base(pdev, csiphy_dev->base, "csiphy", true);
-clk_mem_ovf_err:
-	msm_camera_put_clk_info(csiphy_dev->v4l2_dev_str.pdev,
-		&csiphy_dev->csiphy_clk_info,
-		&csiphy_dev->csiphy_clk,
-		csiphy_dev->num_clk);
 	return rc;
 }
 
 int32_t cam_csiphy_soc_release(struct csiphy_device *csiphy_dev)
 {
-
-	if (!csiphy_dev || !csiphy_dev->ref_count) {
-		pr_err("%s csiphy dev NULL / ref_count ZERO\n", __func__);
+	if (!csiphy_dev) {
+		pr_err("%s:%d csiphy dev NULL\n", __func__, __LINE__);
 		return 0;
 	}
 
-	if (--csiphy_dev->ref_count) {
-		pr_err("%s:%d csiphy refcount = %d\n", __func__,
-			__LINE__, csiphy_dev->ref_count);
-		return 0;
-	}
-
-	cam_csiphy_reset(csiphy_dev);
-
-	msm_camera_enable_irq(csiphy_dev->irq, false);
-
-	msm_camera_clk_enable(&csiphy_dev->v4l2_dev_str.pdev->dev,
-		csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
-		csiphy_dev->num_clk, false);
-
-	msm_camera_enable_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
-		csiphy_dev->csiphy_vreg, csiphy_dev->num_vreg,
-		NULL, 0, &csiphy_dev->csiphy_reg_ptr[0], 0);
-
-	msm_camera_config_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
-		csiphy_dev->csiphy_vreg, csiphy_dev->num_vreg,
-		NULL, 0, &csiphy_dev->csiphy_reg_ptr[0], 0);
-
+	cam_soc_util_release_platform_resource(&csiphy_dev->soc_info);
 
 	return 0;
 }
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
index 27de3fc..94ec79f 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
@@ -60,4 +60,11 @@
  */
 int cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev);
 
+/**
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API disables SOC related parameters
+ */
+int cam_csiphy_disable_hw(struct csiphy_device *csiphy_dev);
+
 #endif /* _CAM_CSIPHY_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index 4fc3aa1..031c340 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -14,6 +14,7 @@
 #include <cam_sensor_cmn_header.h>
 #include "cam_sensor_core.h"
 #include <cam_sensor_util.h>
+#include "cam_soc_util.h"
 
 static int32_t cam_sensor_i2c_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl,
 	void *arg)
@@ -492,7 +493,7 @@
 	query_cap->ois_slot_id =
 		s_ctrl->sensordata->subdev_id[SUB_MODULE_OIS];
 	query_cap->slot_info =
-		s_ctrl->id;
+		s_ctrl->soc_info.index;
 }
 
 static uint16_t cam_sensor_id_by_mask(struct cam_sensor_ctrl_t *s_ctrl,
@@ -603,8 +604,7 @@
 
 		/* Parse and fill vreg params for powerup settings */
 		rc = msm_camera_fill_vreg_params(
-			s_ctrl->sensordata->power_info.cam_vreg,
-			s_ctrl->sensordata->power_info.num_vreg,
+			&s_ctrl->soc_info,
 			s_ctrl->sensordata->power_info.power_setting,
 			s_ctrl->sensordata->power_info.power_setting_size);
 		if (rc < 0) {
@@ -617,8 +617,7 @@
 
 		/* Parse and fill vreg params for powerdown settings*/
 		rc = msm_camera_fill_vreg_params(
-			s_ctrl->sensordata->power_info.cam_vreg,
-			s_ctrl->sensordata->power_info.num_vreg,
+			&s_ctrl->soc_info,
 			s_ctrl->sensordata->power_info.power_down_setting,
 			s_ctrl->sensordata->power_info.power_down_setting_size);
 		if (rc < 0) {
@@ -650,7 +649,8 @@
 		}
 
 		CDBG("%s:%d Probe Succeeded on the slot: %d\n",
-			__func__, __LINE__, s_ctrl->id);
+			__func__, __LINE__,
+			s_ctrl->soc_info.index);
 		rc = cam_sensor_power_down(s_ctrl);
 		if (rc < 0) {
 			pr_err("%s:%d :Error: fail in Sensor Power Down\n",
@@ -844,6 +844,8 @@
 	int rc;
 	struct cam_sensor_power_ctrl_t *power_info;
 	struct cam_camera_slave_info *slave_info;
+	struct cam_hw_soc_info *soc_info =
+		&s_ctrl->soc_info;
 
 	if (!s_ctrl) {
 		pr_err("%s:%d failed: %pK\n",
@@ -861,7 +863,7 @@
 		return -EINVAL;
 	}
 
-	rc = cam_sensor_core_power_up(power_info);
+	rc = cam_sensor_core_power_up(power_info, soc_info);
 	if (rc < 0) {
 		pr_err("%s:%d power up the core is failed:%d\n",
 			__func__, __LINE__, rc);
@@ -884,6 +886,7 @@
 int cam_sensor_power_down(struct cam_sensor_ctrl_t *s_ctrl)
 {
 	struct cam_sensor_power_ctrl_t *power_info;
+	struct cam_hw_soc_info *soc_info;
 	int rc = 0;
 
 	if (!s_ctrl) {
@@ -893,13 +896,14 @@
 	}
 
 	power_info = &s_ctrl->sensordata->power_info;
+	soc_info = &s_ctrl->soc_info;
 
 	if (!power_info) {
 		pr_err("%s:%d failed: power_info %pK\n",
 			__func__, __LINE__, power_info);
 		return -EINVAL;
 	}
-	rc = msm_camera_power_down(power_info);
+	rc = msm_camera_power_down(power_info, soc_info);
 	if (rc < 0) {
 		pr_err("%s:%d power down the core is failed:%d\n",
 			__func__, __LINE__, rc);
@@ -1003,3 +1007,49 @@
 	rc = cam_sensor_apply_settings(s_ctrl, apply->request_id);
 	return rc;
 }
+
+int32_t cam_sensor_flush_request(struct cam_req_mgr_flush_request *flush_req)
+{
+	int32_t rc = 0, i;
+	uint32_t cancel_req_id_found = 0;
+	struct cam_sensor_ctrl_t *s_ctrl = NULL;
+	struct i2c_settings_array *i2c_set = NULL;
+
+	if (!flush_req)
+		return -EINVAL;
+
+	s_ctrl = (struct cam_sensor_ctrl_t *)
+		cam_get_device_priv(flush_req->dev_hdl);
+	if (!s_ctrl) {
+		pr_err("%s: Device data is NULL\n", __func__);
+		return -EINVAL;
+	}
+
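+	/*
+	 * For CANCEL_REQ flush only the matching request id;
+	 * otherwise delete every valid pending per-frame setting.
+	 */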
+	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+		i2c_set = &(s_ctrl->i2c_data.per_frame[i]);
+
+		if ((flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ)
+				&& (i2c_set->request_id != flush_req->req_id))
+			continue;
+
+		if (i2c_set->is_settings_valid == 1) {
+			rc = delete_request(i2c_set);
+			if (rc < 0)
+				pr_err("%s:%d :Error: delete request: %lld rc: %d\n",
+					__func__, __LINE__,
+					i2c_set->request_id, rc);
+
+			if (flush_req->type ==
+				CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+				cancel_req_id_found = 1;
+				break;
+			}
+		}
+	}
+
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
+		!cancel_req_id_found)
+		CDBG("%s:Flush request id:%lld not found in the pending list\n",
+			__func__, flush_req->req_id);
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h
index b23edce..c8158fa 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h
@@ -53,6 +53,13 @@
 int cam_sensor_apply_request(struct cam_req_mgr_apply_request *apply);
 
 /**
+ * @flush: Req mgr structure for flushing request
+ *
+ * This API flushes the request(s) specified in the flush request
+ */
+int cam_sensor_flush_request(struct cam_req_mgr_flush_request *flush);
+
+/**
  * @info: Sub device info to req mgr
  *
  * Publish the subdevice info
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
index 448ce51..c06a1b3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
@@ -166,6 +166,7 @@
 {
 	int32_t rc = 0, i = 0;
 	struct cam_sensor_ctrl_t *s_ctrl = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
 
 	/* Create sensor control structure */
 	s_ctrl = devm_kzalloc(&pdev->dev,
@@ -173,6 +174,9 @@
 	if (!s_ctrl)
 		return -ENOMEM;
 
+	soc_info = &s_ctrl->soc_info;
+	soc_info->pdev = pdev;
+
 	/* Initialize sensor device type */
 	s_ctrl->of_node = pdev->dev.of_node;
 	s_ctrl->is_probe_succeed = 0;
@@ -189,7 +193,7 @@
 	}
 
 	/* Fill platform device id*/
-	pdev->id = s_ctrl->id;
+	pdev->id = soc_info->index;
 
 	s_ctrl->v4l2_dev_str.internal_ops =
 		&cam_sensor_internal_ops;
@@ -230,6 +234,7 @@
 	s_ctrl->bridge_intf.ops.get_dev_info = cam_sensor_publish_dev_info;
 	s_ctrl->bridge_intf.ops.link_setup = cam_sensor_establish_link;
 	s_ctrl->bridge_intf.ops.apply_req = cam_sensor_apply_request;
+	s_ctrl->bridge_intf.ops.flush_req = cam_sensor_flush_request;
 
 	s_ctrl->sensordata->power_info.dev = &pdev->dev;
 	platform_set_drvdata(pdev, s_ctrl);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
index f597c36..ae14c9d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
@@ -30,7 +30,6 @@
 #include <cam_cci_dev.h>
 #include <cam_sensor_cmn_header.h>
 #include <cam_subdev.h>
-#include <cam_sensor_soc_api.h>
 #include <cam_sensor_io.h>
 
 #define NUM_MASTERS 2
@@ -91,6 +90,7 @@
  */
 struct cam_sensor_ctrl_t {
 	struct platform_device *pdev;
+	struct cam_hw_soc_info soc_info;
 	struct mutex cam_sensor_mutex;
 	struct cam_sensor_board_info *sensordata;
 	enum cci_i2c_master_t cci_i2c_master;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
index 8cb1078..78edec1 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
@@ -17,6 +17,7 @@
 #include <cam_sensor_io.h>
 #include <cam_req_mgr_util.h>
 #include "cam_sensor_soc.h"
+#include "cam_soc_util.h"
 
 int32_t cam_sensor_get_sub_module_index(struct device_node *of_node,
 	struct cam_sensor_board_info *s_info)
@@ -31,15 +32,15 @@
 	for (i = 0; i < SUB_MODULE_MAX; i++)
 		sensor_info->subdev_id[i] = -1;
 
-	src_node = of_parse_phandle(of_node, "qcom,actuator-src", 0);
+	src_node = of_parse_phandle(of_node, "actuator-src", 0);
 	if (!src_node) {
 		CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
 	} else {
 		rc = of_property_read_u32(src_node, "cell-index", &val);
-		CDBG("%s qcom,actuator cell index %d, rc %d\n", __func__,
-			val, rc);
+		CDBG("%s:%d actuator cell index %d, rc %d\n", __func__,
+			__LINE__, val, rc);
 		if (rc < 0) {
-			pr_err("%s failed %d\n", __func__, __LINE__);
+			pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
 			of_node_put(src_node);
 			return rc;
 		}
@@ -47,15 +48,15 @@
 		of_node_put(src_node);
 	}
 
-	src_node = of_parse_phandle(of_node, "qcom,ois-src", 0);
+	src_node = of_parse_phandle(of_node, "ois-src", 0);
 	if (!src_node) {
 		CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
 	} else {
 		rc = of_property_read_u32(src_node, "cell-index", &val);
-		CDBG("%s qcom,ois cell index %d, rc %d\n", __func__,
+		CDBG("%s:%d ois cell index %d, rc %d\n", __func__, __LINE__,
 			val, rc);
 		if (rc < 0) {
-			pr_err("%s failed %d\n", __func__, __LINE__);
+			pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
 			of_node_put(src_node);
 			return rc;
 		}
@@ -63,15 +64,15 @@
 		of_node_put(src_node);
 	}
 
-	src_node = of_parse_phandle(of_node, "qcom,eeprom-src", 0);
+	src_node = of_parse_phandle(of_node, "eeprom-src", 0);
 	if (!src_node) {
 		CDBG("%s:%d eeprom src_node NULL\n", __func__, __LINE__);
 	} else {
 		rc = of_property_read_u32(src_node, "cell-index", &val);
-		CDBG("%s qcom,eeprom cell index %d, rc %d\n", __func__,
+		CDBG("%s:%d eeprom cell index %d, rc %d\n", __func__, __LINE__,
 			val, rc);
 		if (rc < 0) {
-			pr_err("%s failed %d\n", __func__, __LINE__);
+			pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
 			of_node_put(src_node);
 			return rc;
 		}
@@ -79,13 +80,13 @@
 		of_node_put(src_node);
 	}
 
-	src_node = of_parse_phandle(of_node, "qcom,led-flash-src", 0);
+	src_node = of_parse_phandle(of_node, "led-flash-src", 0);
 	if (!src_node) {
 		CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
 	} else {
 		rc = of_property_read_u32(src_node, "cell-index", &val);
-		CDBG("%s qcom,led flash cell index %d, rc %d\n", __func__,
-			val, rc);
+		CDBG("%s:%d led flash cell index %d, rc %d\n", __func__,
+			__LINE__, val, rc);
 		if (rc < 0) {
 			pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
 			of_node_put(src_node);
@@ -95,7 +96,7 @@
 		of_node_put(src_node);
 	}
 
-	rc = of_property_read_u32(of_node, "qcom,csiphy-sd-index", &val);
+	rc = of_property_read_u32(of_node, "csiphy-sd-index", &val);
 	if (rc < 0)
 		pr_err("%s:%d :Error: paring the dt node for csiphy rc %d\n",
 			__func__, __LINE__, rc);
@@ -110,27 +111,33 @@
 	int32_t rc = 0;
 	struct cam_sensor_board_info *sensordata = NULL;
 	struct device_node *of_node = s_ctrl->of_node;
-	uint32_t cell_id;
-
+	struct cam_hw_soc_info *soc_info = &s_ctrl->soc_info;
 	s_ctrl->sensordata = kzalloc(sizeof(*sensordata), GFP_KERNEL);
 	if (!s_ctrl->sensordata)
 		return -ENOMEM;
 
 	sensordata = s_ctrl->sensordata;
-	/*
-	 * Read cell index - this cell index will be the camera slot where
-	 * this camera will be mounted
-	 */
-	rc = of_property_read_u32(of_node, "cell-index", &cell_id);
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
 	if (rc < 0) {
-		pr_err("failed: cell-index rc %d", rc);
+		pr_err("%s:%d Failed to read DT properties rc %d",
+			__func__, __LINE__, rc);
 		goto FREE_SENSOR_DATA;
 	}
-	s_ctrl->id = cell_id;
+
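+	/* Read sensor gpios from DT and build the gpio pin table */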
+	rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
+			&sensordata->power_info.gpio_num_info);
+	if (rc < 0) {
+		pr_err("%s:%d Failed to read gpios %d", __func__, __LINE__, rc);
+		goto FREE_SENSOR_DATA;
+	}
+
+	s_ctrl->id = soc_info->index;
 
 	/* Validate cell_id */
-	if (cell_id >= MAX_CAMERAS) {
-		pr_err("failed: invalid cell_id %d", cell_id);
+	if (s_ctrl->id >= MAX_CAMERAS) {
+		pr_err("%s:%d Failed invalid cell_id %d", __func__, __LINE__,
+			s_ctrl->id);
 		rc = -EINVAL;
 		goto FREE_SENSOR_DATA;
 	}
@@ -138,48 +145,33 @@
 	/* Read subdev info */
 	rc = cam_sensor_get_sub_module_index(of_node, sensordata);
 	if (rc < 0) {
-		pr_err("failed");
+		pr_err("%s:%d failed to get sub module index, rc=%d\n",
+			__func__, __LINE__, rc);
 		goto FREE_SENSOR_DATA;
 	}
 
-	/* Read vreg information */
-	rc = cam_sensor_get_dt_vreg_data(of_node,
-		&sensordata->power_info.cam_vreg,
-		&sensordata->power_info.num_vreg);
-	if (rc < 0) {
-		pr_err("failed: cam_sensor_get_dt_vreg_data rc %d", rc);
-		goto FREE_SENSOR_DATA;
-	}
-
-	/* Read gpio information */
-	rc = msm_sensor_driver_get_gpio_data
-		(&(sensordata->power_info.gpio_conf), of_node);
-	if (rc < 0) {
-		pr_err("failed: msm_sensor_driver_get_gpio_data rc %d", rc);
-		goto FREE_VREG_DATA;
-	}
-
 	/* Get CCI master */
-	rc = of_property_read_u32(of_node, "qcom,cci-master",
+	rc = of_property_read_u32(of_node, "cci-master",
 		&s_ctrl->cci_i2c_master);
-	CDBG("qcom,cci-master %d, rc %d", s_ctrl->cci_i2c_master, rc);
+	CDBG("%s:%d cci-master %d, rc %d", __func__, __LINE__,
+		s_ctrl->cci_i2c_master, rc);
 	if (rc < 0) {
 		/* Set default master 0 */
 		s_ctrl->cci_i2c_master = MASTER_0;
 		rc = 0;
 	}
 
-	if (of_property_read_u32(of_node, "qcom,sensor-position-pitch",
+	if (of_property_read_u32(of_node, "sensor-position-pitch",
 		&sensordata->pos_pitch) < 0) {
 		CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
 		sensordata->pos_pitch = 360;
 	}
-	if (of_property_read_u32(of_node, "qcom,sensor-position-roll",
+	if (of_property_read_u32(of_node, "sensor-position-roll",
 		&sensordata->pos_roll) < 0) {
 		CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
 		sensordata->pos_roll = 360;
 	}
-	if (of_property_read_u32(of_node, "qcom,sensor-position-yaw",
+	if (of_property_read_u32(of_node, "sensor-position-yaw",
 		&sensordata->pos_yaw) < 0) {
 		CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
 		sensordata->pos_yaw = 360;
@@ -187,8 +179,6 @@
 
 	return rc;
 
-FREE_VREG_DATA:
-	kfree(sensordata->power_info.cam_vreg);
 FREE_SENSOR_DATA:
 	kfree(sensordata);
 	return rc;
@@ -223,43 +213,42 @@
 
 int32_t cam_sensor_parse_dt(struct cam_sensor_ctrl_t *s_ctrl)
 {
-	int32_t rc = 0;
+	int32_t i, rc = 0;
+	struct cam_hw_soc_info *soc_info = &s_ctrl->soc_info;
 
 	/* Parse dt information and store in sensor control structure */
 	rc = cam_sensor_driver_get_dt_data(s_ctrl);
 	if (rc < 0) {
-		pr_err("failed: rc %d", rc);
+		pr_err("%s:%d Failed to get dt data rc %d", __func__, __LINE__,
+			rc);
 		return rc;
 	}
 
 	/* Initialize mutex */
 	mutex_init(&(s_ctrl->cam_sensor_mutex));
 
-	pr_err("%s: %d\n", __func__, __LINE__);
+	CDBG("%s: %d\n", __func__, __LINE__);
 	/* Initialize default parameters */
+	for (i = 0; i < soc_info->num_clk; i++) {
+		soc_info->clk[i] = devm_clk_get(&soc_info->pdev->dev,
+					soc_info->clk_name[i]);
+		if (IS_ERR_OR_NULL(soc_info->clk[i])) {
+			pr_err("%s:%d get failed for %s\n",
+				__func__, __LINE__, soc_info->clk_name[i]);
+			rc = -ENOENT;
+			return rc;
+		}
+	}
 	rc = msm_sensor_init_default_params(s_ctrl);
 	if (rc < 0) {
-		pr_err("failed: msm_sensor_init_default_params rc %d", rc);
-		goto FREE_DT_DATA;
-	}
-
-	/* Get clocks information */
-	rc = msm_camera_get_clk_info(s_ctrl->pdev,
-		&s_ctrl->sensordata->power_info.clk_info,
-		&s_ctrl->sensordata->power_info.clk_ptr,
-		&s_ctrl->sensordata->power_info.clk_info_size);
-	if (rc < 0) {
-		pr_err("failed: msm_camera_get_clk_info rc %d", rc);
+		pr_err("%s:%d failed: msm_sensor_init_default_params rc %d",
+			__func__, __LINE__, rc);
 		goto FREE_DT_DATA;
 	}
 
 	return rc;
 
 FREE_DT_DATA:
-	kfree(s_ctrl->sensordata->power_info.gpio_conf->gpio_num_info);
-	kfree(s_ctrl->sensordata->power_info.gpio_conf->cam_gpio_req_tbl);
-	kfree(s_ctrl->sensordata->power_info.gpio_conf);
-	kfree(s_ctrl->sensordata->power_info.cam_vreg);
 	kfree(s_ctrl->sensordata);
 	s_ctrl->sensordata = NULL;
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
index 766828e..770391c 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
@@ -3,4 +3,4 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
 
-obj-$(CONFIG_SPECTRA_CAMERA) +=  cam_sensor_util.o cam_sensor_soc_api.o
\ No newline at end of file
+obj-$(CONFIG_SPECTRA_CAMERA) +=  cam_sensor_util.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
index e5e4872..d12ff2b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
@@ -263,14 +263,9 @@
 	uint16_t power_setting_size;
 	struct cam_sensor_power_setting *power_down_setting;
 	uint16_t power_down_setting_size;
-	struct msm_camera_gpio_conf *gpio_conf;
-	struct camera_vreg_t *cam_vreg;
-	int num_vreg;
-	struct clk **clk_ptr;
-	struct msm_cam_clk_info *clk_info;
+	struct msm_camera_gpio_num_info *gpio_num_info;
 	struct msm_pinctrl_info pinctrl_info;
 	uint8_t cam_pinctrl_status;
-	size_t clk_info_size;
 };
 
 struct cam_camera_slave_info {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.c
deleted file mode 100644
index 2eed9ce..0000000
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.c
+++ /dev/null
@@ -1,1331 +0,0 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/msm-bus.h>
-#include "cam_sensor_soc_api.h"
-
-#define NO_SET_RATE -1
-#define INIT_RATE -2
-
-#ifdef CONFIG_CAM_SOC_API_DBG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
-int msm_cam_clk_sel_src(struct device *dev, struct msm_cam_clk_info *clk_info,
-		struct msm_cam_clk_info *clk_src_info, int num_clk)
-{
-	int i;
-	int rc = 0;
-	struct clk *mux_clk = NULL;
-	struct clk *src_clk = NULL;
-
-	for (i = 0; i < num_clk; i++) {
-		if (clk_src_info[i].clk_name) {
-			mux_clk = clk_get(dev, clk_info[i].clk_name);
-			if (IS_ERR(mux_clk)) {
-				pr_err("%s get failed\n",
-					 clk_info[i].clk_name);
-				continue;
-			}
-			src_clk = clk_get(dev, clk_src_info[i].clk_name);
-			if (IS_ERR(src_clk)) {
-				pr_err("%s get failed\n",
-					clk_src_info[i].clk_name);
-				continue;
-			}
-			clk_set_parent(mux_clk, src_clk);
-		}
-	}
-	return rc;
-}
-
-int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
-		struct clk **clk_ptr, int num_clk, int enable)
-{
-	int i;
-	int rc = 0;
-	long clk_rate;
-
-	if (enable) {
-		for (i = 0; i < num_clk; i++) {
-			CDBG("%s enable %s\n", __func__, clk_info[i].clk_name);
-			clk_ptr[i] = clk_get(dev, clk_info[i].clk_name);
-			if (IS_ERR(clk_ptr[i])) {
-				pr_err("%s get failed\n", clk_info[i].clk_name);
-				rc = PTR_ERR(clk_ptr[i]);
-				goto cam_clk_get_err;
-			}
-			if (clk_info[i].clk_rate > 0) {
-				clk_rate = clk_round_rate(clk_ptr[i],
-					clk_info[i].clk_rate);
-				if (clk_rate < 0) {
-					pr_err("%s round failed\n",
-						   clk_info[i].clk_name);
-					goto cam_clk_set_err;
-				}
-				rc = clk_set_rate(clk_ptr[i],
-					clk_rate);
-				if (rc < 0) {
-					pr_err("%s set failed\n",
-						clk_info[i].clk_name);
-					goto cam_clk_set_err;
-				}
-
-			} else if (clk_info[i].clk_rate == INIT_RATE) {
-				clk_rate = clk_get_rate(clk_ptr[i]);
-				if (clk_rate == 0) {
-					clk_rate =
-						  clk_round_rate(clk_ptr[i], 0);
-					if (clk_rate < 0) {
-						pr_err("%s round rate failed\n",
-							  clk_info[i].clk_name);
-						goto cam_clk_set_err;
-					}
-					rc = clk_set_rate(clk_ptr[i],
-								clk_rate);
-					if (rc < 0) {
-						pr_err("%s set rate failed\n",
-							  clk_info[i].clk_name);
-						goto cam_clk_set_err;
-					}
-				}
-			}
-			rc = clk_prepare(clk_ptr[i]);
-			if (rc < 0) {
-				pr_err("%s prepare failed\n",
-					   clk_info[i].clk_name);
-				goto cam_clk_prepare_err;
-			}
-
-			rc = clk_enable(clk_ptr[i]);
-			if (rc < 0) {
-				pr_err("%s enable failed\n",
-					   clk_info[i].clk_name);
-				goto cam_clk_enable_err;
-			}
-			if (clk_info[i].delay > 20)
-				msleep(clk_info[i].delay);
-			else if (clk_info[i].delay)
-				usleep_range(clk_info[i].delay * 1000,
-					(clk_info[i].delay * 1000) + 1000);
-		}
-	} else {
-		for (i = num_clk - 1; i >= 0; i--) {
-			if (clk_ptr[i] != NULL) {
-				CDBG("%s disable %s\n", __func__,
-					clk_info[i].clk_name);
-				clk_disable(clk_ptr[i]);
-				clk_unprepare(clk_ptr[i]);
-				clk_put(clk_ptr[i]);
-			}
-		}
-	}
-
-	return rc;
-
-cam_clk_enable_err:
-	clk_unprepare(clk_ptr[i]);
-cam_clk_prepare_err:
-cam_clk_set_err:
-	clk_put(clk_ptr[i]);
-cam_clk_get_err:
-	for (i--; i >= 0; i--) {
-		if (clk_ptr[i] != NULL) {
-			clk_disable(clk_ptr[i]);
-			clk_unprepare(clk_ptr[i]);
-			clk_put(clk_ptr[i]);
-		}
-	}
-
-	return rc;
-}
-
-int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
-		int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
-		int num_vreg_seq, struct regulator **reg_ptr, int config)
-{
-	int i = 0, j = 0;
-	int rc = 0;
-	struct camera_vreg_t *curr_vreg;
-
-	if (num_vreg_seq > num_vreg) {
-		pr_err("%s:%d vreg sequence invalid\n", __func__, __LINE__);
-		return -EINVAL;
-	}
-	if (!num_vreg_seq)
-		num_vreg_seq = num_vreg;
-
-	if (config) {
-		for (i = 0; i < num_vreg_seq; i++) {
-			if (vreg_seq) {
-				j = vreg_seq[i];
-				if (j >= num_vreg)
-					continue;
-			} else {
-				j = i;
-			}
-			curr_vreg = &cam_vreg[j];
-			reg_ptr[j] = regulator_get(dev,
-				curr_vreg->reg_name);
-			if (IS_ERR(reg_ptr[j])) {
-				pr_err("%s: %s get failed\n",
-					 __func__,
-					 curr_vreg->reg_name);
-				reg_ptr[j] = NULL;
-				goto vreg_get_fail;
-			}
-			if (regulator_count_voltages(reg_ptr[j]) > 0) {
-				rc = regulator_set_voltage(
-					reg_ptr[j],
-					curr_vreg->min_voltage,
-					curr_vreg->max_voltage);
-				if (rc < 0) {
-					pr_err("%s: %s set voltage failed\n",
-						__func__,
-						curr_vreg->reg_name);
-					goto vreg_set_voltage_fail;
-				}
-				if (curr_vreg->op_mode >= 0) {
-					rc = regulator_set_load(
-						reg_ptr[j],
-						curr_vreg->op_mode);
-					if (rc < 0) {
-						pr_err(
-						"%s:%s set optimum mode fail\n",
-						__func__,
-						curr_vreg->reg_name);
-						goto vreg_set_opt_mode_fail;
-					}
-				}
-			}
-		}
-	} else {
-		for (i = num_vreg_seq-1; i >= 0; i--) {
-			if (vreg_seq) {
-				j = vreg_seq[i];
-				if (j >= num_vreg)
-					continue;
-			} else {
-				j = i;
-			}
-			curr_vreg = &cam_vreg[j];
-			if (reg_ptr[j]) {
-				if (regulator_count_voltages(reg_ptr[j]) > 0) {
-					if (curr_vreg->op_mode >= 0) {
-						regulator_set_load(
-							reg_ptr[j], 0);
-					}
-					regulator_set_voltage(
-						reg_ptr[j], 0, curr_vreg->
-						max_voltage);
-				}
-				regulator_put(reg_ptr[j]);
-				reg_ptr[j] = NULL;
-			}
-		}
-	}
-
-	return 0;
-
-vreg_unconfig:
-	if (regulator_count_voltages(reg_ptr[j]) > 0)
-		regulator_set_load(reg_ptr[j], 0);
-
-vreg_set_opt_mode_fail:
-	if (regulator_count_voltages(reg_ptr[j]) > 0)
-		regulator_set_voltage(reg_ptr[j], 0,
-			curr_vreg->max_voltage);
-
-vreg_set_voltage_fail:
-	regulator_put(reg_ptr[j]);
-	reg_ptr[j] = NULL;
-
-vreg_get_fail:
-	for (i--; i >= 0; i--) {
-		if (vreg_seq) {
-			j = vreg_seq[i];
-			if (j >= num_vreg)
-				continue;
-		} else {
-			j = i;
-		}
-		curr_vreg = &cam_vreg[j];
-		goto vreg_unconfig;
-	}
-
-	return -ENODEV;
-}
-
-int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
-		int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
-		int num_vreg_seq, struct regulator **reg_ptr, int enable)
-{
-	int i = 0, j = 0, rc = 0;
-
-	if (num_vreg_seq > num_vreg) {
-		pr_err("%s:%d vreg sequence invalid\n", __func__, __LINE__);
-		return -EINVAL;
-	}
-	if (!num_vreg_seq)
-		num_vreg_seq = num_vreg;
-
-	if (enable) {
-		for (i = 0; i < num_vreg_seq; i++) {
-			if (vreg_seq) {
-				j = vreg_seq[i];
-				if (j >= num_vreg)
-					continue;
-			} else
-				j = i;
-			if (IS_ERR(reg_ptr[j])) {
-				pr_err("%s: %s null regulator\n",
-					__func__, cam_vreg[j].reg_name);
-				goto disable_vreg;
-			}
-			rc = regulator_enable(reg_ptr[j]);
-			if (rc < 0) {
-				pr_err("%s: %s enable failed\n",
-					__func__, cam_vreg[j].reg_name);
-				goto disable_vreg;
-			}
-			if (cam_vreg[j].delay > 20)
-				msleep(cam_vreg[j].delay);
-			else if (cam_vreg[j].delay)
-				usleep_range(cam_vreg[j].delay * 1000,
-					(cam_vreg[j].delay * 1000) + 1000);
-		}
-	} else {
-		for (i = num_vreg_seq-1; i >= 0; i--) {
-			if (vreg_seq) {
-				j = vreg_seq[i];
-				if (j >= num_vreg)
-					continue;
-			} else
-				j = i;
-			regulator_disable(reg_ptr[j]);
-			if (cam_vreg[j].delay > 20)
-				msleep(cam_vreg[j].delay);
-			else if (cam_vreg[j].delay)
-				usleep_range(cam_vreg[j].delay * 1000,
-					(cam_vreg[j].delay * 1000) + 1000);
-		}
-	}
-
-	return rc;
-disable_vreg:
-	for (i--; i >= 0; i--) {
-		if (vreg_seq) {
-			j = vreg_seq[i];
-			if (j >= num_vreg)
-				continue;
-		} else
-			j = i;
-		regulator_disable(reg_ptr[j]);
-		if (cam_vreg[j].delay > 20)
-			msleep(cam_vreg[j].delay);
-		else if (cam_vreg[j].delay)
-			usleep_range(cam_vreg[j].delay * 1000,
-				(cam_vreg[j].delay * 1000) + 1000);
-	}
-
-	return rc;
-}
-
-int msm_camera_set_gpio_table(struct msm_gpio_set_tbl *gpio_tbl,
-	uint8_t gpio_tbl_size, int gpio_en)
-{
-	int rc = 0, i;
-
-	if (gpio_en) {
-		for (i = 0; i < gpio_tbl_size; i++) {
-			gpio_set_value_cansleep(gpio_tbl[i].gpio,
-				gpio_tbl[i].flags);
-			usleep_range(gpio_tbl[i].delay,
-				gpio_tbl[i].delay + 1000);
-		}
-	} else {
-		for (i = gpio_tbl_size - 1; i >= 0; i--) {
-			if (gpio_tbl[i].flags)
-				gpio_set_value_cansleep(gpio_tbl[i].gpio,
-					GPIOF_OUT_INIT_LOW);
-		}
-	}
-
-	return rc;
-}
-
-int msm_camera_config_single_vreg(struct device *dev,
-	struct camera_vreg_t *cam_vreg, struct regulator **reg_ptr, int config)
-{
-	int rc = 0;
-	const char *vreg_name = NULL;
-
-	if (!dev || !cam_vreg || !reg_ptr) {
-		pr_err("%s: get failed NULL parameter\n", __func__);
-		goto vreg_get_fail;
-	}
-	if (cam_vreg->type == VREG_TYPE_CUSTOM) {
-		if (cam_vreg->custom_vreg_name == NULL) {
-			pr_err("%s : can't find sub reg name",
-				__func__);
-			goto vreg_get_fail;
-		}
-		vreg_name = cam_vreg->custom_vreg_name;
-	} else {
-		if (cam_vreg->reg_name == NULL) {
-			pr_err("%s : can't find reg name", __func__);
-			goto vreg_get_fail;
-		}
-		vreg_name = cam_vreg->reg_name;
-	}
-
-	if (config) {
-		CDBG("%s enable %s\n", __func__, vreg_name);
-		*reg_ptr = regulator_get(dev, vreg_name);
-		if (IS_ERR(*reg_ptr)) {
-			pr_err("%s: %s get failed\n", __func__, vreg_name);
-			*reg_ptr = NULL;
-			goto vreg_get_fail;
-		}
-		if (regulator_count_voltages(*reg_ptr) > 0) {
-			CDBG("%s: voltage min=%d, max=%d\n",
-				__func__, cam_vreg->min_voltage,
-				cam_vreg->max_voltage);
-			rc = regulator_set_voltage(
-				*reg_ptr, cam_vreg->min_voltage,
-				cam_vreg->max_voltage);
-			if (rc < 0) {
-				pr_err("%s: %s set voltage failed\n",
-					__func__, vreg_name);
-				goto vreg_set_voltage_fail;
-			}
-			if (cam_vreg->op_mode >= 0) {
-				rc = regulator_set_load(*reg_ptr,
-					cam_vreg->op_mode);
-				if (rc < 0) {
-					pr_err(
-					"%s: %s set optimum mode failed\n",
-					__func__, vreg_name);
-					goto vreg_set_opt_mode_fail;
-				}
-			}
-		}
-		rc = regulator_enable(*reg_ptr);
-		if (rc < 0) {
-			pr_err("%s: %s regulator_enable failed\n", __func__,
-				vreg_name);
-			goto vreg_unconfig;
-		}
-	} else {
-		CDBG("%s disable %s\n", __func__, vreg_name);
-		if (*reg_ptr) {
-			CDBG("%s disable %s\n", __func__, vreg_name);
-			regulator_disable(*reg_ptr);
-			if (regulator_count_voltages(*reg_ptr) > 0) {
-				if (cam_vreg->op_mode >= 0)
-					regulator_set_load(*reg_ptr, 0);
-				regulator_set_voltage(
-					*reg_ptr, 0, cam_vreg->max_voltage);
-			}
-			regulator_put(*reg_ptr);
-			*reg_ptr = NULL;
-		} else {
-			pr_err("%s can't disable %s\n", __func__, vreg_name);
-		}
-	}
-
-	return 0;
-
-vreg_unconfig:
-	if (regulator_count_voltages(*reg_ptr) > 0)
-		regulator_set_load(*reg_ptr, 0);
-
-vreg_set_opt_mode_fail:
-	if (regulator_count_voltages(*reg_ptr) > 0)
-		regulator_set_voltage(*reg_ptr, 0,
-			cam_vreg->max_voltage);
-
-vreg_set_voltage_fail:
-	regulator_put(*reg_ptr);
-	*reg_ptr = NULL;
-
-vreg_get_fail:
-	return -EINVAL;
-}
-
-int msm_camera_request_gpio_table(struct gpio *gpio_tbl, uint8_t size,
-	int gpio_en)
-{
-	int rc = 0, i = 0, err = 0;
-
-	if (!gpio_tbl || !size) {
-		pr_err("%s:%d invalid gpio_tbl %pK / size %d\n", __func__,
-			__LINE__, gpio_tbl, size);
-		return -EINVAL;
-	}
-	for (i = 0; i < size; i++) {
-		CDBG("%s:%d i %d, gpio %d dir %ld\n", __func__, __LINE__, i,
-			gpio_tbl[i].gpio, gpio_tbl[i].flags);
-	}
-	if (gpio_en) {
-		for (i = 0; i < size; i++) {
-			err = gpio_request_one(gpio_tbl[i].gpio,
-				gpio_tbl[i].flags, gpio_tbl[i].label);
-			if (err) {
-				/*
-				 * After GPIO request fails, contine to
-				 * apply new gpios, outout a error message
-				 * for driver bringup debug
-				 */
-				pr_err("%s:%d gpio %d:%s request fails\n",
-					__func__, __LINE__,
-					gpio_tbl[i].gpio, gpio_tbl[i].label);
-			}
-		}
-	} else {
-		gpio_free_array(gpio_tbl, size);
-	}
-
-	return rc;
-}
-
-/* Get all clocks from DT */
-static int msm_camera_get_clk_info_internal(struct device *dev,
-			struct msm_cam_clk_info **clk_info,
-			struct clk ***clk_ptr,
-			size_t *num_clk)
-{
-	int rc = 0;
-	size_t cnt, tmp;
-	uint32_t *rates, i = 0;
-	const char *clk_ctl = NULL;
-	bool clock_cntl_support = false;
-	struct device_node *of_node;
-
-	of_node = dev->of_node;
-
-	cnt = of_property_count_strings(of_node, "clock-names");
-	if (cnt <= 0) {
-		pr_err("err: No clocks found in DT=%zu\n", cnt);
-		return -EINVAL;
-	}
-
-	tmp = of_property_count_u32_elems(of_node, "qcom,clock-rates");
-	if (tmp <= 0) {
-		pr_err("err: No clk rates device tree, count=%zu", tmp);
-		return -EINVAL;
-	}
-
-	if (cnt != tmp) {
-		pr_err("err: clk name/rates mismatch, strings=%zu, rates=%zu\n",
-			cnt, tmp);
-		return -EINVAL;
-	}
-
-	if (of_property_read_bool(of_node, "qcom,clock-cntl-support")) {
-		tmp = of_property_count_strings(of_node,
-				"qcom,clock-control");
-		if (tmp <= 0) {
-			pr_err("err: control strings not found in DT count=%zu",
-				tmp);
-			return -EINVAL;
-		}
-		if (cnt != tmp) {
-			pr_err("err: controls mismatch, strings=%zu, ctl=%zu\n",
-				cnt, tmp);
-			return -EINVAL;
-		}
-		clock_cntl_support = true;
-	}
-
-	*num_clk = cnt;
-
-	*clk_info = devm_kcalloc(dev, cnt,
-				sizeof(struct msm_cam_clk_info), GFP_KERNEL);
-	if (!*clk_info)
-		return -ENOMEM;
-
-	*clk_ptr = devm_kcalloc(dev, cnt, sizeof(struct clk *),
-				GFP_KERNEL);
-	if (!*clk_ptr) {
-		rc = -ENOMEM;
-		goto free_clk_info;
-	}
-
-	rates = devm_kcalloc(dev, cnt, sizeof(long), GFP_KERNEL);
-	if (!rates) {
-		rc = -ENOMEM;
-		goto free_clk_ptr;
-	}
-
-	rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
-		rates, cnt);
-	if (rc < 0) {
-		pr_err("err: failed reading clock rates\n");
-		rc = -EINVAL;
-		goto free_rates;
-	}
-
-	for (i = 0; i < cnt; i++) {
-		rc = of_property_read_string_index(of_node, "clock-names",
-				i, &((*clk_info)[i].clk_name));
-		if (rc < 0) {
-			pr_err("%s reading clock-name failed index %d\n",
-				__func__, i);
-			rc = -EINVAL;
-			goto free_rates;
-		}
-
-		CDBG("dbg: clk-name[%d] = %s\n", i, (*clk_info)[i].clk_name);
-		if (clock_cntl_support) {
-			rc = of_property_read_string_index(of_node,
-				"qcom,clock-control", i, &clk_ctl);
-			if (rc < 0) {
-				pr_err("%s reading clock-control failed index %d\n",
-					__func__, i);
-				rc = -EINVAL;
-				goto free_rates;
-			}
-
-			if (!strcmp(clk_ctl, "NO_SET_RATE")) {
-				(*clk_info)[i].clk_rate = NO_SET_RATE;
-			} else if (!strcmp(clk_ctl, "INIT_RATE")) {
-				(*clk_info)[i].clk_rate = INIT_RATE;
-			} else if (!strcmp(clk_ctl, "SET_RATE")) {
-				(*clk_info)[i].clk_rate = rates[i];
-			} else {
-				pr_err("%s: error: clock control has invalid value\n",
-					 __func__);
-				rc = -EINVAL;
-				goto free_rates;
-			}
-		} else {
-			(*clk_info)[i].clk_rate =
-				(rates[i] == 0) ? (long)-1 : rates[i];
-		}
-
-		CDBG("dbg: clk-rate[%d] = rate: %ld\n",
-			i, (*clk_info)[i].clk_rate);
-
-		(*clk_ptr)[i] =
-			devm_clk_get(dev, (*clk_info)[i].clk_name);
-		if (IS_ERR((*clk_ptr)[i])) {
-			rc = PTR_ERR((*clk_ptr)[i]);
-			goto release_clk;
-		}
-		CDBG("clk ptr[%d] :%pK\n", i, (*clk_ptr)[i]);
-	}
-
-	devm_kfree(dev, rates);
-
-	return rc;
-
-release_clk:
-	for (--i; i >= 0; i--)
-		devm_clk_put(dev, (*clk_ptr)[i]);
-free_rates:
-	devm_kfree(dev, rates);
-free_clk_ptr:
-	devm_kfree(dev, *clk_ptr);
-free_clk_info:
-	devm_kfree(dev, *clk_info);
-	return rc;
-}
-
-/* Get all clocks from DT  for I2C devices */
-int msm_camera_i2c_dev_get_clk_info(struct device *dev,
-			struct msm_cam_clk_info **clk_info,
-			struct clk ***clk_ptr,
-			size_t *num_clk)
-{
-	int rc = 0;
-
-	if (!dev || !clk_info || !clk_ptr || !num_clk)
-		return -EINVAL;
-
-	rc = msm_camera_get_clk_info_internal(dev, clk_info, clk_ptr, num_clk);
-	return rc;
-}
-EXPORT_SYMBOL(msm_camera_i2c_dev_get_clk_info);
-
-/* Get all clocks from DT  for platform devices */
-int msm_camera_get_clk_info(struct platform_device *pdev,
-			struct msm_cam_clk_info **clk_info,
-			struct clk ***clk_ptr,
-			size_t *num_clk)
-{
-	int rc = 0;
-
-	if (!pdev || !&pdev->dev || !clk_info || !clk_ptr || !num_clk)
-		return -EINVAL;
-
-	rc = msm_camera_get_clk_info_internal(&pdev->dev,
-			clk_info, clk_ptr, num_clk);
-	return rc;
-}
-EXPORT_SYMBOL(msm_camera_get_clk_info);
-
-/* Get all clocks and multiple rates from DT */
-int msm_camera_get_clk_info_and_rates(
-			struct platform_device *pdev,
-			struct msm_cam_clk_info **pclk_info,
-			struct clk ***pclks,
-			uint32_t ***pclk_rates,
-			size_t *num_set,
-			size_t *num_clk)
-{
-	int rc = 0, tmp_var, cnt, tmp;
-	uint32_t i = 0, j = 0;
-	struct device_node *of_node;
-	uint32_t **rates;
-	struct clk **clks;
-	struct msm_cam_clk_info *clk_info;
-
-	if (!pdev || !pclk_info || !num_clk
-		|| !pclk_rates || !pclks || !num_set)
-		return -EINVAL;
-
-	of_node = pdev->dev.of_node;
-
-	cnt = of_property_count_strings(of_node, "clock-names");
-	if (cnt <= 0) {
-		pr_err("err: No clocks found in DT=%d\n", cnt);
-		return -EINVAL;
-	}
-
-	tmp = of_property_count_u32_elems(of_node, "qcom,clock-rates");
-	if (tmp <= 0) {
-		pr_err("err: No clk rates device tree, count=%d\n", tmp);
-		return -EINVAL;
-	}
-
-	if ((tmp % cnt) != 0) {
-		pr_err("err: clk name/rates mismatch, strings=%d, rates=%d\n",
-			cnt, tmp);
-		return -EINVAL;
-	}
-
-	*num_clk = cnt;
-	*num_set = (tmp / cnt);
-
-	clk_info = devm_kcalloc(&pdev->dev, cnt,
-				sizeof(struct msm_cam_clk_info), GFP_KERNEL);
-	if (!clk_info)
-		return -ENOMEM;
-
-	clks = devm_kcalloc(&pdev->dev, cnt, sizeof(struct clk *),
-				GFP_KERNEL);
-	if (!clks) {
-		rc = -ENOMEM;
-		goto free_clk_info;
-	}
-
-	rates = devm_kcalloc(&pdev->dev, *num_set,
-		sizeof(uint32_t *), GFP_KERNEL);
-	if (!rates) {
-		rc = -ENOMEM;
-		goto free_clk;
-	}
-
-	for (i = 0; i < *num_set; i++) {
-		rates[i] = devm_kcalloc(&pdev->dev, *num_clk,
-			sizeof(uint32_t), GFP_KERNEL);
-		if (!rates[i]) {
-			rc = -ENOMEM;
-			for (--i; i >= 0; i--)
-				devm_kfree(&pdev->dev, rates[i]);
-			goto free_rate;
-		}
-	}
-
-	tmp_var = 0;
-	for (i = 0; i < *num_set; i++) {
-		for (j = 0; j < *num_clk; j++) {
-			rc = of_property_read_u32_index(of_node,
-				"qcom,clock-rates", tmp_var++, &rates[i][j]);
-			if (rc < 0) {
-				pr_err("err: failed reading clock rates\n");
-				rc = -EINVAL;
-				goto free_rate_array;
-			}
-			CDBG("Clock rate idx %d idx %d value %d\n",
-				i, j, rates[i][j]);
-		}
-	}
-	for (i = 0; i < *num_clk; i++) {
-		rc = of_property_read_string_index(of_node, "clock-names",
-				i, &clk_info[i].clk_name);
-		if (rc < 0) {
-			pr_err("%s reading clock-name failed index %d\n",
-				__func__, i);
-			rc = -EINVAL;
-			goto free_rate_array;
-		}
-
-		CDBG("dbg: clk-name[%d] = %s\n", i, clk_info[i].clk_name);
-
-		clks[i] =
-			devm_clk_get(&pdev->dev, clk_info[i].clk_name);
-		if (IS_ERR(clks[i])) {
-			rc = PTR_ERR(clks[i]);
-			goto release_clk;
-		}
-		CDBG("clk ptr[%d] :%pK\n", i, clks[i]);
-	}
-	*pclk_info = clk_info;
-	*pclks = clks;
-	*pclk_rates = rates;
-
-	return rc;
-
-release_clk:
-	for (--i; i >= 0; i--)
-		devm_clk_put(&pdev->dev, clks[i]);
-free_rate_array:
-	for (i = 0; i < *num_set; i++)
-		devm_kfree(&pdev->dev, rates[i]);
-free_rate:
-	devm_kfree(&pdev->dev, rates);
-free_clk:
-	devm_kfree(&pdev->dev, clks);
-free_clk_info:
-	devm_kfree(&pdev->dev, clk_info);
-	return rc;
-}
-EXPORT_SYMBOL(msm_camera_get_clk_info_and_rates);
-
-/* Enable/Disable all clocks */
-int msm_camera_clk_enable(struct device *dev,
-		struct msm_cam_clk_info *clk_info,
-		struct clk **clk_ptr, int num_clk, int enable)
-{
-	int i;
-	int rc = 0;
-	long clk_rate;
-
-	if (enable) {
-		for (i = 0; i < num_clk; i++) {
-			pr_err("enable %s\n", clk_info[i].clk_name);
-			if (clk_info[i].clk_rate > 0) {
-				clk_rate = clk_round_rate(clk_ptr[i],
-					clk_info[i].clk_rate);
-				if (clk_rate < 0) {
-					pr_err("%s round failed\n",
-						   clk_info[i].clk_name);
-					goto cam_clk_set_err;
-				}
-				rc = clk_set_rate(clk_ptr[i],
-					clk_rate);
-				if (rc < 0) {
-					pr_err("%s set failed\n",
-						clk_info[i].clk_name);
-					goto cam_clk_set_err;
-				}
-
-			} else if (clk_info[i].clk_rate == INIT_RATE) {
-				clk_rate = clk_get_rate(clk_ptr[i]);
-				if (clk_rate == 0) {
-					clk_rate =
-						  clk_round_rate(clk_ptr[i], 0);
-					if (clk_rate < 0) {
-						pr_err("%s round rate failed\n",
-							  clk_info[i].clk_name);
-						goto cam_clk_set_err;
-					}
-					rc = clk_set_rate(clk_ptr[i],
-								clk_rate);
-					if (rc < 0) {
-						pr_err("%s set rate failed\n",
-							  clk_info[i].clk_name);
-						goto cam_clk_set_err;
-					}
-				}
-			}
-			rc = clk_prepare_enable(clk_ptr[i]);
-			if (rc < 0) {
-				pr_err("%s enable failed\n",
-					   clk_info[i].clk_name);
-				goto cam_clk_enable_err;
-			}
-			if (clk_info[i].delay > 20) {
-				msleep(clk_info[i].delay);
-			} else if (clk_info[i].delay) {
-				usleep_range(clk_info[i].delay * 1000,
-					(clk_info[i].delay * 1000) + 1000);
-			}
-		}
-	} else {
-		for (i = num_clk - 1; i >= 0; i--) {
-			if (clk_ptr[i] != NULL) {
-				pr_err("%s disable %s\n", __func__,
-					clk_info[i].clk_name);
-				clk_disable_unprepare(clk_ptr[i]);
-			}
-		}
-	}
-	return rc;
-
-cam_clk_enable_err:
-cam_clk_set_err:
-	for (i--; i >= 0; i--) {
-		if (clk_ptr[i] != NULL)
-			clk_disable_unprepare(clk_ptr[i]);
-	}
-	return rc;
-}
-EXPORT_SYMBOL(msm_camera_clk_enable);
-
-/* Set rate on a specific clock */
-long msm_camera_clk_set_rate(struct device *dev,
-			struct clk *clk,
-			long clk_rate)
-{
-	int rc = 0;
-	long rate = 0;
-
-	if (!dev || !clk || (clk_rate < 0))
-		return -EINVAL;
-
-	CDBG("clk : %pK, enable : %ld\n", clk, clk_rate);
-
-	if (clk_rate > 0) {
-		rate = clk_round_rate(clk, clk_rate);
-		if (rate < 0) {
-			pr_err("round rate failed\n");
-			return -EINVAL;
-		}
-
-		rc = clk_set_rate(clk, rate);
-		if (rc < 0) {
-			pr_err("set rate failed\n");
-			return -EINVAL;
-		}
-	}
-
-	return rate;
-}
-EXPORT_SYMBOL(msm_camera_clk_set_rate);
-
-/* release memory allocated for clocks */
-static int msm_camera_put_clk_info_internal(struct device *dev,
-				struct msm_cam_clk_info **clk_info,
-				struct clk ***clk_ptr, int cnt)
-{
-	int i;
-
-	for (i = cnt - 1; i >= 0; i--) {
-		if (clk_ptr[i] != NULL)
-			devm_clk_put(dev, (*clk_ptr)[i]);
-
-		CDBG("clk ptr[%d] :%pK\n", i, (*clk_ptr)[i]);
-	}
-	devm_kfree(dev, *clk_info);
-	devm_kfree(dev, *clk_ptr);
-	*clk_info = NULL;
-	*clk_ptr = NULL;
-	return 0;
-}
-
-/* release memory allocated for clocks for i2c devices */
-int msm_camera_i2c_dev_put_clk_info(struct device *dev,
-				struct msm_cam_clk_info **clk_info,
-				struct clk ***clk_ptr, int cnt)
-{
-	int rc = 0;
-
-	if (!dev || !clk_info || !clk_ptr)
-		return -EINVAL;
-
-	rc = msm_camera_put_clk_info_internal(dev, clk_info, clk_ptr, cnt);
-	return rc;
-}
-EXPORT_SYMBOL(msm_camera_i2c_dev_put_clk_info);
-
-/* release memory allocated for clocks for platform devices */
-int msm_camera_put_clk_info(struct platform_device *pdev,
-				struct msm_cam_clk_info **clk_info,
-				struct clk ***clk_ptr, int cnt)
-{
-	int rc = 0;
-
-	if (!pdev || !&pdev->dev || !clk_info || !clk_ptr)
-		return -EINVAL;
-
-	rc = msm_camera_put_clk_info_internal(&pdev->dev,
-			clk_info, clk_ptr, cnt);
-	return rc;
-}
-EXPORT_SYMBOL(msm_camera_put_clk_info);
-
-int msm_camera_put_clk_info_and_rates(struct platform_device *pdev,
-		struct msm_cam_clk_info **clk_info,
-		struct clk ***clk_ptr, uint32_t ***clk_rates,
-		size_t set, size_t cnt)
-{
-	int i;
-
-	for (i = set - 1; i >= 0; i--)
-		devm_kfree(&pdev->dev, (*clk_rates)[i]);
-
-	devm_kfree(&pdev->dev, *clk_rates);
-	for (i = cnt - 1; i >= 0; i--) {
-		if (clk_ptr[i] != NULL)
-			devm_clk_put(&pdev->dev, (*clk_ptr)[i]);
-		CDBG("clk ptr[%d] :%pK\n", i, (*clk_ptr)[i]);
-	}
-	devm_kfree(&pdev->dev, *clk_info);
-	devm_kfree(&pdev->dev, *clk_ptr);
-	*clk_info = NULL;
-	*clk_ptr = NULL;
-	*clk_rates = NULL;
-
-	return 0;
-}
-EXPORT_SYMBOL(msm_camera_put_clk_info_and_rates);
-
-/* Get regulators from DT */
-int msm_camera_get_regulator_info(struct platform_device *pdev,
-				struct msm_cam_regulator **vdd_info,
-				int *num_reg)
-{
-	uint32_t cnt;
-	int i, rc;
-	struct device_node *of_node;
-	char prop_name[32];
-	struct msm_cam_regulator *tmp_reg;
-
-	if (!pdev || !vdd_info || !num_reg)
-		return -EINVAL;
-
-	of_node = pdev->dev.of_node;
-
-	if (!of_get_property(of_node, "qcom,vdd-names", NULL)) {
-		pr_err("err: Regulators property not found\n");
-		return -EINVAL;
-	}
-
-	cnt = of_property_count_strings(of_node, "qcom,vdd-names");
-	if (cnt <= 0) {
-		pr_err("err: no regulators found in device tree, count=%d",
-			cnt);
-		return -EINVAL;
-	}
-
-	tmp_reg = devm_kcalloc(&pdev->dev, cnt,
-				sizeof(struct msm_cam_regulator), GFP_KERNEL);
-	if (!tmp_reg)
-		return -ENOMEM;
-
-	for (i = 0; i < cnt; i++) {
-		rc = of_property_read_string_index(of_node,
-			"qcom,vdd-names", i, &tmp_reg[i].name);
-		if (rc < 0) {
-			pr_err("Fail to fetch regulators: %d\n", i);
-			rc = -EINVAL;
-			goto err1;
-		}
-
-		CDBG("regulator-names[%d] = %s\n", i, tmp_reg[i].name);
-
-		snprintf(prop_name, 32, "%s-supply", tmp_reg[i].name);
-
-		if (of_get_property(of_node, prop_name, NULL)) {
-			tmp_reg[i].vdd =
-				devm_regulator_get(&pdev->dev, tmp_reg[i].name);
-			if (IS_ERR(tmp_reg[i].vdd)) {
-				rc = -EINVAL;
-				pr_err("Fail to get regulator :%d\n", i);
-				goto err1;
-			}
-		} else {
-			pr_err("Regulator phandle not found :%s\n",
-				tmp_reg[i].name);
-			rc = -EINVAL;
-			goto err1;
-		}
-		CDBG("vdd ptr[%d] :%pK\n", i, tmp_reg[i].vdd);
-	}
-
-	*num_reg = cnt;
-	*vdd_info = tmp_reg;
-
-	return 0;
-
-err1:
-	for (--i; i >= 0; i--)
-		devm_regulator_put(tmp_reg[i].vdd);
-	devm_kfree(&pdev->dev, tmp_reg);
-	return rc;
-}
-EXPORT_SYMBOL(msm_camera_get_regulator_info);
-
-
-/* Enable/Disable regulators */
-int msm_camera_regulator_enable(struct msm_cam_regulator *vdd_info,
-				int cnt, int enable)
-{
-	int i;
-	int rc;
-	struct msm_cam_regulator *tmp = vdd_info;
-
-	if (!tmp) {
-		pr_err("Invalid params");
-		return -EINVAL;
-	}
-	CDBG("cnt : %d\n", cnt);
-
-	for (i = 0; i < cnt; i++) {
-		if (tmp && !IS_ERR_OR_NULL(tmp->vdd)) {
-			CDBG("name : %s, enable : %d\n", tmp->name, enable);
-			if (enable) {
-				rc = regulator_enable(tmp->vdd);
-				if (rc < 0) {
-					pr_err("regulator enable failed %d\n",
-						i);
-					goto disable_reg;
-				}
-			} else {
-				rc = regulator_disable(tmp->vdd);
-				if (rc < 0)
-					pr_err("regulator disable failed %d\n",
-						i);
-			}
-		}
-		tmp++;
-	}
-
-	return 0;
-disable_reg:
-	for (--i; i > 0; i--) {
-		--tmp;
-		if (!IS_ERR_OR_NULL(tmp->vdd))
-			regulator_disable(tmp->vdd);
-	}
-	return rc;
-}
-EXPORT_SYMBOL(msm_camera_regulator_enable);
-
-/* Put regulators regulators */
-void msm_camera_put_regulators(struct platform_device *pdev,
-	struct msm_cam_regulator **vdd_info, int cnt)
-{
-	int i;
-
-	if (!vdd_info || !*vdd_info) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	for (i = cnt - 1; i >= 0; i--) {
-		if (vdd_info[i] && !IS_ERR_OR_NULL(vdd_info[i]->vdd))
-			devm_regulator_put(vdd_info[i]->vdd);
-			CDBG("vdd ptr[%d] :%pK\n", i, vdd_info[i]->vdd);
-	}
-
-	devm_kfree(&pdev->dev, *vdd_info);
-	*vdd_info = NULL;
-}
-EXPORT_SYMBOL(msm_camera_put_regulators);
-
-struct resource *msm_camera_get_irq(struct platform_device *pdev,
-							char *irq_name)
-{
-	if (!pdev || !irq_name) {
-		pr_err("Invalid params\n");
-		return NULL;
-	}
-
-	CDBG("Get irq for %s\n", irq_name);
-	return platform_get_resource_byname(pdev, IORESOURCE_IRQ, irq_name);
-}
-EXPORT_SYMBOL(msm_camera_get_irq);
-
-int msm_camera_register_irq(struct platform_device *pdev,
-			struct resource *irq, irq_handler_t handler,
-			unsigned long irqflags, char *irq_name, void *dev_id)
-{
-	int rc = 0;
-
-	if (!pdev || !irq || !handler || !irq_name || !dev_id) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	rc = devm_request_irq(&pdev->dev, irq->start, handler,
-		irqflags, irq_name, dev_id);
-	if (rc < 0) {
-		pr_err("irq request fail\n");
-		rc = -EINVAL;
-	}
-
-	CDBG("Registered irq for %s[resource - %pK]\n", irq_name, irq);
-
-	return rc;
-}
-EXPORT_SYMBOL(msm_camera_register_irq);
-
-int msm_camera_register_threaded_irq(struct platform_device *pdev,
-			struct resource *irq, irq_handler_t handler_fn,
-			irq_handler_t thread_fn, unsigned long irqflags,
-			const char *irq_name, void *dev_id)
-{
-	int rc = 0;
-
-	if (!pdev || !irq || !irq_name || !dev_id) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	rc = devm_request_threaded_irq(&pdev->dev, irq->start, handler_fn,
-			thread_fn, irqflags, irq_name, dev_id);
-	if (rc < 0) {
-		pr_err("irq request fail\n");
-		rc = -EINVAL;
-	}
-
-	CDBG("Registered irq for %s[resource - %pK]\n", irq_name, irq);
-
-	return rc;
-}
-EXPORT_SYMBOL(msm_camera_register_threaded_irq);
-
-int msm_camera_enable_irq(struct resource *irq, int enable)
-{
-	if (!irq) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	CDBG("irq Enable %d\n", enable);
-	if (enable)
-		enable_irq(irq->start);
-	else
-		disable_irq(irq->start);
-
-	return 0;
-}
-EXPORT_SYMBOL(msm_camera_enable_irq);
-
-int msm_camera_unregister_irq(struct platform_device *pdev,
-	struct resource *irq, void *dev_id)
-{
-
-	if (!pdev || !irq || !dev_id) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	CDBG("Un Registering irq for [resource - %pK]\n", irq);
-	devm_free_irq(&pdev->dev, irq->start, dev_id);
-
-	return 0;
-}
-EXPORT_SYMBOL(msm_camera_unregister_irq);
-
-void __iomem *msm_camera_get_reg_base(struct platform_device *pdev,
-		char *device_name, int reserve_mem)
-{
-	struct resource *mem;
-	void *base;
-
-	if (!pdev || !device_name) {
-		pr_err("Invalid params\n");
-		return NULL;
-	}
-
-	CDBG("device name :%s\n", device_name);
-	mem = platform_get_resource_byname(pdev,
-			IORESOURCE_MEM, device_name);
-	if (!mem) {
-		pr_err("err: mem resource %s not found\n", device_name);
-		return NULL;
-	}
-
-	if (reserve_mem) {
-		CDBG("device:%pK, mem : %pK, size : %d\n",
-			&pdev->dev, mem, (int)resource_size(mem));
-		if (!devm_request_mem_region(&pdev->dev, mem->start,
-			resource_size(mem),
-			device_name)) {
-			pr_err("err: no valid mem region for device:%s\n",
-				device_name);
-			return NULL;
-		}
-	}
-
-	base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!base) {
-		devm_release_mem_region(&pdev->dev, mem->start,
-				resource_size(mem));
-		pr_err("err: ioremap failed: %s\n", device_name);
-		return NULL;
-	}
-
-	CDBG("base : %pK\n", base);
-	return base;
-}
-EXPORT_SYMBOL(msm_camera_get_reg_base);
-
-uint32_t msm_camera_get_res_size(struct platform_device *pdev,
-	char *device_name)
-{
-	struct resource *mem;
-
-	if (!pdev || !device_name) {
-		pr_err("Invalid params\n");
-		return 0;
-	}
-
-	CDBG("device name :%s\n", device_name);
-	mem = platform_get_resource_byname(pdev,
-		IORESOURCE_MEM, device_name);
-	if (!mem) {
-		pr_err("err: mem resource %s not found\n", device_name);
-		return 0;
-	}
-	return resource_size(mem);
-}
-EXPORT_SYMBOL(msm_camera_get_res_size);
-
-
-int msm_camera_put_reg_base(struct platform_device *pdev,
-	void __iomem *base, char *device_name, int reserve_mem)
-{
-	struct resource *mem;
-
-	if (!pdev || !base || !device_name) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	CDBG("device name :%s\n", device_name);
-	mem = platform_get_resource_byname(pdev,
-			IORESOURCE_MEM, device_name);
-	if (!mem) {
-		pr_err("err: mem resource %s not found\n", device_name);
-		return -EINVAL;
-	}
-	CDBG("mem : %pK, size : %d\n", mem, (int)resource_size(mem));
-
-	devm_iounmap(&pdev->dev, base);
-	if (reserve_mem)
-		devm_release_mem_region(&pdev->dev,
-			mem->start, resource_size(mem));
-
-	return 0;
-}
-EXPORT_SYMBOL(msm_camera_put_reg_base);
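For reference, the cam_sensor_soc_api.c helpers removed above were consumed in a
get/enable/put sequence from a driver's probe and teardown paths. A minimal,
illustrative sketch of that sequence, assuming a platform device pdev, using the
deleted exports msm_camera_get_clk_info, msm_camera_clk_enable and
msm_camera_put_clk_info:

	struct msm_cam_clk_info *clk_info;
	struct clk **clk_ptr;
	size_t num_clk;
	int rc;

	/* Parse "clock-names"/"qcom,clock-rates" from DT and fetch the clocks */
	rc = msm_camera_get_clk_info(pdev, &clk_info, &clk_ptr, &num_clk);
	if (rc < 0)
		return rc;

	/* Set rates where requested and prepare_enable each clock in order */
	rc = msm_camera_clk_enable(&pdev->dev, clk_info, clk_ptr, num_clk, 1);
	if (rc < 0)
		goto put_clk;

	/* ... hardware access ... */

	/* Disable in reverse order, then release the DT-managed allocations */
	msm_camera_clk_enable(&pdev->dev, clk_info, clk_ptr, num_clk, 0);
put_clk:
	msm_camera_put_clk_info(pdev, &clk_info, &clk_ptr, num_clk);
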
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.h
deleted file mode 100644
index c316090..0000000
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.h
+++ /dev/null
@@ -1,473 +0,0 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef _CAM_SENSOR_SOC_API_H_
-#define _CAM_SENSOR_SOC_API_H_
-
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/spinlock_types.h>
-#include <linux/mutex.h>
-#include <linux/clk.h>
-#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include "cam_sensor_cmn_header.h"
-
-struct msm_cam_regulator {
-	const char *name;
-	struct regulator *vdd;
-};
-
-struct msm_gpio_set_tbl {
-	unsigned int gpio;
-	unsigned long flags;
-	uint32_t delay;
-};
-
-/**
- * @brief      : Gets clock information from dtsi
- *
- * This function extracts the clocks information for a specific
- * platform device
- *
- * @param pdev   : Platform device to get clocks information
- * @param clk_info   : Pointer to populate clock information array
- * @param clk_ptr   : Pointer to populate clock resource pointers
- * @param num_clk: Pointer to populate the number of clocks
- *                 extracted from dtsi
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_get_clk_info(struct platform_device *pdev,
-			struct msm_cam_clk_info **clk_info,
-			struct clk ***clk_ptr,
-			size_t *num_clk);
-
-/**
- * @brief      : Gets clock information from dtsi
- *
- * This function extracts the clocks information for a specific
- * i2c device
- *
- * @param dev   : i2c device to get clocks information
- * @param clk_info   : Pointer to populate clock information array
- * @param clk_ptr   : Pointer to populate clock resource pointers
- * @param num_clk: Pointer to populate the number of clocks
- *                 extracted from dtsi
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_i2c_dev_get_clk_info(struct device *dev,
-			struct msm_cam_clk_info **clk_info,
-			struct clk ***clk_ptr,
-			size_t *num_clk);
-
-/**
- * @brief      : Gets clock information and rates from dtsi
- *
- * This function extracts the clocks information for a specific
- * platform device
- *
- * @param pdev   : Platform device to get clocks information
- * @param clk_info   : Pointer to populate clock information array
- * @param clk_ptr   : Pointer to populate clock resource pointers
- * @param clk_rates   : Pointer to populate clock rates
- * @param num_set: Pointer to populate the number of sets of rates
- * @param num_clk: Pointer to populate the number of clocks
- *                 extracted from dtsi
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_get_clk_info_and_rates(
-			struct platform_device *pdev,
-			struct msm_cam_clk_info **clk_info,
-			struct clk ***clk_ptr,
-			uint32_t ***clk_rates,
-			size_t *num_set,
-			size_t *num_clk);
-
-/**
- * @brief      : Puts clock information
- *
- * This function releases the memory allocated for the clocks
- *
- * @param pdev   : Pointer to platform device
- * @param clk_info   : Pointer to release the allocated memory
- * @param clk_ptr   : Pointer to release the clock resources
- * @param cnt   : Number of clk resources
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_put_clk_info(struct platform_device *pdev,
-				struct msm_cam_clk_info **clk_info,
-				struct clk ***clk_ptr, int cnt);
-
-/**
- * @brief      : Puts clock information
- *
- * This function releases the memory allocated for the clocks
- *
- * @param dev   : Pointer to i2c device
- * @param clk_info   : Pointer to release the allocated memory
- * @param clk_ptr   : Pointer to release the clock resources
- * @param cnt   : Number of clk resources
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_i2c_dev_put_clk_info(struct device *dev,
-			struct msm_cam_clk_info **clk_info,
-			struct clk ***clk_ptr, int cnt);
-
-/**
- * @brief      : Puts clock information
- *
- * This function releases the memory allocated for the clocks
- *
- * @param pdev   : Pointer to platform device
- * @param clk_info   : Pointer to release the allocated memory
- * @param clk_ptr   : Pointer to release the clock resources
- * @param clk_ptr   : Pointer to release the clock rates
- * @param set   : Number of sets of clock rates
- * @param cnt   : Number of clk resources
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_put_clk_info_and_rates(struct platform_device *pdev,
-		struct msm_cam_clk_info **clk_info,
-		struct clk ***clk_ptr, uint32_t ***clk_rates,
-		size_t set, size_t cnt);
-/**
- * @brief      : Enable clocks
- *
- * This function enables the clocks for a specified device
- *
- * @param dev   : Device to get clocks information
- * @param clk_info   : Pointer to populate clock information
- * @param clk_ptr   : Pointer to populate clock information
- * @param num_clk: Pointer to populate the number of clocks
- *                 extracted from dtsi
- * @param enable   : Flag to specify enable/disable
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_clk_enable(struct device *dev,
-					struct msm_cam_clk_info *clk_info,
-					struct clk **clk_ptr,
-					int num_clk,
-					int enable);
-/**
- * @brief      : Set clock rate
- *
- * This function sets the rate for a specified clock and
- * returns the rounded value
- *
- * @param dev   : Device to get clocks information
- * @param clk   : Pointer to clock to set rate
- * @param clk_rate   : Rate to be set
- *
- * @return Status of operation. Negative in case of error. clk rate otherwise.
- */
-
-long msm_camera_clk_set_rate(struct device *dev,
-				struct clk *clk,
-				long clk_rate);
-/**
- * @brief      : Gets regulator info
- *
- * This function extracts the regulator information for a specific
- * platform device
- *
- * @param pdev   : platform device to get regulator information
- * @param vdd_info: Pointer to populate the regulator names
- * @param num_reg: Pointer to populate the number of regulators
- *                 extracted from dtsi
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_get_regulator_info(struct platform_device *pdev,
-		struct msm_cam_regulator **vdd_info, int *num_reg);
-/**
- * @brief      : Enable/Disable the regultors
- *
- * This function enables/disables the regulators for a specific
- * platform device
- *
- * @param vdd_info: Pointer to list of regulators
- * @param cnt: Number of regulators to enable/disable
- * @param enable: Flags specifies either enable/disable
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_regulator_enable(struct msm_cam_regulator *vdd_info,
-				int cnt, int enable);
-
-/**
- * @brief      : Release the regulators
- *
- * This function releases the regulator resources.
- *
- * @param pdev: Pointer to platform device
- * @param vdd_info: Pointer to list of regulators
- * @param cnt: Number of regulators to release
- */
-
-void msm_camera_put_regulators(struct platform_device *pdev,
-	struct msm_cam_regulator **vdd_info, int cnt);
-/**
- * @brief      : Get the IRQ resource
- *
- * This function gets the irq resource from dtsi for a specific
- * platform device
- *
- * @param pdev   : Platform device to get IRQ
- * @param irq_name: Name of the IRQ resource to get from DTSI
- *
- * @return Pointer to resource if success else null
- */
-
-struct resource *msm_camera_get_irq(struct platform_device *pdev,
-							char *irq_name);
-/**
- * @brief      : Register the IRQ
- *
- * This function registers the irq resource for specified hardware
- *
- * @param pdev    : Platform device to register IRQ resource
- * @param irq	  : IRQ resource
- * @param handler : IRQ handler
- * @param irqflags : IRQ flags
- * @param irq_name: Name of the IRQ
- * @param dev	 : Token of the device
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_register_irq(struct platform_device *pdev,
-						struct resource *irq,
-						irq_handler_t handler,
-						unsigned long irqflags,
-						char *irq_name,
-						void *dev);
-
-/**
- * @brief      : Register the threaded IRQ
- *
- * This function registers the irq resource for specified hardware
- *
- * @param pdev    : Platform device to register IRQ resource
- * @param irq	  : IRQ resource
- * @param handler_fn : IRQ handler function
- * @param thread_fn : thread handler function
- * @param irqflags : IRQ flags
- * @param irq_name: Name of the IRQ
- * @param dev	 : Token of the device
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_register_threaded_irq(struct platform_device *pdev,
-						struct resource *irq,
-						irq_handler_t handler_fn,
-						irq_handler_t thread_fn,
-						unsigned long irqflags,
-						const char *irq_name,
-						void *dev);
-
-/**
- * @brief      : Enable/Disable the IRQ
- *
- * This function enables or disables a specific IRQ
- *
- * @param irq    : IRQ resource
- * @param flag   : flag to enable/disable
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_enable_irq(struct resource *irq, int flag);
-
-/**
- * @brief      : UnRegister the IRQ
- *
- * This function Unregisters/Frees the irq resource
- *
- * @param pdev   : Pointer to platform device
- * @param irq    : IRQ resource
- * @param dev    : Token of the device
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_unregister_irq(struct platform_device *pdev,
-	struct resource *irq, void *dev_id);
-
-/**
- * @brief      : Gets device register base
- *
- * This function extracts the device's register base from the dtsi
- * for the specified platform device
- *
- * @param pdev   : Platform device to get regulator infor
- * @param device_name   : Name of the device to fetch the register base
- * @param reserve_mem   : Flag to decide whether to reserve memory
- * region or not.
- *
- * @return Pointer to resource if success else null
- */
-
-void __iomem *msm_camera_get_reg_base(struct platform_device *pdev,
-		char *device_name, int reserve_mem);
-
-/**
- * @brief      :  Puts device register base
- *
- * This function releases the memory region for the specified
- * resource
- *
- * @param pdev   : Pointer to platform device
- * @param base   : Pointer to base to unmap
- * @param device_name : Device name
- * @param reserve_mem   : Flag to decide whether to release memory
- * region or not.
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_put_reg_base(struct platform_device *pdev, void __iomem *base,
-		char *device_name, int reserve_mem);
-
-/**
- * @brief      : Gets resource size
- *
- * This function returns the size of the resource for the
- * specified platform device
- *
- * @param pdev   : Platform device to get regulator infor
- * @param device_name   : Name of the device to fetch the register base
- *
- * @return size of the resource
- */
-
-uint32_t msm_camera_get_res_size(struct platform_device *pdev,
-	char *device_name);
-
-/**
- * @brief      : Selects clock source
- *
- *
- * @param dev : Token of the device
- * @param clk_info : Clock Info structure
- * @param clk_src_info : Clock Info structure
- * @param num_clk : Number of clocks
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_cam_clk_sel_src(struct device *dev, struct msm_cam_clk_info *clk_info,
-		struct msm_cam_clk_info *clk_src_info, int num_clk);
-
-/**
- * @brief      : Enables the clock
- *
- *
- * @param dev : Token of the device
- * @param clk_info : Clock Info structure
- * @param clk_tr : Pointer to lock strucure
- * @param num_clk : Number of clocks
- * @param enable : Enable/disable the clock
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
-		struct clk **clk_ptr, int num_clk, int enable);
-
-/**
- * @brief      : Configures voltage regulator
- *
- *
- * @param dev : Token of the device
- * @param cam_vreg : Regulator dt structure
- * @param num_vreg : Number of regulators
- * @param vreg_seq : Regulator sequence type
- * @param num_clk : Number of clocks
- * @param reg_ptr : Regulator pointer
- * @param config : Enable/disable configuring the regulator
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
-		int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
-		int num_vreg_seq, struct regulator **reg_ptr, int config);
-
-/**
- * @brief      : Enables voltage regulator
- *
- *
- * @param dev : Token of the device
- * @param cam_vreg : Regulator dt structure
- * @param num_vreg : Number of regulators
- * @param vreg_seq : Regulator sequence type
- * @param num_clk : Number of clocks
- * @param reg_ptr : Regulator pointer
- * @param config : Enable/disable configuring the regulator
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
-		int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
-		int num_vreg_seq, struct regulator **reg_ptr, int enable);
-
-/**
- * @brief      : Sets table of GPIOs
- *
- * @param gpio_tbl : GPIO table parsed from dt
- * @param gpio_tbl_size : Size of GPIO table
- * @param gpio_en : Enable/disable the GPIO
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_set_gpio_table(struct msm_gpio_set_tbl *gpio_tbl,
-	uint8_t gpio_tbl_size, int gpio_en);
-
-/**
- * @brief      : Configures single voltage regulator
- *
- *
- * @param dev : Token of the device
- * @param cam_vreg : Regulator dt structure
- * @param num_vreg : Number of regulators
- * @param reg_ptr : Regulator pointer
- * @param config : Enable/disable configuring the regulator
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_config_single_vreg(struct device *dev,
-	struct camera_vreg_t *cam_vreg, struct regulator **reg_ptr, int config);
-
-/**
- * @brief      : Request table of gpios
- *
- *
- * @param gpio_tbl : Table of GPIOs
- * @param size : Size of table
- * @param gpio_en : Enable/disable the gpio
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_request_gpio_table(struct gpio *gpio_tbl, uint8_t size,
-	int gpio_en);
-
-#endif /* _CAM_SENSOR_SOC_API_H_ */
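The cam_sensor_util.c changes below move the sensor power-up bookkeeping from
the per-driver camera_vreg_t array onto struct cam_hw_soc_info. A minimal
sketch of the matching pattern each SENSOR_* case now follows, assuming the
soc_info fields used in the hunks below (num_rgltr, rgltr_name,
rgltr_min_volt, rgltr_max_volt):

	for (j = 0; j < soc_info->num_rgltr; j++) {
		if (strcmp(soc_info->rgltr_name[j], "cam_vdig"))
			continue;
		/* Record which regulator backs this power setting */
		power_setting[i].seq_val = j;
		/* Let a per-setting config_val override the DT voltage range */
		if (VALIDATE_VOLTAGE(soc_info->rgltr_min_volt[j],
			soc_info->rgltr_max_volt[j],
			power_setting[i].config_val)) {
			soc_info->rgltr_min_volt[j] =
			soc_info->rgltr_max_volt[j] =
				power_setting[i].config_val;
		}
		break;
	}
	if (j == soc_info->num_rgltr)
		power_setting[i].seq_val = INVALID_VREG;
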
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index 44294e8..9f16e93 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -12,7 +12,6 @@
 
 #include <linux/kernel.h>
 #include "cam_sensor_util.h"
-#include "cam_sensor_soc_api.h"
 
 #define CAM_SENSOR_PINCTRL_STATE_SLEEP "cam_suspend"
 #define CAM_SENSOR_PINCTRL_STATE_DEFAULT "cam_default"
@@ -324,39 +323,47 @@
 	return rc;
 }
 
-int32_t msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
-	int num_vreg, struct cam_sensor_power_setting *power_setting,
+int32_t msm_camera_fill_vreg_params(
+	struct cam_hw_soc_info *soc_info,
+	struct cam_sensor_power_setting *power_setting,
 	uint16_t power_setting_size)
 {
 	int32_t rc = 0, j = 0, i = 0;
+	uint32_t num_vreg;
 
 	/* Validate input parameters */
-	if (!cam_vreg || !power_setting) {
-		pr_err("%s:%d failed: cam_vreg %pK power_setting %pK", __func__,
-			__LINE__,  cam_vreg, power_setting);
+	if (!soc_info || !power_setting) {
+		pr_err("%s:%d failed: soc_info %pK power_setting %pK", __func__,
+			__LINE__,  soc_info, power_setting);
 		return -EINVAL;
 	}
 
-	/* Validate size of num_vreg */
+	num_vreg = soc_info->num_rgltr;
+
 	if (num_vreg <= 0) {
-		pr_err("failed: num_vreg %d", num_vreg);
+		pr_err("%s:%d failed: num_vreg %d", __func__, __LINE__,
+			num_vreg);
 		return -EINVAL;
 	}
 
+
 	for (i = 0; i < power_setting_size; i++) {
 		switch (power_setting[i].seq_type) {
 		case SENSOR_VDIG:
 			for (j = 0; j < num_vreg; j++) {
-				if (!strcmp(cam_vreg[j].reg_name, "cam_vdig")) {
+				if (!strcmp(soc_info->rgltr_name[j],
+					"cam_vdig")) {
+
 					CDBG("%s:%d i %d j %d cam_vdig\n",
 						__func__, __LINE__, i, j);
 					power_setting[i].seq_val = j;
+
 					if (VALIDATE_VOLTAGE(
-						cam_vreg[j].min_voltage,
-						cam_vreg[j].max_voltage,
+						soc_info->rgltr_min_volt[j],
+						soc_info->rgltr_max_volt[j],
 						power_setting[i].config_val)) {
-						cam_vreg[j].min_voltage =
-						cam_vreg[j].max_voltage =
+						soc_info->rgltr_min_volt[j] =
+						soc_info->rgltr_max_volt[j] =
 						power_setting[i].config_val;
 					}
 					break;
@@ -368,20 +375,24 @@
 
 		case SENSOR_VIO:
 			for (j = 0; j < num_vreg; j++) {
-				if (!strcmp(cam_vreg[j].reg_name, "cam_vio")) {
+
+				if (!strcmp(soc_info->rgltr_name[j],
+					"cam_vio")) {
 					CDBG("%s:%d i %d j %d cam_vio\n",
 						__func__, __LINE__, i, j);
 					power_setting[i].seq_val = j;
+
 					if (VALIDATE_VOLTAGE(
-						cam_vreg[j].min_voltage,
-						cam_vreg[j].max_voltage,
+						soc_info->rgltr_min_volt[j],
+						soc_info->rgltr_max_volt[j],
 						power_setting[i].config_val)) {
-						cam_vreg[j].min_voltage =
-						cam_vreg[j].max_voltage =
+						soc_info->rgltr_min_volt[j] =
+						soc_info->rgltr_max_volt[j] =
 						power_setting[i].config_val;
 					}
 					break;
 				}
+
 			}
 			if (j == num_vreg)
 				power_setting[i].seq_val = INVALID_VREG;
@@ -389,20 +400,24 @@
 
 		case SENSOR_VANA:
 			for (j = 0; j < num_vreg; j++) {
-				if (!strcmp(cam_vreg[j].reg_name, "cam_vana")) {
+
+				if (!strcmp(soc_info->rgltr_name[j],
+					"cam_vana")) {
 					CDBG("%s:%d i %d j %d cam_vana\n",
 						__func__, __LINE__, i, j);
 					power_setting[i].seq_val = j;
+
 					if (VALIDATE_VOLTAGE(
-						cam_vreg[j].min_voltage,
-						cam_vreg[j].max_voltage,
+						soc_info->rgltr_min_volt[j],
+						soc_info->rgltr_max_volt[j],
 						power_setting[i].config_val)) {
-						cam_vreg[j].min_voltage =
-						cam_vreg[j].max_voltage =
+						soc_info->rgltr_min_volt[j] =
+						soc_info->rgltr_max_volt[j] =
 						power_setting[i].config_val;
 					}
 					break;
 				}
+
 			}
 			if (j == num_vreg)
 				power_setting[i].seq_val = INVALID_VREG;
@@ -410,20 +425,25 @@
 
 		case SENSOR_VAF:
 			for (j = 0; j < num_vreg; j++) {
-				if (!strcmp(cam_vreg[j].reg_name, "cam_vaf")) {
+
+				if (!strcmp(soc_info->rgltr_name[j],
+					"cam_vaf")) {
 					CDBG("%s:%d i %d j %d cam_vaf\n",
 						__func__, __LINE__, i, j);
 					power_setting[i].seq_val = j;
+
 					if (VALIDATE_VOLTAGE(
-						cam_vreg[j].min_voltage,
-						cam_vreg[j].max_voltage,
+						soc_info->rgltr_min_volt[j],
+						soc_info->rgltr_max_volt[j],
 						power_setting[i].config_val)) {
-						cam_vreg[j].min_voltage =
-						cam_vreg[j].max_voltage =
+						soc_info->rgltr_min_volt[j] =
+						soc_info->rgltr_max_volt[j] =
 						power_setting[i].config_val;
 					}
+
 					break;
 				}
+
 			}
 			if (j == num_vreg)
 				power_setting[i].seq_val = INVALID_VREG;
@@ -431,38 +451,43 @@
 
 		case SENSOR_CUSTOM_REG1:
 			for (j = 0; j < num_vreg; j++) {
-				if (!strcmp(cam_vreg[j].reg_name,
+
+				if (!strcmp(soc_info->rgltr_name[j],
 					"cam_v_custom1")) {
 					CDBG("%s:%d i %d j %d cam_vcustom1\n",
 						__func__, __LINE__, i, j);
 					power_setting[i].seq_val = j;
+
 					if (VALIDATE_VOLTAGE(
-						cam_vreg[j].min_voltage,
-						cam_vreg[j].max_voltage,
+						soc_info->rgltr_min_volt[j],
+						soc_info->rgltr_max_volt[j],
 						power_setting[i].config_val)) {
-						cam_vreg[j].min_voltage =
-						cam_vreg[j].max_voltage =
+						soc_info->rgltr_min_volt[j] =
+						soc_info->rgltr_max_volt[j] =
 						power_setting[i].config_val;
 					}
 					break;
 				}
+
 			}
 			if (j == num_vreg)
 				power_setting[i].seq_val = INVALID_VREG;
 			break;
 		case SENSOR_CUSTOM_REG2:
 			for (j = 0; j < num_vreg; j++) {
-				if (!strcmp(cam_vreg[j].reg_name,
+
+				if (!strcmp(soc_info->rgltr_name[j],
 					"cam_v_custom2")) {
 					CDBG("%s:%d i %d j %d cam_vcustom2\n",
 						__func__, __LINE__, i, j);
 					power_setting[i].seq_val = j;
+
 					if (VALIDATE_VOLTAGE(
-						cam_vreg[j].min_voltage,
-						cam_vreg[j].max_voltage,
+						soc_info->rgltr_min_volt[j],
+						soc_info->rgltr_max_volt[j],
 						power_setting[i].config_val)) {
-						cam_vreg[j].min_voltage =
-						cam_vreg[j].max_voltage =
+						soc_info->rgltr_min_volt[j] =
+						soc_info->rgltr_max_volt[j] =
 						power_setting[i].config_val;
 					}
 					break;
@@ -483,269 +508,274 @@
 	return rc;
 }
 
-int32_t msm_camera_get_dt_gpio_req_tbl(struct device_node *of_node,
-	struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
-	uint16_t gpio_array_size)
+int cam_sensor_util_request_gpio_table(
+		struct cam_hw_soc_info *soc_info, int gpio_en)
 {
-	int32_t rc = 0, i = 0;
-	uint32_t count = 0, *val_array = NULL;
+	int rc = 0, i = 0;
+	uint8_t size = 0;
+	struct cam_soc_gpio_data *gpio_conf =
+			soc_info->gpio_data;
+	struct gpio *gpio_tbl = gpio_conf->cam_gpio_req_tbl;
 
-	if (!of_get_property(of_node, "qcom,gpio-req-tbl-num", &count))
-		return 0;
+	size = gpio_conf->cam_gpio_req_tbl_size;
 
-	count /= sizeof(uint32_t);
-	if (!count) {
-		pr_err("%s qcom,gpio-req-tbl-num 0\n", __func__);
+	if (gpio_conf->cam_gpio_common_tbl_size <= 0) {
+		pr_info("%s:%d No GPIO entry\n", __func__, __LINE__);
 		return 0;
 	}
 
-	val_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
-	if (!val_array)
-		return -ENOMEM;
-
-	gconf->cam_gpio_req_tbl = kcalloc(count, sizeof(struct gpio),
-		GFP_KERNEL);
-	if (!gconf->cam_gpio_req_tbl) {
-		rc = -ENOMEM;
-		goto free_val_array;
-	}
-	gconf->cam_gpio_req_tbl_size = count;
-
-	rc = of_property_read_u32_array(of_node, "qcom,gpio-req-tbl-num",
-		val_array, count);
-	if (rc < 0) {
-		pr_err("%s failed %d\n", __func__, __LINE__);
-		goto free_gpio_req_tbl;
+	if (!gpio_tbl || !size) {
+		pr_err("%s:%d invalid gpio_tbl %pK / size %d\n", __func__,
+			__LINE__, gpio_tbl, size);
+		return -EINVAL;
 	}
 
-	for (i = 0; i < count; i++) {
-		if (val_array[i] >= gpio_array_size) {
-			pr_err("%s gpio req tbl index %d invalid\n",
-				__func__, val_array[i]);
-			return -EINVAL;
+	for (i = 0; i < size; i++) {
+		CDBG("%s:%d i %d, gpio %d dir %ld\n", __func__, __LINE__, i,
+				gpio_tbl[i].gpio, gpio_tbl[i].flags);
+	}
+
+	if (gpio_en) {
+		for (i = 0; i < size; i++) {
+			rc = gpio_request_one(gpio_tbl[i].gpio,
+					gpio_tbl[i].flags, gpio_tbl[i].label);
+			if (rc) {
+				/*
+				 * After GPIO request fails, continue to
+				 * apply new gpios, output an error message
+				 * for driver bringup debug
+				 */
+				pr_err("%s:%d gpio %d:%s request fails\n",
+					__func__, __LINE__,
+					gpio_tbl[i].gpio, gpio_tbl[i].label);
+			}
 		}
-		gconf->cam_gpio_req_tbl[i].gpio = gpio_array[val_array[i]];
-		CDBG("%s cam_gpio_req_tbl[%d].gpio = %d\n", __func__, i,
-			gconf->cam_gpio_req_tbl[i].gpio);
+	} else {
+		gpio_free_array(gpio_tbl, size);
 	}
 
-	rc = of_property_read_u32_array(of_node, "qcom,gpio-req-tbl-flags",
-		val_array, count);
-	if (rc < 0) {
-		pr_err("%s failed %d\n", __func__, __LINE__);
-		goto free_gpio_req_tbl;
-	}
-
-	for (i = 0; i < count; i++) {
-		gconf->cam_gpio_req_tbl[i].flags = val_array[i];
-		CDBG("%s cam_gpio_req_tbl[%d].flags = %ld\n", __func__, i,
-			gconf->cam_gpio_req_tbl[i].flags);
-	}
-
-	for (i = 0; i < count; i++) {
-		rc = of_property_read_string_index(of_node,
-			"qcom,gpio-req-tbl-label", i,
-			&gconf->cam_gpio_req_tbl[i].label);
-		CDBG("%s cam_gpio_req_tbl[%d].label = %s\n", __func__, i,
-			gconf->cam_gpio_req_tbl[i].label);
-		if (rc < 0) {
-			pr_err("%s failed %d\n", __func__, __LINE__);
-			goto free_gpio_req_tbl;
-		}
-	}
-
-	kfree(val_array);
-
-	return rc;
-
-free_gpio_req_tbl:
-	kfree(gconf->cam_gpio_req_tbl);
-free_val_array:
-	kfree(val_array);
-	gconf->cam_gpio_req_tbl_size = 0;
-
 	return rc;
 }
 
-int msm_camera_init_gpio_pin_tbl(struct device_node *of_node,
-	struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
-	uint16_t gpio_array_size)
+
+int cam_sensor_util_init_gpio_pin_tbl(
+	struct cam_hw_soc_info *soc_info,
+	struct msm_camera_gpio_num_info **pgpio_num_info)
 {
 	int rc = 0, val = 0;
+	uint32_t gpio_array_size;
+	struct platform_device *pdev = NULL;
+	struct device_node *of_node = NULL;
+	struct cam_soc_gpio_data *gconf = NULL;
+	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
 
-	gconf->gpio_num_info = kzalloc(sizeof(struct msm_camera_gpio_num_info),
+	pdev = soc_info->pdev;
+	of_node = pdev->dev.of_node;
+
+	gconf = soc_info->gpio_data;
+	if (!gconf) {
+		pr_err("%s:%d No gpio_common_table is found\n",
+			 __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (!gconf->cam_gpio_common_tbl) {
+		pr_err("%s:%d gpio_common_table is not initialized\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	gpio_array_size = gconf->cam_gpio_common_tbl_size;
+
+	if (!gpio_array_size) {
+		pr_err("%s:%d invalid size of gpio table\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	*pgpio_num_info = kzalloc(sizeof(struct msm_camera_gpio_num_info),
 		GFP_KERNEL);
-	if (!gconf->gpio_num_info)
+	if (!*pgpio_num_info)
 		return -ENOMEM;
+	gpio_num_info =  *pgpio_num_info;
 
-	rc = of_property_read_u32(of_node, "qcom,gpio-vana", &val);
+	rc = of_property_read_u32(of_node, "gpio-vana", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read qcom,gpio-vana failed rc %d\n",
+			pr_err("%s:%d read gpio-vana failed rc %d\n",
 				__func__, __LINE__, rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d qcom,gpio-vana invalid %d\n",
+			pr_err("%s:%d gpio-vana invalid %d\n",
 				__func__, __LINE__, val);
 			rc = -EINVAL;
 			goto free_gpio_info;
 		}
-		gconf->gpio_num_info->gpio_num[SENSOR_VANA] =
-			gpio_array[val];
-		gconf->gpio_num_info->valid[SENSOR_VANA] = 1;
-		CDBG("%s qcom,gpio-vana %d\n", __func__,
-			gconf->gpio_num_info->gpio_num[SENSOR_VANA]);
+		gpio_num_info->gpio_num[SENSOR_VANA] =
+				gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_VANA] = 1;
+
+		CDBG("%s:%d gpio-vana %d\n", __func__, __LINE__,
+			gpio_num_info->gpio_num[SENSOR_VANA]);
 	}
 
-	rc = of_property_read_u32(of_node, "qcom,gpio-vio", &val);
+	rc = of_property_read_u32(of_node, "gpio-vio", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read qcom,gpio-vio failed rc %d\n",
+			pr_err("%s:%d read gpio-vio failed rc %d\n",
 				__func__, __LINE__, rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d qcom,gpio-vio invalid %d\n",
+			pr_err("%s:%d gpio-vio invalid %d\n",
 				__func__, __LINE__, val);
+			rc = -EINVAL;
 			goto free_gpio_info;
 		}
-		gconf->gpio_num_info->gpio_num[SENSOR_VIO] =
-			gpio_array[val];
-		gconf->gpio_num_info->valid[SENSOR_VIO] = 1;
-		CDBG("%s qcom,gpio-vio %d\n", __func__,
-			gconf->gpio_num_info->gpio_num[SENSOR_VIO]);
+		gpio_num_info->gpio_num[SENSOR_VIO] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_VIO] = 1;
+
+		CDBG("%s:%d gpio-vio %d\n", __func__, __LINE__,
+			gpio_num_info->gpio_num[SENSOR_VIO]);
 	}
 
-	rc = of_property_read_u32(of_node, "qcom,gpio-vaf", &val);
+	rc = of_property_read_u32(of_node, "gpio-vaf", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read qcom,gpio-vaf failed rc %d\n",
+			pr_err("%s:%d read gpio-vaf failed rc %d\n",
 				__func__, __LINE__, rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d qcom,gpio-vaf invalid %d\n",
+			pr_err("%s:%d gpio-vaf invalid %d\n",
 				__func__, __LINE__, val);
 			rc = -EINVAL;
 			goto free_gpio_info;
 		}
-		gconf->gpio_num_info->gpio_num[SENSOR_VAF] =
-			gpio_array[val];
-		gconf->gpio_num_info->valid[SENSOR_VAF] = 1;
-		CDBG("%s qcom,gpio-vaf %d\n", __func__,
-			gconf->gpio_num_info->gpio_num[SENSOR_VAF]);
+		gpio_num_info->gpio_num[SENSOR_VAF] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_VAF] = 1;
+
+		CDBG("%s:%d gpio-vaf %d\n", __func__, __LINE__,
+			gpio_num_info->gpio_num[SENSOR_VAF]);
 	}
 
-	rc = of_property_read_u32(of_node, "qcom,gpio-vdig", &val);
+	rc = of_property_read_u32(of_node, "gpio-vdig", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read qcom,gpio-vdig failed rc %d\n",
+			pr_err("%s:%d read gpio-vdig failed rc %d\n",
 				__func__, __LINE__, rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d qcom,gpio-vdig invalid %d\n",
+			pr_err("%s:%d gpio-vdig invalid %d\n",
 				__func__, __LINE__, val);
 			rc = -EINVAL;
 			goto free_gpio_info;
 		}
-		gconf->gpio_num_info->gpio_num[SENSOR_VDIG] =
-			gpio_array[val];
-		gconf->gpio_num_info->valid[SENSOR_VDIG] = 1;
-		CDBG("%s qcom,gpio-vdig %d\n", __func__,
-			gconf->gpio_num_info->gpio_num[SENSOR_VDIG]);
+		gpio_num_info->gpio_num[SENSOR_VDIG] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_VDIG] = 1;
+
+		CDBG("%s:%d gpio-vdig %d\n", __func__, __LINE__,
+			gpio_num_info->gpio_num[SENSOR_VDIG]);
 	}
 
-	rc = of_property_read_u32(of_node, "qcom,gpio-reset", &val);
+	rc = of_property_read_u32(of_node, "gpio-reset", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read qcom,gpio-reset failed rc %d\n",
+			pr_err("%s:%d read gpio-reset failed rc %d\n",
 				__func__, __LINE__, rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d qcom,gpio-reset invalid %d\n",
+			pr_err("%s:%d gpio-reset invalid %d\n",
 				__func__, __LINE__, val);
 			rc = -EINVAL;
 			goto free_gpio_info;
 		}
-		gconf->gpio_num_info->gpio_num[SENSOR_RESET] =
-			gpio_array[val];
-		gconf->gpio_num_info->valid[SENSOR_RESET] = 1;
-		CDBG("%s qcom,gpio-reset %d\n", __func__,
-			gconf->gpio_num_info->gpio_num[SENSOR_RESET]);
+		gpio_num_info->gpio_num[SENSOR_RESET] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_RESET] = 1;
+
+		CDBG("%s:%d gpio-reset %d\n", __func__, __LINE__,
+			gpio_num_info->gpio_num[SENSOR_RESET]);
 	}
 
-	rc = of_property_read_u32(of_node, "qcom,gpio-standby", &val);
+	rc = of_property_read_u32(of_node, "gpio-standby", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read qcom,gpio-standby failed rc %d\n",
+			pr_err("%s:%d read gpio-standby failed rc %d\n",
 				__func__, __LINE__, rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d qcom,gpio-standby invalid %d\n",
+			pr_err("%s:%d gpio-standby invalid %d\n",
 				__func__, __LINE__, val);
 			rc = -EINVAL;
 			goto free_gpio_info;
 		}
-		gconf->gpio_num_info->gpio_num[SENSOR_STANDBY] =
-			gpio_array[val];
-		gconf->gpio_num_info->valid[SENSOR_STANDBY] = 1;
-		CDBG("%s qcom,gpio-standby %d\n", __func__,
-			gconf->gpio_num_info->gpio_num[SENSOR_STANDBY]);
+		gpio_num_info->gpio_num[SENSOR_STANDBY] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_STANDBY] = 1;
+
+		CDBG("%s:%d gpio-standby %d\n", __func__, __LINE__,
+			gpio_num_info->gpio_num[SENSOR_STANDBY]);
 	}
 
-	rc = of_property_read_u32(of_node, "qcom,gpio-af-pwdm", &val);
+	rc = of_property_read_u32(of_node, "gpio-af-pwdm", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read qcom,gpio-af-pwdm failed rc %d\n",
+			pr_err("%s:%d read gpio-af-pwdm failed rc %d\n",
 				__func__, __LINE__, rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d qcom,gpio-af-pwdm invalid %d\n",
+			pr_err("%s:%d gpio-af-pwdm invalid %d\n",
 				__func__, __LINE__, val);
 			rc = -EINVAL;
 			goto free_gpio_info;
 		}
-		gconf->gpio_num_info->gpio_num[SENSOR_VAF_PWDM] =
-			gpio_array[val];
-		gconf->gpio_num_info->valid[SENSOR_VAF_PWDM] = 1;
-		CDBG("%s qcom,gpio-af-pwdm %d\n", __func__,
-			gconf->gpio_num_info->gpio_num[SENSOR_VAF_PWDM]);
+		gpio_num_info->gpio_num[SENSOR_VAF_PWDM] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_VAF_PWDM] = 1;
+
+		CDBG("%s:%d gpio-af-pwdm %d\n", __func__, __LINE__,
+			gpio_num_info->gpio_num[SENSOR_VAF_PWDM]);
 	}
 
-	rc = of_property_read_u32(of_node, "qcom,gpio-custom1", &val);
+	rc = of_property_read_u32(of_node, "gpio-custom1", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read qcom,gpio-custom1 failed rc %d\n",
+			pr_err("%s:%d read gpio-custom1 failed rc %d\n",
 				__func__, __LINE__, rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d qcom,gpio-custom1 invalid %d\n",
+			pr_err("%s:%d gpio-custom1 invalid %d\n",
 				__func__, __LINE__, val);
 			rc = -EINVAL;
 			goto free_gpio_info;
 		}
-		gconf->gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1] =
-			gpio_array[val];
-		gconf->gpio_num_info->valid[SENSOR_CUSTOM_GPIO1] = 1;
-		CDBG("%s qcom,gpio-custom1 %d\n", __func__,
-			gconf->gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1]);
+		gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_CUSTOM_GPIO1] = 1;
+
+		CDBG("%s:%d gpio-custom1 %d\n", __func__, __LINE__,
+			gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1]);
 	}
 
-	rc = of_property_read_u32(of_node, "qcom,gpio-custom2", &val);
+	rc = of_property_read_u32(of_node, "gpio-custom2", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read qcom,gpio-custom2 failed rc %d\n",
+			pr_err("%s:%d read gpio-custom2 failed rc %d\n",
 				__func__, __LINE__, rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d qcom,gpio-custom2 invalid %d\n",
+			pr_err("%s:%d gpio-custom2 invalid %d\n",
 				__func__, __LINE__, val);
 			rc = -EINVAL;
 			goto free_gpio_info;
 		}
-		gconf->gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2] =
-			gpio_array[val];
-		gconf->gpio_num_info->valid[SENSOR_CUSTOM_GPIO2] = 1;
-		CDBG("%s qcom,gpio-custom2 %d\n", __func__,
-			gconf->gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2]);
+		gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_CUSTOM_GPIO2] = 1;
+
+		CDBG("%s:%d gpio-custom2 %d\n", __func__, __LINE__,
+			gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2]);
 	} else {
 		rc = 0;
 	}
@@ -753,142 +783,8 @@
 	return rc;
 
 free_gpio_info:
-	kfree(gconf->gpio_num_info);
-	gconf->gpio_num_info = NULL;
-	return rc;
-}
-
-int cam_sensor_get_dt_vreg_data(struct device_node *of_node,
-	struct camera_vreg_t **cam_vreg, int *num_vreg)
-{
-	int rc = 0, i = 0;
-	int32_t count = 0;
-	uint32_t *vreg_array = NULL;
-	struct camera_vreg_t *vreg = NULL;
-
-	count = of_property_count_strings(of_node, "qcom,cam-vreg-name");
-	CDBG("%s qcom,cam-vreg-name count %d\n", __func__, count);
-
-	if (!count || (count == -EINVAL)) {
-		pr_err("%s:%d number of entries is 0 or not present in dts\n",
-			__func__, __LINE__);
-		*num_vreg = 0;
-		return 0;
-	}
-
-	vreg = kcalloc(count, sizeof(*vreg), GFP_KERNEL);
-	if (!vreg)
-		return -ENOMEM;
-
-	*cam_vreg = vreg;
-	*num_vreg = count;
-	for (i = 0; i < count; i++) {
-		rc = of_property_read_string_index(of_node,
-			"qcom,cam-vreg-name", i,
-			&vreg[i].reg_name);
-		CDBG("%s reg_name[%d] = %s\n", __func__, i,
-			vreg[i].reg_name);
-		if (rc < 0) {
-			pr_err("%s failed %d\n", __func__, __LINE__);
-			goto free_vreg;
-		}
-	}
-
-	vreg_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
-	if (!vreg_array) {
-		rc = -ENOMEM;
-		goto free_vreg;
-	}
-
-	for (i = 0; i < count; i++)
-		vreg[i].type = VREG_TYPE_DEFAULT;
-
-	rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-type",
-		vreg_array, count);
-	if (rc != -EINVAL) {
-		if (rc < 0) {
-			pr_err("%s failed %d\n", __func__, __LINE__);
-			goto free_vreg_array;
-		} else {
-			for (i = 0; i < count; i++) {
-				vreg[i].type = vreg_array[i];
-				CDBG("%s cam_vreg[%d].type = %d\n",
-					__func__, i, vreg[i].type);
-			}
-		}
-	} else {
-		CDBG("%s:%d no qcom,cam-vreg-type entries in dts\n",
-			__func__, __LINE__);
-		rc = 0;
-	}
-
-	rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-min-voltage",
-		vreg_array, count);
-	if (rc != -EINVAL) {
-		if (rc < 0) {
-			pr_err("%s failed %d\n", __func__, __LINE__);
-			goto free_vreg_array;
-		} else {
-			for (i = 0; i < count; i++) {
-				vreg[i].min_voltage = vreg_array[i];
-				CDBG("%s cam_vreg[%d].min_voltage = %d\n",
-					__func__, i, vreg[i].min_voltage);
-			}
-		}
-	} else {
-		CDBG("%s:%d no qcom,cam-vreg-min-voltage entries in dts\n",
-			__func__, __LINE__);
-		rc = 0;
-	}
-
-	rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-max-voltage",
-		vreg_array, count);
-	if (rc != -EINVAL) {
-		if (rc < 0) {
-			pr_err("%s failed %d\n", __func__, __LINE__);
-			goto free_vreg_array;
-		} else {
-			for (i = 0; i < count; i++) {
-				vreg[i].max_voltage = vreg_array[i];
-				CDBG("%s cam_vreg[%d].max_voltage = %d\n",
-					__func__, i, vreg[i].max_voltage);
-			}
-		}
-	} else {
-		CDBG("%s:%d no qcom,cam-vreg-max-voltage entries in dts\n",
-			__func__, __LINE__);
-		rc = 0;
-	}
-
-	rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-op-mode",
-		vreg_array, count);
-	if (rc != -EINVAL) {
-		if (rc < 0) {
-			pr_err("%s failed %d\n", __func__, __LINE__);
-			goto free_vreg_array;
-		} else {
-			for (i = 0; i < count; i++) {
-				vreg[i].op_mode = vreg_array[i];
-				CDBG("%s cam_vreg[%d].op_mode = %d\n",
-					__func__, i, vreg[i].op_mode);
-			}
-		}
-	} else {
-		CDBG("%s:%d no qcom,cam-vreg-op-mode entries in dts\n",
-			__func__, __LINE__);
-		rc = 0;
-	}
-
-	kfree(vreg_array);
-
-	return rc;
-
-free_vreg_array:
-	kfree(vreg_array);
-free_vreg:
-	kfree(vreg);
-	*num_vreg = 0;
-
+	kfree(gpio_num_info);
+	*pgpio_num_info = NULL;
 	return rc;
 }
 
@@ -919,112 +815,55 @@
 	}
 	return 0;
 }
-
 int msm_cam_sensor_handle_reg_gpio(int seq_type,
-	struct msm_camera_gpio_conf *gconf, int val)
+	struct msm_camera_gpio_num_info *gpio_num_info, int val)
 {
-
 	int gpio_offset = -1;
 
-	if (!gconf) {
-		pr_err("ERR:%s: Input Parameters are not proper\n", __func__);
+	if (!gpio_num_info) {
+		pr_err("%s:%d Invalid input parameters\n",
+			__func__, __LINE__);
 		return -EINVAL;
 	}
+
 	CDBG("%s: %d Seq type: %d, config: %d", __func__, __LINE__,
 		seq_type, val);
 
 	gpio_offset = seq_type;
 
-	if ((gconf->gpio_num_info->valid[gpio_offset] == 1)) {
+	if (gpio_num_info->valid[gpio_offset] == 1) {
 		CDBG("%s: %d VALID GPIO offset: %d, seqtype: %d\n",
-			__func__, __LINE__,	gpio_offset, seq_type);
+			__func__, __LINE__, gpio_offset, seq_type);
 		gpio_set_value_cansleep(
-			gconf->gpio_num_info->gpio_num
+			gpio_num_info->gpio_num
 			[gpio_offset], val);
 	}
 
 	return 0;
 }
 
-int32_t msm_sensor_driver_get_gpio_data(
-	struct msm_camera_gpio_conf **gpio_conf,
-	struct device_node *of_node)
-{
-	int32_t                      rc = 0, i = 0;
-	uint16_t                    *gpio_array = NULL;
-	int16_t                     gpio_array_size = 0;
-	struct msm_camera_gpio_conf *gconf = NULL;
-
-	/* Validate input parameters */
-	if (!of_node) {
-		pr_err("failed: invalid param of_node %pK", of_node);
-		return -EINVAL;
-	}
-
-	gpio_array_size = of_gpio_count(of_node);
-	CDBG("gpio count %d\n", gpio_array_size);
-	if (gpio_array_size <= 0)
-		return 0;
-
-	gconf = kzalloc(sizeof(*gconf), GFP_KERNEL);
-	if (!gconf)
-		return -ENOMEM;
-
-	*gpio_conf = gconf;
-
-	gpio_array = kcalloc(gpio_array_size, sizeof(uint16_t), GFP_KERNEL);
-	if (!gpio_array)
-		goto free_gpio_conf;
-
-	for (i = 0; i < gpio_array_size; i++) {
-		gpio_array[i] = of_get_gpio(of_node, i);
-		CDBG("gpio_array[%d] = %d", i, gpio_array[i]);
-	}
-	rc = msm_camera_get_dt_gpio_req_tbl(of_node, gconf, gpio_array,
-		gpio_array_size);
-	if (rc < 0) {
-		pr_err("failed in msm_camera_get_dt_gpio_req_tbl\n");
-		goto free_gpio_array;
-	}
-
-	rc = msm_camera_init_gpio_pin_tbl(of_node, gconf, gpio_array,
-		gpio_array_size);
-	if (rc < 0) {
-		pr_err("failed in msm_camera_init_gpio_pin_tbl\n");
-		goto free_gpio_req_tbl;
-	}
-	kfree(gpio_array);
-
-	return rc;
-
-free_gpio_req_tbl:
-	kfree(gconf->cam_gpio_req_tbl);
-free_gpio_array:
-	kfree(gpio_array);
-free_gpio_conf:
-	kfree(gconf);
-	*gpio_conf = NULL;
-
-	return rc;
-}
-
-int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl)
+int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
+		struct cam_hw_soc_info *soc_info)
 {
 	int rc = 0, index = 0, no_gpio = 0, ret = 0, num_vreg, j = 0;
+	int32_t vreg_idx = -1;
 	struct cam_sensor_power_setting *power_setting = NULL;
-	struct camera_vreg_t *cam_vreg;
+	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
 
 	CDBG("%s:%d\n", __func__, __LINE__);
 	if (!ctrl) {
-		pr_err("failed ctrl %pK\n", ctrl);
+		pr_err("%s:%d Invalid ctrl handle\n", __func__, __LINE__);
 		return -EINVAL;
 	}
 
-	cam_vreg = ctrl->cam_vreg;
-	num_vreg = ctrl->num_vreg;
+	gpio_num_info = ctrl->gpio_num_info;
+	num_vreg = soc_info->num_rgltr;
 
-	if (ctrl->gpio_conf->cam_gpiomux_conf_tbl != NULL)
-		CDBG("%s:%d mux install\n", __func__, __LINE__);
+	if ((num_vreg == 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) {
+		pr_err("%s:%d Regulators are not initialized\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
 
 	ret = msm_camera_pinctrl_init(&(ctrl->pinctrl_info), ctrl->dev);
 	if (ret < 0) {
@@ -1034,11 +873,11 @@
 	} else {
 		ctrl->cam_pinctrl_status = 1;
 	}
-	rc = msm_camera_request_gpio_table(
-		ctrl->gpio_conf->cam_gpio_req_tbl,
-		ctrl->gpio_conf->cam_gpio_req_tbl_size, 1);
+
+	rc = cam_sensor_util_request_gpio_table(soc_info, 1);
 	if (rc < 0)
 		no_gpio = rc;
+
 	if (ctrl->cam_pinctrl_status) {
 		ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
 			ctrl->pinctrl_info.gpio_state_active);
@@ -1048,38 +887,68 @@
 	}
 
 	for (index = 0; index < ctrl->power_setting_size; index++) {
-		CDBG("%s index %d\n", __func__, index);
+		CDBG("%s:%d index %d\n", __func__, __LINE__, index);
 		power_setting = &ctrl->power_setting[index];
 
 		switch (power_setting->seq_type) {
 		case SENSOR_MCLK:
-			if (power_setting->seq_val >= ctrl->clk_info_size) {
-				pr_err("%s:%d :Error: clk index %d >= max %zu\n",
+			if (power_setting->seq_val >= soc_info->num_clk) {
+				pr_err("%s:%d :Error: clk index %d >= max %u\n",
 					__func__, __LINE__,
 					power_setting->seq_val,
-					ctrl->clk_info_size);
+					soc_info->num_clk);
 				goto power_up_failed;
 			}
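+			/*
+			 * The sensor clock is typically gated by a "cam_clk"
+			 * regulator; acquire and enable it before switching
+			 * on the MCLK.
+			 */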
 			for (j = 0; j < num_vreg; j++) {
-				if (!strcmp(cam_vreg[j].reg_name,
+				if (!strcmp(soc_info->rgltr_name[j],
 					"cam_clk")) {
 					CDBG("%s:%d Enable cam_clk: %d\n",
 						__func__, __LINE__, j);
-					msm_camera_config_single_vreg(ctrl->dev,
-						&cam_vreg[j],
-						(struct regulator **)
-						&power_setting->data[0],
-						1);
+
+					soc_info->rgltr[j] =
+					regulator_get(
+						&soc_info->pdev->dev,
+						soc_info->rgltr_name[j]);
+
+					if (IS_ERR_OR_NULL(
+						soc_info->rgltr[j])) {
+						rc = PTR_ERR(
+							soc_info->rgltr[j]);
+						rc = rc ? rc : -EINVAL;
+						pr_err("%s:%d :vreg %s %d\n",
+							__func__, __LINE__,
+							soc_info->rgltr_name[j],
+							rc);
+						soc_info->rgltr[j] = NULL;
+					}
+
+					rc =  cam_soc_util_regulator_enable(
+					soc_info->rgltr[j],
+					soc_info->rgltr_name[j],
+					soc_info->rgltr_min_volt[j],
+					soc_info->rgltr_max_volt[j],
+					soc_info->rgltr_op_mode[j],
+					soc_info->rgltr_delay[j]);
+
+					power_setting->data[0] =
+						soc_info->rgltr[j];
 				}
 			}
 			if (power_setting->config_val)
-				ctrl->clk_info[power_setting->seq_val].
-					clk_rate = power_setting->config_val;
-			rc = msm_camera_clk_enable(ctrl->dev,
-				ctrl->clk_info, ctrl->clk_ptr,
-				ctrl->clk_info_size, true);
+				soc_info->clk_rate[0][power_setting->seq_val] =
+					power_setting->config_val;
+
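+			/* Enable each sensor clock at its configured rate. */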
+			for (j = 0; j < soc_info->num_clk; j++) {
+				rc = cam_soc_util_clk_enable(soc_info->clk[j],
+					soc_info->clk_name[j],
+					soc_info->clk_rate[0][j]);
+				if (rc)
+					break;
+			}
+
 			if (rc < 0) {
-				pr_err("%s: clk enable failed\n", __func__);
+				pr_err("%s:%d clk enable failed\n", __func__,
+					__LINE__);
 				goto power_up_failed;
 			}
 			break;
@@ -1088,27 +957,29 @@
 		case SENSOR_CUSTOM_GPIO1:
 		case SENSOR_CUSTOM_GPIO2:
 			if (no_gpio) {
-				pr_err("%s: request gpio failed\n", __func__);
+				pr_err("%s:%d request gpio failed\n", __func__,
+					__LINE__);
 				return no_gpio;
 			}
 			if (power_setting->seq_val >= CAM_VREG_MAX ||
-				!ctrl->gpio_conf->gpio_num_info) {
-				pr_err("%s gpio index %d >= max %d\n", __func__,
+				!gpio_num_info) {
+				pr_err("%s:%d gpio index %d >= max %d\n",
+					__func__, __LINE__,
 					power_setting->seq_val,
 					CAM_VREG_MAX);
 				goto power_up_failed;
 			}
 			CDBG("%s:%d gpio set val %d\n",
 				__func__, __LINE__,
-				ctrl->gpio_conf->gpio_num_info->gpio_num
+				gpio_num_info->gpio_num
 				[power_setting->seq_val]);
 
 			rc = msm_cam_sensor_handle_reg_gpio(
 				power_setting->seq_type,
-				ctrl->gpio_conf, 1);
+				gpio_num_info, 1);
 			if (rc < 0) {
-				pr_err("ERR:%s Error in handling VREG GPIO\n",
-					__func__);
+				pr_err("%s:%d Error in handling VREG GPIO\n",
+					__func__, __LINE__);
 				goto power_up_failed;
 			}
 			break;
@@ -1123,35 +994,61 @@
 				break;
 
 			if (power_setting->seq_val >= CAM_VREG_MAX) {
-				pr_err("%s vreg index %d >= max %d\n", __func__,
+				pr_err("%s:%d vreg index %d >= max %d\n",
+					__func__, __LINE__,
 					power_setting->seq_val,
 					CAM_VREG_MAX);
 				goto power_up_failed;
 			}
-			if (power_setting->seq_val < ctrl->num_vreg)
-				msm_camera_config_single_vreg(ctrl->dev,
-					&ctrl->cam_vreg
-					[power_setting->seq_val],
-					(struct regulator **)
-					&power_setting->data[0],
-					1);
+			if (power_setting->seq_val < num_vreg) {
+				CDBG("%s:%d Enable Regulator\n",
+					__func__, __LINE__);
+				vreg_idx = power_setting->seq_val;
+
+				soc_info->rgltr[vreg_idx] =
+					regulator_get(&soc_info->pdev->dev,
+						soc_info->rgltr_name[vreg_idx]);
+				if (IS_ERR_OR_NULL(
+					soc_info->rgltr[vreg_idx])) {
+					rc = PTR_ERR(soc_info->rgltr[vreg_idx]);
+					rc = rc ? rc : -EINVAL;
+
+					pr_err("%s:%d, %s get failed %d\n",
+						__func__, __LINE__,
+						soc_info->rgltr_name[vreg_idx],
+						rc);
+
+					soc_info->rgltr[vreg_idx] = NULL;
+				}
+
+				rc =  cam_soc_util_regulator_enable(
+					soc_info->rgltr[vreg_idx],
+					soc_info->rgltr_name[vreg_idx],
+					soc_info->rgltr_min_volt[vreg_idx],
+					soc_info->rgltr_max_volt[vreg_idx],
+					soc_info->rgltr_op_mode[vreg_idx],
+					soc_info->rgltr_delay[vreg_idx]);
+
+				power_setting->data[0] =
+						soc_info->rgltr[vreg_idx];
+			}
 			else
 				pr_err("%s: %d usr_idx:%d dts_idx:%d\n",
 					__func__, __LINE__,
-					power_setting->seq_val, ctrl->num_vreg);
+					power_setting->seq_val, num_vreg);
 
 			rc = msm_cam_sensor_handle_reg_gpio(
 				power_setting->seq_type,
-				ctrl->gpio_conf, 1);
+				gpio_num_info, 1);
 			if (rc < 0) {
-				pr_err("ERR:%s Error in handling VREG GPIO\n",
-					__func__);
+				pr_err("%s:%d Error in handling VREG GPIO\n",
+					__func__, __LINE__);
 				goto power_up_failed;
 			}
 			break;
 		default:
-			pr_err("%s error power seq type %d\n", __func__,
-				power_setting->seq_type);
+			pr_err("%s:%d error power seq type %d\n", __func__,
+				__LINE__, power_setting->seq_type);
 			break;
 		}
 		if (power_setting->delay > 20)
@@ -1165,21 +1062,22 @@
 power_up_failed:
 	pr_err("%s:%d failed\n", __func__, __LINE__);
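+	/* Roll back the settings already applied, in reverse order. */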
 	for (index--; index >= 0; index--) {
-		CDBG("%s index %d\n", __func__, index);
+		CDBG("%s:%d index %d\n", __func__, __LINE__, index);
 		power_setting = &ctrl->power_setting[index];
-		CDBG("%s type %d\n", __func__, power_setting->seq_type);
+		CDBG("%s:%d type %d\n", __func__, __LINE__,
+			power_setting->seq_type);
 		switch (power_setting->seq_type) {
 		case SENSOR_RESET:
 		case SENSOR_STANDBY:
 		case SENSOR_CUSTOM_GPIO1:
 		case SENSOR_CUSTOM_GPIO2:
-			if (!ctrl->gpio_conf->gpio_num_info)
+			if (!gpio_num_info)
 				continue;
-			if (!ctrl->gpio_conf->gpio_num_info->valid
+			if (!gpio_num_info->valid
 				[power_setting->seq_val])
 				continue;
 			gpio_set_value_cansleep(
-				ctrl->gpio_conf->gpio_num_info->gpio_num
+				gpio_num_info->gpio_num
 				[power_setting->seq_val], GPIOF_OUT_INIT_LOW);
 			break;
 		case SENSOR_VANA:
@@ -1189,24 +1087,35 @@
 		case SENSOR_VAF_PWDM:
 		case SENSOR_CUSTOM_REG1:
 		case SENSOR_CUSTOM_REG2:
-			if (power_setting->seq_val < ctrl->num_vreg)
-				msm_camera_config_single_vreg(ctrl->dev,
-					&ctrl->cam_vreg
-					[power_setting->seq_val],
-					(struct regulator **)
-					&power_setting->data[0],
-					0);
+			if (power_setting->seq_val < num_vreg) {
+				CDBG("%s:%d Disable Regulator\n",
+					__func__, __LINE__);
+				vreg_idx = power_setting->seq_val;
+
+				rc =  cam_soc_util_regulator_disable(
+					soc_info->rgltr[vreg_idx],
+					soc_info->rgltr_name[vreg_idx],
+					soc_info->rgltr_min_volt[vreg_idx],
+					soc_info->rgltr_max_volt[vreg_idx],
+					soc_info->rgltr_op_mode[vreg_idx],
+					soc_info->rgltr_delay[vreg_idx]);
+
+				power_setting->data[0] =
+						soc_info->rgltr[vreg_idx];
+
+			}
 			else
 				pr_err("%s:%d:seq_val: %d > num_vreg: %d\n",
 					__func__, __LINE__,
-					power_setting->seq_val, ctrl->num_vreg);
+					power_setting->seq_val, num_vreg);
 
 			msm_cam_sensor_handle_reg_gpio(power_setting->seq_type,
-				ctrl->gpio_conf, GPIOF_OUT_INIT_LOW);
+				gpio_num_info, GPIOF_OUT_INIT_LOW);
+
 			break;
 		default:
-			pr_err("%s error power seq type %d\n", __func__,
-				power_setting->seq_type);
+			pr_err("%s:%d error power seq type %d\n", __func__,
+				__LINE__, power_setting->seq_type);
 			break;
 		}
 		if (power_setting->delay > 20) {
@@ -1225,9 +1134,8 @@
 		devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
 	}
 	ctrl->cam_pinctrl_status = 0;
-	msm_camera_request_gpio_table(
-		ctrl->gpio_conf->cam_gpio_req_tbl,
-		ctrl->gpio_conf->cam_gpio_req_tbl_size, 0);
+
+	cam_sensor_util_request_gpio_table(soc_info, 0);
 
 	return rc;
 }
@@ -1254,19 +1162,18 @@
 }
 
 static int cam_config_mclk_reg(struct cam_sensor_power_ctrl_t *ctrl,
-	int32_t index)
+	struct cam_hw_soc_info *soc_info, int32_t index)
 {
-	struct camera_vreg_t *cam_vreg;
 	int32_t num_vreg = 0, j = 0, rc = 0, idx = 0;
 	struct cam_sensor_power_setting *ps = NULL;
 	struct cam_sensor_power_setting *pd = NULL;
 
-	cam_vreg = ctrl->cam_vreg;
-	num_vreg = ctrl->num_vreg;
+	num_vreg = soc_info->num_rgltr;
+
 	pd = &ctrl->power_down_setting[index];
 
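+	/*
+	 * Find the power-up setting that holds the "cam_clk" regulator
+	 * reference and disable that regulator for this power-down entry.
+	 */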
 	for (j = 0; j < num_vreg; j++) {
-		if (!strcmp(cam_vreg[j].reg_name, "cam_clk")) {
+		if (!strcmp(soc_info->rgltr_name[j], "cam_clk")) {
 
 			ps = NULL;
 			for (idx = 0; idx <
@@ -1278,62 +1185,77 @@
 				}
 			}
 
-			if (ps != NULL)
-				msm_camera_config_single_vreg(
-					ctrl->dev,
-					&cam_vreg[j],
-					(struct regulator **)
-					&ps->data[0], 0);
+			if (ps != NULL) {
+				CDBG("%s:%d Disable Regulator\n",
+					__func__, __LINE__);
+
+				rc = cam_soc_util_regulator_disable(
+					soc_info->rgltr[j],
+					soc_info->rgltr_name[j],
+					soc_info->rgltr_min_volt[j],
+					soc_info->rgltr_max_volt[j],
+					soc_info->rgltr_op_mode[j],
+					soc_info->rgltr_delay[j]);
+
+				ps->data[0] =
+					soc_info->rgltr[j];
+			}
 		}
 	}
 
 	return rc;
 }
 
-int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl)
+int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl,
+		struct cam_hw_soc_info *soc_info)
 {
-	int index = 0, ret = 0, num_vreg = 0;
+	int index = 0, ret = 0, num_vreg = 0, i;
 	struct cam_sensor_power_setting *pd = NULL;
 	struct cam_sensor_power_setting *ps;
-	struct camera_vreg_t *cam_vreg;
+	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
 
 	CDBG("%s:%d\n", __func__, __LINE__);
-	if (!ctrl) {
-		pr_err("failed ctrl %pK\n", ctrl);
+	if (!ctrl || !soc_info) {
+		pr_err("%s:%d failed ctrl %pK\n", __func__, __LINE__, ctrl);
 		return -EINVAL;
 	}
 
-	cam_vreg = ctrl->cam_vreg;
-	num_vreg = ctrl->num_vreg;
+	gpio_num_info = ctrl->gpio_num_info;
+	num_vreg = soc_info->num_rgltr;
 
 	for (index = 0; index < ctrl->power_down_setting_size; index++) {
-		CDBG("%s index %d\n", __func__, index);
+		CDBG("%s:%d index %d\n", __func__, __LINE__, index);
 		pd = &ctrl->power_down_setting[index];
 		ps = NULL;
-		CDBG("%s type %d\n", __func__, pd->seq_type);
+		CDBG("%s:%d type %d\n", __func__, __LINE__, pd->seq_type);
 		switch (pd->seq_type) {
 		case SENSOR_MCLK:
-			ret = cam_config_mclk_reg(ctrl, index);
+			ret = cam_config_mclk_reg(ctrl, soc_info, index);
 			if (ret < 0) {
 				pr_err("%s:%d :Error: in config clk reg\n",
 					__func__, __LINE__);
 				return ret;
 			}
-			msm_camera_clk_enable(ctrl->dev,
-				ctrl->clk_info, ctrl->clk_ptr,
-				ctrl->clk_info_size, false);
+			for (i = soc_info->num_clk - 1; i >= 0; i--) {
+				cam_soc_util_clk_disable(soc_info->clk[i],
+					soc_info->clk_name[i]);
+			}
+
 			break;
 		case SENSOR_RESET:
 		case SENSOR_STANDBY:
 		case SENSOR_CUSTOM_GPIO1:
 		case SENSOR_CUSTOM_GPIO2:
-			if (!ctrl->gpio_conf->gpio_num_info->valid
-				[pd->seq_val])
+
+			if (!gpio_num_info->valid[pd->seq_val])
 				continue;
+
 			gpio_set_value_cansleep(
-				ctrl->gpio_conf->gpio_num_info->gpio_num
+				gpio_num_info->gpio_num
 				[pd->seq_val],
 				(int) pd->config_val);
+
 			break;
 		case SENSOR_VANA:
 		case SENSOR_VDIG:
@@ -1344,33 +1266,43 @@
 		case SENSOR_CUSTOM_REG2:
 			if (pd->seq_val == INVALID_VREG)
 				break;
+
 			ps = msm_camera_get_power_settings(
 				ctrl, pd->seq_type,
 				pd->seq_val);
 			if (ps) {
-				if (pd->seq_val < ctrl->num_vreg)
-					msm_camera_config_single_vreg(ctrl->dev,
-						&ctrl->cam_vreg
-						[pd->seq_val],
-						(struct regulator **)
-						&ps->data[0],
-						0);
+				if (pd->seq_val < num_vreg) {
+					CDBG("%s:%d Disable Regulator\n",
+						__func__, __LINE__);
+					ret =  cam_soc_util_regulator_disable(
+					soc_info->rgltr[ps->seq_val],
+					soc_info->rgltr_name[ps->seq_val],
+					soc_info->rgltr_min_volt[ps->seq_val],
+					soc_info->rgltr_max_volt[ps->seq_val],
+					soc_info->rgltr_op_mode[ps->seq_val],
+					soc_info->rgltr_delay[ps->seq_val]);
+
+					ps->data[0] =
+						soc_info->rgltr[ps->seq_val];
+				}
 				else
 					pr_err("%s:%d:seq_val:%d > num_vreg: %d\n",
 						__func__, __LINE__, pd->seq_val,
-						ctrl->num_vreg);
+						num_vreg);
 			} else
-				pr_err("%s error in power up/down seq data\n",
-								__func__);
+				pr_err("%s:%d error in power up/down seq\n",
+					__func__, __LINE__);
+
 			ret = msm_cam_sensor_handle_reg_gpio(pd->seq_type,
-				ctrl->gpio_conf, GPIOF_OUT_INIT_LOW);
+				gpio_num_info, GPIOF_OUT_INIT_LOW);
+
 			if (ret < 0)
-				pr_err("ERR:%s Error while disabling VREG GPIO\n",
-					__func__);
+				pr_err("%s:%d Error disabling VREG GPIO\n",
+					__func__, __LINE__);
 			break;
 		default:
-			pr_err("%s error power seq type %d\n", __func__,
-				pd->seq_type);
+			pr_err("%s:%d error power seq type %d\n", __func__,
+				__LINE__, pd->seq_type);
 			break;
 		}
 		if (pd->delay > 20)
@@ -1390,9 +1322,8 @@
 	}
 
 	ctrl->cam_pinctrl_status = 0;
-	msm_camera_request_gpio_table(
-		ctrl->gpio_conf->cam_gpio_req_tbl,
-		ctrl->gpio_conf->cam_gpio_req_tbl_size, 0);
+
+	cam_sensor_util_request_gpio_table(soc_info, 0);
 
 	return 0;
 }
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
index 7e7fc35..912f06b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
@@ -22,6 +22,7 @@
 #include <cam_req_mgr_util.h>
 #include <cam_req_mgr_interface.h>
 #include <cam_mem_mgr.h>
+#include "cam_soc_util.h"
 
 #define INVALID_VREG 100
 
@@ -29,34 +30,26 @@
 	struct camera_vreg_t *cam_vreg, int num_vreg,
 	struct cam_sensor_power_ctrl_t *power_info);
 
-int msm_camera_get_dt_gpio_req_tbl(struct device_node *of_node,
-	struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
-	uint16_t gpio_array_size);
-
-int msm_camera_init_gpio_pin_tbl(struct device_node *of_node,
-	struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
-	uint16_t gpio_array_size);
-
-int cam_sensor_get_dt_vreg_data(struct device_node *of_node,
-	struct camera_vreg_t **cam_vreg, int *num_vreg);
-
-int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl);
-
-int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl);
-
-int msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
-	int num_vreg, struct cam_sensor_power_setting *power_setting,
-	uint16_t power_setting_size);
-
 int msm_camera_pinctrl_init
 	(struct msm_pinctrl_info *sensor_pctrl, struct device *dev);
 
-int32_t msm_sensor_driver_get_gpio_data(
-	struct msm_camera_gpio_conf **gpio_conf,
-	struct device_node *of_node);
-
 int cam_sensor_i2c_pkt_parser(struct i2c_settings_array *i2c_reg_settings,
 	struct cam_cmd_buf_desc *cmd_desc, int32_t num_cmd_buffers);
 
 int32_t delete_request(struct i2c_settings_array *i2c_array);
+int cam_sensor_util_request_gpio_table(
+	struct cam_hw_soc_info *soc_info, int gpio_en);
+
+int cam_sensor_util_init_gpio_pin_tbl(
+	struct cam_hw_soc_info *soc_info,
+	struct msm_camera_gpio_num_info **pgpio_num_info);
+int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
+		struct cam_hw_soc_info *soc_info);
+
+int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl,
+		struct cam_hw_soc_info *soc_info);
+
+int msm_camera_fill_vreg_params(struct cam_hw_soc_info *soc_info,
+	struct cam_sensor_power_setting *power_setting,
+	uint16_t power_setting_size);
 #endif /* _CAM_SENSOR_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
new file mode 100644
index 0000000..5989f1a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_DEBUG_UTIL_H_
+#define _CAM_DEBUG_UTIL_H_
+
+#define DEFAULT     0xFFFF
+#define CAM_CDM    (1 << 0)
+#define CAM_CORE   (1 << 1)
+#define CAM_CPAS   (1 << 2)
+#define CAM_ISP    (1 << 3)
+#define CAM_CRM    (1 << 4)
+#define CAM_SENSOR (1 << 5)
+#define CAM_SMMU   (1 << 6)
+#define CAM_SYNC   (1 << 7)
+#define CAM_ICP    (1 << 8)
+#define CAM_JPEG   (1 << 9)
+#define CAM_FD     (1 << 10)
+#define CAM_LRME   (1 << 11)
+
+#define GROUP      DEFAULT
+#define TRACE_ON   0
+
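+/*
+ * A message is emitted only when the caller's module bit is set in GROUP.
+ * With TRACE_ON set, output goes to the kernel trace buffer through
+ * trace_printk() instead of the regular printk log.
+ */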
+#define CAM_ERR(__module, fmt, args...)                                      \
+	do { if (GROUP & __module) {                                         \
+		if (TRACE_ON)                                                \
+			trace_printk(fmt, ##args);                           \
+		else                                                         \
+			pr_err(fmt, ##args);                                 \
+	} } while (0)
+
+#define CAM_WARN(__module, fmt, args...)                                     \
+	do { if (GROUP & __module) {                                         \
+		if (TRACE_ON)                                                \
+			trace_printk(fmt, ##args);                           \
+		else                                                         \
+			pr_warn(fmt, ##args);                                \
+	} } while (0)
+
+#define CAM_INFO(__module, fmt, args...)                                     \
+	do { if (GROUP & __module) {                                         \
+		if (TRACE_ON)                                                \
+			trace_printk(fmt, ##args);                           \
+		else                                                         \
+			pr_info(fmt, ##args);                                \
+	} } while (0)
+
+#endif /* _CAM_DEBUG_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index 2dfb90a..0be2aaa 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -14,11 +14,87 @@
 
 #include <linux/of.h>
 #include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
 #include "cam_soc_util.h"
 
 #undef CDBG
 #define CDBG(fmt, args...) pr_debug(fmt, ##args)
 
+int cam_soc_util_get_level_from_string(const char *string,
+	enum cam_vote_level *level)
+{
+	if (!level)
+		return -EINVAL;
+
+	if (!strcmp(string, "suspend")) {
+		*level = CAM_SUSPEND_VOTE;
+	} else if (!strcmp(string, "minsvs")) {
+		*level = CAM_MINSVS_VOTE;
+	} else if (!strcmp(string, "lowsvs")) {
+		*level = CAM_LOWSVS_VOTE;
+	} else if (!strcmp(string, "svs")) {
+		*level = CAM_SVS_VOTE;
+	} else if (!strcmp(string, "svs_l1")) {
+		*level = CAM_SVSL1_VOTE;
+	} else if (!strcmp(string, "nominal")) {
+		*level = CAM_NOMINAL_VOTE;
+	} else if (!strcmp(string, "turbo")) {
+		*level = CAM_TURBO_VOTE;
+	} else {
+		pr_err("Invalid string %s\n", string);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * cam_soc_util_get_clk_level_to_apply()
+ *
+ * @brief:              Get the clock level to apply. If the requested level
+ *                      is not valid, bump the level to next available valid
+ *                      level. If no higher level found, return failure.
+ *
+ * @soc_info:           Device soc struct to be populated
+ * @req_level:          Requested level
+ * @apply_level         Level to apply
+ *
+ * @return:             success or failure
+ */
+static int cam_soc_util_get_clk_level_to_apply(
+	struct cam_hw_soc_info *soc_info, enum cam_vote_level req_level,
+	enum cam_vote_level *apply_level)
+{
+	if (req_level >= CAM_MAX_VOTE) {
+		pr_err("Invalid clock level parameter %d\n", req_level);
+		return -EINVAL;
+	}
+
+	if (soc_info->clk_level_valid[req_level] == true) {
+		*apply_level = req_level;
+	} else {
+		int i;
+
+		for (i = (req_level + 1); i < CAM_MAX_VOTE; i++)
+			if (soc_info->clk_level_valid[i] == true) {
+				*apply_level = i;
+				break;
+			}
+
+		if (i == CAM_MAX_VOTE) {
+			pr_err("No valid clock level found to apply, req=%d\n",
+				req_level);
+			return -EINVAL;
+		}
+	}
+
+	CDBG("Req level %d, Applying %d\n", req_level, *apply_level);
+
+	return 0;
+}
+
 int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info)
 {
 	if (!soc_info) {
@@ -53,7 +129,7 @@
 	return 0;
 }
 
-int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
+int cam_soc_util_set_clk_rate(struct clk *clk, const char *clk_name,
 	int32_t clk_rate)
 {
 	int rc = 0;
@@ -62,14 +138,13 @@
 	if (!clk || !clk_name)
 		return -EINVAL;
 
-	CDBG("enable %s, clk %pK rate %d\n",
-		clk_name, clk, clk_rate);
+	CDBG("set %s, rate %d\n", clk_name, clk_rate);
 	if (clk_rate > 0) {
 		clk_rate_round = clk_round_rate(clk, clk_rate);
 		CDBG("new_rate %ld\n", clk_rate_round);
 		if (clk_rate_round < 0) {
-			pr_err("%s: round failed for clock %s rc = %ld\n",
-				__func__, clk_name, clk_rate_round);
+			pr_err("round failed for clock %s rc = %ld\n",
+				clk_name, clk_rate_round);
 			return clk_rate_round;
 		}
 		rc = clk_set_rate(clk, clk_rate_round);
@@ -93,9 +168,25 @@
 			return rc;
 		}
 	}
+
+	return rc;
+}
+
+int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
+	int32_t clk_rate)
+{
+	int rc = 0;
+
+	if (!clk || !clk_name)
+		return -EINVAL;
+
+	rc = cam_soc_util_set_clk_rate(clk, clk_name, clk_rate);
+	if (rc)
+		return rc;
+
 	rc = clk_prepare_enable(clk);
 	if (rc) {
-		pr_err("enable failed for %s\n", clk_name);
+		pr_err("enable failed for %s: rc(%d)\n", clk_name, rc);
 		return rc;
 	}
 
@@ -119,20 +210,32 @@
  * @brief:              This function enables the default clocks present
  *                      in soc_info
  *
- * @soc_info:           device soc struct to be populated
+ * @soc_info:           Device soc struct to be populated
+ * @clk_level:          Clk level to apply while enabling
  *
  * @return:             success or failure
  */
-static int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info)
+static int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level clk_level)
 {
 	int i, rc = 0;
+	enum cam_vote_level apply_level;
 
-	if (soc_info->num_clk == 0)
+	if ((soc_info->num_clk == 0) ||
+		(soc_info->num_clk >= CAM_SOC_MAX_CLK)) {
+		pr_err("Invalid number of clocks %d\n", soc_info->num_clk);
+		return -EINVAL;
+	}
+
+	rc = cam_soc_util_get_clk_level_to_apply(soc_info, clk_level,
+		&apply_level);
+	if (rc)
 		return rc;
 
 	for (i = 0; i < soc_info->num_clk; i++) {
 		rc = cam_soc_util_clk_enable(soc_info->clk[i],
-			soc_info->clk_name[i], soc_info->clk_rate[i]);
+			soc_info->clk_name[i],
+			soc_info->clk_rate[apply_level][i]);
 		if (rc)
 			goto clk_disable;
 	}
@@ -165,11 +268,9 @@
 	if (soc_info->num_clk == 0)
 		return;
 
-	for (i = soc_info->num_clk - 1; i >= 0; i--) {
-		CDBG("disable %s\n", soc_info->clk_name[i]);
+	for (i = soc_info->num_clk - 1; i >= 0; i--)
 		cam_soc_util_clk_disable(soc_info->clk[i],
 			soc_info->clk_name[i]);
-	}
 }
 
 /**
@@ -186,9 +287,13 @@
 {
 	struct device_node *of_node = NULL;
 	int count;
-	int i, rc;
+	int num_clk_rates, num_clk_levels;
+	int i, j, rc;
+	int32_t num_clk_level_strings;
 	struct platform_device *pdev = NULL;
 	const char *src_clk_str = NULL;
+	const char *clk_cntl_lvl_string = NULL;
+	enum cam_vote_level level;
 
 	if (!soc_info || !soc_info->pdev)
 		return -EINVAL;
@@ -224,31 +329,388 @@
 		}
 	}
 
-	rc = of_property_read_u32_array(of_node, "clock-rates",
-		soc_info->clk_rate, count);
-	if (rc) {
-		pr_err("reading clock-rates failed");
-		return rc;
+	num_clk_rates = of_property_count_u32_elems(of_node, "clock-rates");
+	if (num_clk_rates <= 0) {
+		pr_err("reading clock-rates count failed\n");
+		return -EINVAL;
 	}
 
+	if ((num_clk_rates % soc_info->num_clk) != 0) {
+		pr_err("mismatch clk/rates, No of clocks=%d, No of rates=%d\n",
+			soc_info->num_clk, num_clk_rates);
+		return -EINVAL;
+	}
+
+	num_clk_levels = (num_clk_rates / soc_info->num_clk);
+
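+	/*
+	 * "clock-rates" is a flattened [level][clock] table: each group of
+	 * num_clk entries corresponds to one "clock-cntl-level" string. A
+	 * hypothetical fragment with two clocks and two levels:
+	 *
+	 *   clock-rates = <19200000 400000000  19200000 600000000>;
+	 *   clock-cntl-level = "svs", "turbo";
+	 */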
+	num_clk_level_strings = of_property_count_strings(of_node,
+		"clock-cntl-level");
+	if (num_clk_level_strings != num_clk_levels) {
+		pr_err("Mismatch No of levels=%d, No of level string=%d\n",
+			num_clk_levels, num_clk_level_strings);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_clk_levels; i++) {
+		rc = of_property_read_string_index(of_node,
+			"clock-cntl-level", i, &clk_cntl_lvl_string);
+		if (rc) {
+			pr_err("Error reading clock-cntl-level, rc=%d\n", rc);
+			return rc;
+		}
+
+		rc = cam_soc_util_get_level_from_string(clk_cntl_lvl_string,
+			&level);
+		if (rc)
+			return rc;
+
+		CDBG("[%d] : %s %d\n", i, clk_cntl_lvl_string, level);
+		soc_info->clk_level_valid[level] = true;
+		for (j = 0; j < soc_info->num_clk; j++) {
+			rc = of_property_read_u32_index(of_node, "clock-rates",
+				((i * soc_info->num_clk) + j),
+				&soc_info->clk_rate[level][j]);
+			if (rc) {
+				pr_err("Error reading clock-rates, rc=%d\n",
+					rc);
+				return rc;
+			}
+
+			soc_info->clk_rate[level][j] =
+				(soc_info->clk_rate[level][j] == 0) ?
+				(long)NO_SET_RATE :
+				soc_info->clk_rate[level][j];
+
+			CDBG("soc_info->clk_rate[%d][%d] = %d\n", level, j,
+				soc_info->clk_rate[level][j]);
+		}
+	}
+
+	soc_info->src_clk_idx = -1;
 	rc = of_property_read_string_index(of_node, "src-clock-name", 0,
 		&src_clk_str);
-	if (rc) {
+	if (rc || !src_clk_str) {
 		CDBG("No src_clk_str found\n");
-		soc_info->src_clk_idx = -1;
 		rc = 0;
 		/* Bottom loop is dependent on src_clk_str. So return here */
 		return rc;
 	}
 
 	for (i = 0; i < soc_info->num_clk; i++) {
-		soc_info->clk_rate[i] = (soc_info->clk_rate[i] == 0) ?
-			(long)-1 : soc_info->clk_rate[i];
-		if (src_clk_str &&
-			(strcmp(soc_info->clk_name[i], src_clk_str) == 0)) {
+		if (strcmp(soc_info->clk_name[i], src_clk_str) == 0) {
 			soc_info->src_clk_idx = i;
+			CDBG("src clock = %s, index = %d\n", src_clk_str, i);
+			break;
 		}
-		CDBG("clk_rate[%d] = %d\n", i, soc_info->clk_rate[i]);
+	}
+
+	return rc;
+}
+
+int cam_soc_util_set_clk_rate_level(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level clk_level)
+{
+	int i, rc = 0;
+	enum cam_vote_level apply_level;
+
+	if ((soc_info->num_clk == 0) ||
+		(soc_info->num_clk >= CAM_SOC_MAX_CLK)) {
+		pr_err("Invalid number of clocks %d\n", soc_info->num_clk);
+		return -EINVAL;
+	}
+
+	rc = cam_soc_util_get_clk_level_to_apply(soc_info, clk_level,
+		&apply_level);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < soc_info->num_clk; i++) {
+		rc = cam_soc_util_set_clk_rate(soc_info->clk[i],
+			soc_info->clk_name[i],
+			soc_info->clk_rate[apply_level][i]);
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
+static int cam_soc_util_get_dt_gpio_req_tbl(struct device_node *of_node,
+	struct cam_soc_gpio_data *gconf, uint16_t *gpio_array,
+	uint16_t gpio_array_size)
+{
+	int32_t rc = 0, i = 0;
+	uint32_t count = 0;
+	uint32_t *val_array = NULL;
+
+	if (!of_get_property(of_node, "gpio-req-tbl-num", &count))
+		return 0;
+
+	count /= sizeof(uint32_t);
+	if (!count) {
+		pr_err("gpio-req-tbl-num 0\n");
+		return 0;
+	}
+
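+	/*
+	 * Each gpio-req-tbl-num entry is an index into the node's GPIO list;
+	 * resolve it to a GPIO number and pair it with the matching flags
+	 * and label entries.
+	 */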
+	val_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+	if (!val_array)
+		return -ENOMEM;
+
+	gconf->cam_gpio_req_tbl = kcalloc(count, sizeof(struct gpio),
+		GFP_KERNEL);
+	if (!gconf->cam_gpio_req_tbl) {
+		rc = -ENOMEM;
+		goto free_val_array;
+	}
+	gconf->cam_gpio_req_tbl_size = count;
+
+	rc = of_property_read_u32_array(of_node, "gpio-req-tbl-num",
+		val_array, count);
+	if (rc) {
+		pr_err("failed in reading gpio-req-tbl-num, rc = %d\n", rc);
+		goto free_gpio_req_tbl;
+	}
+
+	for (i = 0; i < count; i++) {
+		if (val_array[i] >= gpio_array_size) {
+			pr_err("gpio req tbl index %d invalid\n", val_array[i]);
+			rc = -EINVAL;
+			goto free_gpio_req_tbl;
+		}
+		gconf->cam_gpio_req_tbl[i].gpio = gpio_array[val_array[i]];
+		CDBG("cam_gpio_req_tbl[%d].gpio = %d\n", i,
+			gconf->cam_gpio_req_tbl[i].gpio);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio-req-tbl-flags",
+		val_array, count);
+	if (rc) {
+		pr_err("Failed in gpio-req-tbl-flags, rc %d\n", rc);
+		goto free_gpio_req_tbl;
+	}
+
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_req_tbl[i].flags = val_array[i];
+		CDBG("cam_gpio_req_tbl[%d].flags = %ld\n", i,
+			gconf->cam_gpio_req_tbl[i].flags);
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node,
+			"gpio-req-tbl-label", i,
+			&gconf->cam_gpio_req_tbl[i].label);
+		if (rc) {
+			pr_err("Failed rc %d\n", rc);
+			goto free_gpio_req_tbl;
+		}
+		CDBG("cam_gpio_req_tbl[%d].label = %s\n", i,
+			gconf->cam_gpio_req_tbl[i].label);
+	}
+
+	kfree(val_array);
+
+	return rc;
+
+free_gpio_req_tbl:
+	kfree(gconf->cam_gpio_req_tbl);
+free_val_array:
+	kfree(val_array);
+	gconf->cam_gpio_req_tbl_size = 0;
+
+	return rc;
+}
+
+static int cam_soc_util_get_gpio_info(struct cam_hw_soc_info *soc_info)
+{
+	int32_t rc = 0, i = 0;
+	uint16_t *gpio_array = NULL;
+	int16_t gpio_array_size = 0;
+	struct cam_soc_gpio_data *gconf = NULL;
+	struct device_node *of_node = NULL;
+	struct platform_device *pdev = NULL;
+
+	if (!soc_info || !soc_info->pdev)
+		return -EINVAL;
+
+	pdev = soc_info->pdev;
+	of_node = pdev->dev.of_node;
+
+	/* Validate input parameters */
+	if (!of_node) {
+		pr_err("Invalid param of_node\n");
+		return -EINVAL;
+	}
+
+	gpio_array_size = of_gpio_count(of_node);
+
+	if (gpio_array_size <= 0)
+		return 0;
+
+	CDBG("gpio count %d\n", gpio_array_size);
+
+	gpio_array = kcalloc(gpio_array_size, sizeof(uint16_t), GFP_KERNEL);
+	if (!gpio_array) {
+		rc = -ENOMEM;
+		goto free_gpio_conf;
+	}
+
+	for (i = 0; i < gpio_array_size; i++) {
+		gpio_array[i] = of_get_gpio(of_node, i);
+		CDBG("gpio_array[%d] = %d", i, gpio_array[i]);
+	}
+
+	gconf = kzalloc(sizeof(*gconf), GFP_KERNEL);
+	if (!gconf) {
+		rc = -ENOMEM;
+		goto free_gpio_array;
+	}
+
+	rc = cam_soc_util_get_dt_gpio_req_tbl(of_node, gconf, gpio_array,
+		gpio_array_size);
+	if (rc) {
+		pr_err("failed in cam_soc_util_get_dt_gpio_req_tbl\n");
+		goto free_gpio_array;
+	}
+
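+	/* Cache every GPIO listed in the node as the common lookup table. */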
+	gconf->cam_gpio_common_tbl = kcalloc(gpio_array_size,
+				sizeof(struct gpio), GFP_KERNEL);
+	if (!gconf->cam_gpio_common_tbl) {
+		rc = -ENOMEM;
+		goto free_gpio_array;
+	}
+
+	for (i = 0; i < gpio_array_size; i++)
+		gconf->cam_gpio_common_tbl[i].gpio = gpio_array[i];
+
+	gconf->cam_gpio_common_tbl_size = gpio_array_size;
+	soc_info->gpio_data = gconf;
+	kfree(gpio_array);
+
+	return rc;
+
+free_gpio_array:
+	kfree(gpio_array);
+free_gpio_conf:
+	kfree(gconf);
+	soc_info->gpio_data = NULL;
+
+	return rc;
+}
+
+static int cam_soc_util_request_gpio_table(
+	struct cam_hw_soc_info *soc_info, bool gpio_en)
+{
+	int rc = 0, i = 0;
+	uint8_t size = 0;
+	struct cam_soc_gpio_data *gpio_conf =
+			soc_info->gpio_data;
+	struct gpio *gpio_tbl = NULL;
+
+	if (!gpio_conf) {
+		CDBG("No GPIO entry\n");
+		return 0;
+	}
+	if (gpio_conf->cam_gpio_common_tbl_size <= 0) {
+		pr_err("GPIO table size is invalid\n");
+		return -EINVAL;
+	}
+	size = gpio_conf->cam_gpio_req_tbl_size;
+	gpio_tbl = gpio_conf->cam_gpio_req_tbl;
+
+	if (!gpio_tbl || !size) {
+		pr_err("Invalid gpio_tbl %pK / size %d\n",
+			gpio_tbl, size);
+		return -EINVAL;
+	}
+	for (i = 0; i < size; i++) {
+		CDBG("i=%d, gpio=%d dir=%ld\n", i,
+			gpio_tbl[i].gpio, gpio_tbl[i].flags);
+	}
+	if (gpio_en) {
+		for (i = 0; i < size; i++) {
+			rc = gpio_request_one(gpio_tbl[i].gpio,
+				gpio_tbl[i].flags, gpio_tbl[i].label);
+			if (rc) {
+				/*
+				 * If a GPIO request fails, continue to
+				 * apply the remaining GPIOs and output an
+				 * error message for driver bring-up debug.
+				 */
+				pr_err("gpio %d:%s request fails\n",
+					gpio_tbl[i].gpio, gpio_tbl[i].label);
+			}
+		}
+	} else {
+		gpio_free_array(gpio_tbl, size);
+	}
+
+	return rc;
+}
+
+static int cam_soc_util_get_dt_regulator_info
+	(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0, count = 0, i = 0;
+	struct device_node *of_node = NULL;
+	struct platform_device *pdev = NULL;
+
+	if (!soc_info || !soc_info->pdev) {
+		pr_err("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	pdev = soc_info->pdev;
+	of_node = pdev->dev.of_node;
+
+	soc_info->num_rgltr = 0;
+	count = of_property_count_strings(of_node, "regulator-names");
+	if (count != -EINVAL) {
+		if (count <= 0) {
+			pr_err("no regulators found\n");
+			count = 0;
+			return -EINVAL;
+		}
+
+		soc_info->num_rgltr = count;
+
+	} else {
+		CDBG("No regulators node found\n");
+		return 0;
+	}
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = of_property_read_string_index(of_node,
+			"regulator-names", i, &soc_info->rgltr_name[i]);
+		CDBG("rgltr_name[%d] = %s\n", i, soc_info->rgltr_name[i]);
+		if (rc) {
+			pr_err("no regulator resource at cnt=%d\n", i);
+			return -ENODEV;
+		}
+	}
+
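+	/*
+	 * Per-regulator voltage and load settings are parsed only when the
+	 * node opts in with "rgltr-cntrl-support"; otherwise regulators are
+	 * used with their default constraints.
+	 */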
+	if (!of_property_read_bool(of_node, "rgltr-cntrl-support")) {
+		CDBG("No regulator control parameter defined\n");
+		soc_info->rgltr_ctrl_support = false;
+		return 0;
+	}
+
+	soc_info->rgltr_ctrl_support = true;
+
+	rc = of_property_read_u32_array(of_node, "rgltr-min-voltage",
+		soc_info->rgltr_min_volt, soc_info->num_rgltr);
+	if (rc) {
+		pr_err("No minimum voltage value found, rc=%d\n", rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node, "rgltr-max-voltage",
+		soc_info->rgltr_max_volt, soc_info->num_rgltr);
+	if (rc) {
+		pr_err("No maximum voltage value found, rc=%d\n", rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node, "rgltr-load-current",
+		soc_info->rgltr_op_mode, soc_info->num_rgltr);
+	if (rc) {
+		pr_err("No load current found, rc=%d\n", rc);
+		return -EINVAL;
 	}
 
 	return rc;
@@ -264,7 +726,6 @@
 		return -EINVAL;
 
 	pdev = soc_info->pdev;
-
 	of_node = pdev->dev.of_node;
 
 	rc = of_property_read_u32(of_node, "cell-index", &soc_info->index);
@@ -272,25 +733,6 @@
 		pr_err("device %s failed to read cell-index\n", pdev->name);
 		return rc;
 	}
-
-	count = of_property_count_strings(of_node, "regulator-names");
-	if (count <= 0) {
-		pr_err("no regulators found\n");
-		count = 0;
-	}
-	soc_info->num_rgltr = count;
-
-	for (i = 0; i < soc_info->num_rgltr; i++) {
-		rc = of_property_read_string_index(of_node,
-			"regulator-names", i, &soc_info->rgltr_name[i]);
-		CDBG("rgltr_name[%d] = %s\n", i, soc_info->rgltr_name[i]);
-		if (rc) {
-			pr_err("no regulator resource at cnt=%d\n", i);
-			rc = -ENODEV;
-			return rc;
-		}
-	}
-
 	count = of_property_count_strings(of_node, "reg-names");
 	if (count <= 0) {
 		pr_err("no reg-names found\n");
@@ -330,6 +772,7 @@
 		&soc_info->irq_name);
 	if (rc) {
 		pr_warn("No interrupt line present\n");
+		rc = 0;
 	} else {
 		soc_info->irq_line = platform_get_resource_byname(pdev,
 			IORESOURCE_IRQ, soc_info->irq_name);
@@ -340,7 +783,17 @@
 		}
 	}
 
+	rc = cam_soc_util_get_dt_regulator_info(soc_info);
+	if (rc)
+		return rc;
+
 	rc = cam_soc_util_get_dt_clk_info(soc_info);
+	if (rc)
+		return rc;
+
+	rc = cam_soc_util_get_gpio_info(soc_info);
+	if (rc)
+		return rc;
 
 	return rc;
 }
@@ -370,18 +823,207 @@
 	return rc;
 }
 
-int cam_soc_util_request_platform_resource(struct cam_hw_soc_info *soc_info,
+int cam_soc_util_regulator_disable(struct regulator *rgltr,
+	const char *rgltr_name, uint32_t rgltr_min_volt,
+	uint32_t rgltr_max_volt, uint32_t rgltr_op_mode,
+	uint32_t rgltr_delay_ms)
+{
+	int32_t rc = 0;
+
+	if (!rgltr) {
+		pr_err("Invalid NULL parameter\n");
+		return -EINVAL;
+	}
+
+	rc = regulator_disable(rgltr);
+	if (rc) {
+		pr_err("%s regulator disable failed\n", rgltr_name);
+		return rc;
+	}
+
+	if (rgltr_delay_ms > 20)
+		msleep(rgltr_delay_ms);
+	else if (rgltr_delay_ms)
+		usleep_range(rgltr_delay_ms * 1000,
+			(rgltr_delay_ms * 1000) + 1000);
+
+	if (regulator_count_voltages(rgltr) > 0) {
+		regulator_set_load(rgltr, 0);
+		regulator_set_voltage(rgltr, 0, rgltr_max_volt);
+	}
+
+	return rc;
+}
+
+
+int cam_soc_util_regulator_enable(struct regulator *rgltr,
+	const char *rgltr_name,
+	uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
+	uint32_t rgltr_op_mode, uint32_t rgltr_delay)
+{
+	int32_t rc = 0;
+
+	if (!rgltr) {
+		pr_err("Invalid NULL parameter\n");
+		return -EINVAL;
+	}
+
+	if (regulator_count_voltages(rgltr) > 0) {
+		CDBG("voltage min=%d, max=%d\n",
+			rgltr_min_volt, rgltr_max_volt);
+
+		rc = regulator_set_voltage(
+			rgltr, rgltr_min_volt, rgltr_max_volt);
+		if (rc) {
+			pr_err("%s set voltage failed\n", rgltr_name);
+			return rc;
+		}
+
+		rc = regulator_set_load(rgltr, rgltr_op_mode);
+		if (rc) {
+			pr_err("%s set optimum mode failed\n", rgltr_name);
+			return rc;
+		}
+	}
+
+	rc = regulator_enable(rgltr);
+	if (rc) {
+		pr_err("%s regulator_enable failed\n", rgltr_name);
+		return rc;
+	}
+
+	if (rgltr_delay > 20)
+		msleep(rgltr_delay);
+	else if (rgltr_delay)
+		usleep_range(rgltr_delay * 1000,
+			(rgltr_delay * 1000) + 1000);
+
+	return rc;
+}
+
+static int cam_soc_util_request_pinctrl(
+	struct cam_hw_soc_info *soc_info)
+{
+	struct cam_soc_pinctrl_info *device_pctrl = &soc_info->pinctrl_info;
+	struct device *dev = &soc_info->pdev->dev;
+
+	device_pctrl->pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR_OR_NULL(device_pctrl->pinctrl)) {
+		CDBG("Pinctrl not available\n");
+		device_pctrl->pinctrl = NULL;
+		return 0;
+	}
+	device_pctrl->gpio_state_active =
+		pinctrl_lookup_state(device_pctrl->pinctrl,
+				CAM_SOC_PINCTRL_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(device_pctrl->gpio_state_active)) {
+		pr_err("Failed to get the active state pinctrl handle\n");
+		device_pctrl->gpio_state_active = NULL;
+		return -EINVAL;
+	}
+	device_pctrl->gpio_state_suspend
+		= pinctrl_lookup_state(device_pctrl->pinctrl,
+				CAM_SOC_PINCTRL_STATE_SLEEP);
+	if (IS_ERR_OR_NULL(device_pctrl->gpio_state_suspend)) {
+		pr_err("Failed to get the suspend state pinctrl handle\n");
+		device_pctrl->gpio_state_suspend = NULL;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void cam_soc_util_regulator_disable_default(
+	struct cam_hw_soc_info *soc_info)
+{
+	int j = 0;
+	uint32_t num_rgltr = soc_info->num_rgltr;
+
+	for (j = num_rgltr-1; j >= 0; j--) {
+		if (soc_info->rgltr_ctrl_support == true) {
+			cam_soc_util_regulator_disable(soc_info->rgltr[j],
+				soc_info->rgltr_name[j],
+				soc_info->rgltr_min_volt[j],
+				soc_info->rgltr_max_volt[j],
+				soc_info->rgltr_op_mode[j],
+				soc_info->rgltr_delay[j]);
+		} else {
+			if (soc_info->rgltr[j])
+				regulator_disable(soc_info->rgltr[j]);
+		}
+	}
+}
+
+static int cam_soc_util_regulator_enable_default(
+	struct cam_hw_soc_info *soc_info)
+{
+	int j = 0, rc = 0;
+	uint32_t num_rgltr = soc_info->num_rgltr;
+
+	for (j = 0; j < num_rgltr; j++) {
+		if (soc_info->rgltr_ctrl_support == true) {
+			rc = cam_soc_util_regulator_enable(soc_info->rgltr[j],
+				soc_info->rgltr_name[j],
+				soc_info->rgltr_min_volt[j],
+				soc_info->rgltr_max_volt[j],
+				soc_info->rgltr_op_mode[j],
+				soc_info->rgltr_delay[j]);
+		} else {
+			if (soc_info->rgltr[j])
+				rc = regulator_enable(soc_info->rgltr[j]);
+		}
+
+		if (rc) {
+			pr_err("%s enable failed\n", soc_info->rgltr_name[j]);
+			goto disable_rgltr;
+		}
+	}
+
+	return rc;
+disable_rgltr:
+
+	for (j--; j >= 0; j--) {
+		if (soc_info->rgltr_ctrl_support == true) {
+			cam_soc_util_regulator_disable(soc_info->rgltr[j],
+				soc_info->rgltr_name[j],
+				soc_info->rgltr_min_volt[j],
+				soc_info->rgltr_max_volt[j],
+				soc_info->rgltr_op_mode[j],
+				soc_info->rgltr_delay[j]);
+		} else {
+			if (soc_info->rgltr[j])
+				regulator_disable(soc_info->rgltr[j]);
+		}
+	}
+
+	return rc;
+}
+
+int cam_soc_util_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
 	irq_handler_t handler, void *irq_data)
 {
 	int i = 0, rc = 0;
 	struct platform_device *pdev = NULL;
 
-	if (!soc_info || !soc_info->pdev)
+	if (!soc_info || !soc_info->pdev) {
+		pr_err("Invalid parameters\n");
 		return -EINVAL;
+	}
 
 	pdev = soc_info->pdev;
 
 	for (i = 0; i < soc_info->num_mem_block; i++) {
+		if (soc_info->reserve_mem) {
+			if (!request_mem_region(soc_info->mem_block[i]->start,
+				resource_size(soc_info->mem_block[i]),
+				soc_info->mem_block_name[i])){
+				pr_err("Error Mem Region request Failed:%s\n",
+					soc_info->mem_block_name[i]);
+				rc = -ENOMEM;
+				goto unmap_base;
+			}
+		}
 		soc_info->reg_map[i].mem_base = ioremap(
 			soc_info->mem_block[i]->start,
 			resource_size(soc_info->mem_block[i]));
@@ -398,6 +1040,11 @@
 	}
 
 	for (i = 0; i < soc_info->num_rgltr; i++) {
+		if (soc_info->rgltr_name[i] == NULL) {
+			pr_err("can't find regulator name\n");
+			goto put_regulator;
+		}
+
 		rc = cam_soc_util_get_regulator(pdev, &soc_info->rgltr[i],
 			soc_info->rgltr_name[i]);
 		if (rc)
@@ -408,7 +1055,7 @@
 		rc = devm_request_irq(&pdev->dev, soc_info->irq_line->start,
 			handler, IRQF_TRIGGER_RISING,
 			soc_info->irq_name, irq_data);
-		if (rc < 0) {
+		if (rc) {
 			pr_err("irq request fail\n");
 			rc = -EBUSY;
 			goto put_regulator;
@@ -428,6 +1075,16 @@
 		}
 	}
 
+	rc = cam_soc_util_request_pinctrl(soc_info);
+	if (rc)
+		CDBG("Failed in request pinctrl, rc=%d\n", rc);
+
+	rc = cam_soc_util_request_gpio_table(soc_info, true);
+	if (rc) {
+		pr_err("Failed in request gpio table, rc=%d\n", rc);
+		goto put_clk;
+	}
+
 	return rc;
 
 put_clk:
@@ -461,6 +1118,9 @@
 	if (i == -1)
 		i = soc_info->num_reg_map;
 	for (i = i - 1; i >= 0; i--) {
+		if (soc_info->reserve_mem)
+			release_mem_region(soc_info->mem_block[i]->start,
+				resource_size(soc_info->mem_block[i]));
 		iounmap(soc_info->reg_map[i].mem_base);
 		soc_info->reg_map[i].mem_base = NULL;
 		soc_info->reg_map[i].size = 0;
@@ -474,8 +1134,11 @@
 	int i;
 	struct platform_device *pdev = NULL;
 
-	if (!soc_info || !soc_info->pdev)
+	if (!soc_info || !soc_info->pdev) {
+		pr_err("Invalid parameter\n");
 		return -EINVAL;
+	}
+
 
 	pdev = soc_info->pdev;
 
@@ -503,28 +1166,32 @@
 			soc_info->irq_line->start, soc_info->irq_data);
 	}
 
+	if (soc_info->pinctrl_info.pinctrl)
+		devm_pinctrl_put(soc_info->pinctrl_info.pinctrl);
+
+
+	/* release for gpio */
+	cam_soc_util_request_gpio_table(soc_info, false);
+
 	return 0;
 }
 
 int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
-	bool enable_clocks, bool enable_irq)
+	bool enable_clocks, enum cam_vote_level clk_level, bool enable_irq)
 {
-	int i, rc = 0;
+	int rc = 0;
 
 	if (!soc_info)
 		return -EINVAL;
 
-	for (i = 0; i < soc_info->num_rgltr; i++) {
-		rc = regulator_enable(soc_info->rgltr[i]);
-		if (rc) {
-			pr_err("Regulator enable %s failed\n",
-				soc_info->rgltr_name[i]);
-			goto disable_regulator;
-		}
+	rc = cam_soc_util_regulator_enable_default(soc_info);
+	if (rc) {
+		pr_err("Regulators enable failed\n");
+		return rc;
 	}
 
 	if (enable_clocks) {
-		rc = cam_soc_util_clk_enable_default(soc_info);
+		rc = cam_soc_util_clk_enable_default(soc_info, clk_level);
 		if (rc)
 			goto disable_regulator;
 	}
@@ -535,19 +1202,28 @@
 			goto disable_clk;
 	}
 
+	if (soc_info->pinctrl_info.pinctrl &&
+		soc_info->pinctrl_info.gpio_state_active) {
+		rc = pinctrl_select_state(soc_info->pinctrl_info.pinctrl,
+			soc_info->pinctrl_info.gpio_state_active);
+
+		if (rc)
+			goto disable_irq;
+	}
+
 	return rc;
 
+disable_irq:
+	if (enable_irq)
+		cam_soc_util_irq_disable(soc_info);
+
 disable_clk:
 	if (enable_clocks)
 		cam_soc_util_clk_disable_default(soc_info);
 
 disable_regulator:
-	if (i == -1)
-		i = soc_info->num_rgltr;
-	for (i = i - 1; i >= 0; i--) {
-		if (soc_info->rgltr[i])
-			regulator_disable(soc_info->rgltr[i]);
-	}
+	cam_soc_util_regulator_disable_default(soc_info);
+
 
 	return rc;
 }
@@ -555,7 +1231,7 @@
 int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
 	bool disable_clocks, bool disble_irq)
 {
-	int i, rc = 0;
+	int rc = 0;
 
 	if (!soc_info)
 		return -EINVAL;
@@ -563,18 +1239,16 @@
 	if (disable_clocks)
 		cam_soc_util_clk_disable_default(soc_info);
 
-	for (i = soc_info->num_rgltr - 1; i >= 0; i--) {
-		rc |= regulator_disable(soc_info->rgltr[i]);
-		if (rc) {
-			pr_err("Regulator disble %s failed\n",
-				soc_info->rgltr_name[i]);
-			continue;
-		}
-	}
+	cam_soc_util_regulator_disable_default(soc_info);
 
 	if (disble_irq)
 		rc |= cam_soc_util_irq_disable(soc_info);
 
+	if (soc_info->pinctrl_info.pinctrl &&
+		soc_info->pinctrl_info.gpio_state_suspend)
+		rc = pinctrl_select_state(soc_info->pinctrl_info.pinctrl,
+			soc_info->pinctrl_info.gpio_state_suspend);
+
 	return rc;
 }
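For orientation, a camera sub-device driver is expected to acquire its resources once and then power them per use case with an explicit clock level. The sketch below is illustrative only (the init function name is hypothetical); it assumes a soc_info already populated from DT and uses the helpers touched by this patch:

static int example_hw_init(struct cam_hw_soc_info *soc_info,
	irq_handler_t irq_handler, void *irq_data)
{
	int rc;

	/* Map register blocks, get regulators/clocks, request IRQ,
	 * pinctrl and GPIOs.
	 */
	rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
		irq_data);
	if (rc)
		return rc;

	/* Power up: regulators first, then clocks at SVS level, IRQ, pinctrl */
	rc = cam_soc_util_enable_platform_resource(soc_info, true,
		CAM_SVS_VOTE, true);

	return rc;
}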
 
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
index e556bba..7eb7578 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -16,6 +16,7 @@
 #include <linux/clk.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 
@@ -37,6 +38,33 @@
 #define CAM_SOC_MAX_CLK             32
 
 /**
+ * enum cam_vote_level - Enum for voting level
+ *
+ * @CAM_SUSPEND_VOTE : Suspend vote
+ * @CAM_MINSVS_VOTE  : Min SVS vote
+ * @CAM_LOWSVS_VOTE  : Low SVS vote
+ * @CAM_SVS_VOTE     : SVS vote
+ * @CAM_SVSL1_VOTE   : SVS Plus vote
+ * @CAM_NOMINAL_VOTE : Nominal vote
+ * @CAM_TURBO_VOTE   : Turbo vote
+ * @CAM_MAX_VOTE     : Max voting level; this is an invalid level.
+ */
+enum cam_vote_level {
+	CAM_SUSPEND_VOTE,
+	CAM_MINSVS_VOTE,
+	CAM_LOWSVS_VOTE,
+	CAM_SVS_VOTE,
+	CAM_SVSL1_VOTE,
+	CAM_NOMINAL_VOTE,
+	CAM_TURBO_VOTE,
+	CAM_MAX_VOTE,
+};
+
+/* pinctrl states */
+#define CAM_SOC_PINCTRL_STATE_SLEEP "cam_suspend"
+#define CAM_SOC_PINCTRL_STATE_DEFAULT "cam_default"
+
+/**
  * struct cam_soc_reg_map:   Information about the mapped register space
  *
  * @mem_base:               Starting location of MAPPED register space
@@ -51,6 +79,35 @@
 };
 
 /**
+ * struct cam_soc_pinctrl_info:   Information about pinctrl data
+ *
+ * @pinctrl:               pinctrl object
+ * @gpio_state_active:     default pinctrl state
+ * @gpio_state_suspend:    suspend state of pinctrl
+ **/
+struct cam_soc_pinctrl_info {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *gpio_state_active;
+	struct pinctrl_state *gpio_state_suspend;
+};
+
+/**
+ * struct cam_soc_gpio_data:   Information about the gpio pins
+ *
+ * @cam_gpio_common_tbl:       List of all the gpios present in the gpios node
+ * @cam_gpio_common_tbl_size:  Number of gpios present in the gpios node
+ *                             in DTSI
+ * @cam_gpio_req_tbl:          List of all the requested gpios
+ * @cam_gpio_req_tbl_size:     Number of requested gpios
+ **/
+struct cam_soc_gpio_data {
+	struct gpio *cam_gpio_common_tbl;
+	uint8_t cam_gpio_common_tbl_size;
+	struct gpio *cam_gpio_req_tbl;
+	uint8_t cam_gpio_req_tbl_size;
+};
+
+/**
  * struct cam_hw_soc_info:  Soc information pertaining to specific instance of
  *                          Camera hardware driver module
  *
@@ -69,14 +126,25 @@
  * @num_reg_map:            Number of mapped register space associated
  *                          with mem_block. num_reg_map = num_mem_block in
  *                          most cases
+ * @reserve_mem:            Whether to reserve memory for Mem blocks
  * @num_rgltr:              Number of regulators
  * @rgltr_name:             Array of regulator names
+ * @rgltr_ctrl_support:     Whether regulator control is supported
+ * @rgltr_min_volt:         Array of minimum regulator voltage
+ * @rgltr_max_volt:         Array of maximum regulator voltage
+ * @rgltr_op_mode:          Array of regulator operation mode
+ * @rgltr_type:             Array of regulator types
  * @rgltr:                  Array of associated regulator resources
+ * @rgltr_delay:            Array of regulator delay values
  * @num_clk:                Number of clocks
  * @clk_name:               Array of clock names
  * @clk:                    Array of associated clock resources
- * @clk_rate:               Array of default clock rates
+ * @clk_rate:               2D array of clock rates representing clock rate
+ *                          values at different vote levels
  * @src_clk_idx:            Source clock index that is rate-controllable
+ * @clk_level_valid:        Indicates whether corresponding level is valid
+ * @gpio_data:              Pointer to gpio info
+ * @pinctrl_info:           Pinctrl info
  * @soc_private:            Soc private data
  *
  */
@@ -84,7 +152,6 @@
 	struct platform_device         *pdev;
 	uint32_t                        hw_version;
 	uint32_t                        index;
-
 	const char                     *irq_name;
 	struct resource                *irq_line;
 	void                           *irq_data;
@@ -95,16 +162,27 @@
 	struct resource                *mem_block[CAM_SOC_MAX_BLOCK];
 	struct cam_soc_reg_map          reg_map[CAM_SOC_MAX_BASE];
 	uint32_t                        num_reg_map;
+	uint32_t                        reserve_mem;
 
 	uint32_t                        num_rgltr;
 	const char                     *rgltr_name[CAM_SOC_MAX_REGULATOR];
+	uint32_t                        rgltr_ctrl_support;
+	uint32_t                        rgltr_min_volt[CAM_SOC_MAX_REGULATOR];
+	uint32_t                        rgltr_max_volt[CAM_SOC_MAX_REGULATOR];
+	uint32_t                        rgltr_op_mode[CAM_SOC_MAX_REGULATOR];
+	uint32_t                        rgltr_type[CAM_SOC_MAX_REGULATOR];
 	struct regulator               *rgltr[CAM_SOC_MAX_REGULATOR];
+	uint32_t                        rgltr_delay[CAM_SOC_MAX_REGULATOR];
 
 	uint32_t                        num_clk;
 	const char                     *clk_name[CAM_SOC_MAX_CLK];
 	struct clk                     *clk[CAM_SOC_MAX_CLK];
-	int32_t                         clk_rate[CAM_SOC_MAX_CLK];
+	int32_t                         clk_rate[CAM_MAX_VOTE][CAM_SOC_MAX_CLK];
 	int32_t                         src_clk_idx;
+	bool                            clk_level_valid[CAM_MAX_VOTE];
+
+	struct cam_soc_gpio_data       *gpio_data;
+	struct cam_soc_pinctrl_info     pinctrl_info;
 
 	void                           *soc_private;
 };
@@ -159,6 +237,18 @@
 	((!__soc_info || __base_index >= __soc_info->num_reg_map) ?  \
 		0 : __soc_info->reg_map[__base_index].size)
 
+/**
+ * cam_soc_util_get_level_from_string()
+ *
+ * @brief:              Get the associated vote level for the input string
+ *
+ * @string:             Input string to compare with.
+ * @level:              Vote level corresponds to input string.
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_get_level_from_string(const char *string,
+	enum cam_vote_level *level);
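The exact device-tree strings accepted by this lookup are defined by the driver's DT parsing code and are not visible in this hunk. A minimal sketch of what the mapping might look like, with the string values assumed:

/* Sketch only; the real set of accepted strings may differ. */
static int example_get_level_from_string(const char *string,
	enum cam_vote_level *level)
{
	if (!string || !level)
		return -EINVAL;

	if (!strcmp(string, "suspend"))
		*level = CAM_SUSPEND_VOTE;
	else if (!strcmp(string, "svs"))
		*level = CAM_SVS_VOTE;
	else if (!strcmp(string, "nominal"))
		*level = CAM_NOMINAL_VOTE;
	else if (!strcmp(string, "turbo"))
		*level = CAM_TURBO_VOTE;
	else
		return -EINVAL;

	return 0;
}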
 
 /**
  * cam_soc_util_get_dt_properties()
@@ -208,6 +298,9 @@
  *                          TRUE: Enable all clocks in soc_info Now.
  *                          False: Don't enable clocks Now. Driver will
  *                                 enable independently.
+ * @clk_level:          Clock level to be applied.
+ *                          Applicable only if enable_clocks is true.
+ *                          Valid range: 0 to (CAM_MAX_VOTE - 1)
  * @enable_irq:         Boolean flag:
  *                          TRUE: Enable IRQ in soc_info Now.
  *                          False: Don't enable IRQ Now. Driver will
@@ -216,7 +309,7 @@
  * @return:             Success or failure
  */
 int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
-	bool enable_clocks, bool enable_irq);
+	bool enable_clocks, enum cam_vote_level clk_level, bool enable_irq);
 
 /**
  * cam_soc_util_disable_platform_resource()
@@ -235,6 +328,20 @@
 	bool disable_clocks, bool disable_irq);
 
 /**
+ * cam_soc_util_set_clk_rate()
+ *
+ * @brief:              Set the rate on a given clock.
+ *
+ * @clk:                Clock that needs to be set
+ * @clk_name:           Clock name associated with clk
+ * @clk_rate:           Clock rate to be set on clk
+ *
+ * @return:             success or failure
+ */
+int cam_soc_util_set_clk_rate(struct clk *clk, const char *clk_name,
+	int32_t clk_rate);
+
+/**
  * cam_soc_util_clk_enable()
  *
  * @brief:              Enable clock specified in params
@@ -249,6 +356,21 @@
 	int32_t clk_rate);
 
 /**
+ * cam_soc_util_set_clk_rate_level()
+ *
+ * @brief:              Apply clock rates for the requested level.
+ *                      This applies the new requested level for all
+ *                      the clocks listed in DT based on their values.
+ *
+ * @soc_info:           Device soc information
+ * @clk_level:          Clock level number to set
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_set_clk_rate_level(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level clk_level);
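Conceptually this walks one column of the 2D clk_rate table for the requested level. The sketch below only illustrates the indexing and is not the patch's implementation; it assumes clk_level_valid gates levels that were not specified in DT:

/* Illustrative only; shows how clk_rate[level][clk] is consumed. */
static int example_apply_clk_level(struct cam_hw_soc_info *soc_info,
	enum cam_vote_level clk_level)
{
	int i, rc = 0;

	if (clk_level >= CAM_MAX_VOTE ||
		!soc_info->clk_level_valid[clk_level])
		return -EINVAL;

	for (i = 0; i < soc_info->num_clk; i++) {
		rc = cam_soc_util_set_clk_rate(soc_info->clk[i],
			soc_info->clk_name[i],
			soc_info->clk_rate[clk_level][i]);
		if (rc)
			break;
	}

	return rc;
}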
+
+/**
  * cam_soc_util_clk_disable()
  *
  * @brief:              Disable clock specified in params
@@ -283,6 +405,45 @@
 int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info);
 
 /**
+ * cam_soc_util_regulator_enable()
+ *
+ * @brief:              Enable single regulator
+ *
+ * @rgltr:              Regulator that needs to be turned ON
+ * @rgltr_name:         Associated regulator name
+ * @rgltr_min_volt:     Requested minimum voltage
+ * @rgltr_max_volt:     Requested maximum voltage
+ * @rgltr_op_mode:      Requested load
+ * @rgltr_delay:        Requested delay needed after enabling the regulator
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_regulator_enable(struct regulator *rgltr,
+	const char *rgltr_name,
+	uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
+	uint32_t rgltr_op_mode, uint32_t rgltr_delay);
+
+/**
+ * cam_soc_util_regulator_disable()
+ *
+ * @brief:              Disable single regulator
+ *
+ * @rgltr:              Regulator that needs to be turned OFF
+ * @rgltr_name:         Associated regulator name
+ * @rgltr_min_volt:     Requested minimum voltage
+ * @rgltr_max_volt:     Requested maximum voltage
+ * @rgltr_op_mode:      Requested load
+ * @rgltr_delay:        Requested delay needed after disabling the regulator
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_regulator_disable(struct regulator *rgltr,
+	const char *rgltr_name,
+	uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
+	uint32_t rgltr_op_mode, uint32_t rgltr_delay);
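For reference, enabling a regulator with these parameters typically maps onto the generic regulator consumer API (the header already pulls in linux/delay.h and linux/regulator/consumer.h). The sketch below is an assumption about the sequence, not the patch's exact implementation, and it assumes rgltr_delay is in milliseconds:

/* Illustrative enable sequence built on the standard regulator API. */
static int example_regulator_on(struct regulator *rgltr,
	uint32_t min_volt, uint32_t max_volt,
	uint32_t op_mode, uint32_t delay_ms)
{
	int rc;

	rc = regulator_set_voltage(rgltr, min_volt, max_volt);
	if (rc)
		return rc;

	rc = regulator_set_load(rgltr, op_mode);
	if (rc < 0)
		return rc;

	rc = regulator_enable(rgltr);
	if (rc)
		return rc;

	if (delay_ms)
		msleep(delay_ms);

	return 0;
}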
+
+
+/**
  * cam_soc_util_w()
  *
  * @brief:              Camera SOC util for register write
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/Makefile b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/Makefile
deleted file mode 100644
index a4df0b8..0000000
--- a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
-ccflags-y += -Idrivers/media/platform/msm/camera/icp
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/a5_hw
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
-
-obj-$(CONFIG_SPECTRA_CAMERA) += a5_dev.o a5_core.o a5_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/Makefile b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/Makefile
deleted file mode 100644
index 6aeb5f1..0000000
--- a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
-ccflags-y += -Idrivers/media/platform/msm/camera/icp
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/bps_hw
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
-
-obj-$(CONFIG_SPECTRA_CAMERA) += bps_dev.o bps_core.o bps_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/Makefile b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/Makefile
deleted file mode 100644
index 8af20ae..0000000
--- a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
-ccflags-y += -Idrivers/media/platform/msm/camera/icp
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/ipe_hw
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
-
-obj-$(CONFIG_SPECTRA_CAMERA) += ipe_dev.o ipe_core.o ipe_soc.o
diff --git a/drivers/media/platform/msm/dvb/Kconfig b/drivers/media/platform/msm/dvb/Kconfig
new file mode 100644
index 0000000..e205c81
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/Kconfig
@@ -0,0 +1,10 @@
+config DVB_MPQ
+	tristate "Qualcomm Technologies Inc Multimedia Processor DVB Adapter"
+	depends on ARCH_QCOM && DVB_CORE
+	default n
+
+	help
+	  Support for Qualcomm Technologies Inc MPQ based DVB adapter.
+	  Say Y or M if you own such a device and want to use it.
+
+source "drivers/media/platform/msm/dvb/demux/Kconfig"
diff --git a/drivers/media/platform/msm/dvb/Makefile b/drivers/media/platform/msm/dvb/Makefile
new file mode 100644
index 0000000..862ebca
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_DVB_MPQ)	    += adapter/
+obj-$(CONFIG_DVB_MPQ_DEMUX) += demux/
diff --git a/drivers/media/platform/msm/dvb/adapter/Makefile b/drivers/media/platform/msm/dvb/adapter/Makefile
new file mode 100644
index 0000000..662bf99
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/adapter/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Idrivers/media/dvb-core/
+ccflags-y += -Idrivers/media/platform/msm/dvb/include/
+ccflags-y += -Idrivers/media/platform/msm/dvb/demux/
+
+obj-$(CONFIG_DVB_MPQ) += mpq-adapter.o
+
+mpq-adapter-y := mpq_adapter.o mpq_stream_buffer.o
diff --git a/drivers/media/platform/msm/dvb/adapter/mpq_adapter.c b/drivers/media/platform/msm/dvb/adapter/mpq_adapter.c
new file mode 100644
index 0000000..1ccb98f
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/adapter/mpq_adapter.c
@@ -0,0 +1,208 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include "mpq_adapter.h"
+#include "mpq_dvb_debug.h"
+
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+/* data-structure holding MPQ adapter information */
+static struct
+{
+	/* MPQ adapter registered to dvb-core */
+	struct dvb_adapter adapter;
+
+	/* mutex protecting the data-structure */
+	struct mutex mutex;
+
+	/* List of stream interfaces registered to the MPQ adapter */
+	struct {
+		/* pointer to the stream buffer used for data tunneling */
+		struct mpq_streambuffer *stream_buffer;
+
+		/* callback triggered when the stream interface is registered */
+		mpq_adapter_stream_if_callback callback;
+
+		/* parameter passed to the callback function */
+		void *user_param;
+	} interfaces[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES];
+} mpq_info;
+
+
+/**
+ * Initialize MPQ DVB adapter module.
+ *
+ * Return     error status
+ */
+static int __init mpq_adapter_init(void)
+{
+	int i;
+	int result;
+
+	MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+	mutex_init(&mpq_info.mutex);
+
+	/* reset stream interfaces list */
+	for (i = 0; i < MPQ_ADAPTER_MAX_NUM_OF_INTERFACES; i++) {
+		mpq_info.interfaces[i].stream_buffer = NULL;
+		mpq_info.interfaces[i].callback = NULL;
+	}
+
+	/* register a new dvb-adapter to dvb-core */
+	result = dvb_register_adapter(&mpq_info.adapter,
+				      "Qualcomm technologies, inc. DVB adapter",
+				      THIS_MODULE, NULL, adapter_nr);
+	if (result < 0) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: dvb_register_adapter failed, errno %d\n",
+			__func__,
+			result);
+	}
+
+	return result;
+}
+
+
+/**
+ * Cleanup MPQ DVB adapter module.
+ */
+static void __exit mpq_adapter_exit(void)
+{
+	MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+	/* unregister the adapter from dvb-core */
+	dvb_unregister_adapter(&mpq_info.adapter);
+	mutex_destroy(&mpq_info.mutex);
+}
+
+struct dvb_adapter *mpq_adapter_get(void)
+{
+	return &mpq_info.adapter;
+}
+EXPORT_SYMBOL(mpq_adapter_get);
+
+
+int mpq_adapter_register_stream_if(
+		enum mpq_adapter_stream_if interface_id,
+		struct mpq_streambuffer *stream_buffer)
+{
+	int ret;
+
+	if (interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES) {
+		ret = -EINVAL;
+		goto register_failed;
+	}
+
+	if (mutex_lock_interruptible(&mpq_info.mutex)) {
+		ret = -ERESTARTSYS;
+		goto register_failed;
+	}
+
+	if (mpq_info.interfaces[interface_id].stream_buffer != NULL) {
+		/* already registered interface */
+		ret = -EINVAL;
+		goto register_failed_unlock_mutex;
+	}
+
+	mpq_info.interfaces[interface_id].stream_buffer = stream_buffer;
+	mutex_unlock(&mpq_info.mutex);
+
+	/*
+	 * If callback is installed, trigger it to notify that
+	 * stream interface was registered.
+	 */
+	if (mpq_info.interfaces[interface_id].callback != NULL) {
+		mpq_info.interfaces[interface_id].callback(
+				interface_id,
+				mpq_info.interfaces[interface_id].user_param);
+	}
+
+	return 0;
+
+register_failed_unlock_mutex:
+	mutex_unlock(&mpq_info.mutex);
+register_failed:
+	return ret;
+}
+EXPORT_SYMBOL(mpq_adapter_register_stream_if);
+
+
+int mpq_adapter_unregister_stream_if(
+		enum mpq_adapter_stream_if interface_id)
+{
+	if (interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES)
+		return -EINVAL;
+
+	if (mutex_lock_interruptible(&mpq_info.mutex))
+		return -ERESTARTSYS;
+
+	/* clear the registered interface */
+	mpq_info.interfaces[interface_id].stream_buffer = NULL;
+
+	mutex_unlock(&mpq_info.mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(mpq_adapter_unregister_stream_if);
+
+
+int mpq_adapter_get_stream_if(
+		enum mpq_adapter_stream_if interface_id,
+		struct mpq_streambuffer **stream_buffer)
+{
+	if ((interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES) ||
+		(stream_buffer == NULL))
+		return -EINVAL;
+
+	if (mutex_lock_interruptible(&mpq_info.mutex))
+		return -ERESTARTSYS;
+
+	*stream_buffer = mpq_info.interfaces[interface_id].stream_buffer;
+
+	mutex_unlock(&mpq_info.mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(mpq_adapter_get_stream_if);
+
+
+int mpq_adapter_notify_stream_if(
+		enum mpq_adapter_stream_if interface_id,
+		mpq_adapter_stream_if_callback callback,
+		void *user_param)
+{
+	if (interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES)
+		return -EINVAL;
+
+	if (mutex_lock_interruptible(&mpq_info.mutex))
+		return -ERESTARTSYS;
+
+	mpq_info.interfaces[interface_id].callback = callback;
+	mpq_info.interfaces[interface_id].user_param = user_param;
+
+	mutex_unlock(&mpq_info.mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(mpq_adapter_notify_stream_if);
+
+
+module_init(mpq_adapter_init);
+module_exit(mpq_adapter_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies Inc. MPQ adapter");
+MODULE_LICENSE("GPL v2");
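To show the adapter API end to end: a producer (such as the demux plugin) registers a stream buffer against an interface ID, and a consumer (such as a decoder bridge) looks it up. The function names below are hypothetical; only the mpq_adapter_* calls come from this file:

/* Hypothetical producer side: publish a stream buffer. */
static int example_publish(enum mpq_adapter_stream_if id,
	struct mpq_streambuffer *sbuff)
{
	return mpq_adapter_register_stream_if(id, sbuff);
}

/* Hypothetical consumer side: fetch it, or NULL if not registered yet. */
static struct mpq_streambuffer *example_lookup(enum mpq_adapter_stream_if id)
{
	struct mpq_streambuffer *sbuff = NULL;

	if (mpq_adapter_get_stream_if(id, &sbuff))
		return NULL;

	return sbuff;
}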
diff --git a/drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c b/drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c
new file mode 100644
index 0000000..4f84c58
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c
@@ -0,0 +1,827 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include "mpq_dvb_debug.h"
+#include "mpq_stream_buffer.h"
+
+
+int mpq_streambuffer_init(
+		struct mpq_streambuffer *sbuff,
+		enum mpq_streambuffer_mode mode,
+		struct mpq_streambuffer_buffer_desc *data_buffers,
+		u32 data_buff_num,
+		void *packet_buff,
+		size_t packet_buff_size)
+{
+	if ((sbuff == NULL) || (data_buffers == NULL) ||
+		(packet_buff == NULL) || (data_buff_num == 0))
+		return -EINVAL;
+
+	if (data_buff_num > 1) {
+		if (mode != MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+			return -EINVAL;
+		/* Linear buffer group */
+		dvb_ringbuffer_init(
+			&sbuff->raw_data,
+			data_buffers,
+			data_buff_num *
+			sizeof(struct mpq_streambuffer_buffer_desc));
+	} else {
+		if (mode != MPQ_STREAMBUFFER_BUFFER_MODE_RING)
+			return -EINVAL;
+		/* Single ring-buffer */
+		dvb_ringbuffer_init(&sbuff->raw_data,
+			data_buffers[0].base, data_buffers[0].size);
+	}
+	sbuff->mode = mode;
+	sbuff->buffers = data_buffers;
+	sbuff->pending_buffers_count = 0;
+	sbuff->buffers_num = data_buff_num;
+	sbuff->cb = NULL;
+	dvb_ringbuffer_init(&sbuff->packet_data, packet_buff, packet_buff_size);
+
+	return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_init);
+
+void mpq_streambuffer_terminate(struct mpq_streambuffer *sbuff)
+{
+	spin_lock(&sbuff->packet_data.lock);
+	spin_lock(&sbuff->raw_data.lock);
+	sbuff->packet_data.error = -ENODEV;
+	sbuff->raw_data.error = -ENODEV;
+	spin_unlock(&sbuff->raw_data.lock);
+	spin_unlock(&sbuff->packet_data.lock);
+
+	wake_up_all(&sbuff->raw_data.queue);
+	wake_up_all(&sbuff->packet_data.queue);
+}
+EXPORT_SYMBOL(mpq_streambuffer_terminate);
+
+ssize_t mpq_streambuffer_pkt_next(
+		struct mpq_streambuffer *sbuff,
+		ssize_t idx, size_t *pktlen)
+{
+	ssize_t packet_idx;
+
+	spin_lock(&sbuff->packet_data.lock);
+
+	/* buffer was released, return no packet available */
+	if (sbuff->packet_data.error == -ENODEV) {
+		spin_unlock(&sbuff->packet_data.lock);
+		return -ENODEV;
+	}
+
+	packet_idx = dvb_ringbuffer_pkt_next(&sbuff->packet_data, idx, pktlen);
+	spin_unlock(&sbuff->packet_data.lock);
+
+	return packet_idx;
+}
+EXPORT_SYMBOL(mpq_streambuffer_pkt_next);
+
+
+ssize_t mpq_streambuffer_pkt_read(
+		struct mpq_streambuffer *sbuff,
+		size_t idx,
+		struct mpq_streambuffer_packet_header *packet,
+		u8 *user_data)
+{
+	size_t ret;
+	size_t read_len;
+
+	spin_lock(&sbuff->packet_data.lock);
+
+	/* buffer was released, return no packet available */
+	if (sbuff->packet_data.error == -ENODEV) {
+		spin_unlock(&sbuff->packet_data.lock);
+		return -ENODEV;
+	}
+
+	/* read-out the packet header first */
+	ret = dvb_ringbuffer_pkt_read(
+				&sbuff->packet_data, idx, 0,
+				(u8 *)packet,
+				sizeof(struct mpq_streambuffer_packet_header));
+
+	/* verify length, at least packet header should exist */
+	if (ret != sizeof(struct mpq_streambuffer_packet_header)) {
+		spin_unlock(&sbuff->packet_data.lock);
+		return -EINVAL;
+	}
+
+	read_len = ret;
+
+	/* read out the private user-data, if any */
+	if ((packet->user_data_len) && (user_data != NULL)) {
+		ret = dvb_ringbuffer_pkt_read(
+				&sbuff->packet_data,
+				idx,
+				sizeof(struct mpq_streambuffer_packet_header),
+				user_data,
+				packet->user_data_len);
+
+		if (ret < 0) {
+			spin_unlock(&sbuff->packet_data.lock);
+			return ret;
+		}
+
+		read_len += ret;
+	}
+
+	spin_unlock(&sbuff->packet_data.lock);
+
+	return read_len;
+}
+EXPORT_SYMBOL(mpq_streambuffer_pkt_read);
+
+
+int mpq_streambuffer_pkt_dispose(
+			struct mpq_streambuffer *sbuff,
+			size_t idx,
+			int dispose_data)
+{
+	int ret;
+	struct mpq_streambuffer_packet_header packet;
+
+	if (sbuff == NULL)
+		return -EINVAL;
+
+	spin_lock(&sbuff->packet_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->packet_data.error == -ENODEV) {
+		spin_unlock(&sbuff->packet_data.lock);
+		return -ENODEV;
+	}
+
+	/* read-out the packet header first */
+	ret = dvb_ringbuffer_pkt_read(&sbuff->packet_data, idx,
+			0,
+			(u8 *)&packet,
+			sizeof(struct mpq_streambuffer_packet_header));
+
+	spin_unlock(&sbuff->packet_data.lock);
+
+	if (ret != sizeof(struct mpq_streambuffer_packet_header))
+		return -EINVAL;
+
+	if ((sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) ||
+		(dispose_data)) {
+		/* Advance the read pointer in the raw-data buffer first */
+		ret = mpq_streambuffer_data_read_dispose(sbuff,
+			packet.raw_data_len);
+		if (ret != 0)
+			return ret;
+	}
+
+	spin_lock(&sbuff->packet_data.lock);
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if ((sbuff->packet_data.error == -ENODEV) ||
+		(sbuff->raw_data.error == -ENODEV)) {
+		spin_unlock(&sbuff->raw_data.lock);
+		spin_unlock(&sbuff->packet_data.lock);
+		return -ENODEV;
+	}
+
+	/* Move read pointer to the next linear buffer for subsequent reads */
+	if ((sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) &&
+		(packet.raw_data_len > 0)) {
+		struct mpq_streambuffer_buffer_desc *desc;
+
+		desc = (struct mpq_streambuffer_buffer_desc *)
+				&sbuff->raw_data.data[sbuff->raw_data.pread];
+
+		desc->write_ptr = 0;
+		desc->read_ptr = 0;
+
+		DVB_RINGBUFFER_SKIP(&sbuff->raw_data,
+				sizeof(struct mpq_streambuffer_buffer_desc));
+		sbuff->pending_buffers_count--;
+
+		wake_up_all(&sbuff->raw_data.queue);
+	}
+
+	/* Now clear the packet from the packet header */
+	dvb_ringbuffer_pkt_dispose(&sbuff->packet_data, idx);
+
+	spin_unlock(&sbuff->raw_data.lock);
+	spin_unlock(&sbuff->packet_data.lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_pkt_dispose);
+
+int mpq_streambuffer_pkt_write(
+			struct mpq_streambuffer *sbuff,
+			struct mpq_streambuffer_packet_header *packet,
+			u8 *user_data)
+{
+	ssize_t idx;
+	size_t len;
+
+	if ((sbuff == NULL) || (packet == NULL))
+		return -EINVAL;
+
+	spin_lock(&sbuff->packet_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->packet_data.error == -ENODEV) {
+		spin_unlock(&sbuff->packet_data.lock);
+		return -ENODEV;
+	}
+
+	/* Make sure we can go to the next linear buffer */
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR &&
+		sbuff->pending_buffers_count == sbuff->buffers_num &&
+		packet->raw_data_len) {
+		spin_unlock(&sbuff->packet_data.lock);
+		return -ENOSPC;
+	}
+
+	len = sizeof(struct mpq_streambuffer_packet_header) +
+		packet->user_data_len;
+
+	/* Make sure enough space available for packet header */
+	if (dvb_ringbuffer_free(&sbuff->packet_data) <
+		(len + DVB_RINGBUFFER_PKTHDRSIZE)) {
+		spin_unlock(&sbuff->packet_data.lock);
+		return -ENOSPC;
+	}
+
+	/* Start writing the packet header */
+	idx = dvb_ringbuffer_pkt_start(&sbuff->packet_data, len);
+
+	/* Write non-user private data header */
+	dvb_ringbuffer_write(&sbuff->packet_data,
+		(u8 *)packet,
+		sizeof(struct mpq_streambuffer_packet_header));
+
+	/* Write user's own private data header */
+	dvb_ringbuffer_write(&sbuff->packet_data,
+		user_data,
+		packet->user_data_len);
+
+	dvb_ringbuffer_pkt_close(&sbuff->packet_data, idx);
+
+	/* Move write pointer to next linear buffer for subsequent writes */
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR &&
+		packet->raw_data_len) {
+		DVB_RINGBUFFER_PUSH(&sbuff->raw_data,
+				sizeof(struct mpq_streambuffer_buffer_desc));
+		sbuff->pending_buffers_count++;
+	}
+
+	spin_unlock(&sbuff->packet_data.lock);
+	wake_up_all(&sbuff->packet_data.queue);
+
+	return idx;
+}
+EXPORT_SYMBOL(mpq_streambuffer_pkt_write);
+
+ssize_t mpq_streambuffer_data_write(
+			struct mpq_streambuffer *sbuff,
+			const u8 *buf, size_t len)
+{
+	int res;
+
+	if ((sbuff == NULL) || (buf == NULL))
+		return -EINVAL;
+
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return -ENODEV;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		if (unlikely(dvb_ringbuffer_free(&sbuff->raw_data) < len)) {
+			spin_unlock(&sbuff->raw_data.lock);
+			return -ENOSPC;
+		}
+		/*
+		 * Secure buffers are not permitted to be mapped into kernel
+		 * memory, and so buffer base address may be NULL
+		 */
+		if (sbuff->raw_data.data == NULL) {
+			spin_unlock(&sbuff->raw_data.lock);
+			return -EPERM;
+		}
+		res = dvb_ringbuffer_write(&sbuff->raw_data, buf, len);
+		wake_up_all(&sbuff->raw_data.queue);
+	} else {
+		/* Linear buffer group */
+		struct mpq_streambuffer_buffer_desc *desc;
+
+		desc = (struct mpq_streambuffer_buffer_desc *)
+				&sbuff->raw_data.data[sbuff->raw_data.pwrite];
+
+		/*
+		 * Secure buffers are not permitted to be mapped into kernel
+		 * memory, and so buffer base address may be NULL
+		 */
+		if (desc->base == NULL) {
+			spin_unlock(&sbuff->raw_data.lock);
+			return -EPERM;
+		}
+
+		if ((sbuff->pending_buffers_count == sbuff->buffers_num) ||
+			((desc->size - desc->write_ptr) < len)) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: No space available! %d pending buffers out of %d total buffers. write_ptr=%d, size=%d\n",
+				__func__,
+				sbuff->pending_buffers_count,
+				sbuff->buffers_num,
+				desc->write_ptr,
+				desc->size);
+			spin_unlock(&sbuff->raw_data.lock);
+			return -ENOSPC;
+		}
+		memcpy(desc->base + desc->write_ptr, buf, len);
+		desc->write_ptr += len;
+		res = len;
+	}
+
+	spin_unlock(&sbuff->raw_data.lock);
+	return res;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_write);
+
+
+int mpq_streambuffer_data_write_deposit(
+				struct mpq_streambuffer *sbuff,
+				size_t len)
+{
+	if (sbuff == NULL)
+		return -EINVAL;
+
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return -ENODEV;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		if (unlikely(dvb_ringbuffer_free(&sbuff->raw_data) < len)) {
+			spin_unlock(&sbuff->raw_data.lock);
+			return -ENOSPC;
+		}
+
+		DVB_RINGBUFFER_PUSH(&sbuff->raw_data, len);
+		wake_up_all(&sbuff->raw_data.queue);
+	} else {
+		/* Linear buffer group */
+		struct mpq_streambuffer_buffer_desc *desc =
+			(struct mpq_streambuffer_buffer_desc *)
+			&sbuff->raw_data.data[sbuff->raw_data.pwrite];
+
+		if ((sbuff->pending_buffers_count == sbuff->buffers_num) ||
+			 ((desc->size - desc->write_ptr) < len)) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: No space available!\n",
+				__func__);
+			spin_unlock(&sbuff->raw_data.lock);
+			return -ENOSPC;
+		}
+		desc->write_ptr += len;
+	}
+
+	spin_unlock(&sbuff->raw_data.lock);
+	return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_write_deposit);
+
+
+ssize_t mpq_streambuffer_data_read(
+				struct mpq_streambuffer *sbuff,
+				u8 *buf, size_t len)
+{
+	ssize_t actual_len = 0;
+	u32 offset;
+
+	if ((sbuff == NULL) || (buf == NULL))
+		return -EINVAL;
+
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return -ENODEV;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		/*
+		 * Secure buffers are not permitted to be mapped into kernel
+		 * memory, and so buffer base address may be NULL
+		 */
+		if (sbuff->raw_data.data == NULL) {
+			spin_unlock(&sbuff->raw_data.lock);
+			return -EPERM;
+		}
+
+		offset = sbuff->raw_data.pread;
+		actual_len = dvb_ringbuffer_avail(&sbuff->raw_data);
+		if (actual_len < len)
+			len = actual_len;
+		if (len)
+			dvb_ringbuffer_read(&sbuff->raw_data, buf, len);
+
+		wake_up_all(&sbuff->raw_data.queue);
+	} else {
+		/* Linear buffer group */
+		struct mpq_streambuffer_buffer_desc *desc;
+
+		desc = (struct mpq_streambuffer_buffer_desc *)
+				&sbuff->raw_data.data[sbuff->raw_data.pread];
+
+		/*
+		 * Secure buffers are not permitted to be mapped into kernel
+		 * memory, and so buffer base address may be NULL
+		 */
+		if (desc->base == NULL) {
+			spin_unlock(&sbuff->raw_data.lock);
+			return -EPERM;
+		}
+
+		actual_len = (desc->write_ptr - desc->read_ptr);
+		if (actual_len < len)
+			len = actual_len;
+		memcpy(buf, desc->base + desc->read_ptr, len);
+		offset = desc->read_ptr;
+		desc->read_ptr += len;
+	}
+
+	spin_unlock(&sbuff->raw_data.lock);
+
+	if (sbuff->cb)
+		sbuff->cb(sbuff, offset, len, sbuff->cb_user_data);
+
+	return len;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_read);
+
+
+ssize_t mpq_streambuffer_data_read_user(
+		struct mpq_streambuffer *sbuff,
+		u8 __user *buf, size_t len)
+{
+	ssize_t actual_len = 0;
+	u32 offset;
+
+	if ((sbuff == NULL) || (buf == NULL))
+		return -EINVAL;
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV)
+		return -ENODEV;
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		/*
+		 * Secure buffers are not permitted to be mapped into kernel
+		 * memory, and so buffer base address may be NULL
+		 */
+		if (sbuff->raw_data.data == NULL)
+			return -EPERM;
+
+		offset = sbuff->raw_data.pread;
+		actual_len = dvb_ringbuffer_avail(&sbuff->raw_data);
+		if (actual_len < len)
+			len = actual_len;
+		if (len)
+			dvb_ringbuffer_read_user(&sbuff->raw_data, buf, len);
+		wake_up_all(&sbuff->raw_data.queue);
+	} else {
+		/* Linear buffer group */
+		struct mpq_streambuffer_buffer_desc *desc;
+
+		desc = (struct mpq_streambuffer_buffer_desc *)
+				&sbuff->raw_data.data[sbuff->raw_data.pread];
+
+		/*
+		 * Secure buffers are not permitted to be mapped into kernel
+		 * memory, and so buffer base address may be NULL
+		 */
+		if (desc->base == NULL)
+			return -EPERM;
+
+		actual_len = (desc->write_ptr - desc->read_ptr);
+		if (actual_len < len)
+			len = actual_len;
+		if (copy_to_user(buf, desc->base + desc->read_ptr, len))
+			return -EFAULT;
+
+		offset = desc->read_ptr;
+		desc->read_ptr += len;
+	}
+
+	if (sbuff->cb)
+		sbuff->cb(sbuff, offset, len, sbuff->cb_user_data);
+
+	return len;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_read_user);
+
+int mpq_streambuffer_data_read_dispose(
+			struct mpq_streambuffer *sbuff,
+			size_t len)
+{
+	u32 offset;
+
+	if (sbuff == NULL)
+		return -EINVAL;
+
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return -ENODEV;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		if (unlikely(dvb_ringbuffer_avail(&sbuff->raw_data) < len)) {
+			spin_unlock(&sbuff->raw_data.lock);
+			return -EINVAL;
+		}
+
+		offset = sbuff->raw_data.pread;
+		DVB_RINGBUFFER_SKIP(&sbuff->raw_data, len);
+		wake_up_all(&sbuff->raw_data.queue);
+	} else {
+		struct mpq_streambuffer_buffer_desc *desc;
+
+		desc = (struct mpq_streambuffer_buffer_desc *)
+				&sbuff->raw_data.data[sbuff->raw_data.pread];
+		offset = desc->read_ptr;
+
+		if ((desc->read_ptr + len) > desc->size)
+			desc->read_ptr = desc->size;
+		else
+			desc->read_ptr += len;
+	}
+
+	spin_unlock(&sbuff->raw_data.lock);
+
+	if (sbuff->cb)
+		sbuff->cb(sbuff, offset, len, sbuff->cb_user_data);
+
+	return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_read_dispose);
+
+
+int mpq_streambuffer_get_buffer_handle(
+	struct mpq_streambuffer *sbuff,
+	int read_buffer,
+	int *handle)
+{
+	struct mpq_streambuffer_buffer_desc *desc = NULL;
+
+	if ((sbuff == NULL) || (handle == NULL))
+		return -EINVAL;
+
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return -ENODEV;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		*handle = sbuff->buffers[0].handle;
+	} else {
+		if (read_buffer)
+			desc = (struct mpq_streambuffer_buffer_desc *)
+				&sbuff->raw_data.data[sbuff->raw_data.pread];
+		else
+			desc = (struct mpq_streambuffer_buffer_desc *)
+				&sbuff->raw_data.data[sbuff->raw_data.pwrite];
+		*handle = desc->handle;
+	}
+
+	spin_unlock(&sbuff->raw_data.lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_get_buffer_handle);
+
+
+int mpq_streambuffer_register_data_dispose(
+	struct mpq_streambuffer *sbuff,
+	mpq_streambuffer_dispose_cb cb_func,
+	void *user_data)
+{
+	if ((sbuff == NULL) || (cb_func == NULL))
+		return -EINVAL;
+
+	sbuff->cb = cb_func;
+	sbuff->cb_user_data = user_data;
+
+	return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_register_data_dispose);
+
+
+ssize_t mpq_streambuffer_data_free(
+	struct mpq_streambuffer *sbuff)
+{
+	struct mpq_streambuffer_buffer_desc *desc;
+
+	if (sbuff == NULL)
+		return -EINVAL;
+
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return -ENODEV;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return dvb_ringbuffer_free(&sbuff->raw_data);
+	}
+
+	if (sbuff->pending_buffers_count == sbuff->buffers_num) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return 0;
+	}
+
+	desc = (struct mpq_streambuffer_buffer_desc *)
+		&sbuff->raw_data.data[sbuff->raw_data.pwrite];
+
+	spin_unlock(&sbuff->raw_data.lock);
+
+	return desc->size - desc->write_ptr;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_free);
+
+
+ssize_t mpq_streambuffer_data_avail(
+	struct mpq_streambuffer *sbuff)
+{
+	struct mpq_streambuffer_buffer_desc *desc;
+
+	if (sbuff == NULL)
+		return -EINVAL;
+
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return -ENODEV;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		ssize_t avail = dvb_ringbuffer_avail(&sbuff->raw_data);
+
+		spin_unlock(&sbuff->raw_data.lock);
+		return avail;
+	}
+
+	desc = (struct mpq_streambuffer_buffer_desc *)
+		&sbuff->raw_data.data[sbuff->raw_data.pread];
+
+	spin_unlock(&sbuff->raw_data.lock);
+
+	return desc->write_ptr - desc->read_ptr;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_avail);
+
+int mpq_streambuffer_get_data_rw_offset(
+	struct mpq_streambuffer *sbuff,
+	u32 *read_offset,
+	u32 *write_offset)
+{
+	if (sbuff == NULL)
+		return -EINVAL;
+
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return -ENODEV;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		if (read_offset)
+			*read_offset = sbuff->raw_data.pread;
+		if (write_offset)
+			*write_offset = sbuff->raw_data.pwrite;
+	} else {
+		struct mpq_streambuffer_buffer_desc *desc;
+
+		if (read_offset) {
+			desc = (struct mpq_streambuffer_buffer_desc *)
+				&sbuff->raw_data.data[sbuff->raw_data.pread];
+			*read_offset = desc->read_ptr;
+		}
+		if (write_offset) {
+			desc = (struct mpq_streambuffer_buffer_desc *)
+				&sbuff->raw_data.data[sbuff->raw_data.pwrite];
+			*write_offset = desc->write_ptr;
+		}
+	}
+
+	spin_unlock(&sbuff->raw_data.lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_get_data_rw_offset);
+
+ssize_t mpq_streambuffer_metadata_free(struct mpq_streambuffer *sbuff)
+{
+	ssize_t free;
+
+	if (sbuff == NULL)
+		return -EINVAL;
+
+	spin_lock(&sbuff->packet_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->packet_data.error == -ENODEV) {
+		spin_unlock(&sbuff->packet_data.lock);
+		return -ENODEV;
+	}
+
+	free = dvb_ringbuffer_free(&sbuff->packet_data);
+
+	spin_unlock(&sbuff->packet_data.lock);
+
+	return free;
+}
+EXPORT_SYMBOL(mpq_streambuffer_metadata_free);
+
+int mpq_streambuffer_flush(struct mpq_streambuffer *sbuff)
+{
+	struct mpq_streambuffer_buffer_desc *desc;
+	size_t len;
+	int idx;
+	int ret = 0;
+
+	if (sbuff == NULL)
+		return -EINVAL;
+
+	spin_lock(&sbuff->packet_data.lock);
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* Check if buffer was released */
+	if (sbuff->packet_data.error == -ENODEV ||
+		sbuff->raw_data.error == -ENODEV) {
+		ret = -ENODEV;
+		goto end;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+		while (sbuff->pending_buffers_count) {
+			desc = (struct mpq_streambuffer_buffer_desc *)
+				&sbuff->raw_data.data[sbuff->raw_data.pread];
+			desc->write_ptr = 0;
+			desc->read_ptr = 0;
+			DVB_RINGBUFFER_SKIP(&sbuff->raw_data,
+				sizeof(struct mpq_streambuffer_buffer_desc));
+			sbuff->pending_buffers_count--;
+		}
+	else
+		dvb_ringbuffer_flush(&sbuff->raw_data);
+
+	/*
+	 * Dispose all packets (simply flushing is not enough since we want
+	 * the packets' status to move to disposed).
+	 */
+	do {
+		idx = dvb_ringbuffer_pkt_next(&sbuff->packet_data, -1, &len);
+		if (idx >= 0)
+			dvb_ringbuffer_pkt_dispose(&sbuff->packet_data, idx);
+	} while (idx >= 0);
+
+end:
+	spin_unlock(&sbuff->raw_data.lock);
+	spin_unlock(&sbuff->packet_data.lock);
+	return ret;
+}
+EXPORT_SYMBOL(mpq_streambuffer_flush);
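Putting the stream-buffer API together: a writer first deposits raw payload, then commits a packet header describing it; a reader walks packet headers, reads the payload, and disposes the packet. The round trip below is a sketch only — the packet header struct has more fields than shown here, and error handling is trimmed:

/* Hypothetical writer: raw data first, then the describing packet. */
static int example_write(struct mpq_streambuffer *sb, const u8 *buf, size_t len)
{
	struct mpq_streambuffer_packet_header hdr = {
		.raw_data_len = len,
		.user_data_len = 0,
	};
	ssize_t rc;

	rc = mpq_streambuffer_data_write(sb, buf, len);
	if (rc < 0)
		return rc;

	rc = mpq_streambuffer_pkt_write(sb, &hdr, NULL);
	return (rc < 0) ? (int)rc : 0;
}

/* Hypothetical reader: next packet, read its payload, dispose it. */
static int example_read(struct mpq_streambuffer *sb, u8 *buf, size_t len)
{
	struct mpq_streambuffer_packet_header hdr;
	size_t pktlen;
	ssize_t idx;

	idx = mpq_streambuffer_pkt_next(sb, -1, &pktlen);
	if (idx < 0)
		return (int)idx;

	if (mpq_streambuffer_pkt_read(sb, idx, &hdr, NULL) < 0)
		return -EINVAL;

	if (hdr.raw_data_len)
		mpq_streambuffer_data_read(sb, buf,
			min_t(size_t, len, hdr.raw_data_len));

	return mpq_streambuffer_pkt_dispose(sb, idx, 0);
}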
diff --git a/drivers/media/platform/msm/dvb/demux/Kconfig b/drivers/media/platform/msm/dvb/demux/Kconfig
new file mode 100644
index 0000000..b928212
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/Kconfig
@@ -0,0 +1,47 @@
+menuconfig DVB_MPQ_DEMUX
+	tristate "DVB Demux Device"
+	depends on DVB_MPQ && ION && ION_MSM
+	default n
+
+	help
+	  Support for the Qualcomm Technologies Inc based DVB demux device.
+	  Say Y if you own such a device and want to use it.
+	  The demux device is used for stream playback either
+	  from the TSIF interface or from the DVR interface.
+
+config DVB_MPQ_NUM_DMX_DEVICES
+	int "Number of demux devices"
+	depends on DVB_MPQ_DEMUX
+	default 4
+	range 1 255
+
+	help
+	  Configure the number of demux devices.
+	  This depends on your use cases for maximum concurrent stream playback.
+
+config DVB_MPQ_MEDIA_BOX_DEMUX
+	bool "Media box demux support"
+	depends on DVB_MPQ_DEMUX
+	default n
+	help
+		Use this option if your HW is a Qualcomm Technologies Inc
+		media box and demux support is required on that media box.
+		Currently this config is used to optimize demux video
+		events.
+
+config DVB_MPQ_TSPP1
+	bool "TSPPv1 plugin"
+	depends on DVB_MPQ_DEMUX && TSPP
+	help
+		Use this option if your HW has
+		Transport Stream Packet Processor (TSPP) version 1 support.
+		The demux may take advantage of HW capabilities to perform
+		some tasks in HW instead of SW.
+
+config DVB_MPQ_SW
+	bool "Software plugin"
+	depends on DVB_MPQ_DEMUX && !DVB_MPQ_TSPP1
+	help
+		Use this option if your HW does not have any
+		TSPP hardware support. All demux tasks will be
+		performed in SW.
diff --git a/drivers/media/platform/msm/dvb/demux/Makefile b/drivers/media/platform/msm/dvb/demux/Makefile
new file mode 100644
index 0000000..c08fa85
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/Makefile
@@ -0,0 +1,14 @@
+
+ccflags-y += -Idrivers/media/dvb-core/
+ccflags-y += -Idrivers/media/platform/msm/dvb/include/
+ccflags-y += -Idrivers/misc/
+
+obj-$(CONFIG_DVB_MPQ_DEMUX) += mpq-dmx-hw-plugin.o
+
+mpq-dmx-hw-plugin-y := mpq_dmx_plugin_common.o
+
+mpq-dmx-hw-plugin-$(CONFIG_QSEECOM) += mpq_sdmx.o
+
+mpq-dmx-hw-plugin-$(CONFIG_DVB_MPQ_TSPP1) += mpq_dmx_plugin_tspp_v1.o
+
+mpq-dmx-hw-plugin-$(CONFIG_DVB_MPQ_SW) += mpq_dmx_plugin_sw.o
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
new file mode 100644
index 0000000..f16c1ba
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -0,0 +1,6712 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/file.h>
+#include <linux/scatterlist.h>
+#include "mpq_dvb_debug.h"
+#include "mpq_dmx_plugin_common.h"
+#include "mpq_sdmx.h"
+
+#define SDMX_MAJOR_VERSION_MATCH	(8)
+
+/* Length of mandatory fields that must exist in header of video PES */
+#define PES_MANDATORY_FIELDS_LEN			9
+
+/* Index of the first byte of the STC appended after the 188-byte TS packet */
+#define STC_LOCATION_IDX			188
+
+#define MAX_PES_LENGTH	(SZ_64K)
+
+#define MAX_TS_PACKETS_FOR_SDMX_PROCESS	(500)
+
+/*
+ * PES header length field is 8 bits so PES header length after this field
+ * can be up to 256 bytes.
+ * Preceding fields of the PES header total to 9 bytes
+ * (including the PES header length field).
+ */
+#define MAX_PES_HEADER_LENGTH	(256 + PES_MANDATORY_FIELDS_LEN)
+
+/* TS packet with adaptation field only can take up the entire TSP */
+#define MAX_TSP_ADAPTATION_LENGTH (184)
+
+#define MAX_SDMX_METADATA_LENGTH	\
+	(TS_PACKET_HEADER_LENGTH +	\
+	MAX_TSP_ADAPTATION_LENGTH +	\
+	MAX_PES_HEADER_LENGTH)
+
+#define SDMX_METADATA_BUFFER_SIZE	(64*1024)
+#define SDMX_SECTION_BUFFER_SIZE	(64*1024)
+#define SDMX_PCR_BUFFER_SIZE		(64*1024)
+
+/* Number of demux devices, defaults to the kernel configuration value */
+static int mpq_demux_device_num = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+module_param(mpq_demux_device_num, int, 0444);
+
+/* ION heap IDs used for allocating video output buffer */
+static int video_secure_ion_heap = ION_CP_MM_HEAP_ID;
+module_param(video_secure_ion_heap, int, 0644);
+MODULE_PARM_DESC(video_secure_ion_heap, "ION heap for secure video buffer allocation");
+
+static int video_nonsecure_ion_heap = ION_IOMMU_HEAP_ID;
+module_param(video_nonsecure_ion_heap, int, 0644);
+MODULE_PARM_DESC(video_nonsecure_ion_heap, "ION heap for non-secure video buffer allocation");
+
+/* ION heap ID used for allocating the audio output buffer */
+static int audio_nonsecure_ion_heap = ION_IOMMU_HEAP_ID;
+module_param(audio_nonsecure_ion_heap, int, 0644);
+MODULE_PARM_DESC(audio_nonsecure_ion_heap, "ION heap for non-secure audio buffer allocation");
+
+/* Value of TS packet scramble bits field for even key */
+static int mpq_sdmx_scramble_even = 0x2;
+module_param(mpq_sdmx_scramble_even, int, 0644);
+
+/* Value of TS packet scramble bits field for odd key */
+static int mpq_sdmx_scramble_odd = 0x3;
+module_param(mpq_sdmx_scramble_odd, int, 0644);
+
+/*
+ * Default action (discard or pass) taken when scramble bit is not one of the
+ * pass-through / odd / even values.
+ * When set packets will be discarded, otherwise passed through.
+ */
+static int mpq_sdmx_scramble_default_discard = 1;
+module_param(mpq_sdmx_scramble_default_discard, int, 0644);
+
+/* Max number of TS packets allowed as input for a single sdmx process */
+static int mpq_sdmx_proc_limit = MAX_TS_PACKETS_FOR_SDMX_PROCESS;
+module_param(mpq_sdmx_proc_limit, int, 0644);
+
+/* Debug flag for secure demux process */
+static int mpq_sdmx_debug;
+module_param(mpq_sdmx_debug, int, 0644);
+
+/*
+ * Indicates whether the demux should search for frame boundaries
+ * and notify on video packets on frame-basis or whether to provide
+ * only video PES packet payloads as-is.
+ */
+static int video_framing = 1;
+module_param(video_framing, int, 0644);
+
+/* TSIF operation mode: 1 = TSIF_MODE_1,  2 = TSIF_MODE_2, 3 = TSIF_LOOPBACK */
+static int tsif_mode = 2;
+module_param(tsif_mode, int, 0644);
+
+/* Invert TSIF clock signal */
+static int clock_inv;
+module_param(clock_inv, int, 0644);
+
+/* TSIF Timestamp source: 0 = TSIF Clock Reference, 1 = LPASS time counter */
+enum tsif_tts_source {
+	TSIF_TTS_TCR = 0,       /* Time stamps from TCR counter */
+	TSIF_TTS_LPASS_TIMER    /* Time stamps from AV/Qtimer Timer  */
+};
+
+/* Store all mpq feeds corresponding to 4 TS programs in a Transport Stream */
+static struct mpq_feed *store_mpq_audio_feed[CONFIG_DVB_MPQ_NUM_DMX_DEVICES] = {
+	NULL, NULL, NULL, NULL};
+static struct mpq_feed *store_mpq_video_feed[CONFIG_DVB_MPQ_NUM_DMX_DEVICES] = {
+	NULL, NULL, NULL, NULL};
+static int non_predicted_video_frame;
+/* trigger video ES frame events on MPEG2 B frames and H264 non-IDR frames */
+#ifdef CONFIG_DVB_MPQ_MEDIA_BOX_DEMUX
+static int video_b_frame_events = 1;
+#else
+static int video_b_frame_events;
+#endif
+
+/* Global data-structure for managing demux devices */
+static struct
+{
+	/* ION demux client used for memory allocation */
+	struct ion_client *ion_client;
+
+	/* demux devices array */
+	struct mpq_demux *devices;
+
+	/* Stream buffers objects used for tunneling to decoders */
+	struct mpq_streambuffer
+		decoder_buffers[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES];
+
+	/* Indicates whether secure demux TZ application is available */
+	int secure_demux_app_loaded;
+} mpq_dmx_info;
+
+
+int mpq_dmx_get_param_scramble_odd(void)
+{
+	return mpq_sdmx_scramble_odd;
+}
+
+int mpq_dmx_get_param_scramble_even(void)
+{
+	return mpq_sdmx_scramble_even;
+}
+
+int mpq_dmx_get_param_scramble_default_discard(void)
+{
+	return mpq_sdmx_scramble_default_discard;
+}
+
+int mpq_dmx_get_param_tsif_mode(void)
+{
+	return tsif_mode;
+}
+
+int mpq_dmx_get_param_clock_inv(void)
+{
+	return clock_inv;
+}
+
+struct mpq_streambuffer *consumer_video_streambuffer(int dmx_ts_pes_video)
+{
+	struct mpq_streambuffer *streambuffer = NULL;
+	struct mpq_video_feed_info *feed_data = NULL;
+
+	switch (dmx_ts_pes_video) {
+	case DMX_PES_VIDEO0:
+		if (store_mpq_video_feed[0] != NULL) {
+			feed_data = &store_mpq_video_feed[0]->video_info;
+			feed_data->stream_interface =
+				MPQ_ADAPTER_VIDEO0_STREAM_IF;
+		}
+		break;
+	case DMX_PES_VIDEO1:
+		if (store_mpq_video_feed[1] != NULL) {
+			feed_data = &store_mpq_video_feed[1]->video_info;
+			feed_data->stream_interface =
+				MPQ_ADAPTER_VIDEO1_STREAM_IF;
+		}
+		break;
+	case DMX_PES_VIDEO2:
+		if (store_mpq_video_feed[2] != NULL) {
+			feed_data = &store_mpq_video_feed[2]->video_info;
+			feed_data->stream_interface =
+				MPQ_ADAPTER_VIDEO2_STREAM_IF;
+		}
+		break;
+	case DMX_PES_VIDEO3:
+		if (store_mpq_video_feed[3] != NULL) {
+			feed_data = &store_mpq_video_feed[3]->video_info;
+			feed_data->stream_interface =
+				MPQ_ADAPTER_VIDEO3_STREAM_IF;
+		}
+		break;
+	}
+
+	if (feed_data != NULL)
+		mpq_adapter_get_stream_if(feed_data->stream_interface,
+					  &streambuffer);
+
+	return streambuffer;
+}
+EXPORT_SYMBOL(consumer_video_streambuffer);
+
+struct mpq_streambuffer *consumer_audio_streambuffer(int dmx_ts_pes_audio)
+{
+	struct mpq_streambuffer *streambuffer = NULL;
+	struct mpq_audio_feed_info *feed_data = NULL;
+
+	switch (dmx_ts_pes_audio) {
+	case DMX_PES_AUDIO0:
+		if (store_mpq_audio_feed[0] != NULL) {
+			feed_data = &store_mpq_audio_feed[0]->audio_info;
+			feed_data->stream_interface =
+				MPQ_ADAPTER_AUDIO0_STREAM_IF;
+		}
+		break;
+	case DMX_PES_AUDIO1:
+		if (store_mpq_audio_feed[1] != NULL) {
+			feed_data = &store_mpq_audio_feed[1]->audio_info;
+			feed_data->stream_interface =
+				MPQ_ADAPTER_AUDIO1_STREAM_IF;
+		}
+		break;
+	case DMX_PES_AUDIO2:
+		if (store_mpq_audio_feed[2] != NULL) {
+			feed_data = &store_mpq_audio_feed[2]->audio_info;
+			feed_data->stream_interface =
+				MPQ_ADAPTER_AUDIO2_STREAM_IF;
+		}
+		break;
+	case DMX_PES_AUDIO3:
+		if (store_mpq_audio_feed[3] != NULL) {
+			feed_data = &store_mpq_audio_feed[3]->audio_info;
+			feed_data->stream_interface =
+				MPQ_ADAPTER_AUDIO3_STREAM_IF;
+		}
+		break;
+	}
+
+	if (feed_data != NULL)
+		mpq_adapter_get_stream_if(feed_data->stream_interface,
+					  &streambuffer);
+
+	return streambuffer;
+}
+EXPORT_SYMBOL(consumer_audio_streambuffer);
+
+
+
+/* Check that PES header is valid and that it is a video PES */
+static int mpq_dmx_is_valid_video_pes(struct pes_packet_header *pes_header)
+{
+	/* start-code valid? */
+	if ((pes_header->packet_start_code_prefix_1 != 0) ||
+		(pes_header->packet_start_code_prefix_2 != 0) ||
+		(pes_header->packet_start_code_prefix_3 != 1))
+		return -EINVAL;
+
+	/* stream_id is video? */
+	if ((pes_header->stream_id & 0xF0) != 0xE0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int mpq_dmx_is_valid_audio_pes(struct pes_packet_header *pes_header)
+{
+	/* start-code valid? */
+	if ((pes_header->packet_start_code_prefix_1 != 0) ||
+	    (pes_header->packet_start_code_prefix_2 != 0) ||
+	    (pes_header->packet_start_code_prefix_3 != 1))
+		return -EINVAL;
+
+	/* Note: AC3 stream ID = 0xBD */
+	if (pes_header->stream_id == 0xBD)
+		return 0;
+
+	/* stream_id is audio? */ /* 110x xxxx = Audio Stream IDs */
+	if ((pes_header->stream_id & 0xE0) != 0xC0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Check if a framing pattern is a video frame pattern or a header pattern */
+static inline int mpq_dmx_is_video_frame(
+				enum dmx_video_codec codec,
+				u64 pattern_type)
+{
+	switch (codec) {
+	case DMX_VIDEO_CODEC_MPEG2:
+		if (video_b_frame_events == 1)
+			if (pattern_type == DMX_IDX_MPEG_B_FRAME_START)
+				non_predicted_video_frame = 1;
+
+		if ((pattern_type == DMX_IDX_MPEG_I_FRAME_START) ||
+			(pattern_type == DMX_IDX_MPEG_P_FRAME_START) ||
+			(pattern_type == DMX_IDX_MPEG_B_FRAME_START))
+			return 1;
+		return 0;
+
+	case DMX_VIDEO_CODEC_H264:
+		if (video_b_frame_events == 1) {
+			if (pattern_type == DMX_IDX_H264_NON_IDR_BSLICE_START)
+				non_predicted_video_frame = 1;
+
+			if ((pattern_type == DMX_IDX_H264_IDR_ISLICE_START) ||
+			    (pattern_type ==
+				DMX_IDX_H264_NON_IDR_PSLICE_START) ||
+			    (pattern_type == DMX_IDX_H264_NON_IDR_BSLICE_START))
+				return 1;
+		} else {
+			if ((pattern_type == DMX_IDX_H264_IDR_START) ||
+			    (pattern_type == DMX_IDX_H264_NON_IDR_START))
+				return 1;
+		}
+		return 0;
+
+	case DMX_VIDEO_CODEC_VC1:
+		if (pattern_type == DMX_IDX_VC1_FRAME_START)
+			return 1;
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * mpq_dmx_get_pattern_params - Returns the required video
+ * patterns for framing operation based on video codec.
+ *
+ * @video_codec: the video codec.
+ * @patterns: a pointer to the pattern parameters, updated by this function.
+ * @patterns_num: number of patterns, updated by this function.
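+ *
+ * Return 0 on success, -EINVAL if the codec is not supported.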
+ */
+static inline int mpq_dmx_get_pattern_params(
+	enum dmx_video_codec video_codec,
+	const struct dvb_dmx_video_patterns
+		 *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM],
+	int *patterns_num)
+{
+	switch (video_codec) {
+	case DMX_VIDEO_CODEC_MPEG2:
+		patterns[0] = dvb_dmx_get_pattern(DMX_IDX_MPEG_SEQ_HEADER);
+		patterns[1] = dvb_dmx_get_pattern(DMX_IDX_MPEG_GOP);
+		patterns[2] = dvb_dmx_get_pattern(DMX_IDX_MPEG_I_FRAME_START);
+		patterns[3] = dvb_dmx_get_pattern(DMX_IDX_MPEG_P_FRAME_START);
+		patterns[4] = dvb_dmx_get_pattern(DMX_IDX_MPEG_B_FRAME_START);
+		*patterns_num = 5;
+		break;
+
+	case DMX_VIDEO_CODEC_H264:
+		patterns[0] = dvb_dmx_get_pattern(DMX_IDX_H264_SPS);
+		patterns[1] = dvb_dmx_get_pattern(DMX_IDX_H264_PPS);
+		if (video_b_frame_events != 1) {
+			patterns[2] = dvb_dmx_get_pattern
+				(DMX_IDX_H264_IDR_START);
+			patterns[3] = dvb_dmx_get_pattern
+				(DMX_IDX_H264_NON_IDR_START);
+			patterns[4] = dvb_dmx_get_pattern(DMX_IDX_H264_SEI);
+			*patterns_num = 5;
+		} else {
+			patterns[2] = dvb_dmx_get_pattern
+				(DMX_IDX_H264_IDR_ISLICE_START);
+			patterns[3] = dvb_dmx_get_pattern
+				(DMX_IDX_H264_NON_IDR_PSLICE_START);
+			patterns[4] = dvb_dmx_get_pattern
+				(DMX_IDX_H264_NON_IDR_BSLICE_START);
+			patterns[5] = dvb_dmx_get_pattern(DMX_IDX_H264_SEI);
+			*patterns_num = 6;
+		}
+		break;
+
+	case DMX_VIDEO_CODEC_VC1:
+		patterns[0] = dvb_dmx_get_pattern(DMX_IDX_VC1_SEQ_HEADER);
+		patterns[1] = dvb_dmx_get_pattern(DMX_IDX_VC1_ENTRY_POINT);
+		patterns[2] = dvb_dmx_get_pattern(DMX_IDX_VC1_FRAME_START);
+		*patterns_num = 3;
+		break;
+
+	default:
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		*patterns_num = 0;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * mpq_dmx_update_decoder_stat -
+ * Update decoder output statistics in debug-fs.
+ *
+ * @mpq_feed: decoder feed object
+ */
+void mpq_dmx_update_decoder_stat(struct mpq_feed *mpq_feed)
+{
+	ktime_t curr_time;
+	u32 delta_time_ms;
+	struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+	enum mpq_adapter_stream_if idx;
+
+	if (!dvb_dmx_is_video_feed(mpq_feed->dvb_demux_feed) &&
+	    !dvb_dmx_is_audio_feed(mpq_feed->dvb_demux_feed))
+		return;
+
+	if (dvb_dmx_is_video_feed(mpq_feed->dvb_demux_feed) &&
+	    mpq_feed->video_info.stream_interface <=
+			MPQ_ADAPTER_VIDEO3_STREAM_IF)
+		idx = mpq_feed->video_info.stream_interface;
+	else if (dvb_dmx_is_audio_feed(mpq_feed->dvb_demux_feed) &&
+		mpq_feed->audio_info.stream_interface <=
+			MPQ_ADAPTER_AUDIO3_STREAM_IF)
+		idx = mpq_feed->audio_info.stream_interface;
+	else
+		return;
+
+	curr_time = ktime_get();
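+	/* First output for this interface: just record its time-stamp */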
+	if (unlikely(!mpq_demux->decoder_stat[idx].out_count)) {
+		mpq_demux->decoder_stat[idx].out_last_time = curr_time;
+		mpq_demux->decoder_stat[idx].out_count++;
+		return;
+	}
+
+	/* calculate time-delta between frames */
+	delta_time_ms = mpq_dmx_calc_time_delta(curr_time,
+		mpq_demux->decoder_stat[idx].out_last_time);
+
+	mpq_demux->decoder_stat[idx].out_interval_sum += delta_time_ms;
+
+	mpq_demux->decoder_stat[idx].out_interval_average =
+	  mpq_demux->decoder_stat[idx].out_interval_sum /
+	  mpq_demux->decoder_stat[idx].out_count;
+
+	if (delta_time_ms > mpq_demux->decoder_stat[idx].out_interval_max)
+		mpq_demux->decoder_stat[idx].out_interval_max = delta_time_ms;
+
+	mpq_demux->decoder_stat[idx].out_last_time = curr_time;
+	mpq_demux->decoder_stat[idx].out_count++;
+}
+
+/*
+ * mpq_dmx_update_sdmx_stat -
+ * Update SDMX statistics in debug-fs.
+ *
+ * @mpq_demux: mpq_demux object
+ * @bytes_processed: number of bytes processed by sdmx
+ * @process_start_time: time before sdmx process was triggered
+ * @process_end_time: time after sdmx process finished
+ */
+static inline void mpq_dmx_update_sdmx_stat(struct mpq_demux *mpq_demux,
+		u32 bytes_processed, ktime_t process_start_time,
+		ktime_t process_end_time)
+{
+	u32 packets_num;
+	u32 process_time;
+
+	mpq_demux->sdmx_process_count++;
+	packets_num = bytes_processed / mpq_demux->demux.ts_packet_size;
+	mpq_demux->sdmx_process_packets_sum += packets_num;
+	mpq_demux->sdmx_process_packets_average =
+		mpq_demux->sdmx_process_packets_sum /
+		mpq_demux->sdmx_process_count;
+
+	process_time =
+		mpq_dmx_calc_time_delta(process_end_time, process_start_time);
+
+	mpq_demux->sdmx_process_time_sum += process_time;
+	mpq_demux->sdmx_process_time_average =
+		mpq_demux->sdmx_process_time_sum /
+		mpq_demux->sdmx_process_count;
+
+	if ((mpq_demux->sdmx_process_count == 1) ||
+		(packets_num < mpq_demux->sdmx_process_packets_min))
+		mpq_demux->sdmx_process_packets_min = packets_num;
+
+	if ((mpq_demux->sdmx_process_count == 1) ||
+		(process_time > mpq_demux->sdmx_process_time_max))
+		mpq_demux->sdmx_process_time_max = process_time;
+}
+
+static int mpq_sdmx_log_level_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t mpq_sdmx_log_level_read(struct file *fp,
+	char __user *user_buffer, size_t count, loff_t *position)
+{
+	char user_str[16];
+	struct mpq_demux *mpq_demux = fp->private_data;
+	int ret;
+
+	ret = scnprintf(user_str, 16, "%d", mpq_demux->sdmx_log_level);
+	ret = simple_read_from_buffer(user_buffer, count, position,
+		user_str, ret+1);
+
+	return ret;
+}
+
+static ssize_t mpq_sdmx_log_level_write(struct file *fp,
+	const char __user *user_buffer, size_t count, loff_t *position)
+{
+	char user_str[16];
+	int ret;
+	int ret_count;
+	int level;
+	struct mpq_demux *mpq_demux = fp->private_data;
+
+	if (count >= 16)
+		return -EINVAL;
+
+	ret_count = simple_write_to_buffer(user_str, 16, position, user_buffer,
+		count);
+	if (ret_count < 0)
+		return ret_count;
+
+	ret = kstrtoint(user_str, 0, &level);
+	if (ret)
+		return ret;
+
+	if (level < SDMX_LOG_NO_PRINT || level > SDMX_LOG_VERBOSE)
+		return -EINVAL;
+
+	mutex_lock(&mpq_demux->mutex);
+	mpq_demux->sdmx_log_level = level;
+	if (mpq_demux->sdmx_session_handle != SDMX_INVALID_SESSION_HANDLE) {
+		ret = sdmx_set_log_level(mpq_demux->sdmx_session_handle,
+			mpq_demux->sdmx_log_level);
+		if (ret) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: Could not set sdmx log level. ret = %d\n",
+				__func__, ret);
+			mutex_unlock(&mpq_demux->mutex);
+			return -EINVAL;
+		}
+	}
+
+	mutex_unlock(&mpq_demux->mutex);
+	return ret_count;
+}
+
+static const struct file_operations sdmx_debug_fops = {
+	.open = mpq_sdmx_log_level_open,
+	.read = mpq_sdmx_log_level_read,
+	.write = mpq_sdmx_log_level_write,
+	.owner = THIS_MODULE,
+};
+
+/* Extend dvb-demux debugfs with common plug-in entries */
+void mpq_dmx_init_debugfs_entries(struct mpq_demux *mpq_demux)
+{
+	int i;
+	char file_name[50];
+	struct dentry *debugfs_decoder_dir;
+
+	/*
+	 * Extend dvb-demux debugfs with HW statistics.
+	 * Note that destruction of debugfs directory is done
+	 * when dvb-demux is terminated.
+	 */
+	mpq_demux->hw_notification_count = 0;
+	mpq_demux->hw_notification_interval = 0;
+	mpq_demux->hw_notification_size = 0;
+	mpq_demux->hw_notification_min_size = 0xFFFFFFFF;
+
+	if (mpq_demux->demux.dmx.debugfs_demux_dir == NULL)
+		return;
+
+	debugfs_create_u32(
+		"hw_notification_interval",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->hw_notification_interval);
+
+	debugfs_create_u32(
+		"hw_notification_min_interval",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->hw_notification_min_interval);
+
+	debugfs_create_u32(
+		"hw_notification_count",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->hw_notification_count);
+
+	debugfs_create_u32(
+		"hw_notification_size",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->hw_notification_size);
+
+	debugfs_create_u32(
+		"hw_notification_min_size",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->hw_notification_min_size);
+
+	debugfs_decoder_dir = debugfs_create_dir("decoder",
+		mpq_demux->demux.dmx.debugfs_demux_dir);
+
+	for (i = 0;
+		 debugfs_decoder_dir &&
+		 (i < MPQ_ADAPTER_MAX_NUM_OF_INTERFACES);
+		 i++) {
+		snprintf(file_name, 50, "decoder%d_drop_count", i);
+		debugfs_create_u32(
+			file_name,
+			0444,
+			debugfs_decoder_dir,
+			&mpq_demux->decoder_stat[i].drop_count);
+
+		snprintf(file_name, 50, "decoder%d_out_count", i);
+		debugfs_create_u32(
+			file_name,
+			0444,
+			debugfs_decoder_dir,
+			&mpq_demux->decoder_stat[i].out_count);
+
+		snprintf(file_name, 50, "decoder%d_out_interval_sum", i);
+		debugfs_create_u32(
+			file_name,
+			0444,
+			debugfs_decoder_dir,
+			&mpq_demux->decoder_stat[i].out_interval_sum);
+
+		snprintf(file_name, 50, "decoder%d_out_interval_average", i);
+		debugfs_create_u32(
+			file_name,
+			0444,
+			debugfs_decoder_dir,
+			&mpq_demux->decoder_stat[i].out_interval_average);
+
+		snprintf(file_name, 50, "decoder%d_out_interval_max", i);
+		debugfs_create_u32(
+			file_name,
+			0444,
+			debugfs_decoder_dir,
+			&mpq_demux->decoder_stat[i].out_interval_max);
+
+		snprintf(file_name, 50, "decoder%d_ts_errors", i);
+		debugfs_create_u32(
+			file_name,
+			0444,
+			debugfs_decoder_dir,
+			&mpq_demux->decoder_stat[i].ts_errors);
+
+		snprintf(file_name, 50, "decoder%d_cc_errors", i);
+		debugfs_create_u32(
+			file_name,
+			0444,
+			debugfs_decoder_dir,
+			&mpq_demux->decoder_stat[i].cc_errors);
+	}
+
+	debugfs_create_u32(
+		"sdmx_process_count",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->sdmx_process_count);
+
+	debugfs_create_u32(
+		"sdmx_process_time_sum",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->sdmx_process_time_sum);
+
+	debugfs_create_u32(
+		"sdmx_process_time_average",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->sdmx_process_time_average);
+
+	debugfs_create_u32(
+		"sdmx_process_time_max",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->sdmx_process_time_max);
+
+	debugfs_create_u32(
+		"sdmx_process_packets_sum",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->sdmx_process_packets_sum);
+
+	debugfs_create_u32(
+		"sdmx_process_packets_average",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->sdmx_process_packets_average);
+
+	debugfs_create_u32(
+		"sdmx_process_packets_min",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->sdmx_process_packets_min);
+
+	debugfs_create_file("sdmx_log_level",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		mpq_demux,
+		&sdmx_debug_fops);
+}
+
+/* Update dvb-demux debugfs with HW notification statistics */
+void mpq_dmx_update_hw_statistics(struct mpq_demux *mpq_demux)
+{
+	ktime_t curr_time;
+	u32 delta_time_ms;
+
+	curr_time = ktime_get();
+	if (likely(mpq_demux->hw_notification_count)) {
+		/* calculate time-delta between notifications */
+		delta_time_ms = mpq_dmx_calc_time_delta(curr_time,
+			mpq_demux->last_notification_time);
+
+		mpq_demux->hw_notification_interval = delta_time_ms;
+
+		if ((mpq_demux->hw_notification_count == 1) ||
+			(mpq_demux->hw_notification_interval &&
+			 mpq_demux->hw_notification_interval <
+				mpq_demux->hw_notification_min_interval))
+			mpq_demux->hw_notification_min_interval =
+				mpq_demux->hw_notification_interval;
+	}
+
+	mpq_demux->hw_notification_count++;
+	mpq_demux->last_notification_time = curr_time;
+}
+
+static void mpq_sdmx_check_app_loaded(void)
+{
+	int session;
+	u32 version;
+	int ret;
+
+	ret = sdmx_open_session(&session);
+	if (ret != SDMX_SUCCESS) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: Could not initialize session with SDMX. ret = %d\n",
+			__func__, ret);
+		mpq_dmx_info.secure_demux_app_loaded = 0;
+		return;
+	}
+
+	/* Check proper sdmx major version */
+	ret = sdmx_get_version(session, &version);
+	if (ret != SDMX_SUCCESS) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: Could not get sdmx version. ret = %d\n",
+			__func__, ret);
+	} else {
+		if ((version >> 8) != SDMX_MAJOR_VERSION_MATCH)
+			MPQ_DVB_ERR_PRINT(
+				"%s: sdmx major version does not match. expected=%d, actual=%d\n",
+				__func__, SDMX_MAJOR_VERSION_MATCH,
+				(version >> 8));
+		else
+			MPQ_DVB_DBG_PRINT(
+				"%s: sdmx major version is ok = %d\n",
+				__func__, SDMX_MAJOR_VERSION_MATCH);
+	}
+
+	mpq_dmx_info.secure_demux_app_loaded = 1;
+	sdmx_close_session(session);
+}
+
+int mpq_dmx_plugin_init(mpq_dmx_init dmx_init_func)
+{
+	int i;
+	int j;
+	int result;
+	struct mpq_demux *mpq_demux;
+	struct dvb_adapter *mpq_adapter;
+	struct mpq_feed *feed;
+
+	MPQ_DVB_DBG_PRINT("%s executed, device num %d\n",
+					  __func__,
+					  mpq_demux_device_num);
+
+	mpq_adapter = mpq_adapter_get();
+
+	if (mpq_adapter == NULL) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_adapter is not valid\n",
+			__func__);
+		result = -EPERM;
+		goto init_failed;
+	}
+
+	if (mpq_demux_device_num == 0) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_demux_device_num set to 0\n",
+			__func__);
+
+		result = -EPERM;
+		goto init_failed;
+	}
+
+	mpq_dmx_info.devices = NULL;
+	mpq_dmx_info.ion_client = NULL;
+
+	mpq_dmx_info.secure_demux_app_loaded = 0;
+
+	/* Allocate memory for all MPQ devices */
+	mpq_dmx_info.devices =
+		vzalloc(mpq_demux_device_num*sizeof(struct mpq_demux));
+
+	if (!mpq_dmx_info.devices) {
+		MPQ_DVB_ERR_PRINT(
+				"%s: failed to allocate devices memory\n",
+				__func__);
+
+		result = -ENOMEM;
+		goto init_failed;
+	}
+
+	/*
+	 * Create a new ION client used by demux to allocate memory
+	 * for decoder's buffers.
+	 */
+	mpq_dmx_info.ion_client =
+		msm_ion_client_create("demux_client");
+	if (IS_ERR_OR_NULL(mpq_dmx_info.ion_client)) {
+		MPQ_DVB_ERR_PRINT(
+				"%s: msm_ion_client_create\n",
+				__func__);
+
+		result = PTR_ERR(mpq_dmx_info.ion_client);
+		if (!result)
+			result = -ENOMEM;
+		mpq_dmx_info.ion_client = NULL;
+		goto init_failed_free_demux_devices;
+	}
+
+	/* Initialize and register all demux devices to the system */
+	for (i = 0; i < mpq_demux_device_num; i++) {
+		mpq_demux = mpq_dmx_info.devices+i;
+		mpq_demux->idx = i;
+
+		/* initialize demux source to memory by default */
+		mpq_demux->source = DMX_SOURCE_DVR0 + i;
+
+		/*
+		 * Give the plugin a pointer to the ION client so
+		 * that it can allocate memory from ION if needed.
+		 */
+		mpq_demux->ion_client = mpq_dmx_info.ion_client;
+
+		mutex_init(&mpq_demux->mutex);
+
+		mpq_demux->num_secure_feeds = 0;
+		mpq_demux->num_active_feeds = 0;
+		mpq_demux->sdmx_filter_count = 0;
+		mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
+		mpq_demux->sdmx_eos = 0;
+		mpq_demux->sdmx_log_level = SDMX_LOG_NO_PRINT;
+		mpq_demux->ts_packet_timestamp_source = 0;
+
+		if (mpq_demux->demux.feednum > MPQ_MAX_DMX_FILES) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: err - actual feednum (%d) larger than max, enlarge MPQ_MAX_DMX_FILES!\n",
+				__func__,
+				mpq_demux->demux.feednum);
+			result = -EINVAL;
+			goto init_failed_free_demux_devices;
+		}
+
+		/* Initialize private feed info */
+		for (j = 0; j < MPQ_MAX_DMX_FILES; j++) {
+			feed = &mpq_demux->feeds[j];
+			memset(feed, 0, sizeof(*feed));
+			feed->sdmx_filter_handle = SDMX_INVALID_FILTER_HANDLE;
+			feed->mpq_demux = mpq_demux;
+			feed->session_id = 0;
+		}
+
+		/*
+		 * mpq_demux_plugin_hw_init should be implemented
+		 * by the specific plugin
+		 */
+		result = dmx_init_func(mpq_adapter, mpq_demux);
+		if (result < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: dmx_init_func (errno=%d)\n",
+				__func__,
+				result);
+
+			goto init_failed_free_demux_devices;
+		}
+
+		mpq_demux->is_initialized = 1;
+
+		/*
+		 * dvb-demux is now initialized,
+		 * update back-pointers of private feeds
+		 */
+		for (j = 0; j < MPQ_MAX_DMX_FILES; j++) {
+			feed = &mpq_demux->feeds[j];
+			feed->dvb_demux_feed = &mpq_demux->demux.feed[j];
+			mpq_demux->demux.feed[j].priv = feed;
+		}
+
+		/*
+		 * Add capability of receiving input from memory.
+		 * Every demux in our system may be connected to memory input,
+		 * or any live input.
+		 */
+		mpq_demux->fe_memory.source = DMX_MEMORY_FE;
+		result =
+			mpq_demux->demux.dmx.add_frontend(
+					&mpq_demux->demux.dmx,
+					&mpq_demux->fe_memory);
+
+		if (result < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: add_frontend (mem) failed (errno=%d)\n",
+				__func__,
+				result);
+
+			goto init_failed_free_demux_devices;
+		}
+	}
+
+	return 0;
+
+init_failed_free_demux_devices:
+	mpq_dmx_plugin_exit();
+init_failed:
+	return result;
+}
+
+void mpq_dmx_plugin_exit(void)
+{
+	int i;
+	struct mpq_demux *mpq_demux;
+
+	MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+	if (mpq_dmx_info.ion_client != NULL) {
+		ion_client_destroy(mpq_dmx_info.ion_client);
+		mpq_dmx_info.ion_client = NULL;
+	}
+
+	if (mpq_dmx_info.devices != NULL) {
+		for (i = 0; i < mpq_demux_device_num; i++) {
+			mpq_demux = mpq_dmx_info.devices + i;
+
+			if (!mpq_demux->is_initialized)
+				continue;
+
+			if (mpq_demux->mpq_dmx_plugin_release)
+				mpq_demux->mpq_dmx_plugin_release(mpq_demux);
+
+			mpq_demux->demux.dmx.remove_frontend(
+						&mpq_demux->demux.dmx,
+						&mpq_demux->fe_memory);
+
+			if (mpq_dmx_info.secure_demux_app_loaded)
+				mpq_sdmx_close_session(mpq_demux);
+			mutex_destroy(&mpq_demux->mutex);
+			dvb_dmxdev_release(&mpq_demux->dmxdev);
+			dvb_dmx_release(&mpq_demux->demux);
+		}
+
+		vfree(mpq_dmx_info.devices);
+		mpq_dmx_info.devices = NULL;
+	}
+}
+
+int mpq_dmx_set_source(
+		struct dmx_demux *demux,
+		const dmx_source_t *src)
+{
+	int i;
+	int dvr_index;
+	int dmx_index;
+	struct dvb_demux *dvb_demux = demux->priv;
+	struct mpq_demux *mpq_demux;
+
+	if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL)) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	mpq_demux = dvb_demux->priv;
+	if (mpq_demux == NULL) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * For dvr sources,
+	 * verify that this source is connected to the respective demux
+	 */
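+	/* The demux index is the device's offset within the devices array */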
+	dmx_index = mpq_demux - mpq_dmx_info.devices;
+
+	if (*src >= DMX_SOURCE_DVR0) {
+		dvr_index = *src - DMX_SOURCE_DVR0;
+
+		if (dvr_index != dmx_index) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: can't connect demux%d to dvr%d\n",
+				__func__,
+				dmx_index,
+				dvr_index);
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * For front-end sources,
+	 * verify that this source is not already set to different demux
+	 */
+	for (i = 0; i < mpq_demux_device_num; i++) {
+		if ((&mpq_dmx_info.devices[i] != mpq_demux) &&
+			(mpq_dmx_info.devices[i].source == *src)) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: demux%d source can't be set,\n"
+				"demux%d occupies this source already\n",
+				__func__,
+				dmx_index,
+				i);
+			return -EBUSY;
+		}
+	}
+
+	mpq_demux->source = *src;
+	return 0;
+}
+
+/**
+ * Takes an ION allocated buffer's file descriptor and handles the details of
+ * mapping it into kernel memory and obtaining an ION handle for it.
+ * Internal helper function.
+ *
+ * @client: ION client
+ * @handle: ION file descriptor to map
+ * @priv_handle: returned ION handle. Must be freed when no longer needed
+ * @kernel_mem: returned kernel mapped pointer
+ *
+ * Note: mapping might not be possible in secured heaps/buffers, and so NULL
+ * might be returned in kernel_mem
+ *
+ * Return error status
+ */
+static int mpq_map_buffer_to_kernel(
+	struct ion_client *client,
+	int handle,
+	struct ion_handle **priv_handle,
+	void **kernel_mem)
+{
+	struct ion_handle *ion_handle;
+	unsigned long ionflag = 0;
+	int ret;
+
+	if (client == NULL || priv_handle == NULL || kernel_mem == NULL) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	ion_handle = ion_import_dma_buf_fd(client, handle);
+	if (IS_ERR_OR_NULL(ion_handle)) {
+		ret = PTR_ERR(ion_handle);
+		MPQ_DVB_ERR_PRINT("%s: ion_import_dma_buf failed %d\n",
+			__func__, ret);
+		if (!ret)
+			ret = -ENOMEM;
+
+		goto map_buffer_failed;
+	}
+
+	ret = ion_handle_get_flags(client, ion_handle, &ionflag);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT("%s: ion_handle_get_flags failed %d\n",
+			__func__, ret);
+		goto map_buffer_failed_free_buff;
+	}
+
+	if (ionflag & ION_FLAG_SECURE) {
+		MPQ_DVB_DBG_PRINT("%s: secured buffer\n", __func__);
+		*kernel_mem = NULL;
+	} else {
+		size_t tmp;
+		*kernel_mem = ion_map_kernel(client, ion_handle);
+		if (IS_ERR_OR_NULL(*kernel_mem)) {
+			ret = PTR_ERR(*kernel_mem);
+			MPQ_DVB_ERR_PRINT("%s: ion_map_kernel failed, ret=%d\n",
+				__func__, ret);
+			if (!ret)
+				ret = -ENOMEM;
+			goto map_buffer_failed_free_buff;
+		}
+		ion_handle_get_size(client, ion_handle, &tmp);
+		MPQ_DVB_DBG_PRINT(
+			"%s: mapped to address 0x%p, size=%zu\n",
+			__func__, *kernel_mem, tmp);
+	}
+
+	*priv_handle = ion_handle;
+	return 0;
+
+map_buffer_failed_free_buff:
+	ion_free(client, ion_handle);
+map_buffer_failed:
+	return ret;
+}
+
+int mpq_dmx_map_buffer(struct dmx_demux *demux, struct dmx_buffer *dmx_buffer,
+		void **priv_handle, void **kernel_mem)
+{
+	struct dvb_demux *dvb_demux = demux->priv;
+	struct mpq_demux *mpq_demux;
+
+	if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL) ||
+		(priv_handle == NULL) || (kernel_mem == NULL)) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	mpq_demux = dvb_demux->priv;
+	if (mpq_demux == NULL) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	return mpq_map_buffer_to_kernel(
+		mpq_demux->ion_client,
+		dmx_buffer->handle,
+		(struct ion_handle **)priv_handle, kernel_mem);
+}
+
+int mpq_dmx_unmap_buffer(struct dmx_demux *demux,
+		void *priv_handle)
+{
+	struct dvb_demux *dvb_demux = demux->priv;
+	struct ion_handle *ion_handle = priv_handle;
+	struct mpq_demux *mpq_demux;
+	unsigned long ionflag = 0;
+	int ret;
+
+	if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL) ||
+		(priv_handle == NULL)) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	mpq_demux = dvb_demux->priv;
+	if (mpq_demux == NULL) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = ion_handle_get_flags(mpq_demux->ion_client, ion_handle, &ionflag);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT("%s: ion_handle_get_flags failed %d\n",
+			__func__, ret);
+		return -EINVAL;
+	}
+
+	if (!(ionflag & ION_FLAG_SECURE))
+		ion_unmap_kernel(mpq_demux->ion_client, ion_handle);
+
+	ion_free(mpq_demux->ion_client, ion_handle);
+
+	return 0;
+}
+
+int mpq_dmx_reuse_decoder_buffer(struct dvb_demux_feed *feed, int cookie)
+{
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	MPQ_DVB_DBG_PRINT("%s: cookie=%d\n", __func__, cookie);
+
+	if (cookie < 0) {
+		MPQ_DVB_ERR_PRINT("%s: invalid cookie parameter\n", __func__);
+		return -EINVAL;
+	}
+
+	if (dvb_dmx_is_video_feed(feed)) {
+		struct mpq_video_feed_info *feed_data;
+		struct mpq_feed *mpq_feed;
+		struct mpq_streambuffer *stream_buffer;
+		int ret;
+
+		mutex_lock(&mpq_demux->mutex);
+		mpq_feed = feed->priv;
+		feed_data = &mpq_feed->video_info;
+
+		spin_lock(&feed_data->video_buffer_lock);
+		stream_buffer = feed_data->video_buffer;
+		if (stream_buffer == NULL) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: invalid feed, feed_data->video_buffer is NULL\n",
+				__func__);
+			spin_unlock(&feed_data->video_buffer_lock);
+			mutex_unlock(&mpq_demux->mutex);
+			return -EINVAL;
+		}
+
+		ret = mpq_streambuffer_pkt_dispose(stream_buffer, cookie, 1);
+		spin_unlock(&feed_data->video_buffer_lock);
+		mutex_unlock(&mpq_demux->mutex);
+
+		return ret;
+	} else if (dvb_dmx_is_audio_feed(feed)) {
+		struct mpq_audio_feed_info *feed_data;
+		struct mpq_feed *mpq_feed;
+		struct mpq_streambuffer *stream_buffer;
+		int ret;
+
+		mutex_lock(&mpq_demux->mutex);
+		mpq_feed = feed->priv;
+		feed_data = &mpq_feed->audio_info;
+
+		spin_lock(&feed_data->audio_buffer_lock);
+		stream_buffer = feed_data->audio_buffer;
+		if (stream_buffer == NULL) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: invalid feed, feed_data->audio_buffer is NULL\n",
+				__func__);
+			spin_unlock(&feed_data->audio_buffer_lock);
+			mutex_unlock(&mpq_demux->mutex);
+			return -EINVAL;
+		}
+
+		ret = mpq_streambuffer_pkt_dispose(stream_buffer, cookie, 1);
+		spin_unlock(&feed_data->audio_buffer_lock);
+		mutex_unlock(&mpq_demux->mutex);
+
+		return ret;
+	}
+	MPQ_DVB_ERR_PRINT("%s: Invalid feed type %d\n",
+			__func__, feed->pes_type);
+
+	return -EINVAL;
+}
+
+/**
+ * Handles the details of internal decoder buffer allocation via ION.
+ * Internal helper function.
+ * @mpq_demux: mpq_demux object
+ * @feed_data: decoder feed object
+ * @dec_buffs: buffer information
+ *
+ * Return error status
+ */
+static int mpq_dmx_init_internal_buffers(
+	struct mpq_demux *mpq_demux,
+	struct mpq_video_feed_info *feed_data,
+	struct dmx_decoder_buffers *dec_buffs)
+{
+	struct ion_handle *temp_handle = NULL;
+	void *payload_buffer = NULL;
+	int actual_buffer_size = 0;
+	int ret = 0;
+
+	MPQ_DVB_DBG_PRINT("%s: Internal decoder buffer allocation\n", __func__);
+
+	actual_buffer_size = dec_buffs->buffers_size;
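+	/* Round the buffer size up to a 4KB boundary */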
+	actual_buffer_size += (SZ_4K - 1);
+	actual_buffer_size &= ~(SZ_4K - 1);
+
+	temp_handle = ion_alloc(mpq_demux->ion_client,
+		actual_buffer_size, SZ_4K,
+		ION_HEAP(video_secure_ion_heap) |
+		ION_HEAP(video_nonsecure_ion_heap),
+		mpq_demux->decoder_alloc_flags);
+
+	if (IS_ERR_OR_NULL(temp_handle)) {
+		ret = PTR_ERR(temp_handle);
+		MPQ_DVB_ERR_PRINT("%s: FAILED to allocate payload buffer %d\n",
+			__func__, ret);
+		if (!ret)
+			ret = -ENOMEM;
+		goto end;
+	}
+
+	payload_buffer = ion_map_kernel(mpq_demux->ion_client, temp_handle);
+
+	if (IS_ERR_OR_NULL(payload_buffer)) {
+		ret = PTR_ERR(payload_buffer);
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to map payload buffer %d\n",
+			__func__, ret);
+		if (!ret)
+			ret = -ENOMEM;
+		goto init_failed_free_payload_buffer;
+	}
+
+	feed_data->buffer_desc.decoder_buffers_num = 1;
+	feed_data->buffer_desc.ion_handle[0] = temp_handle;
+	feed_data->buffer_desc.desc[0].base = payload_buffer;
+	feed_data->buffer_desc.desc[0].size = actual_buffer_size;
+	feed_data->buffer_desc.desc[0].read_ptr = 0;
+	feed_data->buffer_desc.desc[0].write_ptr = 0;
+	feed_data->buffer_desc.desc[0].handle =
+		ion_share_dma_buf_fd(mpq_demux->ion_client, temp_handle);
+
+	if (feed_data->buffer_desc.desc[0].handle < 0) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to share payload buffer %d\n",
+			__func__, ret);
+		ret = -ENOMEM;
+		goto init_failed_unmap_payload_buffer;
+	}
+
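+	/* Take a reference on the file backing the shared fd so it stays valid */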
+	feed_data->buffer_desc.shared_file = fget(
+		feed_data->buffer_desc.desc[0].handle);
+
+	return 0;
+
+init_failed_unmap_payload_buffer:
+	ion_unmap_kernel(mpq_demux->ion_client, temp_handle);
+	feed_data->buffer_desc.desc[0].base = NULL;
+init_failed_free_payload_buffer:
+	ion_free(mpq_demux->ion_client, temp_handle);
+	feed_data->buffer_desc.ion_handle[0] = NULL;
+	feed_data->buffer_desc.desc[0].size = 0;
+	feed_data->buffer_desc.decoder_buffers_num = 0;
+	feed_data->buffer_desc.shared_file = NULL;
+end:
+	return ret;
+}
+
+/**
+ * Handles the details of external decoder buffers allocated by the user.
+ * Each buffer is mapped into kernel memory, an ION handle is obtained for it,
+ * and the decoder feed object is updated with the related information.
+ * Internal helper function.
+ * @feed_data: decoder feed object
+ * @dec_buffs: buffer information
+ * @client: ION client
+ *
+ * Return error status
+ */
+static int mpq_dmx_init_external_buffers(
+	struct mpq_video_feed_info *feed_data,
+	struct dmx_decoder_buffers *dec_buffs,
+	struct ion_client *client)
+{
+	struct ion_handle *temp_handle = NULL;
+	void *payload_buffer = NULL;
+	int actual_buffer_size = 0;
+	int ret = 0;
+	int i;
+
+	/*
+	 * Payload buffer was allocated externally (through ION).
+	 * Map the ion handles to kernel memory
+	 */
+	MPQ_DVB_DBG_PRINT("%s: External decoder buffer allocation\n", __func__);
+
+	actual_buffer_size = dec_buffs->buffers_size;
+	if (!dec_buffs->is_linear) {
+		MPQ_DVB_DBG_PRINT("%s: Ex. Ring-buffer\n", __func__);
+		feed_data->buffer_desc.decoder_buffers_num = 1;
+	} else {
+		MPQ_DVB_DBG_PRINT("%s: Ex. Linear\n", __func__);
+		feed_data->buffer_desc.decoder_buffers_num =
+			dec_buffs->buffers_num;
+	}
+
+	for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+		ret = mpq_map_buffer_to_kernel(
+			client,
+			dec_buffs->handles[i],
+			&temp_handle,
+			&payload_buffer);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: Failed mapping buffer %d\n",
+				__func__, i);
+			goto init_failed;
+		}
+		feed_data->buffer_desc.ion_handle[i] = temp_handle;
+		feed_data->buffer_desc.desc[i].base = payload_buffer;
+		feed_data->buffer_desc.desc[i].handle =
+			dec_buffs->handles[i];
+		feed_data->buffer_desc.desc[i].size =
+			dec_buffs->buffers_size;
+		feed_data->buffer_desc.desc[i].read_ptr = 0;
+		feed_data->buffer_desc.desc[i].write_ptr = 0;
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: Buffer #%d: base=0x%p, handle=%d, size=%d\n",
+			__func__, i,
+			feed_data->buffer_desc.desc[i].base,
+			feed_data->buffer_desc.desc[i].handle,
+			feed_data->buffer_desc.desc[i].size);
+	}
+
+	return 0;
+
+init_failed:
+	for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+		if (feed_data->buffer_desc.ion_handle[i]) {
+			if (feed_data->buffer_desc.desc[i].base) {
+				ion_unmap_kernel(client,
+					feed_data->buffer_desc.ion_handle[i]);
+				feed_data->buffer_desc.desc[i].base = NULL;
+			}
+			ion_free(client, feed_data->buffer_desc.ion_handle[i]);
+			feed_data->buffer_desc.ion_handle[i] = NULL;
+			feed_data->buffer_desc.desc[i].size = 0;
+		}
+	}
+	return ret;
+}
+
+/**
+ * Handles the details of initializing the mpq_streambuffer object according
+ * to the user decoder buffer configuration: External/Internal buffers and
+ * ring/linear buffering mode.
+ * Internal helper function.
+ * @feed:  dvb demux feed object, contains the buffers configuration
+ * @feed_data: decoder feed object
+ * @stream_buffer: stream buffer object to initialize
+ *
+ * Return error status
+ */
+static int mpq_dmx_init_streambuffer(
+	struct mpq_feed *feed,
+	struct mpq_video_feed_info *feed_data,
+	struct mpq_streambuffer *stream_buffer)
+{
+	int ret;
+	void *packet_buffer = NULL;
+	struct mpq_demux *mpq_demux = feed->mpq_demux;
+	struct ion_client *client = mpq_demux->ion_client;
+	struct dmx_decoder_buffers *dec_buffs = NULL;
+	enum mpq_streambuffer_mode mode;
+
+	dec_buffs = feed->dvb_demux_feed->feed.ts.decoder_buffers;
+
+	/* Allocate packet buffer holding the meta-data */
+	packet_buffer = vmalloc(VIDEO_META_DATA_BUFFER_SIZE);
+
+	if (packet_buffer == NULL) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to allocate packets buffer\n",
+			__func__);
+
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	MPQ_DVB_DBG_PRINT("%s: dec_buffs: num=%d, size=%d, linear=%d\n",
+			__func__,
+			dec_buffs->buffers_num,
+			dec_buffs->buffers_size,
+			dec_buffs->is_linear);
+
+	if (dec_buffs->buffers_num == 0)
+		ret = mpq_dmx_init_internal_buffers(
+			mpq_demux, feed_data, dec_buffs);
+	else
+		ret = mpq_dmx_init_external_buffers(
+			feed_data, dec_buffs, client);
+
+	if (ret != 0)
+		goto init_failed_free_packet_buffer;
+
+	mode = dec_buffs->is_linear ? MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR :
+		MPQ_STREAMBUFFER_BUFFER_MODE_RING;
+	ret = mpq_streambuffer_init(
+			feed_data->video_buffer,
+			mode,
+			feed_data->buffer_desc.desc,
+			feed_data->buffer_desc.decoder_buffers_num,
+			packet_buffer,
+			VIDEO_META_DATA_BUFFER_SIZE);
+
+	if (ret != 0)
+		goto init_failed_free_packet_buffer;
+
+	goto end;
+
+init_failed_free_packet_buffer:
+	vfree(packet_buffer);
+end:
+	return ret;
+}
+
+static void mpq_dmx_release_streambuffer(
+	struct mpq_feed *feed,
+	struct mpq_video_feed_info *feed_data,
+	struct mpq_streambuffer *video_buffer,
+	struct ion_client *client)
+{
+	int buf_num = 0;
+	int i;
+	struct dmx_decoder_buffers *dec_buffs =
+		feed->dvb_demux_feed->feed.ts.decoder_buffers;
+
+	mpq_adapter_unregister_stream_if(feed_data->stream_interface);
+
+	mpq_streambuffer_terminate(video_buffer);
+
+	vfree(video_buffer->packet_data.data);
+
+	buf_num = feed_data->buffer_desc.decoder_buffers_num;
+
+	for (i = 0; i < buf_num; i++) {
+		if (feed_data->buffer_desc.ion_handle[i]) {
+			if (feed_data->buffer_desc.desc[i].base) {
+				ion_unmap_kernel(client,
+					feed_data->buffer_desc.ion_handle[i]);
+				feed_data->buffer_desc.desc[i].base = NULL;
+			}
+
+			/*
+			 * Un-share the buffer if the kernel is the one that
+			 * shared it.
+			 */
+			if (!dec_buffs->buffers_num &&
+				feed_data->buffer_desc.shared_file) {
+				fput(feed_data->buffer_desc.shared_file);
+				feed_data->buffer_desc.shared_file = NULL;
+			}
+
+			ion_free(client, feed_data->buffer_desc.ion_handle[i]);
+			feed_data->buffer_desc.ion_handle[i] = NULL;
+			feed_data->buffer_desc.desc[i].size = 0;
+		}
+	}
+}
+
+int mpq_dmx_flush_stream_buffer(struct dvb_demux_feed *feed)
+{
+	struct mpq_feed *mpq_feed = feed->priv;
+	struct mpq_video_feed_info *feed_data = &mpq_feed->video_info;
+	struct mpq_streambuffer *sbuff;
+	int ret = 0;
+
+	if (!dvb_dmx_is_video_feed(feed)) {
+		MPQ_DVB_DBG_PRINT("%s: not a video feed, feed type=%d\n",
+			__func__, feed->pes_type);
+		return 0;
+	}
+
+	spin_lock(&feed_data->video_buffer_lock);
+
+	sbuff = feed_data->video_buffer;
+	if (sbuff == NULL) {
+		MPQ_DVB_DBG_PRINT("%s: feed_data->video_buffer is NULL\n",
+			__func__);
+		spin_unlock(&feed_data->video_buffer_lock);
+		return -ENODEV;
+	}
+
+	feed_data->pending_pattern_len = 0;
+
+	ret = mpq_streambuffer_flush(sbuff);
+	if (ret)
+		MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer_flush failed, ret=%d\n",
+			__func__, ret);
+
+	spin_unlock(&feed_data->video_buffer_lock);
+
+	return ret;
+}
+
+static int mpq_dmx_init_audio_internal_buffers(
+	struct mpq_demux *mpq_demux,
+	struct mpq_audio_feed_info *feed_data,
+	struct dmx_decoder_buffers *dec_buffs)
+{
+	struct ion_handle *temp_handle = NULL;
+	void *payload_buffer = NULL;
+	int actual_buffer_size = 0;
+	int ret = 0;
+
+	MPQ_DVB_DBG_PRINT("%s: Internal audio decoder buffer allocation\n",
+			  __func__);
+
+	actual_buffer_size = dec_buffs->buffers_size;
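+	/* Round the buffer size up to a 4KB boundary */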
+	actual_buffer_size += (SZ_4K - 1);
+	actual_buffer_size &= ~(SZ_4K - 1);
+
+	temp_handle = ion_alloc(mpq_demux->ion_client,
+		actual_buffer_size, SZ_4K,
+		ION_HEAP(audio_nonsecure_ion_heap),
+		mpq_demux->decoder_alloc_flags);
+
+	if (IS_ERR_OR_NULL(temp_handle)) {
+		ret = PTR_ERR(temp_handle);
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to allocate audio payload buffer %d\n",
+			 __func__, ret);
+		if (!ret)
+			ret = -ENOMEM;
+		goto end;
+	}
+
+	payload_buffer = ion_map_kernel(mpq_demux->ion_client, temp_handle);
+
+	if (IS_ERR_OR_NULL(payload_buffer)) {
+		ret = PTR_ERR(payload_buffer);
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to map audio payload buffer %d\n",
+			__func__, ret);
+		if (!ret)
+			ret = -ENOMEM;
+		goto init_failed_free_payload_buffer;
+	}
+	feed_data->buffer_desc.decoder_buffers_num = 1;
+	feed_data->buffer_desc.ion_handle[0] = temp_handle;
+	feed_data->buffer_desc.desc[0].base = payload_buffer;
+	feed_data->buffer_desc.desc[0].size = actual_buffer_size;
+	feed_data->buffer_desc.desc[0].read_ptr = 0;
+	feed_data->buffer_desc.desc[0].write_ptr = 0;
+	feed_data->buffer_desc.desc[0].handle =
+		ion_share_dma_buf_fd(mpq_demux->ion_client, temp_handle);
+	if (feed_data->buffer_desc.desc[0].handle < 0) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to share audio payload buffer %d\n",
+			__func__, ret);
+		ret = -ENOMEM;
+		goto init_failed_unmap_payload_buffer;
+	}
+
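+	/* Take a reference on the file backing the shared fd so it stays valid */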
+	feed_data->buffer_desc.shared_file = fget(
+		feed_data->buffer_desc.desc[0].handle);
+
+	return 0;
+
+init_failed_unmap_payload_buffer:
+	ion_unmap_kernel(mpq_demux->ion_client, temp_handle);
+	feed_data->buffer_desc.desc[0].base = NULL;
+init_failed_free_payload_buffer:
+	ion_free(mpq_demux->ion_client, temp_handle);
+	feed_data->buffer_desc.ion_handle[0] = NULL;
+	feed_data->buffer_desc.desc[0].size = 0;
+	feed_data->buffer_desc.decoder_buffers_num = 0;
+	feed_data->buffer_desc.shared_file = NULL;
+end:
+	return ret;
+}
+
+static int mpq_dmx_init_audio_external_buffers(
+	struct mpq_audio_feed_info *feed_data,
+	struct dmx_decoder_buffers *dec_buffs,
+	struct ion_client *client)
+{
+	struct ion_handle *temp_handle = NULL;
+	void *payload_buffer = NULL;
+	int actual_buffer_size = 0;
+	int ret = 0;
+	int i;
+
+	/*
+	 * Payload buffer was allocated externally (through ION).
+	 * Map the ion handles to kernel memory
+	 */
+	MPQ_DVB_DBG_PRINT("%s: External audio decoder buffer allocation\n",
+				__func__);
+
+	actual_buffer_size = dec_buffs->buffers_size;
+	if (!dec_buffs->is_linear) {
+		MPQ_DVB_DBG_PRINT("%s: Ex. Ring-buffer\n", __func__);
+		feed_data->buffer_desc.decoder_buffers_num = 1;
+	} else {
+		MPQ_DVB_DBG_PRINT("%s: Ex. Linear\n", __func__);
+		feed_data->buffer_desc.decoder_buffers_num =
+			dec_buffs->buffers_num;
+	}
+
+	for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+		ret = mpq_map_buffer_to_kernel(
+			client,
+			dec_buffs->handles[i],
+			&temp_handle,
+			&payload_buffer);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: Failed mapping audio buffer %d\n",
+				__func__, i);
+			goto init_failed;
+		}
+		feed_data->buffer_desc.ion_handle[i] = temp_handle;
+		feed_data->buffer_desc.desc[i].base = payload_buffer;
+		feed_data->buffer_desc.desc[i].handle =
+			dec_buffs->handles[i];
+		feed_data->buffer_desc.desc[i].size =
+			dec_buffs->buffers_size;
+		feed_data->buffer_desc.desc[i].read_ptr = 0;
+		feed_data->buffer_desc.desc[i].write_ptr = 0;
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: Audio Buffer #%d: base=0x%p, handle=%d, size=%d\n",
+			__func__, i,
+			feed_data->buffer_desc.desc[i].base,
+			feed_data->buffer_desc.desc[i].handle,
+			feed_data->buffer_desc.desc[i].size);
+	}
+
+	return 0;
+
+init_failed:
+	for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+		if (feed_data->buffer_desc.ion_handle[i]) {
+			if (feed_data->buffer_desc.desc[i].base) {
+				ion_unmap_kernel(client,
+					feed_data->buffer_desc.ion_handle[i]);
+				feed_data->buffer_desc.desc[i].base = NULL;
+			}
+			ion_free(client, feed_data->buffer_desc.ion_handle[i]);
+			feed_data->buffer_desc.ion_handle[i] = NULL;
+			feed_data->buffer_desc.desc[i].size = 0;
+		}
+	}
+	return ret;
+}
+
+static int mpq_dmx_init_audio_streambuffer(
+	struct mpq_feed *feed,
+	struct mpq_audio_feed_info *feed_data,
+	struct mpq_streambuffer *stream_buffer)
+{
+	int ret;
+	void *packet_buffer = NULL;
+	struct mpq_demux *mpq_demux = feed->mpq_demux;
+	struct ion_client *client = mpq_demux->ion_client;
+	struct dmx_decoder_buffers *dec_buffs = NULL;
+	enum mpq_streambuffer_mode mode;
+
+	dec_buffs = feed->dvb_demux_feed->feed.ts.decoder_buffers;
+
+	/* Allocate packet buffer holding the meta-data */
+	packet_buffer = vmalloc(AUDIO_META_DATA_BUFFER_SIZE);
+
+	if (packet_buffer == NULL) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to allocate packets buffer\n", __func__);
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	MPQ_DVB_DBG_PRINT("%s: dec_buffs: num=%d, size=%d, linear=%d\n",
+				__func__, dec_buffs->buffers_num,
+				dec_buffs->buffers_size,
+				dec_buffs->is_linear);
+
+	if (dec_buffs->buffers_num == 0)
+		ret = mpq_dmx_init_audio_internal_buffers(
+			mpq_demux, feed_data, dec_buffs);
+	else
+		ret = mpq_dmx_init_audio_external_buffers(
+			feed_data, dec_buffs, client);
+
+	if (ret != 0)
+		goto init_failed_free_packet_buffer;
+
+	mode = dec_buffs->is_linear ? MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR :
+		MPQ_STREAMBUFFER_BUFFER_MODE_RING;
+	ret = mpq_streambuffer_init(
+			feed_data->audio_buffer,
+			mode,
+			feed_data->buffer_desc.desc,
+			feed_data->buffer_desc.decoder_buffers_num,
+			packet_buffer,
+			AUDIO_META_DATA_BUFFER_SIZE);
+
+	if (ret != 0)
+		goto init_failed_free_packet_buffer;
+
+	goto end;
+
+init_failed_free_packet_buffer:
+	vfree(packet_buffer);
+end:
+	return ret;
+}
+
+static void mpq_dmx_release_audio_streambuffer(
+	struct mpq_feed *feed,
+	struct mpq_audio_feed_info *feed_data,
+	struct mpq_streambuffer *audio_buffer,
+	struct ion_client *client)
+{
+	int buf_num = 0;
+	int i;
+	struct dmx_decoder_buffers *dec_buffs =
+		feed->dvb_demux_feed->feed.ts.decoder_buffers;
+
+	mpq_adapter_unregister_stream_if(feed_data->stream_interface);
+
+	mpq_streambuffer_terminate(audio_buffer);
+
+	vfree(audio_buffer->packet_data.data);
+
+	buf_num = feed_data->buffer_desc.decoder_buffers_num;
+
+	for (i = 0; i < buf_num; i++) {
+		if (feed_data->buffer_desc.ion_handle[i]) {
+			if (feed_data->buffer_desc.desc[i].base) {
+				ion_unmap_kernel(client,
+					feed_data->buffer_desc.ion_handle[i]);
+				feed_data->buffer_desc.desc[i].base = NULL;
+			}
+
+			/*
+			 * Un-share the buffer if the kernel is the one that
+			 * shared it.
+			 */
+			if (!dec_buffs->buffers_num &&
+				feed_data->buffer_desc.shared_file) {
+				fput(feed_data->buffer_desc.shared_file);
+				feed_data->buffer_desc.shared_file = NULL;
+			}
+
+			ion_free(client, feed_data->buffer_desc.ion_handle[i]);
+			feed_data->buffer_desc.ion_handle[i] = NULL;
+			feed_data->buffer_desc.desc[i].size = 0;
+		}
+	}
+}
+
+int mpq_dmx_flush_audio_stream_buffer(struct dvb_demux_feed *feed)
+{
+	struct mpq_feed *mpq_feed = feed->priv;
+	struct mpq_audio_feed_info *feed_data = &mpq_feed->audio_info;
+	struct mpq_streambuffer *sbuff;
+	int ret = 0;
+
+	if (!dvb_dmx_is_audio_feed(feed)) {
+		MPQ_DVB_DBG_PRINT("%s: not a audio feed, feed type=%d\n",
+			__func__, feed->pes_type);
+		return 0;
+	}
+
+	spin_lock(&feed_data->audio_buffer_lock);
+
+	sbuff = feed_data->audio_buffer;
+	if (sbuff == NULL) {
+		MPQ_DVB_DBG_PRINT("%s: feed_data->audio_buffer is NULL\n",
+			__func__);
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return -ENODEV;
+	}
+
+	ret = mpq_streambuffer_flush(sbuff);
+	if (ret)
+		MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer_flush failed, ret=%d\n",
+			__func__, ret);
+
+	spin_unlock(&feed_data->audio_buffer_lock);
+
+	return ret;
+}
+
+static int mpq_dmx_flush_buffer(struct dmx_ts_feed *ts_feed, size_t length)
+{
+	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+	struct dvb_demux *demux = feed->demux;
+	int ret = 0;
+
+	if (mutex_lock_interruptible(&demux->mutex))
+		return -ERESTARTSYS;
+
+	dvbdmx_ts_reset_pes_state(feed);
+
+	if (dvb_dmx_is_video_feed(feed)) {
+		MPQ_DVB_DBG_PRINT("%s: flushing video buffer\n", __func__);
+
+		ret = mpq_dmx_flush_stream_buffer(feed);
+	} else if (dvb_dmx_is_audio_feed(feed)) {
+		MPQ_DVB_DBG_PRINT("%s: flushing audio buffer\n", __func__);
+
+		ret = mpq_dmx_flush_audio_stream_buffer(feed);
+	}
+
+	mutex_unlock(&demux->mutex);
+	return ret;
+}
+
+/**
+ * mpq_dmx_init_video_feed - Initialize video feed information
+ * used to pass data directly to the decoder.
+ *
+ * @mpq_feed: The mpq feed object
+ *
+ * Return     error code.
+ */
+int mpq_dmx_init_video_feed(struct mpq_feed *mpq_feed)
+{
+	int ret;
+	struct mpq_video_feed_info *feed_data = &mpq_feed->video_info;
+	struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+	struct mpq_streambuffer *stream_buffer;
+
+	/* get and store framing information if required */
+	if (video_framing) {
+		mpq_dmx_get_pattern_params(
+			mpq_feed->dvb_demux_feed->video_codec,
+			feed_data->patterns, &feed_data->patterns_num);
+		if (!feed_data->patterns_num) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: FAILED to get framing pattern parameters\n",
+				__func__);
+
+			ret = -EINVAL;
+			goto init_failed_free_priv_data;
+		}
+	}
+
+	/* Register the new stream-buffer interface to MPQ adapter */
+	switch (mpq_feed->dvb_demux_feed->pes_type) {
+	case DMX_PES_VIDEO0:
+		store_mpq_video_feed[0] = mpq_feed;
+		feed_data->stream_interface =
+			MPQ_ADAPTER_VIDEO0_STREAM_IF;
+		break;
+
+	case DMX_PES_VIDEO1:
+		store_mpq_video_feed[1] = mpq_feed;
+		feed_data->stream_interface =
+			MPQ_ADAPTER_VIDEO1_STREAM_IF;
+		break;
+
+	case DMX_PES_VIDEO2:
+		store_mpq_video_feed[2] = mpq_feed;
+		feed_data->stream_interface =
+			MPQ_ADAPTER_VIDEO2_STREAM_IF;
+		break;
+
+	case DMX_PES_VIDEO3:
+		store_mpq_video_feed[3] = mpq_feed;
+		feed_data->stream_interface =
+			MPQ_ADAPTER_VIDEO3_STREAM_IF;
+		break;
+
+	default:
+		MPQ_DVB_ERR_PRINT(
+			"%s: Invalid pes type %d\n",
+			__func__,
+			mpq_feed->dvb_demux_feed->pes_type);
+		ret = -EINVAL;
+		goto init_failed_free_priv_data;
+	}
+
+	/* make sure the interface is not already occupied */
+	stream_buffer = NULL;
+	mpq_adapter_get_stream_if(
+			feed_data->stream_interface,
+			&stream_buffer);
+	if (stream_buffer != NULL) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: Video interface %d already occupied!\n",
+			__func__,
+			feed_data->stream_interface);
+		ret = -EBUSY;
+		goto init_failed_free_priv_data;
+	}
+
+	feed_data->video_buffer =
+		&mpq_dmx_info.decoder_buffers[feed_data->stream_interface];
+
+	ret = mpq_dmx_init_streambuffer(
+		mpq_feed, feed_data, feed_data->video_buffer);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_dmx_init_streambuffer failed, err = %d\n",
+			__func__, ret);
+		goto init_failed_free_priv_data;
+	}
+
+	ret = mpq_adapter_register_stream_if(
+			feed_data->stream_interface,
+			feed_data->video_buffer);
+
+	if (ret < 0) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_adapter_register_stream_if failed, err = %d\n",
+			__func__, ret);
+		goto init_failed_free_stream_buffer;
+	}
+
+	spin_lock_init(&feed_data->video_buffer_lock);
+
+	feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN;
+	feed_data->pes_header_offset = 0;
+	mpq_feed->dvb_demux_feed->pusi_seen = 0;
+	mpq_feed->dvb_demux_feed->peslen = 0;
+	feed_data->fullness_wait_cancel = 0;
+	mpq_streambuffer_get_data_rw_offset(feed_data->video_buffer, NULL,
+		&feed_data->frame_offset);
+	feed_data->last_pattern_offset = 0;
+	feed_data->pending_pattern_len = 0;
+	feed_data->last_framing_match_type = 0;
+	feed_data->found_sequence_header_pattern = 0;
+	memset(&feed_data->prefix_size, 0,
+			sizeof(struct dvb_dmx_video_prefix_size_masks));
+	feed_data->first_prefix_size = 0;
+	feed_data->saved_pts_dts_info.pts_exist = 0;
+	feed_data->saved_pts_dts_info.dts_exist = 0;
+	feed_data->new_pts_dts_info.pts_exist = 0;
+	feed_data->new_pts_dts_info.dts_exist = 0;
+	feed_data->saved_info_used = 1;
+	feed_data->new_info_exists = 0;
+	feed_data->first_pts_dts_copy = 1;
+	feed_data->tei_errs = 0;
+	feed_data->last_continuity = -1;
+	feed_data->continuity_errs = 0;
+	feed_data->ts_packets_num = 0;
+	feed_data->ts_dropped_bytes = 0;
+
+	mpq_demux->decoder_stat[feed_data->stream_interface].drop_count = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].out_count = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].
+		out_interval_sum = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].
+		out_interval_max = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors = 0;
+
+	return 0;
+
+init_failed_free_stream_buffer:
+	mpq_dmx_release_streambuffer(mpq_feed, feed_data,
+		feed_data->video_buffer, mpq_demux->ion_client);
+	mpq_adapter_unregister_stream_if(feed_data->stream_interface);
+init_failed_free_priv_data:
+	feed_data->video_buffer = NULL;
+	return ret;
+}
+
+/**
+ * mpq_dmx_init_audio_feed - Initialize audio feed information
+ * used to pass data directly to the decoder.
+ *
+ * @mpq_feed: The mpq feed object
+ *
+ * Return     error code.
+ */
+int mpq_dmx_init_audio_feed(struct mpq_feed *mpq_feed)
+{
+	int ret;
+	struct mpq_audio_feed_info *feed_data = &mpq_feed->audio_info;
+	struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+	struct mpq_streambuffer *stream_buffer;
+
+	switch (mpq_feed->dvb_demux_feed->pes_type) {
+	case DMX_PES_AUDIO0:
+		store_mpq_audio_feed[0] = mpq_feed;
+		feed_data->stream_interface =
+			MPQ_ADAPTER_AUDIO0_STREAM_IF;
+		break;
+
+	case DMX_PES_AUDIO1:
+		store_mpq_audio_feed[1] = mpq_feed;
+		feed_data->stream_interface =
+			MPQ_ADAPTER_AUDIO1_STREAM_IF;
+		break;
+
+	case DMX_PES_AUDIO2:
+		store_mpq_audio_feed[2] = mpq_feed;
+		feed_data->stream_interface =
+			MPQ_ADAPTER_AUDIO2_STREAM_IF;
+		break;
+
+	case DMX_PES_AUDIO3:
+		store_mpq_audio_feed[3] = mpq_feed;
+		feed_data->stream_interface =
+			MPQ_ADAPTER_AUDIO3_STREAM_IF;
+		break;
+
+	default:
+		MPQ_DVB_ERR_PRINT(
+			"%s: Invalid pes type %d\n",
+			__func__,
+			mpq_feed->dvb_demux_feed->pes_type);
+		ret = -EINVAL;
+		goto init_failed_free_priv_data;
+	}
+
+	/* make sure the interface is not already occupied */
+	stream_buffer = NULL;
+	mpq_adapter_get_stream_if(
+			feed_data->stream_interface,
+			&stream_buffer);
+	if (stream_buffer != NULL) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: Audio interface %d already occupied!\n",
+			__func__, feed_data->stream_interface);
+		ret = -EBUSY;
+		goto init_failed_free_priv_data;
+	}
+
+	feed_data->audio_buffer =
+		&mpq_dmx_info.decoder_buffers[feed_data->stream_interface];
+
+	ret = mpq_dmx_init_audio_streambuffer(
+		mpq_feed, feed_data, feed_data->audio_buffer);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_dmx_init_streambuffer failed, err = %d\n",
+			__func__, ret);
+		goto init_failed_free_priv_data;
+	}
+
+	ret = mpq_adapter_register_stream_if(
+			feed_data->stream_interface,
+			feed_data->audio_buffer);
+
+	if (ret < 0) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_adapter_register_stream_if failed, err = %d\n",
+			__func__, ret);
+		goto init_failed_free_stream_buffer;
+	}
+
+	spin_lock_init(&feed_data->audio_buffer_lock);
+
+	feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN;
+	feed_data->pes_header_offset = 0;
+	mpq_feed->dvb_demux_feed->pusi_seen = 0;
+	mpq_feed->dvb_demux_feed->peslen = 0;
+	feed_data->fullness_wait_cancel = 0;
+	mpq_streambuffer_get_data_rw_offset(feed_data->audio_buffer, NULL,
+		&feed_data->frame_offset);
+	feed_data->saved_pts_dts_info.pts_exist = 0;
+	feed_data->saved_pts_dts_info.dts_exist = 0;
+	feed_data->new_pts_dts_info.pts_exist = 0;
+	feed_data->new_pts_dts_info.dts_exist = 0;
+	feed_data->saved_info_used = 1;
+	feed_data->new_info_exists = 0;
+	feed_data->first_pts_dts_copy = 1;
+	feed_data->tei_errs = 0;
+	feed_data->last_continuity = -1;
+	feed_data->continuity_errs = 0;
+	feed_data->ts_packets_num = 0;
+	feed_data->ts_dropped_bytes = 0;
+
+	mpq_demux->decoder_stat[feed_data->stream_interface].drop_count = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].out_count = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].
+		out_interval_sum = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].
+		out_interval_max = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors = 0;
+
+	return 0;
+
+init_failed_free_stream_buffer:
+	mpq_dmx_release_audio_streambuffer(mpq_feed, feed_data,
+		feed_data->audio_buffer, mpq_demux->ion_client);
+	mpq_adapter_unregister_stream_if(feed_data->stream_interface);
+init_failed_free_priv_data:
+	feed_data->audio_buffer = NULL;
+	return ret;
+}
+
+/**
+ * mpq_dmx_terminate_video_feed - terminate video feed information
+ * that was previously initialized in mpq_dmx_init_video_feed
+ *
+ * @mpq_feed: The mpq feed used for the video TS packets
+ *
+ * Return     error code.
+ */
+int mpq_dmx_terminate_video_feed(struct mpq_feed *mpq_feed)
+{
+	struct mpq_streambuffer *video_buffer;
+	struct mpq_video_feed_info *feed_data;
+	struct mpq_demux *mpq_demux;
+
+	if (mpq_feed == NULL)
+		return -EINVAL;
+
+	mpq_demux = mpq_feed->mpq_demux;
+	feed_data = &mpq_feed->video_info;
+
+	spin_lock(&feed_data->video_buffer_lock);
+	video_buffer = feed_data->video_buffer;
+	feed_data->video_buffer = NULL;
+	wake_up_all(&video_buffer->raw_data.queue);
+	spin_unlock(&feed_data->video_buffer_lock);
+
+	mpq_dmx_release_streambuffer(mpq_feed, feed_data,
+		video_buffer, mpq_demux->ion_client);
+
+	return 0;
+}
+
+int mpq_dmx_terminate_audio_feed(struct mpq_feed *mpq_feed)
+{
+	struct mpq_streambuffer *audio_buffer;
+	struct mpq_audio_feed_info *feed_data;
+	struct mpq_demux *mpq_demux;
+
+	if (mpq_feed == NULL)
+		return -EINVAL;
+
+	mpq_demux = mpq_feed->mpq_demux;
+	feed_data = &mpq_feed->audio_info;
+
+	spin_lock(&feed_data->audio_buffer_lock);
+	audio_buffer = feed_data->audio_buffer;
+	feed_data->audio_buffer = NULL;
+	wake_up_all(&audio_buffer->raw_data.queue);
+	spin_unlock(&feed_data->audio_buffer_lock);
+
+	mpq_dmx_release_audio_streambuffer(mpq_feed, feed_data,
+		audio_buffer, mpq_demux->ion_client);
+
+	return 0;
+}
+
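+/*
+ * mpq_dmx_peer_rec_feed - Find another active feed that shares the same
+ * output ring buffer as the given feed, i.e. its peer in a recording filter.
+ */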
+struct dvb_demux_feed *mpq_dmx_peer_rec_feed(struct dvb_demux_feed *feed)
+{
+	struct dvb_demux_feed *tmp;
+	struct dvb_demux *dvb_demux = feed->demux;
+
+	list_for_each_entry(tmp, &dvb_demux->feed_list, list_head) {
+		if (tmp != feed && tmp->state == DMX_STATE_GO &&
+			tmp->feed.ts.buffer.ringbuff ==
+			feed->feed.ts.buffer.ringbuff) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: main feed pid=%d, secondary feed pid=%d\n",
+				__func__, tmp->pid, feed->pid);
+			return tmp;
+		}
+	}
+
+	return NULL;
+}
+
+static int mpq_sdmx_alloc_data_buf(struct mpq_feed *mpq_feed, size_t size)
+{
+	struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+	void *buf_base;
+	int ret;
+
+	mpq_feed->sdmx_buf_handle = ion_alloc(mpq_demux->ion_client,
+		size,
+		SZ_4K,
+		ION_HEAP(ION_QSECOM_HEAP_ID),
+		0);
+	if (IS_ERR_OR_NULL(mpq_feed->sdmx_buf_handle)) {
+		ret = PTR_ERR(mpq_feed->sdmx_buf_handle);
+		mpq_feed->sdmx_buf_handle = NULL;
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to allocate sdmx buffer %d\n",
+			__func__, ret);
+		if (!ret)
+			ret = -ENOMEM;
+		goto end;
+	}
+
+	buf_base = ion_map_kernel(mpq_demux->ion_client,
+		mpq_feed->sdmx_buf_handle);
+	if (IS_ERR_OR_NULL(buf_base)) {
+		ret = PTR_ERR(buf_base);
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to map sdmx buffer %d\n",
+			__func__, ret);
+		if (!ret)
+			ret = -ENOMEM;
+		goto failed_free_buf;
+	}
+
+	dvb_ringbuffer_init(&mpq_feed->sdmx_buf, buf_base, size);
+
+	return 0;
+
+failed_free_buf:
+	ion_free(mpq_demux->ion_client, mpq_feed->sdmx_buf_handle);
+	mpq_feed->sdmx_buf_handle = NULL;
+end:
+	return ret;
+}
+
+static int mpq_sdmx_free_data_buf(struct mpq_feed *mpq_feed)
+{
+	struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+
+	if (mpq_feed->sdmx_buf_handle) {
+		ion_unmap_kernel(mpq_demux->ion_client,
+			mpq_feed->sdmx_buf_handle);
+		mpq_feed->sdmx_buf.data = NULL;
+		ion_free(mpq_demux->ion_client,
+			mpq_feed->sdmx_buf_handle);
+		mpq_feed->sdmx_buf_handle = NULL;
+	}
+
+	return 0;
+}
+
+static int mpq_sdmx_init_metadata_buffer(struct mpq_demux *mpq_demux,
+	struct mpq_feed *feed, struct sdmx_buff_descr *metadata_buff_desc)
+{
+	void *metadata_buff_base;
+	ion_phys_addr_t temp;
+	int ret;
+	size_t size;
+
+	feed->metadata_buf_handle = ion_alloc(mpq_demux->ion_client,
+		SDMX_METADATA_BUFFER_SIZE,
+		SZ_4K,
+		ION_HEAP(ION_QSECOM_HEAP_ID),
+		0);
+	if (IS_ERR_OR_NULL(feed->metadata_buf_handle)) {
+		ret = PTR_ERR(feed->metadata_buf_handle);
+		feed->metadata_buf_handle = NULL;
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to allocate metadata buffer %d\n",
+			__func__, ret);
+		if (!ret)
+			ret = -ENOMEM;
+		goto end;
+	}
+
+	metadata_buff_base = ion_map_kernel(mpq_demux->ion_client,
+		feed->metadata_buf_handle);
+	if (IS_ERR_OR_NULL(metadata_buff_base)) {
+		ret = PTR_ERR(metadata_buff_base);
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to map metadata buffer %d\n",
+			__func__, ret);
+		if (!ret)
+			ret = -ENOMEM;
+		goto failed_free_metadata_buf;
+	}
+
+	ret = ion_phys(mpq_demux->ion_client,
+		feed->metadata_buf_handle,
+		&temp,
+		&size);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to get physical address %d\n",
+			__func__, ret);
+		goto failed_unmap_metadata_buf;
+	}
+	metadata_buff_desc->size = size;
+	metadata_buff_desc->base_addr = (u64)temp;
+
+	dvb_ringbuffer_init(&feed->metadata_buf, metadata_buff_base,
+		SDMX_METADATA_BUFFER_SIZE);
+
+	return 0;
+
+failed_unmap_metadata_buf:
+	ion_unmap_kernel(mpq_demux->ion_client, feed->metadata_buf_handle);
+failed_free_metadata_buf:
+	ion_free(mpq_demux->ion_client, feed->metadata_buf_handle);
+	feed->metadata_buf_handle = NULL;
+end:
+	return ret;
+}
+
+static int mpq_sdmx_terminate_metadata_buffer(struct mpq_feed *mpq_feed)
+{
+	struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+
+	if (mpq_feed->metadata_buf_handle) {
+		ion_unmap_kernel(mpq_demux->ion_client,
+			mpq_feed->metadata_buf_handle);
+		mpq_feed->metadata_buf.data = NULL;
+		ion_free(mpq_demux->ion_client,
+			mpq_feed->metadata_buf_handle);
+		mpq_feed->metadata_buf_handle = NULL;
+	}
+
+	return 0;
+}
+
+int mpq_dmx_terminate_feed(struct dvb_demux_feed *feed)
+{
+	int ret = 0;
+	struct mpq_demux *mpq_demux;
+	struct mpq_feed *mpq_feed;
+	struct mpq_feed *main_rec_feed = NULL;
+	struct dvb_demux_feed *tmp;
+
+	if (feed == NULL)
+		return -EINVAL;
+
+	mpq_demux = feed->demux->priv;
+
+	mutex_lock(&mpq_demux->mutex);
+	mpq_feed = feed->priv;
+
+	if (mpq_feed->sdmx_filter_handle != SDMX_INVALID_FILTER_HANDLE) {
+		if (mpq_feed->filter_type == SDMX_RAW_FILTER) {
+			tmp = mpq_dmx_peer_rec_feed(feed);
+			if (tmp)
+				main_rec_feed = tmp->priv;
+		}
+
+		if (main_rec_feed) {
+			/* This feed is part of a recording filter */
+			MPQ_DVB_DBG_PRINT(
+				"%s: Removing raw pid %d from filter %d\n",
+				__func__, feed->pid,
+				mpq_feed->sdmx_filter_handle);
+			ret = sdmx_remove_raw_pid(
+				mpq_demux->sdmx_session_handle,
+				mpq_feed->sdmx_filter_handle, feed->pid);
+			if (ret)
+				MPQ_DVB_ERR_PRINT(
+					"%s: SDMX_remove_raw_pid failed. ret = %d\n",
+					__func__, ret);
+
+			/*
+			 * If the feed being removed was the primary feed,
+			 * promote the remaining recording feed to primary.
+			 */
+			if (!mpq_feed->secondary_feed)
+				main_rec_feed->secondary_feed = 0;
+		} else {
+			MPQ_DVB_DBG_PRINT("%s: Removing filter %d, pid %d\n",
+				__func__, mpq_feed->sdmx_filter_handle,
+				feed->pid);
+			ret = sdmx_remove_filter(mpq_demux->sdmx_session_handle,
+				mpq_feed->sdmx_filter_handle);
+			if (ret) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: SDMX_remove_filter failed. ret = %d\n",
+					__func__, ret);
+			}
+
+			mpq_demux->sdmx_filter_count--;
+			mpq_feed->sdmx_filter_handle =
+				SDMX_INVALID_FILTER_HANDLE;
+		}
+
+		mpq_sdmx_close_session(mpq_demux);
+		if (mpq_demux->num_secure_feeds > 0)
+			mpq_demux->num_secure_feeds--;
+		else
+			MPQ_DVB_DBG_PRINT("%s: Invalid secure feed count = %u\n",
+				 __func__, mpq_demux->num_secure_feeds);
+	}
+
+	if (dvb_dmx_is_video_feed(feed)) {
+		ret = mpq_dmx_terminate_video_feed(mpq_feed);
+		if (ret)
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_dmx_terminate_video_feed failed. ret = %d\n",
+				__func__, ret);
+	} else if (dvb_dmx_is_audio_feed(feed)) {
+		ret = mpq_dmx_terminate_audio_feed(mpq_feed);
+		if (ret)
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_dmx_terminate_audio_feed failed. ret = %d\n",
+				__func__, ret);
+	}
+
+	if (mpq_feed->sdmx_buf_handle) {
+		wake_up_all(&mpq_feed->sdmx_buf.queue);
+		mpq_sdmx_free_data_buf(mpq_feed);
+	}
+
+	mpq_sdmx_terminate_metadata_buffer(mpq_feed);
+	if (mpq_demux->num_active_feeds > 0)
+		mpq_demux->num_active_feeds--;
+	else
+		MPQ_DVB_DBG_PRINT("%s: Invalid num_active_feeds count = %u\n",
+				  __func__, mpq_demux->num_active_feeds);
+
+	mutex_unlock(&mpq_demux->mutex);
+
+	return ret;
+}
+
+int mpq_dmx_decoder_fullness_init(struct dvb_demux_feed *feed)
+{
+	struct mpq_feed *mpq_feed;
+
+	if (dvb_dmx_is_video_feed(feed)) {
+		struct mpq_video_feed_info *feed_data;
+
+		mpq_feed = feed->priv;
+		feed_data = &mpq_feed->video_info;
+		feed_data->fullness_wait_cancel = 0;
+
+		return 0;
+	} else if (dvb_dmx_is_audio_feed(feed)) {
+		struct mpq_audio_feed_info *feed_data;
+
+		mpq_feed = feed->priv;
+		feed_data = &mpq_feed->audio_info;
+		feed_data->fullness_wait_cancel = 0;
+
+		return 0;
+	}
+
+	MPQ_DVB_DBG_PRINT("%s: Invalid feed type %d\n", __func__,
+			   feed->pes_type);
+
+	return -EINVAL;
+}
+
+/**
+ * Checks whether the free space in the decoder's output
+ * buffer is at least the specified number of bytes.
+ *
+ * @sbuff: MPQ stream buffer used for decoder data.
+ * @required_space: number of required free bytes in the buffer
+ *
+ * Return 1 if the required free bytes are available, 0 otherwise.
+ */
+static inline int mpq_dmx_check_video_decoder_fullness(
+	struct mpq_streambuffer *sbuff,
+	size_t required_space)
+{
+	ssize_t free = mpq_streambuffer_data_free(sbuff);
+	ssize_t free_meta = mpq_streambuffer_metadata_free(sbuff);
+
+	/* Verify meta-data buffer can contain at least 1 packet */
+	if (free_meta < VIDEO_META_DATA_PACKET_SIZE)
+		return 0;
+
+	/*
+	 * For linear buffers, verify there is enough space for this TSP
+	 * and that an additional buffer is free, as framing might require
+	 * one more buffer to be available.
+	 */
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+		return (free >= required_space &&
+			sbuff->pending_buffers_count < sbuff->buffers_num-1);
+	else
+		/* Ring buffer mode */
+		return (free >= required_space);
+}
+
+static inline int mpq_dmx_check_audio_decoder_fullness(
+	struct mpq_streambuffer *sbuff,
+	size_t required_space)
+{
+	ssize_t free = mpq_streambuffer_data_free(sbuff);
+	ssize_t free_meta = mpq_streambuffer_metadata_free(sbuff);
+
+	/* Verify meta-data buffer can contain at least 1 packet */
+	if (free_meta < AUDIO_META_DATA_PACKET_SIZE)
+		return 0;
+
+	/*
+	 * For linear buffers, verify there is enough space for this TSP
+	 * and that an additional buffer is free, as framing might require
+	 * one more buffer to be available.
+	 */
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+		return (free >= required_space &&
+			sbuff->pending_buffers_count < sbuff->buffers_num-1);
+	else
+		return (free >= required_space); /* Ring buffer mode */
+}
+
+/**
+ * Checks whether the decoder's output buffer has free space
+ * for the specified number of bytes; if not, the function waits
+ * until that amount of free space becomes available.
+ *
+ * @feed: decoder's feed object
+ * @required_space: number of required free bytes in the buffer
+ * @lock_feed: indicates whether the demux mutex should be taken before
+ * accessing the feed information. Set this to 0 if the caller already
+ * holds the mutex, and to 1 otherwise.
+ *
+ * Return 0 if the required space is available, or an error code
+ * in case waiting on buffer fullness was aborted.
+ */
+static int mpq_dmx_decoder_fullness_check(
+		struct dvb_demux_feed *feed,
+		size_t required_space,
+		int lock_feed)
+{
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+	struct mpq_streambuffer *sbuff = NULL;
+	struct mpq_video_feed_info *feed_data;
+	struct mpq_feed *mpq_feed;
+	int ret = 0;
+
+	if (!dvb_dmx_is_video_feed(feed)) {
+		MPQ_DVB_DBG_PRINT("%s: Invalid feed type %d\n",
+			__func__,
+			feed->pes_type);
+		return -EINVAL;
+	}
+
+	if (lock_feed) {
+		mutex_lock(&mpq_demux->mutex);
+	} else if (!mutex_is_locked(&mpq_demux->mutex)) {
+		MPQ_DVB_ERR_PRINT(
+				"%s: Mutex should have been locked\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	mpq_feed = feed->priv;
+	feed_data = &mpq_feed->video_info;
+
+	sbuff = feed_data->video_buffer;
+	if (sbuff == NULL) {
+		if (lock_feed)
+			mutex_unlock(&mpq_demux->mutex);
+		MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer object is NULL\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if ((feed_data->video_buffer != NULL) &&
+		(!feed_data->fullness_wait_cancel) &&
+		(!mpq_dmx_check_video_decoder_fullness(sbuff,
+						       required_space))) {
+		DEFINE_WAIT(__wait);
+
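+		/*
+		 * Open-coded interruptible wait: the demux mutex is
+		 * released while sleeping so other users can progress,
+		 * then re-acquired before re-checking the condition.
+		 */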
+		for (;;) {
+			prepare_to_wait(&sbuff->raw_data.queue,
+				&__wait,
+				TASK_INTERRUPTIBLE);
+			if (!feed_data->video_buffer ||
+				feed_data->fullness_wait_cancel ||
+				mpq_dmx_check_video_decoder_fullness(sbuff,
+					required_space))
+				break;
+
+			if (!signal_pending(current)) {
+				mutex_unlock(&mpq_demux->mutex);
+				schedule();
+				mutex_lock(&mpq_demux->mutex);
+				continue;
+			}
+
+			ret = -ERESTARTSYS;
+			break;
+		}
+		finish_wait(&sbuff->raw_data.queue, &__wait);
+	}
+
+	if (ret < 0) {
+		if (lock_feed)
+			mutex_unlock(&mpq_demux->mutex);
+		return ret;
+	}
+
+	if ((feed_data->fullness_wait_cancel) ||
+		(feed_data->video_buffer == NULL)) {
+		if (lock_feed)
+			mutex_unlock(&mpq_demux->mutex);
+		return -EINVAL;
+	}
+
+	if (lock_feed)
+		mutex_unlock(&mpq_demux->mutex);
+	return 0;
+}
+
+static int mpq_dmx_audio_decoder_fullness_check(
+		struct dvb_demux_feed *feed,
+		size_t required_space,
+		int lock_feed)
+{
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+	struct mpq_streambuffer *sbuff = NULL;
+	struct mpq_audio_feed_info *feed_data;
+	struct mpq_feed *mpq_feed;
+	int ret = 0;
+
+	if (!dvb_dmx_is_audio_feed(feed)) {
+		MPQ_DVB_DBG_PRINT("%s: Invalid feed type %d\n",
+			__func__,
+			feed->pes_type);
+		return -EINVAL;
+	}
+
+	if (lock_feed) {
+		mutex_lock(&mpq_demux->mutex);
+	} else if (!mutex_is_locked(&mpq_demux->mutex)) {
+		MPQ_DVB_ERR_PRINT(
+				"%s: Mutex should have been locked\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	mpq_feed = feed->priv;
+	feed_data = &mpq_feed->audio_info;
+
+	sbuff = feed_data->audio_buffer;
+	if (sbuff == NULL) {
+		if (lock_feed)
+			mutex_unlock(&mpq_demux->mutex);
+		MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer object is NULL\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if ((feed_data->audio_buffer != NULL) &&
+		(!feed_data->fullness_wait_cancel) &&
+		(!mpq_dmx_check_audio_decoder_fullness(sbuff,
+						       required_space))) {
+		DEFINE_WAIT(__wait);
+
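+		/* Same open-coded interruptible wait as in the video path */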
+		for (;;) {
+			prepare_to_wait(&sbuff->raw_data.queue,
+					&__wait, TASK_INTERRUPTIBLE);
+			if (!feed_data->audio_buffer ||
+				feed_data->fullness_wait_cancel ||
+				mpq_dmx_check_audio_decoder_fullness(sbuff,
+					required_space))
+				break;
+
+			if (!signal_pending(current)) {
+				mutex_unlock(&mpq_demux->mutex);
+				schedule();
+				mutex_lock(&mpq_demux->mutex);
+				continue;
+			}
+
+			ret = -ERESTARTSYS;
+			break;
+		}
+		finish_wait(&sbuff->raw_data.queue, &__wait);
+	}
+
+	if (ret < 0) {
+		if (lock_feed)
+			mutex_unlock(&mpq_demux->mutex);
+		return ret;
+	}
+
+	if ((feed_data->fullness_wait_cancel) ||
+		(feed_data->audio_buffer == NULL)) {
+		if (lock_feed)
+			mutex_unlock(&mpq_demux->mutex);
+		return -EINVAL;
+	}
+
+	if (lock_feed)
+		mutex_unlock(&mpq_demux->mutex);
+	return 0;
+}
+
+int mpq_dmx_decoder_fullness_wait(
+		struct dvb_demux_feed *feed,
+		size_t required_space)
+{
+	if (dvb_dmx_is_video_feed(feed))
+		return mpq_dmx_decoder_fullness_check(feed, required_space, 1);
+	else if (dvb_dmx_is_audio_feed(feed))
+		return mpq_dmx_audio_decoder_fullness_check(feed,
+							    required_space, 1);
+
+	return 0;
+}
+
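+/*
+ * Abort a pending decoder fullness wait: set the cancel flag and wake
+ * any waiters on the decoder output buffer.
+ */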
+int mpq_dmx_decoder_fullness_abort(struct dvb_demux_feed *feed)
+{
+	if (dvb_dmx_is_video_feed(feed)) {
+		struct mpq_feed *mpq_feed;
+		struct mpq_video_feed_info *feed_data;
+		struct dvb_ringbuffer *video_buff;
+
+		mpq_feed = feed->priv;
+		feed_data = &mpq_feed->video_info;
+
+		feed_data->fullness_wait_cancel = 1;
+
+		spin_lock(&feed_data->video_buffer_lock);
+		if (feed_data->video_buffer == NULL) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: video_buffer released\n", __func__);
+			spin_unlock(&feed_data->video_buffer_lock);
+			return 0;
+		}
+
+		video_buff = &feed_data->video_buffer->raw_data;
+		wake_up_all(&video_buff->queue);
+		spin_unlock(&feed_data->video_buffer_lock);
+
+		return 0;
+	} else if (dvb_dmx_is_audio_feed(feed)) {
+		struct mpq_feed *mpq_feed;
+		struct mpq_audio_feed_info *feed_data;
+		struct dvb_ringbuffer *audio_buff;
+
+		mpq_feed = feed->priv;
+		feed_data = &mpq_feed->audio_info;
+
+		feed_data->fullness_wait_cancel = 1;
+
+		spin_lock(&feed_data->audio_buffer_lock);
+		if (feed_data->audio_buffer == NULL) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: audio_buffer released\n", __func__);
+			spin_unlock(&feed_data->audio_buffer_lock);
+			return 0;
+		}
+
+		audio_buff = &feed_data->audio_buffer->raw_data;
+		wake_up_all(&audio_buff->queue);
+		spin_unlock(&feed_data->audio_buffer_lock);
+
+		return 0;
+	}
+
+	MPQ_DVB_ERR_PRINT(
+			"%s: Invalid feed type %d\n", __func__, feed->pes_type);
+
+	return -EINVAL;
+}
+
+int mpq_dmx_parse_mandatory_pes_header(
+				struct dvb_demux_feed *feed,
+				struct mpq_video_feed_info *feed_data,
+				struct pes_packet_header *pes_header,
+				const u8 *buf,
+				u32 *ts_payload_offset,
+				int *bytes_avail)
+{
+	int left_size, copy_len;
+
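+	/*
+	 * Accumulate the fixed-length part of the PES header (up to and
+	 * including PES_header_data_length); it may be split across
+	 * several TS packets.
+	 */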
+	if (feed_data->pes_header_offset < PES_MANDATORY_FIELDS_LEN) {
+		left_size =
+			PES_MANDATORY_FIELDS_LEN -
+			feed_data->pes_header_offset;
+
+		copy_len = (left_size > *bytes_avail) ?
+					*bytes_avail :
+					left_size;
+
+		memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+				(buf + *ts_payload_offset),
+				copy_len);
+
+		feed_data->pes_header_offset += copy_len;
+
+		if (left_size > *bytes_avail)
+			return -EINVAL;
+
+		/* else - we have the beginning of the PES header */
+		*bytes_avail -= left_size;
+		*ts_payload_offset += left_size;
+
+		/* Make sure the PES packet is valid */
+		if (mpq_dmx_is_valid_video_pes(pes_header) < 0) {
+			/*
+			 * Since the new PES header parsing
+			 * failed, reset pusi_seen to drop all
+			 * data until next PUSI
+			 */
+			feed->pusi_seen = 0;
+			feed_data->pes_header_offset = 0;
+
+			MPQ_DVB_ERR_PRINT(
+				"%s: invalid packet\n",
+				__func__);
+
+			return -EINVAL;
+		}
+
+		feed_data->pes_header_left_bytes =
+			pes_header->pes_header_data_length;
+	}
+
+	return 0;
+}
+
+int mpq_dmx_parse_mandatory_audio_pes_header(
+				struct dvb_demux_feed *feed,
+				struct mpq_audio_feed_info *feed_data,
+				struct pes_packet_header *pes_header,
+				const u8 *buf,
+				u32 *ts_payload_offset,
+				int *bytes_avail)
+{
+	int left_size, copy_len;
+
+	if (feed_data->pes_header_offset < PES_MANDATORY_FIELDS_LEN) {
+		left_size =
+			PES_MANDATORY_FIELDS_LEN -
+			feed_data->pes_header_offset;
+
+		copy_len = (left_size > *bytes_avail) ?
+					*bytes_avail :
+					left_size;
+
+		memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+				(buf + *ts_payload_offset),
+				copy_len);
+
+		feed_data->pes_header_offset += copy_len;
+
+		if (left_size > *bytes_avail)
+			return -EINVAL;
+
+		/* else - we have the beginning of the PES header */
+		*bytes_avail -= left_size;
+		*ts_payload_offset += left_size;
+
+		/* Make sure the PES packet is valid */
+		if (mpq_dmx_is_valid_audio_pes(pes_header) < 0) {
+			/*
+			 * Since the new PES header parsing
+			 * failed, reset pusi_seen to drop all
+			 * data until next PUSI
+			 */
+			feed->pusi_seen = 0;
+			feed_data->pes_header_offset = 0;
+
+			MPQ_DVB_ERR_PRINT(
+				"%s: invalid packet\n",
+				__func__);
+
+			return -EINVAL;
+		}
+
+		feed_data->pes_header_left_bytes =
+			pes_header->pes_header_data_length;
+	}
+
+	return 0;
+}
+
+static inline void mpq_dmx_get_pts_dts(struct mpq_video_feed_info *feed_data,
+				struct pes_packet_header *pes_header)
+{
+	struct dmx_pts_dts_info *info = &(feed_data->new_pts_dts_info);
+
+	/* Get PTS/DTS information from PES header */
+
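+	/*
+	 * PTS_DTS_flags: '10' (2) means only a PTS is present, '11' (3)
+	 * means both PTS and DTS are present. The 33-bit timestamps are
+	 * carried in several split bit-fields of the PES header and are
+	 * reassembled below.
+	 */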
+	if ((pes_header->pts_dts_flag == 2) ||
+		(pes_header->pts_dts_flag == 3)) {
+		info->pts_exist = 1;
+
+		info->pts =
+			((u64)pes_header->pts_1 << 30) |
+			((u64)pes_header->pts_2 << 22) |
+			((u64)pes_header->pts_3 << 15) |
+			((u64)pes_header->pts_4 << 7) |
+			(u64)pes_header->pts_5;
+	} else {
+		info->pts_exist = 0;
+		info->pts = 0;
+	}
+
+	if (pes_header->pts_dts_flag == 3) {
+		info->dts_exist = 1;
+
+		info->dts =
+			((u64)pes_header->dts_1 << 30) |
+			((u64)pes_header->dts_2 << 22) |
+			((u64)pes_header->dts_3 << 15) |
+			((u64)pes_header->dts_4 << 7) |
+			(u64)pes_header->dts_5;
+	} else {
+		info->dts_exist = 0;
+		info->dts = 0;
+	}
+
+	feed_data->new_info_exists = 1;
+}
+
+static inline void mpq_dmx_get_audio_pts_dts(
+				struct mpq_audio_feed_info *feed_data,
+				struct pes_packet_header *pes_header)
+{
+	struct dmx_pts_dts_info *info = &(feed_data->new_pts_dts_info);
+
+	/* Get PTS/DTS information from PES header */
+
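+	/* Same PTS_DTS_flags decoding as in mpq_dmx_get_pts_dts() above */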
+	if ((pes_header->pts_dts_flag == 2) ||
+		(pes_header->pts_dts_flag == 3)) {
+		info->pts_exist = 1;
+
+		info->pts =
+			((u64)pes_header->pts_1 << 30) |
+			((u64)pes_header->pts_2 << 22) |
+			((u64)pes_header->pts_3 << 15) |
+			((u64)pes_header->pts_4 << 7) |
+			(u64)pes_header->pts_5;
+	} else {
+		info->pts_exist = 0;
+		info->pts = 0;
+	}
+
+	if (pes_header->pts_dts_flag == 3) {
+		info->dts_exist = 1;
+
+		info->dts =
+			((u64)pes_header->dts_1 << 30) |
+			((u64)pes_header->dts_2 << 22) |
+			((u64)pes_header->dts_3 << 15) |
+			((u64)pes_header->dts_4 << 7) |
+			(u64)pes_header->dts_5;
+	} else {
+		info->dts_exist = 0;
+		info->dts = 0;
+	}
+
+	feed_data->new_info_exists = 1;
+}
+
+int mpq_dmx_parse_remaining_pes_header(
+				struct dvb_demux_feed *feed,
+				struct mpq_video_feed_info *feed_data,
+				struct pes_packet_header *pes_header,
+				const u8 *buf,
+				u32 *ts_payload_offset,
+				int *bytes_avail)
+{
+	int left_size, copy_len;
+
+	/* Remaining header bytes that need to be processed? */
+	if (!feed_data->pes_header_left_bytes)
+		return 0;
+
+	/* Did we capture the PTS value (if it exists)? */
+	if ((*bytes_avail != 0) &&
+		(feed_data->pes_header_offset <
+		 (PES_MANDATORY_FIELDS_LEN+5)) &&
+		((pes_header->pts_dts_flag == 2) ||
+		 (pes_header->pts_dts_flag == 3))) {
+
+		/* 5 more bytes should be there */
+		left_size =
+			PES_MANDATORY_FIELDS_LEN + 5 -
+			feed_data->pes_header_offset;
+
+		copy_len = (left_size > *bytes_avail) ?
+					*bytes_avail :
+					left_size;
+
+		memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+			(buf + *ts_payload_offset),
+			copy_len);
+
+		feed_data->pes_header_offset += copy_len;
+		feed_data->pes_header_left_bytes -= copy_len;
+
+		if (left_size > *bytes_avail)
+			return -EINVAL;
+
+		/* else - we have the PTS */
+		*bytes_avail -= copy_len;
+		*ts_payload_offset += copy_len;
+	}
+
+	/* Did we capture the DTS value (if it exists)? */
+	if ((*bytes_avail != 0) &&
+		(feed_data->pes_header_offset <
+		 (PES_MANDATORY_FIELDS_LEN+10)) &&
+		(pes_header->pts_dts_flag == 3)) {
+
+		/* 5 more bytes should be there */
+		left_size =
+			PES_MANDATORY_FIELDS_LEN + 10 -
+			feed_data->pes_header_offset;
+
+		copy_len = (left_size > *bytes_avail) ?
+					*bytes_avail :
+					left_size;
+
+		memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+			(buf + *ts_payload_offset),
+			copy_len);
+
+		feed_data->pes_header_offset += copy_len;
+		feed_data->pes_header_left_bytes -= copy_len;
+
+		if (left_size > *bytes_avail)
+			return -EINVAL;
+
+		/* else - we have the DTS */
+		*bytes_avail -= copy_len;
+		*ts_payload_offset += copy_len;
+	}
+
+	/* More header bytes left than available data? Wait for next packet */
+	if (feed_data->pes_header_left_bytes >= *bytes_avail) {
+		feed_data->pes_header_left_bytes -= *bytes_avail;
+		return -EINVAL;
+	}
+
+	/* get PTS/DTS information from PES header to be written later */
+	mpq_dmx_get_pts_dts(feed_data, pes_header);
+
+	/* Got PES header, process payload */
+	*bytes_avail -= feed_data->pes_header_left_bytes;
+	*ts_payload_offset += feed_data->pes_header_left_bytes;
+	feed_data->pes_header_left_bytes = 0;
+
+	return 0;
+}
+
+int mpq_dmx_parse_remaining_audio_pes_header(
+				struct dvb_demux_feed *feed,
+				struct mpq_audio_feed_info *feed_data,
+				struct pes_packet_header *pes_header,
+				const u8 *buf,
+				u32 *ts_payload_offset,
+				int *bytes_avail)
+{
+	int left_size, copy_len;
+
+	/* Remaining header bytes that need to be processed? */
+	if (!feed_data->pes_header_left_bytes)
+		return 0;
+
+	/* Did we capture the PTS value (if it exists)? */
+	if ((*bytes_avail != 0) &&
+		(feed_data->pes_header_offset <
+		 (PES_MANDATORY_FIELDS_LEN+5)) &&
+		((pes_header->pts_dts_flag == 2) ||
+		 (pes_header->pts_dts_flag == 3))) {
+
+		/* 5 more bytes should be there */
+		left_size =
+			PES_MANDATORY_FIELDS_LEN + 5 -
+			feed_data->pes_header_offset;
+
+		copy_len =
+			(left_size > *bytes_avail) ? *bytes_avail : left_size;
+
+		memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+			(buf + *ts_payload_offset), copy_len);
+
+		feed_data->pes_header_offset += copy_len;
+		feed_data->pes_header_left_bytes -= copy_len;
+
+		if (left_size > *bytes_avail)
+			return -EINVAL;
+
+		/* else - we have the PTS */
+		*bytes_avail -= copy_len;
+		*ts_payload_offset += copy_len;
+	}
+
+	/* Did we capture the DTS value (if it exists)? */
+	if ((*bytes_avail != 0) &&
+		(feed_data->pes_header_offset <
+		 (PES_MANDATORY_FIELDS_LEN+10)) &&
+		(pes_header->pts_dts_flag == 3)) {
+
+		/* 5 more bytes should be there */
+		left_size =
+			PES_MANDATORY_FIELDS_LEN + 10 -
+			feed_data->pes_header_offset;
+
+		copy_len = (left_size > *bytes_avail) ?
+					*bytes_avail :
+					left_size;
+
+		memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+			(buf + *ts_payload_offset),
+			copy_len);
+
+		feed_data->pes_header_offset += copy_len;
+		feed_data->pes_header_left_bytes -= copy_len;
+
+		if (left_size > *bytes_avail)
+			return -EINVAL;
+
+		/* else - we have the DTS */
+		*bytes_avail -= copy_len;
+		*ts_payload_offset += copy_len;
+	}
+
+	/* More header bytes left than available data? Wait for next packet */
+	if (feed_data->pes_header_left_bytes >= *bytes_avail) {
+		feed_data->pes_header_left_bytes -= *bytes_avail;
+		return -EINVAL;
+	}
+
+	/* get PTS/DTS information from PES header to be written later */
+	mpq_dmx_get_audio_pts_dts(feed_data, pes_header);
+
+	/* Got PES header, process payload */
+	*bytes_avail -= feed_data->pes_header_left_bytes;
+	*ts_payload_offset += feed_data->pes_header_left_bytes;
+	feed_data->pes_header_left_bytes = 0;
+
+	return 0;
+}
+
+static void mpq_dmx_check_continuity(struct mpq_video_feed_info *feed_data,
+					int current_continuity,
+					int discontinuity_indicator)
+{
+	const int max_continuity = 0x0F; /* 4 bits in the TS packet header */
+
+	/* sanity check */
+	if (unlikely((current_continuity < 0) ||
+			(current_continuity > max_continuity))) {
+		MPQ_DVB_DBG_PRINT(
+			"%s: received invalid continuity counter value %d\n",
+					__func__, current_continuity);
+		return;
+	}
+
+	/* reset last continuity */
+	if ((feed_data->last_continuity == -1) ||
+		(discontinuity_indicator)) {
+		feed_data->last_continuity = current_continuity;
+		return;
+	}
+
+	/* check for continuity errors */
+	if (current_continuity !=
+			((feed_data->last_continuity + 1) & max_continuity))
+		feed_data->continuity_errs++;
+
+	/* save for next time */
+	feed_data->last_continuity = current_continuity;
+}
+
+static void mpq_dmx_check_audio_continuity(
+					struct mpq_audio_feed_info *feed_data,
+					int current_continuity,
+					int discontinuity_indicator)
+{
+	const int max_continuity = 0x0F; /* 4 bits in the TS packet header */
+
+	/* sanity check */
+	if (unlikely((current_continuity < 0) ||
+			(current_continuity > max_continuity))) {
+		MPQ_DVB_DBG_PRINT(
+			"%s: received invalid continuity counter value %d\n",
+					__func__, current_continuity);
+		return;
+	}
+
+	/* reset last continuity */
+	if ((feed_data->last_continuity == -1) || (discontinuity_indicator)) {
+		feed_data->last_continuity = current_continuity;
+		return;
+	}
+
+	/* check for continuity errors */
+	if (current_continuity !=
+			((feed_data->last_continuity + 1) & max_continuity))
+		feed_data->continuity_errs++;
+
+	/* save for next time */
+	feed_data->last_continuity = current_continuity;
+}
+
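+/*
+ * Fill a DMX_OK_DECODER_BUF data-ready event from a stream-buffer packet
+ * and its meta-data, then reset the per-frame error/packet counters.
+ */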
+static inline void mpq_dmx_prepare_es_event_data(
+			struct mpq_streambuffer_packet_header *packet,
+			struct mpq_adapter_video_meta_data *meta_data,
+			struct mpq_video_feed_info *feed_data,
+			struct mpq_streambuffer *stream_buffer,
+			struct dmx_data_ready *data,
+			int cookie)
+{
+	struct dmx_pts_dts_info *pts_dts;
+
+	if (meta_data->packet_type == DMX_PES_PACKET) {
+		pts_dts = &meta_data->info.pes.pts_dts_info;
+		data->buf.stc = meta_data->info.pes.stc;
+	} else {
+		pts_dts = &meta_data->info.framing.pts_dts_info;
+		data->buf.stc = meta_data->info.framing.stc;
+	}
+
+	data->data_length = 0;
+	data->buf.handle = packet->raw_data_handle;
+	data->buf.cookie = cookie;
+	data->buf.offset = packet->raw_data_offset;
+	data->buf.len = packet->raw_data_len;
+	data->buf.pts_exists = pts_dts->pts_exist;
+	data->buf.pts = pts_dts->pts;
+	data->buf.dts_exists = pts_dts->dts_exist;
+	data->buf.dts = pts_dts->dts;
+	data->buf.tei_counter = feed_data->tei_errs;
+	data->buf.cont_err_counter = feed_data->continuity_errs;
+	data->buf.ts_packets_num = feed_data->ts_packets_num;
+	data->buf.ts_dropped_bytes = feed_data->ts_dropped_bytes;
+	data->status = DMX_OK_DECODER_BUF;
+
+	MPQ_DVB_DBG_PRINT("%s: cookie=%d\n", __func__, data->buf.cookie);
+
+	/* reset counters */
+	feed_data->ts_packets_num = 0;
+	feed_data->ts_dropped_bytes = 0;
+	feed_data->tei_errs = 0;
+	feed_data->continuity_errs = 0;
+}
+
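+/* Audio counterpart of mpq_dmx_prepare_es_event_data() */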
+static inline void mpq_dmx_prepare_audio_es_event_data(
+			struct mpq_streambuffer_packet_header *packet,
+			struct mpq_adapter_audio_meta_data *meta_data,
+			struct mpq_audio_feed_info *feed_data,
+			struct mpq_streambuffer *stream_buffer,
+			struct dmx_data_ready *data,
+			int cookie)
+{
+	struct dmx_pts_dts_info *pts_dts;
+
+	pts_dts = &meta_data->info.pes.pts_dts_info;
+	data->buf.stc = meta_data->info.pes.stc;
+
+	data->data_length = 0;
+	data->buf.handle = packet->raw_data_handle;
+	data->buf.cookie = cookie;
+	data->buf.offset = packet->raw_data_offset;
+	data->buf.len = packet->raw_data_len;
+	data->buf.pts_exists = pts_dts->pts_exist;
+	data->buf.pts = pts_dts->pts;
+	data->buf.dts_exists = pts_dts->dts_exist;
+	data->buf.dts = pts_dts->dts;
+	data->buf.tei_counter = feed_data->tei_errs;
+	data->buf.cont_err_counter = feed_data->continuity_errs;
+	data->buf.ts_packets_num = feed_data->ts_packets_num;
+	data->buf.ts_dropped_bytes = feed_data->ts_dropped_bytes;
+	data->status = DMX_OK_DECODER_BUF;
+
+	MPQ_DVB_DBG_PRINT("%s: cookie=%d\n", __func__, data->buf.cookie);
+
+	/* reset counters */
+	feed_data->ts_packets_num = 0;
+	feed_data->ts_dropped_bytes = 0;
+	feed_data->tei_errs = 0;
+	feed_data->continuity_errs = 0;
+}
+
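+/*
+ * Describe the DVR input ring buffer (physical base address and size)
+ * so it can be handed to the secure demux.
+ */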
+static int mpq_sdmx_dvr_buffer_desc(struct mpq_demux *mpq_demux,
+	struct sdmx_buff_descr *buf_desc)
+{
+	struct dvb_ringbuffer *rbuf = (struct dvb_ringbuffer *)
+				mpq_demux->demux.dmx.dvr_input.ringbuff;
+	struct ion_handle *ion_handle =
+		mpq_demux->demux.dmx.dvr_input.priv_handle;
+	ion_phys_addr_t phys_addr;
+	size_t len;
+	int ret;
+
+	ret = ion_phys(mpq_demux->ion_client, ion_handle, &phys_addr, &len);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: Failed to obtain physical address of input buffer. ret = %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	buf_desc->base_addr = (u64)phys_addr;
+	buf_desc->size = rbuf->size;
+
+	return 0;
+}
+
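+/* Report an output buffer overrun to the feed's data-ready callback */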
+static inline int mpq_dmx_notify_overflow(struct dvb_demux_feed *feed)
+{
+	struct dmx_data_ready data;
+
+	data.data_length = 0;
+	data.status = DMX_OVERRUN_ERROR;
+	return feed->data_ready_cb.ts(&feed->feed.ts, &data);
+}
+
+/**
+ * mpq_dmx_decoder_frame_closure - Helper function to handle closing current
+ * pending frame upon reaching EOS.
+ *
+ * @mpq_demux - mpq demux instance
+ * @mpq_feed - mpq feed object
+ */
+static void mpq_dmx_decoder_frame_closure(struct mpq_demux *mpq_demux,
+		struct mpq_feed *mpq_feed)
+{
+	struct mpq_streambuffer_packet_header packet;
+	struct mpq_streambuffer *stream_buffer;
+	struct mpq_adapter_video_meta_data meta_data;
+	struct mpq_video_feed_info *feed_data;
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	struct dmx_data_ready data;
+	int cookie;
+
+	feed_data = &mpq_feed->video_info;
+
+	/*
+	 * spin-lock is taken to protect against manipulation of video
+	 * output buffer by the API (terminate video feed, re-use of video
+	 * buffers).
+	 */
+	spin_lock(&feed_data->video_buffer_lock);
+	stream_buffer = feed_data->video_buffer;
+
+	if (stream_buffer == NULL) {
+		MPQ_DVB_DBG_PRINT("%s: video_buffer released\n", __func__);
+		spin_unlock(&feed_data->video_buffer_lock);
+		return;
+	}
+
+	/* Report last pattern found */
+	if ((feed_data->pending_pattern_len) &&
+		mpq_dmx_is_video_frame(feed->video_codec,
+			feed_data->last_framing_match_type)) {
+		meta_data.packet_type = DMX_FRAMING_INFO_PACKET;
+		mpq_dmx_write_pts_dts(feed_data,
+			&(meta_data.info.framing.pts_dts_info));
+		mpq_dmx_save_pts_dts(feed_data);
+		packet.user_data_len =
+			sizeof(struct mpq_adapter_video_meta_data);
+		packet.raw_data_len = feed_data->pending_pattern_len;
+		packet.raw_data_offset = feed_data->frame_offset;
+		meta_data.info.framing.pattern_type =
+			feed_data->last_framing_match_type;
+		meta_data.info.framing.stc = feed_data->last_framing_match_stc;
+		meta_data.info.framing.continuity_error_counter =
+			feed_data->continuity_errs;
+		meta_data.info.framing.transport_error_indicator_counter =
+			feed_data->tei_errs;
+		meta_data.info.framing.ts_dropped_bytes =
+			feed_data->ts_dropped_bytes;
+		meta_data.info.framing.ts_packets_num =
+			feed_data->ts_packets_num;
+
+		mpq_streambuffer_get_buffer_handle(stream_buffer,
+			0, /* current write buffer handle */
+			&packet.raw_data_handle);
+
+		mpq_dmx_update_decoder_stat(mpq_feed);
+
+		/* Writing meta-data that includes the framing information */
+		cookie = mpq_streambuffer_pkt_write(stream_buffer, &packet,
+			(u8 *)&meta_data);
+		if (cookie >= 0) {
+			mpq_dmx_prepare_es_event_data(&packet, &meta_data,
+				feed_data, stream_buffer, &data, cookie);
+			feed->data_ready_cb.ts(&feed->feed.ts, &data);
+		} else {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_streambuffer_pkt_write failed, ret=%d\n",
+				__func__, cookie);
+		}
+	}
+
+	spin_unlock(&feed_data->video_buffer_lock);
+}
+
+/**
+ * mpq_dmx_decoder_pes_closure - Helper function to handle closing current PES
+ * upon reaching EOS.
+ *
+ * @mpq_demux - mpq demux instance
+ * @mpq_feed - mpq feed object
+ */
+static void mpq_dmx_decoder_pes_closure(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed)
+{
+	struct mpq_streambuffer_packet_header packet;
+	struct mpq_streambuffer *stream_buffer;
+	struct mpq_adapter_video_meta_data meta_data;
+	struct mpq_video_feed_info *feed_data;
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	struct dmx_data_ready data;
+	int cookie;
+
+	feed_data = &mpq_feed->video_info;
+
+	/*
+	 * spin-lock is taken to protect against manipulation of video
+	 * output buffer by the API (terminate video feed, re-use of video
+	 * buffers).
+	 */
+	spin_lock(&feed_data->video_buffer_lock);
+	stream_buffer = feed_data->video_buffer;
+
+	if (stream_buffer == NULL) {
+		MPQ_DVB_DBG_PRINT("%s: video_buffer released\n", __func__);
+		spin_unlock(&feed_data->video_buffer_lock);
+		return;
+	}
+
+	/*
+	 * Close previous PES.
+	 * Push new packet to the meta-data buffer.
+	 */
+	if ((feed->pusi_seen) && (feed_data->pes_header_left_bytes == 0)) {
+		packet.raw_data_len = feed->peslen;
+		mpq_streambuffer_get_buffer_handle(stream_buffer,
+			0, /* current write buffer handle */
+			&packet.raw_data_handle);
+		packet.raw_data_offset = feed_data->frame_offset;
+		packet.user_data_len =
+			sizeof(struct mpq_adapter_video_meta_data);
+
+		mpq_dmx_write_pts_dts(feed_data,
+			&(meta_data.info.pes.pts_dts_info));
+
+		meta_data.packet_type = DMX_PES_PACKET;
+		meta_data.info.pes.stc = feed_data->prev_stc;
+
+		mpq_dmx_update_decoder_stat(mpq_feed);
+
+		cookie = mpq_streambuffer_pkt_write(stream_buffer, &packet,
+			(u8 *)&meta_data);
+		if (cookie >= 0) {
+			/* Save write offset where new PES will begin */
+			mpq_streambuffer_get_data_rw_offset(stream_buffer, NULL,
+				&feed_data->frame_offset);
+			mpq_dmx_prepare_es_event_data(&packet, &meta_data,
+				feed_data, stream_buffer, &data, cookie);
+			feed->data_ready_cb.ts(&feed->feed.ts, &data);
+		} else {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_streambuffer_pkt_write failed, ret=%d\n",
+				__func__, cookie);
+		}
+	}
+	/* Reset PES info */
+	feed->peslen = 0;
+	feed_data->pes_header_offset = 0;
+	feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN;
+
+	spin_unlock(&feed_data->video_buffer_lock);
+}
+
+/*
+ * In audio handling, although ES frames are sent to the decoder, the
+ * PES packet is still closed here.
+ */
+static void mpq_dmx_decoder_audio_pes_closure(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed)
+{
+	struct mpq_streambuffer_packet_header packet;
+	struct mpq_streambuffer *stream_buffer;
+	struct mpq_adapter_audio_meta_data meta_data;
+	struct mpq_audio_feed_info *feed_data;
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	struct dmx_data_ready data;
+	int cookie;
+
+	feed_data = &mpq_feed->audio_info;
+
+	/*
+	 * spin-lock is taken to protect against manipulation of audio
+	 * output buffer by the API (terminate audio feed, re-use of audio
+	 * buffers).
+	 */
+	spin_lock(&feed_data->audio_buffer_lock);
+	stream_buffer = feed_data->audio_buffer;
+
+	if (stream_buffer == NULL) {
+		MPQ_DVB_DBG_PRINT("%s: audio_buffer released\n", __func__);
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return;
+	}
+
+	/*
+	 * Close previous PES.
+	 * Push new packet to the meta-data buffer.
+	 */
+	if ((feed->pusi_seen) && (feed_data->pes_header_left_bytes == 0)) {
+		packet.raw_data_len = feed->peslen;
+		mpq_streambuffer_get_buffer_handle(stream_buffer,
+			0, /* current write buffer handle */
+			&packet.raw_data_handle);
+		packet.raw_data_offset = feed_data->frame_offset;
+		packet.user_data_len =
+			sizeof(struct mpq_adapter_audio_meta_data);
+
+		mpq_dmx_write_audio_pts_dts(feed_data,
+			&(meta_data.info.pes.pts_dts_info));
+
+		meta_data.packet_type = DMX_PES_PACKET;
+		meta_data.info.pes.stc = feed_data->prev_stc;
+
+		mpq_dmx_update_decoder_stat(mpq_feed);
+
+		cookie = mpq_streambuffer_pkt_write(stream_buffer, &packet,
+			(u8 *)&meta_data);
+		if (cookie >= 0) {
+			/* Save write offset where new PES will begin */
+			mpq_streambuffer_get_data_rw_offset(stream_buffer, NULL,
+				&feed_data->frame_offset);
+			mpq_dmx_prepare_audio_es_event_data(&packet, &meta_data,
+				feed_data, stream_buffer, &data, cookie);
+			feed->data_ready_cb.ts(&feed->feed.ts, &data);
+		} else {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_sb_pkt_write failed, ret=%d\n",
+				__func__, cookie);
+		}
+	}
+	/* Reset PES info */
+	feed->peslen = 0;
+	feed_data->pes_header_offset = 0;
+	feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN;
+
+	spin_unlock(&feed_data->audio_buffer_lock);
+}
+
+static int mpq_dmx_process_video_packet_framing(
+			struct dvb_demux_feed *feed,
+			const u8 *buf,
+			u64 curr_stc)
+{
+	int bytes_avail;
+	u32 ts_payload_offset;
+	struct mpq_video_feed_info *feed_data;
+	const struct ts_packet_header *ts_header;
+	struct mpq_streambuffer *stream_buffer;
+	struct pes_packet_header *pes_header;
+	struct mpq_demux *mpq_demux;
+	struct mpq_feed *mpq_feed;
+
+	struct dvb_dmx_video_patterns_results framing_res;
+	struct mpq_streambuffer_packet_header packet;
+	struct mpq_adapter_video_meta_data meta_data;
+	int bytes_written = 0;
+	int bytes_to_write = 0;
+	int found_patterns = 0;
+	int first_pattern = 0;
+	int i;
+	int is_video_frame = 0;
+	int pending_data_len = 0;
+	int ret = 0;
+	int discontinuity_indicator = 0;
+	struct dmx_data_ready data;
+
+	mpq_demux = feed->demux->priv;
+
+	mpq_feed = feed->priv;
+	feed_data = &mpq_feed->video_info;
+
+	/*
+	 * spin-lock is taken to protect against manipulation of video
+	 * output buffer by the API (terminate video feed, re-use of video
+	 * buffers). Mutex on the video-feed cannot be held here
+	 * since SW demux holds a spin-lock while calling write_to_decoder
+	 */
+	spin_lock(&feed_data->video_buffer_lock);
+	stream_buffer = feed_data->video_buffer;
+
+	if (stream_buffer == NULL) {
+		MPQ_DVB_DBG_PRINT(
+			"%s: video_buffer released\n",
+			__func__);
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0;
+	}
+
+	ts_header = (const struct ts_packet_header *)buf;
+
+	pes_header = &feed_data->pes_header;
+
+	/*
+	 * Make sure this TS packet has a payload and is not scrambled:
+	 * adaptation_field_control 0 is reserved and 2 means adaptation
+	 * field only (no payload).
+	 */
+	if ((ts_header->sync_byte != 0x47) ||
+		(ts_header->adaptation_field_control == 0) ||
+		(ts_header->adaptation_field_control == 2) ||
+		(ts_header->transport_scrambling_control)) {
+		/* continue to next packet */
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0;
+	}
+
+	if (ts_header->payload_unit_start_indicator) { /* PUSI? */
+		if (feed->pusi_seen) { /* Did we see PUSI before? */
+			/*
+			 * Double check that we are not in middle of
+			 * previous PES header parsing.
+			 */
+			if (feed_data->pes_header_left_bytes != 0)
+				MPQ_DVB_ERR_PRINT(
+					"%s: received PUSI while handling PES header of previous PES\n",
+					__func__);
+
+			feed->peslen = 0;
+			feed_data->pes_header_offset = 0;
+			feed_data->pes_header_left_bytes =
+				PES_MANDATORY_FIELDS_LEN;
+		} else {
+			feed->pusi_seen = 1;
+		}
+	}
+
+	/*
+	 * Parse PES data only if PUSI was encountered,
+	 * otherwise the data is dropped
+	 */
+	if (!feed->pusi_seen) {
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0; /* drop and wait for next packets */
+	}
+
+	ts_payload_offset = sizeof(struct ts_packet_header);
+
+	/*
+	 * Skip adaptation field if exists.
+	 * Save discontinuity indicator if exists.
+	 */
+	if (ts_header->adaptation_field_control == 3) {
+		const struct ts_adaptation_field *adaptation_field =
+			(const struct ts_adaptation_field *)(buf +
+				ts_payload_offset);
+
+		discontinuity_indicator =
+			adaptation_field->discontinuity_indicator;
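+		/* adaptation_field_length bytes plus the length byte */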
+		ts_payload_offset += buf[ts_payload_offset] + 1;
+	}
+
+	bytes_avail = TS_PACKET_SIZE - ts_payload_offset;
+
+	/* Get the mandatory fields of the video PES header */
+	if (mpq_dmx_parse_mandatory_pes_header(feed, feed_data,
+						pes_header, buf,
+						&ts_payload_offset,
+						&bytes_avail)) {
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0;
+	}
+
+	if (mpq_dmx_parse_remaining_pes_header(feed, feed_data,
+						pes_header, buf,
+						&ts_payload_offset,
+						&bytes_avail)) {
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0;
+	}
+
+	/*
+	 * If we reached here,
+	 * then we are now at the PES payload data
+	 */
+	if (bytes_avail == 0) {
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0;
+	}
+
+	/*
+	 * the decoder requires demux to do framing,
+	 * so search for the patterns now.
+	 */
+	found_patterns = dvb_dmx_video_pattern_search(
+				feed_data->patterns,
+				feed_data->patterns_num,
+				(buf + ts_payload_offset),
+				bytes_avail,
+				&feed_data->prefix_size,
+				&framing_res);
+
+	if (!feed_data->found_sequence_header_pattern) {
+		for (i = 0; i < found_patterns; i++) {
+			if ((framing_res.info[i].type ==
+				DMX_IDX_MPEG_SEQ_HEADER) ||
+			    (framing_res.info[i].type ==
+				DMX_IDX_H264_SPS) ||
+				(framing_res.info[i].type ==
+				DMX_IDX_VC1_SEQ_HEADER)) {
+
+				MPQ_DVB_DBG_PRINT(
+					"%s: Found Sequence Pattern, buf %p, i = %d, offset = %d, type = %lld\n",
+					__func__, buf, i,
+					framing_res.info[i].offset,
+					framing_res.info[i].type);
+
+				first_pattern = i;
+				feed_data->found_sequence_header_pattern = 1;
+				ts_payload_offset +=
+					framing_res.info[i].offset;
+				bytes_avail -= framing_res.info[i].offset;
+
+				if (framing_res.info[i].used_prefix_size) {
+					feed_data->first_prefix_size =
+						framing_res.info[i].
+							used_prefix_size;
+				}
+				break;
+			}
+		}
+	}
+
+	/*
+	 * If decoder requires demux to do framing,
+	 * pass data to decoder only after sequence header
+	 * or equivalent is found. Otherwise the data is dropped.
+	 */
+	if (!feed_data->found_sequence_header_pattern) {
+		feed_data->prev_stc = curr_stc;
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0;
+	}
+
+	/* Update error counters based on TS header */
+	feed_data->ts_packets_num++;
+	feed_data->tei_errs += ts_header->transport_error_indicator;
+	mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors +=
+		ts_header->transport_error_indicator;
+	mpq_dmx_check_continuity(feed_data,
+				ts_header->continuity_counter,
+				discontinuity_indicator);
+	mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors +=
+		feed_data->continuity_errs;
+
+	/* Need to back-up the PTS information of the very first frame */
+	if (feed_data->first_pts_dts_copy) {
+		for (i = first_pattern; i < found_patterns; i++) {
+			is_video_frame = mpq_dmx_is_video_frame(
+					feed->video_codec,
+					framing_res.info[i].type);
+
+			if (is_video_frame == 1) {
+				mpq_dmx_save_pts_dts(feed_data);
+				feed_data->first_pts_dts_copy = 0;
+				break;
+			}
+		}
+	}
+
+	/*
+	 * write prefix used to find first Sequence pattern, if needed.
+	 * feed_data->patterns[0]->pattern always contains the sequence
+	 * header pattern.
+	 */
+	if (feed_data->first_prefix_size) {
+		ret = mpq_streambuffer_data_write(stream_buffer,
+			feed_data->patterns[0]->pattern,
+			feed_data->first_prefix_size);
+		if (ret < 0) {
+			mpq_demux->decoder_stat
+				[feed_data->stream_interface].drop_count +=
+				feed_data->first_prefix_size;
+			feed_data->ts_dropped_bytes +=
+				feed_data->first_prefix_size;
+			MPQ_DVB_DBG_PRINT("%s: could not write prefix\n",
+				__func__);
+			if (ret == -ENOSPC)
+				mpq_dmx_notify_overflow(feed);
+		} else {
+			MPQ_DVB_DBG_PRINT(
+				"%s: Writing pattern prefix of size %d\n",
+				__func__, feed_data->first_prefix_size);
+			/*
+			 * update the length of the data we report
+			 * to include the size of the prefix that was used.
+			 */
+			feed_data->pending_pattern_len +=
+				feed_data->first_prefix_size;
+		}
+	}
+
+	feed->peslen += bytes_avail;
+	pending_data_len += bytes_avail;
+
+	meta_data.packet_type = DMX_FRAMING_INFO_PACKET;
+	packet.user_data_len = sizeof(struct mpq_adapter_video_meta_data);
+
+	/*
+	 * Go over all the patterns that were found in this packet.
+	 * For each pattern found, write the relevant data to the data
+	 * buffer, then write the respective meta-data.
+	 * Each pattern can only be reported when the next pattern is found
+	 * (in order to know the data length).
+	 * There are three possible cases for each pattern:
+	 * 1. This is the very first pattern we found in any TS packet in this
+	 *    feed.
+	 * 2. This is the first pattern found in this TS packet, but we've
+	 *    already found patterns in previous packets.
+	 * 3. This is not the first pattern in this packet, i.e., we've
+	 *    already found patterns in this TS packet.
+	 */
+	for (i = first_pattern; i < found_patterns; i++) {
+		if (i == first_pattern) {
+			/*
+			 * The way to identify the very first pattern:
+			 * 1. It's the first pattern found in this packet.
+			 * 2. The pending_pattern_len, which indicates the
+			 *    data length of the previous pattern that has
+			 *    not yet been reported, is usually 0. However,
+			 *    it may be larger than 0 if a prefix was used
+			 *    to find this pattern (i.e., the pattern was
+			 *    split over two TS packets). In that case,
+			 *    pending_pattern_len equals first_prefix_size.
+			 *    first_prefix_size is set to 0 later in this
+			 *    function.
+			 */
+			if (feed_data->first_prefix_size ==
+				feed_data->pending_pattern_len) {
+				/*
+				 * This is the very first pattern, so no
+				 * previous pending frame data exists.
+				 * Update frame info and skip to the
+				 * next frame.
+				 */
+				feed_data->last_framing_match_type =
+					framing_res.info[i].type;
+				feed_data->last_pattern_offset =
+					framing_res.info[i].offset;
+				if (framing_res.info[i].used_prefix_size)
+					feed_data->last_framing_match_stc =
+						feed_data->prev_stc;
+				else
+					feed_data->last_framing_match_stc =
+						curr_stc;
+				continue;
+			}
+			/*
+			 * This is the first pattern in this
+			 * packet and previous frame from
+			 * previous packet is pending for report
+			 */
+			bytes_to_write = framing_res.info[i].offset;
+		} else {
+			/* Previous pending frame is in the same packet */
+			bytes_to_write =
+				framing_res.info[i].offset -
+				feed_data->last_pattern_offset;
+		}
+
+		ret = mpq_streambuffer_data_write(
+			stream_buffer,
+			(buf + ts_payload_offset + bytes_written),
+			bytes_to_write);
+		if (ret < 0) {
+			mpq_demux->decoder_stat
+				[feed_data->stream_interface].drop_count +=
+				bytes_to_write;
+			feed_data->ts_dropped_bytes += bytes_to_write;
+			MPQ_DVB_DBG_PRINT(
+				"%s: Couldn't write %d bytes to data buffer, ret=%d\n",
+				__func__, bytes_to_write, ret);
+			if (ret == -ENOSPC)
+				mpq_dmx_notify_overflow(feed);
+		} else {
+			bytes_written += bytes_to_write;
+			pending_data_len -= bytes_to_write;
+			feed_data->pending_pattern_len += bytes_to_write;
+		}
+		non_predicted_video_frame = 0;
+
+		is_video_frame = mpq_dmx_is_video_frame(
+				feed->video_codec,
+				feed_data->last_framing_match_type);
+		if (is_video_frame == 1) {
+			mpq_dmx_write_pts_dts(feed_data,
+				&(meta_data.info.framing.pts_dts_info));
+			mpq_dmx_save_pts_dts(feed_data);
+
+			packet.raw_data_len = feed_data->pending_pattern_len -
+				framing_res.info[i].used_prefix_size;
+			packet.raw_data_offset = feed_data->frame_offset;
+			meta_data.info.framing.pattern_type =
+				feed_data->last_framing_match_type;
+			meta_data.info.framing.stc =
+				feed_data->last_framing_match_stc;
+			meta_data.info.framing.continuity_error_counter =
+				feed_data->continuity_errs;
+			meta_data.info.framing.
+				transport_error_indicator_counter =
+				 feed_data->tei_errs;
+			meta_data.info.framing.ts_dropped_bytes =
+				feed_data->ts_dropped_bytes;
+			meta_data.info.framing.ts_packets_num =
+				feed_data->ts_packets_num;
+
+			mpq_streambuffer_get_buffer_handle(
+				stream_buffer,
+				0,	/* current write buffer handle */
+				&packet.raw_data_handle);
+
+			mpq_dmx_update_decoder_stat(mpq_feed);
+
+			if (video_b_frame_events == 1) {
+				if (non_predicted_video_frame == 0) {
+					struct dmx_pts_dts_info *pts_dts;
+
+					pts_dts =
+					&meta_data.info.framing.pts_dts_info;
+					pts_dts->pts_exist = 0;
+					pts_dts->pts = 0;
+					pts_dts->dts_exist = 0;
+					pts_dts->dts = 0;
+				}
+			}
+			/*
+			 * Write meta-data that includes the framing information
+			 */
+			ret = mpq_streambuffer_pkt_write(stream_buffer, &packet,
+				(u8 *)&meta_data);
+			if (ret < 0) {
+				MPQ_DVB_ERR_PRINT
+					("%s: mpq_sb_pkt_write failed ret=%d\n",
+					 __func__, ret);
+				if (ret == -ENOSPC)
+					mpq_dmx_notify_overflow(feed);
+			} else {
+				mpq_dmx_prepare_es_event_data(
+					&packet, &meta_data, feed_data,
+					stream_buffer, &data, ret);
+
+				/* Trigger ES Data Event for VPTS */
+				feed->data_ready_cb.ts(&feed->feed.ts, &data);
+
+				if (feed_data->video_buffer->mode ==
+					MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+					feed_data->frame_offset = 0;
+				else
+					mpq_streambuffer_get_data_rw_offset(
+						feed_data->video_buffer,
+						NULL,
+						&feed_data->frame_offset);
+			}
+
+			/*
+			 * In linear buffers, after writing the packet
+			 * we switched over to a new linear buffer for the new
+			 * frame. In that case, we should re-write the prefix
+			 * of the existing frame if any exists.
+			 */
+			if ((MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR ==
+				 feed_data->video_buffer->mode) &&
+				framing_res.info[i].used_prefix_size) {
+				ret = mpq_streambuffer_data_write(stream_buffer,
+					feed_data->prev_pattern +
+					 DVB_DMX_MAX_PATTERN_LEN -
+					 framing_res.info[i].used_prefix_size,
+					framing_res.info[i].used_prefix_size);
+
+				if (ret < 0) {
+					feed_data->pending_pattern_len = 0;
+					mpq_demux->decoder_stat
+						[feed_data->stream_interface].
+						drop_count += bytes_avail;
+					feed_data->ts_dropped_bytes +=
+					 framing_res.info[i].used_prefix_size;
+					if (ret == -ENOSPC)
+						mpq_dmx_notify_overflow(feed);
+				} else {
+					feed_data->pending_pattern_len =
+					 framing_res.info[i].used_prefix_size;
+				}
+			} else {
+				s32 offset = (s32)feed_data->frame_offset;
+				u32 buff_size =
+				 feed_data->video_buffer->buffers[0].size;
+
+				offset -= framing_res.info[i].used_prefix_size;
+				offset += (offset < 0) ? buff_size : 0;
+				feed_data->pending_pattern_len =
+					framing_res.info[i].used_prefix_size;
+
+				if (MPQ_STREAMBUFFER_BUFFER_MODE_RING ==
+					feed_data->video_buffer->mode) {
+					feed_data->frame_offset = (u32)offset;
+				}
+			}
+		}
+
+		/* save the last match for next time */
+		feed_data->last_framing_match_type =
+			framing_res.info[i].type;
+		feed_data->last_pattern_offset =
+			framing_res.info[i].offset;
+		if (framing_res.info[i].used_prefix_size)
+			feed_data->last_framing_match_stc = feed_data->prev_stc;
+		else
+			feed_data->last_framing_match_stc = curr_stc;
+	}
+
+	feed_data->prev_stc = curr_stc;
+	feed_data->first_prefix_size = 0;
+
+	/*
+	 * Save the trailing bytes of the TS packet, as we might have a
+	 * pattern split that we need to re-use when closing the next
+	 * video linear buffer.
+	 */
+	if (MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR ==
+		feed_data->video_buffer->mode)
+		memcpy(feed_data->prev_pattern,
+			buf + TS_PACKET_SIZE - DVB_DMX_MAX_PATTERN_LEN,
+			DVB_DMX_MAX_PATTERN_LEN);
+
+	if (pending_data_len) {
+		ret = mpq_streambuffer_data_write(
+			stream_buffer,
+			(buf + ts_payload_offset + bytes_written),
+			pending_data_len);
+
+		if (ret < 0) {
+			mpq_demux->decoder_stat
+				[feed_data->stream_interface].drop_count +=
+				pending_data_len;
+			feed_data->ts_dropped_bytes += pending_data_len;
+			MPQ_DVB_DBG_PRINT(
+				"%s: Couldn't write %d pending bytes to data buffer, ret=%d\n",
+				__func__, pending_data_len, ret);
+			if (ret == -ENOSPC)
+				mpq_dmx_notify_overflow(feed);
+		} else {
+			feed_data->pending_pattern_len += pending_data_len;
+		}
+	}
+
+	spin_unlock(&feed_data->video_buffer_lock);
+	return 0;
+}
+
+static int mpq_dmx_process_video_packet_no_framing(
+			struct dvb_demux_feed *feed,
+			const u8 *buf,
+			u64 curr_stc)
+{
+	int bytes_avail;
+	u32 ts_payload_offset;
+	struct mpq_video_feed_info *feed_data;
+	const struct ts_packet_header *ts_header;
+	struct mpq_streambuffer *stream_buffer;
+	struct pes_packet_header *pes_header;
+	struct mpq_demux *mpq_demux;
+	struct mpq_feed *mpq_feed;
+	int discontinuity_indicator = 0;
+	struct dmx_data_ready data;
+	int cookie;
+	int ret;
+
+	mpq_demux = feed->demux->priv;
+	mpq_feed = feed->priv;
+	feed_data = &mpq_feed->video_info;
+
+	/*
+	 * spin-lock is taken to protect against manipulation of video
+	 * output buffer by the API (terminate video feed, re-use of video
+	 * buffers). Mutex on the video-feed cannot be held here
+	 * since SW demux holds a spin-lock while calling write_to_decoder
+	 */
+	spin_lock(&feed_data->video_buffer_lock);
+	stream_buffer = feed_data->video_buffer;
+	if (stream_buffer == NULL) {
+		MPQ_DVB_DBG_PRINT(
+			"%s: video_buffer released\n",
+			__func__);
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0;
+	}
+
+	ts_header = (const struct ts_packet_header *)buf;
+
+	pes_header = &feed_data->pes_header;
+
+	/* Make sure this TS packet has a payload and is not scrambled */
+	if ((ts_header->sync_byte != 0x47) ||
+		(ts_header->adaptation_field_control == 0) ||
+		(ts_header->adaptation_field_control == 2) ||
+		(ts_header->transport_scrambling_control)) {
+		/* continue to next packet */
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0;
+	}
+
+	if (ts_header->payload_unit_start_indicator) { /* PUSI? */
+		if (feed->pusi_seen) { /* Did we see PUSI before? */
+			struct mpq_streambuffer_packet_header packet;
+			struct mpq_adapter_video_meta_data meta_data;
+
+			/*
+			 * Close previous PES.
+			 * Push new packet to the meta-data buffer.
+			 * Double check that we are not in middle of
+			 * previous PES header parsing.
+			 */
+
+			if (feed_data->pes_header_left_bytes == 0) {
+				packet.raw_data_len = feed->peslen;
+				mpq_streambuffer_get_buffer_handle(
+					stream_buffer,
+					0, /* current write buffer handle */
+					&packet.raw_data_handle);
+				packet.raw_data_offset =
+					feed_data->frame_offset;
+				packet.user_data_len =
+					sizeof(struct
+						mpq_adapter_video_meta_data);
+
+				mpq_dmx_write_pts_dts(feed_data,
+					&(meta_data.info.pes.pts_dts_info));
+
+				/* Mark that we detected start of new PES */
+				feed_data->first_pts_dts_copy = 1;
+
+				meta_data.packet_type = DMX_PES_PACKET;
+				meta_data.info.pes.stc = feed_data->prev_stc;
+
+				mpq_dmx_update_decoder_stat(mpq_feed);
+
+				cookie = mpq_streambuffer_pkt_write(
+					stream_buffer, &packet,
+					(u8 *)&meta_data);
+				if (cookie < 0) {
+					MPQ_DVB_ERR_PRINT
+						("%s: write failed, ret=%d\n",
+						__func__, cookie);
+				} else {
+					/*
+					 * Save write offset where new PES
+					 * will begin
+					 */
+					mpq_streambuffer_get_data_rw_offset(
+						stream_buffer,
+						NULL,
+						&feed_data->frame_offset);
+
+					mpq_dmx_prepare_es_event_data(
+						&packet, &meta_data,
+						feed_data,
+						stream_buffer, &data, cookie);
+
+					feed->data_ready_cb.ts(&feed->feed.ts,
+						&data);
+				}
+			} else {
+				MPQ_DVB_ERR_PRINT(
+					"%s: received PUSI while handling PES header of previous PES\n",
+					__func__);
+			}
+
+			/* Reset PES info */
+			feed->peslen = 0;
+			feed_data->pes_header_offset = 0;
+			feed_data->pes_header_left_bytes =
+				PES_MANDATORY_FIELDS_LEN;
+		} else {
+			feed->pusi_seen = 1;
+		}
+
+		feed_data->prev_stc = curr_stc;
+	}
+
+	/*
+	 * Parse PES data only if PUSI was encountered,
+	 * otherwise the data is dropped
+	 */
+	if (!feed->pusi_seen) {
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0; /* drop and wait for next packets */
+	}
+
+	ts_payload_offset = sizeof(struct ts_packet_header);
+
+	/*
+	 * Skip adaptation field if exists.
+	 * Save discontinuity indicator if exists.
+	 */
+	if (ts_header->adaptation_field_control == 3) {
+		const struct ts_adaptation_field *adaptation_field =
+			(const struct ts_adaptation_field *)(buf +
+				ts_payload_offset);
+
+		discontinuity_indicator =
+			adaptation_field->discontinuity_indicator;
+		ts_payload_offset += buf[ts_payload_offset] + 1;
+	}
+
+	bytes_avail = TS_PACKET_SIZE - ts_payload_offset;
+
+	/* Get the mandatory fields of the video PES header */
+	if (mpq_dmx_parse_mandatory_pes_header(feed, feed_data,
+						pes_header, buf,
+						&ts_payload_offset,
+						&bytes_avail)) {
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0;
+	}
+
+	if (mpq_dmx_parse_remaining_pes_header(feed, feed_data,
+						pes_header, buf,
+						&ts_payload_offset,
+						&bytes_avail)) {
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0;
+	}
+
+	/*
+	 * If we reached here,
+	 * then we are now at the PES payload data
+	 */
+	if (bytes_avail == 0) {
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0;
+	}
+
+	/*
+	 * Need to back-up the PTS information
+	 * of the start of new PES
+	 */
+	if (feed_data->first_pts_dts_copy) {
+		mpq_dmx_save_pts_dts(feed_data);
+		feed_data->first_pts_dts_copy = 0;
+	}
+
+	/* Update error counters based on TS header */
+	feed_data->ts_packets_num++;
+	feed_data->tei_errs += ts_header->transport_error_indicator;
+	mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors +=
+		ts_header->transport_error_indicator;
+	mpq_dmx_check_continuity(feed_data,
+				ts_header->continuity_counter,
+				discontinuity_indicator);
+	mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors +=
+		feed_data->continuity_errs;
+
+	ret = mpq_streambuffer_data_write(stream_buffer, buf+ts_payload_offset,
+		bytes_avail);
+	if (ret < 0) {
+		mpq_demux->decoder_stat
+			[feed_data->stream_interface].drop_count += bytes_avail;
+		feed_data->ts_dropped_bytes += bytes_avail;
+		if (ret == -ENOSPC)
+			mpq_dmx_notify_overflow(feed);
+	} else {
+		feed->peslen += bytes_avail;
+	}
+
+	spin_unlock(&feed_data->video_buffer_lock);
+
+	return 0;
+}
+
+/*
+ * Parse PES headers and send ES packets down to the decoder.
+ * Trigger a new ES data event with APTS and QTimer on the first PES.
+ */
+static int mpq_dmx_process_audio_packet_no_framing(
+			struct dvb_demux_feed *feed,
+			const u8 *buf,
+			u64 curr_stc)
+{
+	int bytes_avail;
+	u32 ts_payload_offset;
+	struct mpq_audio_feed_info *feed_data;
+	const struct ts_packet_header *ts_header;
+	struct mpq_streambuffer *stream_buffer;
+	struct pes_packet_header *pes_header;
+	struct mpq_demux *mpq_demux;
+	struct mpq_feed *mpq_feed;
+	int discontinuity_indicator = 0;
+	struct dmx_data_ready data;
+	int cookie;
+	int ret;
+
+	mpq_demux = feed->demux->priv;
+	mpq_feed = feed->priv;
+	feed_data = &mpq_feed->audio_info;
+
+	/*
+	 * spin-lock is taken to protect against manipulation of audio
+	 * output buffer by the API (terminate audio feed, re-use of audio
+	 * buffers). Mutex on the audio-feed cannot be held here
+	 * since SW demux holds a spin-lock while calling write_to_decoder
+	 */
+	spin_lock(&feed_data->audio_buffer_lock);
+	stream_buffer = feed_data->audio_buffer;
+	if (stream_buffer == NULL) {
+		MPQ_DVB_DBG_PRINT(
+			"%s: audio_buffer released\n",
+			__func__);
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return 0;
+	}
+
+	ts_header = (const struct ts_packet_header *)buf;
+
+	pes_header = &feed_data->pes_header;
+
+	/* Make sure this TS packet has a payload and is not scrambled */
+	if ((ts_header->sync_byte != 0x47) ||
+		(ts_header->adaptation_field_control == 0) ||
+		(ts_header->adaptation_field_control == 2) ||
+		(ts_header->transport_scrambling_control)) {
+		/* continue to next packet */
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return 0;
+	}
+
+	if (ts_header->payload_unit_start_indicator) { /* PUSI? */
+		if (feed->pusi_seen) { /* Did we see PUSI before? */
+			struct mpq_streambuffer_packet_header packet;
+			struct mpq_adapter_audio_meta_data meta_data;
+
+			/*
+			 * Close previous PES.
+			 * Push new packet to the meta-data buffer.
+			 * Double check that we are not in the middle of
+			 * previous PES header parsing.
+			 */
+
+			if (feed_data->pes_header_left_bytes == 0) {
+				packet.raw_data_len = feed->peslen;
+				mpq_streambuffer_get_buffer_handle(
+					stream_buffer,
+					0, /* current write buffer handle */
+					&packet.raw_data_handle);
+				packet.raw_data_offset =
+					feed_data->frame_offset;
+				packet.user_data_len =
+					sizeof(struct
+						mpq_adapter_audio_meta_data);
+
+				mpq_dmx_write_audio_pts_dts(feed_data,
+					&(meta_data.info.pes.pts_dts_info));
+
+				/* Mark that we detected start of new PES */
+				feed_data->first_pts_dts_copy = 1;
+
+				meta_data.packet_type = DMX_PES_PACKET;
+				meta_data.info.pes.stc = feed_data->prev_stc;
+
+				mpq_dmx_update_decoder_stat(mpq_feed);
+
+				/* actual writing of stream audio headers */
+				cookie = mpq_streambuffer_pkt_write(
+					stream_buffer, &packet,
+					(u8 *)&meta_data);
+				if (cookie < 0) {
+					MPQ_DVB_ERR_PRINT
+						("%s: write failed, ret=%d\n",
+						 __func__, cookie);
+				} else {
+					/*
+					 * Save write offset where new PES
+					 * will begin
+					 */
+					mpq_streambuffer_get_data_rw_offset(
+						stream_buffer,
+						NULL,
+						&feed_data->frame_offset);
+
+					mpq_dmx_prepare_audio_es_event_data(
+						&packet, &meta_data,
+						feed_data,
+						stream_buffer, &data, cookie);
+
+					/*
+					 * Trigger ES data event for APTS
+					 * and AFRAME
+					 */
+					feed->data_ready_cb.ts(&feed->feed.ts,
+							       &data);
+				}
+			} else {
+				MPQ_DVB_ERR_PRINT(
+					"%s: received PUSI while handling PES header of previous PES\n",
+					 __func__);
+			}
+
+			/* Reset PES info */
+			feed->peslen = 0;
+			feed_data->pes_header_offset = 0;
+			feed_data->pes_header_left_bytes =
+				PES_MANDATORY_FIELDS_LEN;
+		} else {
+			feed->pusi_seen = 1;
+		}
+
+		feed_data->prev_stc = curr_stc;
+	}
+
+	/*
+	 * Parse PES data only if PUSI was encountered;
+	 * otherwise the data is dropped
+	 */
+	if (!feed->pusi_seen) {
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return 0; /* drop and wait for next packets */
+	}
+
+	ts_payload_offset = sizeof(struct ts_packet_header);
+
+	/*
+	 * Skip the adaptation field if it exists.
+	 * Save the discontinuity indicator if it is set.
+	 */
+	if (ts_header->adaptation_field_control == 3) {
+		const struct ts_adaptation_field *adaptation_field =
+			(const struct ts_adaptation_field *)(buf +
+				ts_payload_offset);
+
+		discontinuity_indicator =
+			adaptation_field->discontinuity_indicator;
+		ts_payload_offset += buf[ts_payload_offset] + 1;
+	}
+
+	bytes_avail = TS_PACKET_SIZE - ts_payload_offset;
+
+	/* The audio decoder requires ES packets! */
+
+	/* Get the mandatory fields of the audio PES header */
+	if (mpq_dmx_parse_mandatory_audio_pes_header(feed, feed_data,
+						pes_header, buf,
+						&ts_payload_offset,
+						&bytes_avail)) {
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return 0;
+	}
+
+	if (mpq_dmx_parse_remaining_audio_pes_header(feed, feed_data,
+						pes_header, buf,
+						&ts_payload_offset,
+						&bytes_avail)) {
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return 0;
+	}
+
+	/*
+	 * If we reached here,
+	 * then we are now at the PES payload data
+	 */
+	if (bytes_avail == 0) {
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return 0;
+	}
+
+	/*
+	 * Back up the PTS/DTS information
+	 * from the start of the new PES
+	 */
+	if (feed_data->first_pts_dts_copy) {
+		mpq_dmx_save_audio_pts_dts(feed_data);
+		feed_data->first_pts_dts_copy = 0;
+	}
+
+	/* Update error counters based on TS header */
+	feed_data->ts_packets_num++;
+	feed_data->tei_errs += ts_header->transport_error_indicator;
+	mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors +=
+		ts_header->transport_error_indicator;
+	mpq_dmx_check_audio_continuity(feed_data,
+				ts_header->continuity_counter,
+				discontinuity_indicator);
+	mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors +=
+		feed_data->continuity_errs;
+
+	/* actual writing of audio data for a stream */
+	ret = mpq_streambuffer_data_write(stream_buffer, buf+ts_payload_offset,
+		bytes_avail);
+	if (ret < 0) {
+		mpq_demux->decoder_stat
+			[feed_data->stream_interface].drop_count += bytes_avail;
+		feed_data->ts_dropped_bytes += bytes_avail;
+		if (ret == -ENOSPC)
+			mpq_dmx_notify_overflow(feed);
+	} else {
+		feed->peslen += bytes_avail;
+	}
+
+	spin_unlock(&feed_data->audio_buffer_lock);
+
+	return 0;
+}
+
+/*
+ * Function pointer used in several places;
+ * each feed type is handled differently.
+ */
+int mpq_dmx_decoder_buffer_status(struct dvb_demux_feed *feed,
+		struct dmx_buffer_status *dmx_buffer_status)
+{
+	if (dvb_dmx_is_video_feed(feed)) {
+		struct mpq_demux *mpq_demux = feed->demux->priv;
+		struct mpq_video_feed_info *feed_data;
+		struct mpq_streambuffer *video_buff;
+		struct mpq_feed *mpq_feed;
+
+		mutex_lock(&mpq_demux->mutex);
+
+		mpq_feed = feed->priv;
+		feed_data = &mpq_feed->video_info;
+		video_buff = feed_data->video_buffer;
+		if (!video_buff) {
+			mutex_unlock(&mpq_demux->mutex);
+			return -EINVAL;
+		}
+
+		dmx_buffer_status->error = video_buff->raw_data.error;
+
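+		/*
+		 * Linear group mode: fullness is reported in whole decoder
+		 * buffers (pending buffers * buffer size), all buffers being
+		 * of the same size. Ring mode reports exact byte counts taken
+		 * from the stream buffer.
+		 */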
+		if (video_buff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) {
+			dmx_buffer_status->fullness =
+				video_buff->buffers[0].size *
+				video_buff->pending_buffers_count;
+			dmx_buffer_status->free_bytes =
+				video_buff->buffers[0].size *
+				(video_buff->buffers_num -
+				 video_buff->pending_buffers_count);
+			dmx_buffer_status->size =
+				video_buff->buffers[0].size *
+				video_buff->buffers_num;
+		} else {
+			dmx_buffer_status->fullness =
+				mpq_streambuffer_data_avail(video_buff);
+			dmx_buffer_status->free_bytes =
+				mpq_streambuffer_data_free(video_buff);
+			dmx_buffer_status->size = video_buff->buffers[0].size;
+		}
+
+		mpq_streambuffer_get_data_rw_offset(
+					video_buff,
+					&dmx_buffer_status->read_offset,
+					&dmx_buffer_status->write_offset);
+
+		mutex_unlock(&mpq_demux->mutex);
+
+	} else if (dvb_dmx_is_audio_feed(feed)) {
+		struct mpq_demux *mpq_demux = feed->demux->priv;
+		struct mpq_audio_feed_info *feed_data;
+		struct mpq_streambuffer *audio_buff;
+		struct mpq_feed *mpq_feed;
+
+		mutex_lock(&mpq_demux->mutex);
+
+		mpq_feed = feed->priv;
+		feed_data = &mpq_feed->audio_info;
+		audio_buff = feed_data->audio_buffer;
+		if (!audio_buff) {
+			mutex_unlock(&mpq_demux->mutex);
+			return -EINVAL;
+		}
+
+		dmx_buffer_status->error = audio_buff->raw_data.error;
+
+		if (audio_buff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) {
+			dmx_buffer_status->fullness =
+				audio_buff->buffers[0].size *
+				audio_buff->pending_buffers_count;
+			dmx_buffer_status->free_bytes =
+				audio_buff->buffers[0].size *
+				(audio_buff->buffers_num -
+				 audio_buff->pending_buffers_count);
+			dmx_buffer_status->size =
+				audio_buff->buffers[0].size *
+				audio_buff->buffers_num;
+		} else {
+			dmx_buffer_status->fullness =
+				mpq_streambuffer_data_avail(audio_buff);
+			dmx_buffer_status->free_bytes =
+				mpq_streambuffer_data_free(audio_buff);
+			dmx_buffer_status->size = audio_buff->buffers[0].size;
+		}
+
+		mpq_streambuffer_get_data_rw_offset(
+					audio_buff,
+					&dmx_buffer_status->read_offset,
+					&dmx_buffer_status->write_offset);
+
+		mutex_unlock(&mpq_demux->mutex);
+	} else {
+		MPQ_DVB_ERR_PRINT("%s: Invalid feed type %d\n",
+				   __func__, feed->pes_type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int mpq_dmx_process_video_packet(
+			struct dvb_demux_feed *feed,
+			const u8 *buf)
+{
+	u64 curr_stc;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	if ((mpq_demux->source >= DMX_SOURCE_DVR0) &&
+		(mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) {
+		curr_stc = 0;
+	} else {
+		if (mpq_demux->ts_packet_timestamp_source !=
+		    TSIF_TTS_LPASS_TIMER) {
+			curr_stc = buf[STC_LOCATION_IDX + 2] << 16;
+			curr_stc += buf[STC_LOCATION_IDX + 1] << 8;
+			curr_stc += buf[STC_LOCATION_IDX];
+			curr_stc *= 256; /* convert from 105.47 kHz to 27 MHz */
+		} else {
+			curr_stc = buf[STC_LOCATION_IDX + 3] << 24;
+			curr_stc += buf[STC_LOCATION_IDX + 2] << 16;
+			curr_stc += buf[STC_LOCATION_IDX + 1] << 8;
+			curr_stc += buf[STC_LOCATION_IDX];
+		}
+	}
+
+	if (!video_framing)
+		return mpq_dmx_process_video_packet_no_framing(feed, buf,
+				curr_stc);
+	else
+		return mpq_dmx_process_video_packet_framing(feed, buf,
+				curr_stc);
+}
+
+int mpq_dmx_process_audio_packet(
+			struct dvb_demux_feed *feed,
+			const u8 *buf)
+{
+	u64 curr_stc;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	if ((mpq_demux->source >= DMX_SOURCE_DVR0) &&
+		(mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) {
+		curr_stc = 0;
+	} else {
+		if (mpq_demux->ts_packet_timestamp_source !=
+		    TSIF_TTS_LPASS_TIMER) {
+			curr_stc = buf[STC_LOCATION_IDX + 2] << 16;
+			curr_stc += buf[STC_LOCATION_IDX + 1] << 8;
+			curr_stc += buf[STC_LOCATION_IDX];
+			curr_stc *= 256; /* convert from 105.47 kHz to 27 MHz */
+		} else {
+			curr_stc = buf[STC_LOCATION_IDX + 3] << 24;
+			curr_stc += buf[STC_LOCATION_IDX + 2] << 16;
+			curr_stc += buf[STC_LOCATION_IDX + 1] << 8;
+			curr_stc += buf[STC_LOCATION_IDX];
+		}
+	}
+
+	return mpq_dmx_process_audio_packet_no_framing(feed, buf, curr_stc);
+}
+
+int mpq_dmx_extract_pcr_and_dci(const u8 *buf, u64 *pcr, int *dci)
+{
+	const struct ts_packet_header *ts_header;
+	const struct ts_adaptation_field *adaptation_field;
+
+	if (buf == NULL || pcr == NULL || dci == NULL)
+		return 0;
+
+	ts_header = (const struct ts_packet_header *)buf;
+
+	/* Make sure this TS packet has an adaptation field */
+	if ((ts_header->sync_byte != 0x47) ||
+		(ts_header->adaptation_field_control == 0) ||
+		(ts_header->adaptation_field_control == 1) ||
+		ts_header->transport_error_indicator)
+		return 0;
+
+	adaptation_field = (const struct ts_adaptation_field *)
+			(buf + sizeof(struct ts_packet_header));
+
+	if ((!adaptation_field->adaptation_field_length) ||
+		(!adaptation_field->PCR_flag))
+		return 0; /* 0 adaptation field or no PCR */
+
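+	/*
+	 * Reassemble the 33-bit PCR base (90 kHz units) from the five
+	 * adaptation-field bit-fields, convert it to 27 MHz by multiplying
+	 * by 300, and add the 9-bit PCR extension (27 MHz remainder).
+	 */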
+	*pcr = ((u64)adaptation_field->program_clock_reference_base_1) << 25;
+	*pcr += ((u64)adaptation_field->program_clock_reference_base_2) << 17;
+	*pcr += ((u64)adaptation_field->program_clock_reference_base_3) << 9;
+	*pcr += ((u64)adaptation_field->program_clock_reference_base_4) << 1;
+	*pcr += adaptation_field->program_clock_reference_base_5;
+	*pcr *= 300;
+	*pcr += (((u64)adaptation_field->program_clock_reference_ext_1) << 8) +
+		adaptation_field->program_clock_reference_ext_2;
+
+	*dci = adaptation_field->discontinuity_indicator;
+
+	return 1;
+}
+
+int mpq_dmx_process_pcr_packet(
+			struct dvb_demux_feed *feed,
+			const u8 *buf)
+{
+	u64 stc;
+	struct dmx_data_ready data;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	if (mpq_dmx_extract_pcr_and_dci(buf, &data.pcr.pcr,
+		&data.pcr.disc_indicator_set) == 0)
+		return 0;
+
+	/*
+	 * When we play from the front-end, we configure the HW to output
+	 * the extra timestamp. When playing from DVR, we don't have a
+	 * timestamp unless the packet format is 192-tail.
+	 */
+	if ((mpq_demux->source >= DMX_SOURCE_DVR0) &&
+		(mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) {
+		stc = 0;
+	} else {
+		if (mpq_demux->ts_packet_timestamp_source !=
+		    TSIF_TTS_LPASS_TIMER) {
+			stc = buf[STC_LOCATION_IDX + 2] << 16;
+			stc += buf[STC_LOCATION_IDX + 1] << 8;
+			stc += buf[STC_LOCATION_IDX];
+			stc *= 256; /* convert from 105.47 kHz to 27 MHz */
+		} else {
+			stc = buf[STC_LOCATION_IDX + 3] << 24;
+			stc += buf[STC_LOCATION_IDX + 2] << 16;
+			stc += buf[STC_LOCATION_IDX + 1] << 8;
+			stc += buf[STC_LOCATION_IDX];
+		}
+	}
+
+	data.data_length = 0;
+	data.pcr.stc = stc;
+	data.status = DMX_OK_PCR;
+	feed->data_ready_cb.ts(&feed->feed.ts, &data);
+
+	return 0;
+}
+
+int mpq_dmx_decoder_eos_cmd(struct mpq_feed *mpq_feed, int feed_type)
+{
+	if (feed_type == 1) { /* video feed */
+		struct mpq_video_feed_info *feed_data = &mpq_feed->video_info;
+		struct mpq_streambuffer *stream_buffer;
+		struct mpq_streambuffer_packet_header oob_packet;
+		struct mpq_adapter_video_meta_data oob_meta_data;
+		int ret;
+
+		spin_lock(&feed_data->video_buffer_lock);
+		stream_buffer = feed_data->video_buffer;
+
+		if (stream_buffer == NULL) {
+			MPQ_DVB_DBG_PRINT("%s: video_buffer released\n",
+					  __func__);
+			spin_unlock(&feed_data->video_buffer_lock);
+			return 0;
+		}
+
+		memset(&oob_packet, 0, sizeof(oob_packet));
+		oob_packet.user_data_len = sizeof(oob_meta_data);
+		oob_meta_data.packet_type = DMX_EOS_PACKET;
+
+		ret = mpq_streambuffer_pkt_write(stream_buffer, &oob_packet,
+						 (u8 *)&oob_meta_data);
+
+		spin_unlock(&feed_data->video_buffer_lock);
+		return (ret < 0) ? ret : 0;
+
+	} else if (feed_type == 2) { /* audio feed */
+		struct mpq_audio_feed_info *feed_data = &mpq_feed->audio_info;
+		struct mpq_streambuffer *stream_buffer;
+		struct mpq_streambuffer_packet_header oob_packet;
+		struct mpq_adapter_audio_meta_data oob_meta_data;
+		int ret;
+
+		spin_lock(&feed_data->audio_buffer_lock);
+		stream_buffer = feed_data->audio_buffer;
+
+		if (stream_buffer == NULL) {
+			MPQ_DVB_DBG_PRINT("%s: audio_buffer released\n",
+					  __func__);
+			spin_unlock(&feed_data->audio_buffer_lock);
+			return 0;
+		}
+
+		memset(&oob_packet, 0, sizeof(oob_packet));
+		oob_packet.user_data_len = sizeof(oob_meta_data);
+		oob_meta_data.packet_type = DMX_EOS_PACKET;
+
+		ret = mpq_streambuffer_pkt_write(stream_buffer, &oob_packet,
+						 (u8 *)&oob_meta_data);
+
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return (ret < 0) ? ret : 0;
+	}
+
+	return 0;
+}
+
+void mpq_dmx_convert_tts(struct dvb_demux_feed *feed,
+			const u8 timestamp[TIMESTAMP_LEN],
+			u64 *timestampIn27Mhz)
+{
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	if (unlikely(!timestampIn27Mhz))
+		return;
+
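+	/*
+	 * TSIF timestamps are 3 bytes counted at 27 MHz / 256 (~105.47 kHz),
+	 * so multiplying by 256 converts them to 27 MHz units. LPASS timer
+	 * timestamps are 4 bytes and already in 27 MHz units.
+	 */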
+	if (mpq_demux->ts_packet_timestamp_source != TSIF_TTS_LPASS_TIMER) {
+		*timestampIn27Mhz = timestamp[2] << 16;
+		*timestampIn27Mhz += timestamp[1] << 8;
+		*timestampIn27Mhz += timestamp[0];
+		*timestampIn27Mhz *= 256; /* convert from 105.47 kHz to 27 MHz */
+	} else {
+		*timestampIn27Mhz = timestamp[3] << 24;
+		*timestampIn27Mhz += timestamp[2] << 16;
+		*timestampIn27Mhz += timestamp[1] << 8;
+		*timestampIn27Mhz += timestamp[0];
+	}
+}
+
+int mpq_sdmx_open_session(struct mpq_demux *mpq_demux)
+{
+	enum sdmx_status ret = SDMX_SUCCESS;
+	enum sdmx_proc_mode proc_mode;
+	enum sdmx_pkt_format pkt_format;
+
+	MPQ_DVB_DBG_PRINT("%s: ref_count %d\n",
+		__func__, mpq_demux->sdmx_session_ref_count);
+
+	if (mpq_demux->sdmx_session_ref_count) {
+		/* session is already open */
+		mpq_demux->sdmx_session_ref_count++;
+		return ret;
+	}
+
+	proc_mode = (mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) ?
+		SDMX_PUSH_MODE : SDMX_PULL_MODE;
+	MPQ_DVB_DBG_PRINT(
+		"%s: Proc mode = %s\n",
+		__func__, SDMX_PUSH_MODE == proc_mode ? "Push" : "Pull");
+
+	if (mpq_demux->source < DMX_SOURCE_DVR0) {
+		pkt_format = SDMX_192_BYTE_PKT;
+	} else if (mpq_demux->demux.tsp_format == DMX_TSP_FORMAT_188) {
+		pkt_format = SDMX_188_BYTE_PKT;
+	} else if (mpq_demux->demux.tsp_format == DMX_TSP_FORMAT_192_TAIL) {
+		pkt_format = SDMX_192_BYTE_PKT;
+	} else {
+		MPQ_DVB_ERR_PRINT("%s: invalid tsp format\n", __func__);
+		return -EINVAL;
+	}
+
+	MPQ_DVB_DBG_PRINT("%s: (%s) source, packet format: %d\n",
+		 __func__,
+		 (mpq_demux->source < DMX_SOURCE_DVR0) ?
+		 "frontend" : "DVR", pkt_format);
+
+	/* open session and set configuration */
+	ret = sdmx_open_session(&mpq_demux->sdmx_session_handle);
+	if (ret != SDMX_SUCCESS) {
+		MPQ_DVB_ERR_PRINT("%s: Could not open session. ret=%d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	MPQ_DVB_DBG_PRINT("%s: new session_handle = %d\n",
+		__func__, mpq_demux->sdmx_session_handle);
+
+	ret = sdmx_set_session_cfg(mpq_demux->sdmx_session_handle,
+		proc_mode,
+		SDMX_PKT_ENC_MODE,
+		pkt_format,
+		mpq_sdmx_scramble_odd,
+		mpq_sdmx_scramble_even);
+	if (ret != SDMX_SUCCESS) {
+		MPQ_DVB_ERR_PRINT("%s: Could not set session config. ret=%d\n",
+			__func__, ret);
+		sdmx_close_session(mpq_demux->sdmx_session_handle);
+		mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
+		return -EINVAL;
+	}
+
+	ret = sdmx_set_log_level(mpq_demux->sdmx_session_handle,
+		mpq_demux->sdmx_log_level);
+	if (ret != SDMX_SUCCESS) {
+		MPQ_DVB_ERR_PRINT("%s: Could not set log level. ret=%d\n",
+				__func__, ret);
+		/* Don't fail the session open if only the log level setting failed */
+		ret = 0;
+	}
+
+	mpq_demux->sdmx_process_count = 0;
+	mpq_demux->sdmx_process_time_sum = 0;
+	mpq_demux->sdmx_process_time_average = 0;
+	mpq_demux->sdmx_process_time_max = 0;
+	mpq_demux->sdmx_process_packets_sum = 0;
+	mpq_demux->sdmx_process_packets_average = 0;
+	mpq_demux->sdmx_process_packets_min = 0;
+
+	mpq_demux->sdmx_session_ref_count++;
+	return ret;
+}
+
+int mpq_sdmx_close_session(struct mpq_demux *mpq_demux)
+{
+	int ret = 0;
+	enum sdmx_status status;
+
+	MPQ_DVB_DBG_PRINT("%s: session_handle = %d, ref_count %d\n",
+			__func__,
+			mpq_demux->sdmx_session_handle,
+			mpq_demux->sdmx_session_ref_count);
+
+	if (!mpq_demux->sdmx_session_ref_count)
+		return -EINVAL;
+
+	if (mpq_demux->sdmx_session_ref_count == 1) {
+		status = sdmx_close_session(mpq_demux->sdmx_session_handle);
+		if (status != SDMX_SUCCESS) {
+			MPQ_DVB_ERR_PRINT("%s: sdmx_close_session failed %d\n",
+				__func__, status);
+		}
+		mpq_demux->sdmx_eos = 0;
+		mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
+	}
+
+	mpq_demux->sdmx_session_ref_count--;
+
+	return ret;
+}
+
+static int mpq_sdmx_get_buffer_chunks(struct mpq_demux *mpq_demux,
+	struct ion_handle *buff_handle,
+	u32 actual_buff_size,
+	struct sdmx_buff_descr buff_chunks[SDMX_MAX_PHYSICAL_CHUNKS])
+{
+	int i;
+	struct sg_table *sg_ptr;
+	struct scatterlist *sg;
+	u32 chunk_size;
+	int ret;
+
+	memset(buff_chunks, 0,
+		sizeof(struct sdmx_buff_descr) * SDMX_MAX_PHYSICAL_CHUNKS);
+
+	sg_ptr = ion_sg_table(mpq_demux->ion_client, buff_handle);
+	if (IS_ERR_OR_NULL(sg_ptr)) {
+		ret = PTR_ERR(sg_ptr);
+		MPQ_DVB_ERR_PRINT("%s: ion_sg_table failed, ret=%d\n",
+			__func__, ret);
+		if (!ret)
+			ret = -EINVAL;
+		return ret;
+	}
+
+	if (sg_ptr->nents == 0) {
+		MPQ_DVB_ERR_PRINT("%s: num of scattered entries is 0\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (sg_ptr->nents > SDMX_MAX_PHYSICAL_CHUNKS) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: num of scattered entries %d greater than max supported %d\n",
+			__func__, sg_ptr->nents, SDMX_MAX_PHYSICAL_CHUNKS);
+		return -EINVAL;
+	}
+
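+	/*
+	 * Walk the scatter-gather list and describe each physical chunk,
+	 * clamping each chunk to the remaining actual buffer size, since the
+	 * ION allocation may be larger than the buffer actually in use.
+	 */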
+	sg = sg_ptr->sgl;
+	for (i = 0; i < sg_ptr->nents; i++) {
+		buff_chunks[i].base_addr = (u64)sg_dma_address(sg);
+
+		if (sg->length > actual_buff_size)
+			chunk_size = actual_buff_size;
+		else
+			chunk_size = sg->length;
+
+		buff_chunks[i].size = chunk_size;
+		sg = sg_next(sg);
+		actual_buff_size -= chunk_size;
+	}
+
+	return 0;
+}
+
+static int mpq_sdmx_init_data_buffer(struct mpq_demux *mpq_demux,
+	struct mpq_feed *feed, u32 *num_buffers,
+	struct sdmx_data_buff_descr buf_desc[DMX_MAX_DECODER_BUFFER_NUM],
+	enum sdmx_buf_mode *buf_mode)
+{
+	struct dvb_demux_feed *dvbdmx_feed = feed->dvb_demux_feed;
+	struct dvb_ringbuffer *buffer;
+	struct mpq_video_feed_info *feed_data = &feed->video_info;
+	struct ion_handle *sdmx_buff;
+	int ret;
+	int i;
+
+	*buf_mode = SDMX_RING_BUF;
+
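+	/*
+	 * Video feeds with more than one decoder buffer use linear group
+	 * mode; everything else uses a single ring buffer.
+	 */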
+	if (dvb_dmx_is_video_feed(feed->dvb_demux_feed)) {
+		if (feed_data->buffer_desc.decoder_buffers_num > 1)
+			*buf_mode = SDMX_LINEAR_GROUP_BUF;
+		*num_buffers = feed_data->buffer_desc.decoder_buffers_num;
+
+		for (i = 0; i < *num_buffers; i++) {
+			buf_desc[i].length =
+				feed_data->buffer_desc.desc[i].size;
+
+			ret = mpq_sdmx_get_buffer_chunks(mpq_demux,
+					feed_data->buffer_desc.ion_handle[i],
+					buf_desc[i].length,
+					buf_desc[i].buff_chunks);
+			if (ret) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: mpq_sdmx_get_buffer_chunks failed\n",
+					__func__);
+				return ret;
+			}
+		}
+
+		return 0;
+	}
+
+	*num_buffers = 1;
+	if (dvb_dmx_is_sec_feed(dvbdmx_feed) ||
+		dvb_dmx_is_pcr_feed(dvbdmx_feed)) {
+		buffer = &feed->sdmx_buf;
+		sdmx_buff = feed->sdmx_buf_handle;
+	} else {
+		buffer = (struct dvb_ringbuffer *)
+			dvbdmx_feed->feed.ts.buffer.ringbuff;
+		sdmx_buff = dvbdmx_feed->feed.ts.buffer.priv_handle;
+	}
+
+	if (sdmx_buff == NULL) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: Invalid buffer allocation\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	buf_desc[0].length = buffer->size;
+	ret = mpq_sdmx_get_buffer_chunks(mpq_demux, sdmx_buff,
+		buf_desc[0].length,
+		buf_desc[0].buff_chunks);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_sdmx_get_buffer_chunks failed\n",
+			__func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int mpq_sdmx_filter_setup(struct mpq_demux *mpq_demux,
+	struct dvb_demux_feed *dvbdmx_feed)
+{
+	int ret = 0;
+	struct mpq_feed *feed;
+	struct mpq_feed *main_rec_feed = NULL;
+	struct dvb_demux_feed *tmp;
+	struct sdmx_buff_descr metadata_buff_desc;
+	struct sdmx_data_buff_descr *data_buff_desc = NULL;
+	u32 data_buf_num = DMX_MAX_DECODER_BUFFER_NUM;
+	enum sdmx_buf_mode buf_mode;
+	enum sdmx_raw_out_format ts_out_format = SDMX_188_OUTPUT;
+	u32 filter_flags = 0;
+
+	feed = dvbdmx_feed->priv;
+
+	if (dvb_dmx_is_sec_feed(dvbdmx_feed)) {
+		feed->filter_type = SDMX_SECTION_FILTER;
+		if (dvbdmx_feed->feed.sec.check_crc)
+			filter_flags |= SDMX_FILTER_FLAG_VERIFY_SECTION_CRC;
+		MPQ_DVB_DBG_PRINT("%s: SDMX_SECTION_FILTER\n", __func__);
+	} else if (dvb_dmx_is_pcr_feed(dvbdmx_feed)) {
+		feed->filter_type = SDMX_PCR_FILTER;
+		MPQ_DVB_DBG_PRINT("%s: SDMX_PCR_FILTER\n", __func__);
+	} else if (dvb_dmx_is_video_feed(dvbdmx_feed)) {
+		feed->filter_type = SDMX_SEPARATED_PES_FILTER;
+		MPQ_DVB_DBG_PRINT("%s: SDMX_SEPARATED_PES_FILTER\n", __func__);
+	} else if (dvb_dmx_is_rec_feed(dvbdmx_feed)) {
+		feed->filter_type = SDMX_RAW_FILTER;
+		switch (dvbdmx_feed->tsp_out_format) {
+		case (DMX_TSP_FORMAT_188):
+			ts_out_format = SDMX_188_OUTPUT;
+			break;
+		case (DMX_TSP_FORMAT_192_HEAD):
+			ts_out_format = SDMX_192_HEAD_OUTPUT;
+			break;
+		case (DMX_TSP_FORMAT_192_TAIL):
+			ts_out_format = SDMX_192_TAIL_OUTPUT;
+			break;
+		default:
+			MPQ_DVB_ERR_PRINT(
+				"%s: Unsupported TS output format %d\n",
+				__func__, dvbdmx_feed->tsp_out_format);
+			return -EINVAL;
+		}
+		MPQ_DVB_DBG_PRINT("%s: SDMX_RAW_FILTER\n", __func__);
+	} else {
+		feed->filter_type = SDMX_PES_FILTER;
+		MPQ_DVB_DBG_PRINT("%s: SDMX_PES_FILTER\n", __func__);
+	}
+
+	data_buff_desc = vmalloc(
+			sizeof(*data_buff_desc)*DMX_MAX_DECODER_BUFFER_NUM);
+	if (!data_buff_desc) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: failed to allocate memory for data buffer\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	/*
+	 * Recording feed sdmx filter handle lookup:
+	 * In case this is a recording filter with multiple feeds, this feed
+	 * is either the first feed of a new recording filter, or another
+	 * feed of an existing filter for which an sdmx filter was already
+	 * opened. In the latter case, we look up the feed pool for an
+	 * allocated feed with the same output buffer (meaning they belong
+	 * to the same filter) and reuse the already allocated sdmx filter
+	 * handle.
+	 */
+	if (feed->filter_type == SDMX_RAW_FILTER) {
+		tmp = mpq_dmx_peer_rec_feed(dvbdmx_feed);
+		if (tmp)
+			main_rec_feed = tmp->priv;
+	}
+
+	/*
+	 * If this PID is not part of an existing recording filter,
+	 * configure a new SDMX filter.
+	 */
+	if (!main_rec_feed) {
+		feed->secondary_feed = 0;
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: Adding new sdmx filter, pid %d, flags=0x%X, ts_out_format=%d\n",
+			__func__, dvbdmx_feed->pid, filter_flags,
+			ts_out_format);
+
+		/* Meta-data initialization:
+		 * recording filters do not need meta-data buffers.
+		 */
+		if (dvb_dmx_is_rec_feed(dvbdmx_feed)) {
+			metadata_buff_desc.base_addr = 0;
+			metadata_buff_desc.size = 0;
+		} else {
+			ret = mpq_sdmx_init_metadata_buffer(mpq_demux, feed,
+				&metadata_buff_desc);
+			if (ret) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: Failed to initialize metadata buffer. ret=%d\n",
+					__func__, ret);
+				goto sdmx_filter_setup_failed;
+			}
+		}
+
+		ret = mpq_sdmx_init_data_buffer(mpq_demux, feed, &data_buf_num,
+			data_buff_desc, &buf_mode);
+		if (ret) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: Failed to initialize data buffer. ret=%d\n",
+				__func__, ret);
+			mpq_sdmx_terminate_metadata_buffer(feed);
+			goto sdmx_filter_setup_failed;
+		}
+		ret = sdmx_add_filter(mpq_demux->sdmx_session_handle,
+			dvbdmx_feed->pid,
+			feed->filter_type,
+			&metadata_buff_desc,
+			buf_mode,
+			data_buf_num,
+			data_buff_desc,
+			&feed->sdmx_filter_handle,
+			ts_out_format,
+			filter_flags);
+		if (ret) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: SDMX_add_filter failed. ret = %d\n",
+				__func__, ret);
+			ret = -ENODEV;
+			mpq_sdmx_terminate_metadata_buffer(feed);
+			goto sdmx_filter_setup_failed;
+		}
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: feed=0x%p, filter pid=%d, handle=%d, data buffer(s)=%d, size=%d\n",
+			__func__, feed, dvbdmx_feed->pid,
+			feed->sdmx_filter_handle,
+			data_buf_num, data_buff_desc[0].length);
+
+		mpq_demux->sdmx_filter_count++;
+	} else {
+		MPQ_DVB_DBG_PRINT(
+			"%s: Adding RAW pid to sdmx, pid %d\n",
+			__func__, dvbdmx_feed->pid);
+
+		feed->secondary_feed = 1;
+		feed->sdmx_filter_handle = main_rec_feed->sdmx_filter_handle;
+		ret = sdmx_add_raw_pid(mpq_demux->sdmx_session_handle,
+			feed->sdmx_filter_handle, dvbdmx_feed->pid);
+		if (ret) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: FAILED to add raw pid, ret=%d\n",
+				__func__, ret);
+			ret = -ENODEV;
+			goto sdmx_filter_setup_failed;
+		}
+	}
+
+	/*
+	 * If the PID has a key ladder ID associated with it,
+	 * we need to pass it to SDMX.
+	 */
+	if (dvbdmx_feed->secure_mode.is_secured &&
+		dvbdmx_feed->cipher_ops.operations_count) {
+		MPQ_DVB_DBG_PRINT(
+			"%s: set key-ladder %d to PID %d\n",
+			__func__,
+			dvbdmx_feed->cipher_ops.operations[0].key_ladder_id,
+			dvbdmx_feed->cipher_ops.pid);
+
+		ret = sdmx_set_kl_ind(mpq_demux->sdmx_session_handle,
+			dvbdmx_feed->cipher_ops.pid,
+			dvbdmx_feed->cipher_ops.operations[0].key_ladder_id);
+
+		if (ret) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: FAILED to set key ladder, ret=%d\n",
+				__func__, ret);
+		}
+	}
+
+	vfree(data_buff_desc);
+	return 0;
+
+sdmx_filter_setup_failed:
+	vfree(data_buff_desc);
+	return ret;
+}
+
+/**
+ * mpq_sdmx_init_feed - initialize secure demux related elements of mpq feed
+ *
+ * @mpq_demux: mpq_demux object
+ * @mpq_feed: mpq_feed object
+ *
+ * Note: the function assumes mpq_demux->mutex locking is done by caller.
+ */
+static int mpq_sdmx_init_feed(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed)
+{
+	int ret;
+
+	ret = mpq_sdmx_open_session(mpq_demux);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_sdmx_open_session failed, ret=%d\n",
+			__func__, ret);
+
+		ret = -ENODEV;
+		goto init_sdmx_feed_failed;
+	}
+
+	/* PCR and section feeds have an internal buffer for SDMX */
+	if (dvb_dmx_is_pcr_feed(mpq_feed->dvb_demux_feed))
+		ret = mpq_sdmx_alloc_data_buf(mpq_feed, SDMX_PCR_BUFFER_SIZE);
+	else if (dvb_dmx_is_sec_feed(mpq_feed->dvb_demux_feed))
+		ret = mpq_sdmx_alloc_data_buf(mpq_feed,
+			SDMX_SECTION_BUFFER_SIZE);
+	else
+		ret = 0;
+
+	if (ret) {
+		MPQ_DVB_ERR_PRINT("%s: init buffer failed, ret=%d\n",
+			__func__, ret);
+		goto init_sdmx_feed_failed_free_sdmx;
+	}
+
+	ret = mpq_sdmx_filter_setup(mpq_demux, mpq_feed->dvb_demux_feed);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_sdmx_filter_setup failed, ret=%d\n",
+			__func__, ret);
+		goto init_sdmx_feed_failed_free_data_buff;
+	}
+
+	mpq_demux->num_secure_feeds++;
+	return 0;
+
+init_sdmx_feed_failed_free_data_buff:
+	mpq_sdmx_free_data_buf(mpq_feed);
+init_sdmx_feed_failed_free_sdmx:
+	mpq_sdmx_close_session(mpq_demux);
+init_sdmx_feed_failed:
+	return ret;
+}
+
+int mpq_dmx_init_mpq_feed(struct dvb_demux_feed *feed)
+{
+	int ret = 0;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+	struct mpq_feed *mpq_feed = feed->priv;
+
+	if (mutex_lock_interruptible(&mpq_demux->mutex))
+		return -ERESTARTSYS;
+
+	mpq_feed->sdmx_buf_handle = NULL;
+	mpq_feed->metadata_buf_handle = NULL;
+	mpq_feed->sdmx_filter_handle = SDMX_INVALID_FILTER_HANDLE;
+
+	if (feed->type != DMX_TYPE_SEC)
+		feed->feed.ts.flush_buffer = mpq_dmx_flush_buffer;
+
+	if (dvb_dmx_is_video_feed(feed)) {
+		ret = mpq_dmx_init_video_feed(mpq_feed);
+		if (ret) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_dmx_init_video_feed failed, ret=%d\n",
+				__func__, ret);
+			goto init_mpq_feed_end;
+		}
+	}
+
+	if (dvb_dmx_is_audio_feed(feed)) {
+		ret = mpq_dmx_init_audio_feed(mpq_feed);
+		if (ret) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_dmx_init_audio_feed failed, ret=%d\n",
+				__func__, ret);
+			goto init_mpq_feed_end;
+		}
+	}
+
+	/*
+	 * sdmx is not relevant for recording filters, which always use
+	 * regular filters (non-sdmx)
+	 */
+	if (!mpq_sdmx_is_loaded() || !feed->secure_mode.is_secured ||
+		dvb_dmx_is_rec_feed(feed)) {
+		if (!mpq_sdmx_is_loaded())
+			mpq_demux->sdmx_session_handle =
+				SDMX_INVALID_SESSION_HANDLE;
+		goto init_mpq_feed_end;
+	}
+
+	/* Initialization of secure demux filters (PES/PCR/Video/Section) */
+	ret = mpq_sdmx_init_feed(mpq_demux, mpq_feed);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_sdmx_init_feed failed, ret=%d\n",
+			__func__, ret);
+		if (dvb_dmx_is_video_feed(feed))
+			mpq_dmx_terminate_video_feed(mpq_feed);
+		else if (dvb_dmx_is_audio_feed(feed))
+			mpq_dmx_terminate_audio_feed(mpq_feed);
+	}
+
+init_mpq_feed_end:
+	if (!ret) {
+		mpq_demux->num_active_feeds++;
+		mpq_feed->session_id++;
+	}
+	mutex_unlock(&mpq_demux->mutex);
+	return ret;
+}
+
+/*
+ * Note: called only when the filter is in the "GO" state, i.e. after the
+ * feed has been started.
+ */
+int mpq_dmx_set_cipher_ops(struct dvb_demux_feed *feed,
+		struct dmx_cipher_operations *cipher_ops)
+{
+	struct mpq_feed *mpq_feed;
+	struct mpq_demux *mpq_demux;
+	int ret = 0;
+
+	if (!feed || !feed->priv || !cipher_ops) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid parameters\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	MPQ_DVB_DBG_PRINT("%s(%d, %d, %d)\n",
+		__func__, cipher_ops->pid,
+		cipher_ops->operations_count,
+		cipher_ops->operations[0].key_ladder_id);
+
+	if ((cipher_ops->operations_count > 1) ||
+		(cipher_ops->operations_count &&
+		 cipher_ops->operations[0].encrypt)) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: Invalid cipher operations, count=%d, encrypt=%d\n",
+			__func__, cipher_ops->operations_count,
+			cipher_ops->operations[0].encrypt);
+		return -EINVAL;
+	}
+
+	if (!feed->secure_mode.is_secured) {
+		/*
+		 * Filter is not configured as secured; setting cipher
+		 * operations is not allowed.
+		 */
+		MPQ_DVB_ERR_PRINT(
+			"%s: Cannot set cipher operations to non-secure filter\n",
+			__func__);
+		return -EPERM;
+	}
+
+	mpq_feed = feed->priv;
+	mpq_demux = mpq_feed->mpq_demux;
+
+	mutex_lock(&mpq_demux->mutex);
+
+	/*
+	 * The feed is running in secure mode; this secure-mode request
+	 * updates the key ladder ID.
+	 */
+	if ((mpq_demux->sdmx_session_handle != SDMX_INVALID_SESSION_HANDLE) &&
+		cipher_ops->operations_count) {
+		ret = sdmx_set_kl_ind(mpq_demux->sdmx_session_handle,
+			cipher_ops->pid,
+			cipher_ops->operations[0].key_ladder_id);
+		if (ret) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: FAILED to set key ladder, ret=%d\n",
+				__func__, ret);
+			ret = -ENODEV;
+		}
+	}
+
+	mutex_unlock(&mpq_demux->mutex);
+
+	return ret;
+}
+
+static int mpq_sdmx_invalidate_buffer(struct mpq_feed *mpq_feed)
+{
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	struct mpq_video_feed_info *feed_data;
+	struct dvb_ringbuffer *buffer;
+	struct ion_handle *ion_handle;
+	int ret = 0;
+	int i;
+
+	if (!dvb_dmx_is_video_feed(feed)) {
+		if (dvb_dmx_is_sec_feed(feed) ||
+			dvb_dmx_is_pcr_feed(feed)) {
+			buffer = (struct dvb_ringbuffer *)
+				&mpq_feed->sdmx_buf;
+			ion_handle = mpq_feed->sdmx_buf_handle;
+		} else {
+			buffer = (struct dvb_ringbuffer *)
+				feed->feed.ts.buffer.ringbuff;
+			ion_handle = feed->feed.ts.buffer.priv_handle;
+		}
+
+		ret = msm_ion_do_cache_op(mpq_feed->mpq_demux->ion_client,
+			ion_handle, buffer->data,
+			buffer->size, ION_IOC_INV_CACHES);
+		if (ret)
+			MPQ_DVB_ERR_PRINT(
+				"%s: msm_ion_do_cache_op failed, ret = %d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	/* Video buffers */
+	feed_data = &mpq_feed->video_info;
+	for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+		if (feed_data->buffer_desc.desc[i].base) {
+			/* Non-secured buffer */
+			ret = msm_ion_do_cache_op(
+				mpq_feed->mpq_demux->ion_client,
+				feed_data->buffer_desc.ion_handle[i],
+				feed_data->buffer_desc.desc[i].base,
+				feed_data->buffer_desc.desc[i].size,
+				ION_IOC_INV_CACHES);
+			if (ret)
+				MPQ_DVB_ERR_PRINT(
+					"%s: msm_ion_do_cache_op failed, ret = %d\n",
+					__func__, ret);
+		}
+	}
+
+	return ret;
+}
+
+static void mpq_sdmx_prepare_filter_status(struct mpq_demux *mpq_demux,
+	struct sdmx_filter_status *filter_sts,
+	struct mpq_feed *mpq_feed)
+{
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	struct mpq_video_feed_info *feed_data;
+	struct mpq_streambuffer *sbuff;
+
+	filter_sts->filter_handle = mpq_feed->sdmx_filter_handle;
+	filter_sts->metadata_fill_count =
+		dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+	filter_sts->metadata_write_offset = mpq_feed->metadata_buf.pwrite;
+	filter_sts->error_indicators = 0;
+	filter_sts->status_indicators = 0;
+
+	MPQ_DVB_DBG_PRINT(
+		"%s: Filter meta-data buffer status: fill count = %d, write_offset = %d\n",
+		__func__, filter_sts->metadata_fill_count,
+		filter_sts->metadata_write_offset);
+
+	if (!dvb_dmx_is_video_feed(feed)) {
+		struct dvb_ringbuffer *buffer;
+
+		if (dvb_dmx_is_sec_feed(feed) ||
+			dvb_dmx_is_pcr_feed(feed)) {
+			buffer = (struct dvb_ringbuffer *)
+				&mpq_feed->sdmx_buf;
+		} else {
+			buffer = (struct dvb_ringbuffer *)
+				feed->feed.ts.buffer.ringbuff;
+		}
+
+		filter_sts->data_fill_count = dvb_ringbuffer_avail(buffer);
+		filter_sts->data_write_offset = buffer->pwrite;
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: Filter buffers status: fill count = %d, write_offset = %d\n",
+			__func__, filter_sts->data_fill_count,
+			filter_sts->data_write_offset);
+
+		return;
+	}
+
+	/* Video feed - decoder buffers */
+	feed_data = &mpq_feed->video_info;
+
+	spin_lock(&mpq_feed->video_info.video_buffer_lock);
+	sbuff = feed_data->video_buffer;
+	if (sbuff == NULL) {
+		MPQ_DVB_DBG_PRINT(
+			"%s: video_buffer released\n",
+			__func__);
+		spin_unlock(&feed_data->video_buffer_lock);
+		return;
+	}
+
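+	/*
+	 * In linear mode the fill count is the number of pending buffers
+	 * and the write offset is a descriptor index; in ring mode both
+	 * are byte based.
+	 */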
+	if (feed_data->buffer_desc.decoder_buffers_num > 1) {
+		/* linear mode */
+		filter_sts->data_fill_count = sbuff->pending_buffers_count;
+		filter_sts->data_write_offset =
+			sbuff->raw_data.pwrite /
+			sizeof(struct mpq_streambuffer_buffer_desc);
+	} else {
+		/* ring buffer mode */
+		filter_sts->data_fill_count =
+			mpq_streambuffer_data_avail(sbuff);
+		mpq_streambuffer_get_data_rw_offset(sbuff, NULL,
+			&filter_sts->data_write_offset);
+	}
+
+	spin_unlock(&mpq_feed->video_info.video_buffer_lock);
+
+	MPQ_DVB_DBG_PRINT(
+		"%s: Decoder buffers filter status: fill count = %d, write_offset = %d\n",
+		__func__, filter_sts->data_fill_count,
+		filter_sts->data_write_offset);
+}
+
+static int mpq_sdmx_section_filtering(struct mpq_feed *mpq_feed,
+	struct dvb_demux_filter *f,
+	struct sdmx_metadata_header *header)
+{
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	int ret;
+	u8 neq = 0;
+	u8 xor;
+	u8 tmp;
+	int i;
+
+	if (!mutex_is_locked(&mpq_feed->mpq_demux->mutex)) {
+		MPQ_DVB_ERR_PRINT(
+				"%s: Mutex should have been locked\n",
+				__func__);
+		return -EINVAL;
+	}
+
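+	/*
+	 * Standard dvb_demux section filter match: a mode-masked bit that
+	 * differs from the filter value rejects the section; neq collects
+	 * the not-mode-masked differing bits, and when doneq is set at
+	 * least one such bit must differ for the section to match.
+	 */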
+	for (i = 0; i < DVB_DEMUX_MASK_MAX; i++) {
+		tmp = DVB_RINGBUFFER_PEEK(&mpq_feed->sdmx_buf, i);
+		xor = f->filter.filter_value[i] ^ tmp;
+
+		if (f->maskandmode[i] & xor)
+			return 0;
+
+		neq |= f->maskandnotmode[i] & xor;
+	}
+
+	if (f->doneq && !neq)
+		return 0;
+
+	if (feed->demux->playback_mode == DMX_PB_MODE_PULL) {
+		mutex_unlock(&mpq_feed->mpq_demux->mutex);
+
+		ret = feed->demux->buffer_ctrl.sec(&f->filter,
+					header->payload_length, 1);
+
+		mutex_lock(&mpq_feed->mpq_demux->mutex);
+
+		if (ret) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: buffer_ctrl.sec aborted\n",
+				__func__);
+			return ret;
+		}
+
+		if (mpq_feed->sdmx_filter_handle ==
+			SDMX_INVALID_FILTER_HANDLE) {
+			MPQ_DVB_DBG_PRINT("%s: filter was stopped\n",
+				__func__);
+			return -ENODEV;
+		}
+	}
+
+	if (mpq_feed->sdmx_buf.pread + header->payload_length <
+		mpq_feed->sdmx_buf.size) {
+		feed->cb.sec(&mpq_feed->sdmx_buf.data[mpq_feed->sdmx_buf.pread],
+			header->payload_length,
+			NULL, 0, &f->filter);
+	} else {
+		int split = mpq_feed->sdmx_buf.size - mpq_feed->sdmx_buf.pread;
+
+		feed->cb.sec(&mpq_feed->sdmx_buf.data[mpq_feed->sdmx_buf.pread],
+			split,
+			&mpq_feed->sdmx_buf.data[0],
+			header->payload_length - split,
+			&f->filter);
+	}
+
+	return 0;
+}
+
+static int mpq_sdmx_check_ts_stall(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed,
+	struct sdmx_filter_status *sts,
+	size_t req,
+	int events_only)
+{
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	int ret;
+
+	if (!mutex_is_locked(&mpq_feed->mpq_demux->mutex)) {
+		MPQ_DVB_ERR_PRINT(
+				"%s: Mutex should have been locked\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * In PULL mode we need to verify there is enough space for the dmxdev
+	 * event. Also, if the data buffer is full we want to stall until some
+	 * data is removed from it, to avoid calling sdmx when it cannot
+	 * output data to the still-full buffer.
+	 */
+	if (mpq_demux->demux.playback_mode == DMX_PB_MODE_PULL) {
+		MPQ_DVB_DBG_PRINT("%s: Stalling for events and %zu bytes\n",
+			__func__, req);
+
+		mutex_unlock(&mpq_demux->mutex);
+
+		ret = mpq_demux->demux.buffer_ctrl.ts(&feed->feed.ts, req, 1);
+		MPQ_DVB_DBG_PRINT("%s: stall result = %d\n",
+			__func__, ret);
+
+		mutex_lock(&mpq_demux->mutex);
+
+		if (mpq_feed->sdmx_filter_handle ==
+			SDMX_INVALID_FILTER_HANDLE) {
+			MPQ_DVB_DBG_PRINT("%s: filter was stopped\n",
+					__func__);
+			return -ENODEV;
+		}
+
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Handle filter results for filters with no extra meta-data */
+static void mpq_sdmx_pes_filter_results(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed,
+	struct sdmx_filter_status *sts)
+{
+	int ret;
+	struct sdmx_metadata_header header;
+	struct sdmx_pes_counters counters;
+	struct dmx_data_ready data_event;
+	struct dmx_data_ready pes_event;
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	struct dvb_ringbuffer *buf = (struct dvb_ringbuffer *)
+		feed->feed.ts.buffer.ringbuff;
+	ssize_t bytes_avail;
+
+	if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+		goto pes_filter_check_overflow;
+
+	MPQ_DVB_DBG_PRINT(
+		"%s: Meta: fill=%u, write=%u. Data: fill=%u, write=%u\n",
+		__func__, sts->metadata_fill_count, sts->metadata_write_offset,
+		sts->data_fill_count, sts->data_write_offset);
+
+	mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
+
+	if ((sts->metadata_fill_count == 0) &&
+		(sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)) {
+		ssize_t free = dvb_ringbuffer_free(buf);
+
+		ret = 0;
+		if ((free + SZ_2K) < MAX_PES_LENGTH)
+			ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts,
+				free + SZ_2K, 0);
+		else
+			MPQ_DVB_ERR_PRINT(
+				"%s: Cannot stall when free space is bigger than max PES size\n",
+				__func__);
+		if (ret) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: mpq_sdmx_check_ts_stall aborted\n",
+				__func__);
+			return;
+		}
+	}
+
+	while (sts->metadata_fill_count) {
+		bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+		if (bytes_avail < (sizeof(header) + sizeof(counters))) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: metadata_fill_count is %d less than required %zu bytes\n",
+				__func__,
+				sts->metadata_fill_count,
+				sizeof(header) + sizeof(counters));
+
+			/* clean up remaining bytes to try to recover */
+			DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+				bytes_avail);
+			sts->metadata_fill_count = 0;
+			break;
+		}
+
+		dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&header,
+			sizeof(header));
+		MPQ_DVB_DBG_PRINT(
+			"%s: metadata header: start=%u, length=%u\n",
+			__func__, header.payload_start, header.payload_length);
+		sts->metadata_fill_count -= sizeof(header);
+
+		dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&counters,
+			sizeof(counters));
+		sts->metadata_fill_count -= sizeof(counters);
+
+		/* Notify new data in buffer */
+		data_event.status = DMX_OK;
+		data_event.data_length = header.payload_length;
+		ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts,
+			data_event.data_length, 0);
+		if (ret) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: mpq_sdmx_check_ts_stall aborted\n",
+				__func__);
+			return;
+		}
+
+		feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+
+		/* Notify new complete PES */
+		pes_event.status = DMX_OK_PES_END;
+		pes_event.pes_end.actual_length = header.payload_length;
+		pes_event.pes_end.start_gap = 0;
+		pes_event.data_length = 0;
+
+		/* Parse error indicators */
+		if (sts->error_indicators & SDMX_FILTER_ERR_INVALID_PES_LEN)
+			pes_event.pes_end.pes_length_mismatch = 1;
+		else
+			pes_event.pes_end.pes_length_mismatch = 0;
+
+		pes_event.pes_end.disc_indicator_set = 0;
+
+		pes_event.pes_end.stc = 0;
+		pes_event.pes_end.tei_counter = counters.transport_err_count;
+		pes_event.pes_end.cont_err_counter =
+			counters.continuity_err_count;
+		pes_event.pes_end.ts_packets_num =
+			counters.pes_ts_count;
+
+		ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts, 0, 1);
+		if (ret) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: mpq_sdmx_check_ts_stall aborted\n",
+				__func__);
+			return;
+		}
+		feed->data_ready_cb.ts(&feed->feed.ts, &pes_event);
+	}
+
+pes_filter_check_overflow:
+	if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) &&
+		(sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)) {
+		MPQ_DVB_ERR_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__);
+		mpq_dmx_notify_overflow(feed);
+	}
+
+	if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+		data_event.data_length = 0;
+		data_event.status = DMX_OK_EOS;
+		feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+	}
+}
+
+static void mpq_sdmx_section_filter_results(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed,
+	struct sdmx_filter_status *sts)
+{
+	struct sdmx_metadata_header header;
+	struct dmx_data_ready event;
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	struct dvb_demux_filter *f;
+	struct dmx_section_feed *sec = &feed->feed.sec;
+	ssize_t bytes_avail;
+
+	/* Parse error indicators */
+	if (sts->error_indicators & SDMX_FILTER_ERR_SEC_VERIF_CRC32_FAIL) {
+		MPQ_DVB_DBG_PRINT("%s: Notify CRC err event\n", __func__);
+		event.status = DMX_CRC_ERROR;
+		event.data_length = 0;
+		dvb_dmx_notify_section_event(feed, &event, 1);
+	}
+
+	if (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)
+		MPQ_DVB_ERR_PRINT("%s: internal section buffer overflowed!\n",
+			__func__);
+
+	if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+		goto section_filter_check_eos;
+
+	mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
+	mpq_feed->sdmx_buf.pwrite = sts->data_write_offset;
+
+	while (sts->metadata_fill_count) {
+		bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+		if (bytes_avail < sizeof(header)) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: metadata_fill_count is %d less than required %zu bytes\n",
+				__func__,
+				sts->metadata_fill_count,
+				sizeof(header));
+
+			/* clean up remaining bytes to try to recover */
+			DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+				bytes_avail);
+			sts->metadata_fill_count = 0;
+			break;
+		}
+
+		dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *) &header,
+			sizeof(header));
+		sts->metadata_fill_count -= sizeof(header);
+		MPQ_DVB_DBG_PRINT(
+			"%s: metadata header: start=%u, length=%u\n",
+			__func__, header.payload_start, header.payload_length);
+
+		f = feed->filter;
+		do {
+			if (mpq_sdmx_section_filtering(mpq_feed, f, &header))
+				return;
+		} while ((f = f->next) && sec->is_filtering);
+
+		DVB_RINGBUFFER_SKIP(&mpq_feed->sdmx_buf, header.payload_length);
+	}
+
+section_filter_check_eos:
+	if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+		event.data_length = 0;
+		event.status = DMX_OK_EOS;
+		dvb_dmx_notify_section_event(feed, &event, 1);
+	}
+}
+
+static void mpq_sdmx_decoder_filter_results(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed,
+	struct sdmx_filter_status *sts)
+{
+	struct sdmx_metadata_header header;
+	struct sdmx_pes_counters counters;
+	int pes_header_offset;
+	struct ts_packet_header *ts_header;
+	struct ts_adaptation_field *ts_adapt;
+	struct pes_packet_header *pes_header;
+	u8 metadata_buf[MAX_SDMX_METADATA_LENGTH];
+	struct mpq_streambuffer *sbuf;
+	int ret;
+	struct dmx_data_ready data_event;
+	struct dmx_data_ready data;
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	ssize_t bytes_avail;
+
+	if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+		goto decoder_filter_check_flags;
+
+	/* Update meta data buffer write pointer */
+	mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
+
+	if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PULL) &&
+		(sts->error_indicators & SDMX_FILTER_ERR_D_LIN_BUFS_FULL)) {
+		MPQ_DVB_DBG_PRINT("%s: Decoder stall...\n", __func__);
+
+		ret = mpq_dmx_decoder_fullness_check(
+			mpq_feed->dvb_demux_feed, 0, 0);
+		if (ret) {
+			/* we reach here if demuxing was aborted */
+			MPQ_DVB_DBG_PRINT(
+				"%s: mpq_dmx_decoder_fullness_check aborted\n",
+				__func__);
+			return;
+		}
+	}
+
+	while (sts->metadata_fill_count) {
+		struct mpq_streambuffer_packet_header packet;
+		struct mpq_adapter_video_meta_data meta_data;
+
+		bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+		if (bytes_avail < (sizeof(header) + sizeof(counters))) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: metadata_fill_count is %d less than required %zu bytes\n",
+				__func__,
+				sts->metadata_fill_count,
+				sizeof(header) + sizeof(counters));
+
+			/* clean up remaining bytes to try to recover */
+			DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+				bytes_avail);
+			sts->metadata_fill_count = 0;
+			break;
+		}
+
+		/* Read metadata header */
+		dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&header,
+			sizeof(header));
+		sts->metadata_fill_count -= sizeof(header);
+		MPQ_DVB_DBG_PRINT(
+			"%s: metadata header: start=%u, length=%u, metadata=%u\n",
+			__func__, header.payload_start, header.payload_length,
+			header.metadata_length);
+
+		/* Read metadata - PES counters */
+		dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&counters,
+					sizeof(counters));
+		sts->metadata_fill_count -= sizeof(counters);
+
+		/* Read metadata - TS & PES headers */
+		bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+		if ((header.metadata_length < MAX_SDMX_METADATA_LENGTH) &&
+			(header.metadata_length >= sizeof(counters)) &&
+			(bytes_avail >=
+			 (header.metadata_length - sizeof(counters)))) {
+			dvb_ringbuffer_read(&mpq_feed->metadata_buf,
+				metadata_buf,
+				header.metadata_length - sizeof(counters));
+		} else {
+			MPQ_DVB_ERR_PRINT(
+				"%s: meta-data size %d larger than available meta-data %zd or max allowed %d\n",
+				__func__, header.metadata_length,
+				bytes_avail,
+				MAX_SDMX_METADATA_LENGTH);
+
+			/* clean up remaining bytes to try to recover */
+			DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+				bytes_avail);
+			sts->metadata_fill_count = 0;
+			break;
+		}
+
+		sts->metadata_fill_count -=
+			(header.metadata_length - sizeof(counters));
+
+		ts_header = (struct ts_packet_header *)&metadata_buf[0];
+		if (ts_header->adaptation_field_control == 1) {
+			ts_adapt = NULL;
+			pes_header_offset = sizeof(*ts_header);
+		} else {
+			ts_adapt = (struct ts_adaptation_field *)
+				&metadata_buf[sizeof(*ts_header)];
+			pes_header_offset = sizeof(*ts_header) + 1 +
+				ts_adapt->adaptation_field_length;
+		}
+		pes_header = (struct pes_packet_header *)
+			&metadata_buf[pes_header_offset];
+		meta_data.packet_type = DMX_PES_PACKET;
+		/* TODO - set to real STC when SDMX supports it */
+		meta_data.info.pes.stc = 0;
+
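+		/*
+		 * Reassemble the 33-bit PTS/DTS values from the PES header
+		 * bit-fields (split across bytes around the marker bits).
+		 */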
+		if (pes_header->pts_dts_flag & 0x2) {
+			meta_data.info.pes.pts_dts_info.pts_exist = 1;
+			meta_data.info.pes.pts_dts_info.pts =
+				((u64)pes_header->pts_1 << 30) |
+				((u64)pes_header->pts_2 << 22) |
+				((u64)pes_header->pts_3 << 15) |
+				((u64)pes_header->pts_4 << 7) |
+				(u64)pes_header->pts_5;
+		} else {
+			meta_data.info.pes.pts_dts_info.pts_exist = 0;
+		}
+
+		if (pes_header->pts_dts_flag & 0x1) {
+			meta_data.info.pes.pts_dts_info.dts_exist = 1;
+			meta_data.info.pes.pts_dts_info.dts =
+				((u64)pes_header->dts_1 << 30) |
+				((u64)pes_header->dts_2 << 22) |
+				((u64)pes_header->dts_3 << 15) |
+				((u64)pes_header->dts_4 << 7) |
+				(u64)pes_header->dts_5;
+		} else {
+			meta_data.info.pes.pts_dts_info.dts_exist = 0;
+		}
+
+		spin_lock(&mpq_feed->video_info.video_buffer_lock);
+
+		mpq_feed->video_info.tei_errs =
+			counters.transport_err_count;
+		mpq_feed->video_info.continuity_errs =
+			counters.continuity_err_count;
+		mpq_feed->video_info.ts_packets_num =
+			counters.pes_ts_count;
+		mpq_feed->video_info.ts_dropped_bytes =
+			counters.drop_count *
+			mpq_demux->demux.ts_packet_size;
+
+		sbuf = mpq_feed->video_info.video_buffer;
+		if (sbuf == NULL) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: video_buffer released\n",
+				__func__);
+			spin_unlock(&mpq_feed->video_info.video_buffer_lock);
+			return;
+		}
+
+		if (!header.payload_length) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: warning - video frame with 0 length, dropping\n",
+				__func__);
+			spin_unlock(&mpq_feed->video_info.video_buffer_lock);
+			continue;
+		}
+
+		packet.raw_data_len = header.payload_length;
+		packet.user_data_len = sizeof(meta_data);
+		mpq_streambuffer_get_buffer_handle(sbuf, 0,
+			&packet.raw_data_handle);
+		mpq_streambuffer_get_data_rw_offset(sbuf,
+			NULL, &packet.raw_data_offset);
+		ret = mpq_streambuffer_data_write_deposit(sbuf,
+			header.payload_length);
+		if (ret) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_streambuffer_data_write_deposit failed. ret=%d\n",
+				__func__, ret);
+		}
+		mpq_dmx_update_decoder_stat(mpq_feed);
+		ret = mpq_streambuffer_pkt_write(sbuf, &packet,
+			(u8 *)&meta_data);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_streambuffer_pkt_write failed, ret=%d\n",
+				__func__, ret);
+		} else {
+			mpq_dmx_prepare_es_event_data(
+				&packet, &meta_data, &mpq_feed->video_info,
+				sbuf, &data, ret);
+			MPQ_DVB_DBG_PRINT("%s: Notify ES Event\n", __func__);
+			feed->data_ready_cb.ts(&feed->feed.ts, &data);
+		}
+
+		spin_unlock(&mpq_feed->video_info.video_buffer_lock);
+	}
+
+decoder_filter_check_flags:
+	if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) &&
+		(sts->error_indicators & SDMX_FILTER_ERR_D_LIN_BUFS_FULL)) {
+		MPQ_DVB_ERR_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__);
+		mpq_dmx_notify_overflow(mpq_feed->dvb_demux_feed);
+	}
+
+	if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+		/* Notify decoder via the stream buffer */
+		ret = mpq_dmx_decoder_eos_cmd(mpq_feed, 1);
+		if (ret)
+			MPQ_DVB_ERR_PRINT(
+				"%s: Failed to notify decoder on EOS, ret=%d\n",
+				__func__, ret);
+
+		/* Notify user filter */
+		data_event.data_length = 0;
+		data_event.status = DMX_OK_EOS;
+		mpq_feed->dvb_demux_feed->data_ready_cb.ts(
+			&mpq_feed->dvb_demux_feed->feed.ts, &data_event);
+	}
+}
+
+static void mpq_sdmx_pcr_filter_results(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed,
+	struct sdmx_filter_status *sts)
+{
+	int ret;
+	struct sdmx_metadata_header header;
+	struct dmx_data_ready data;
+	struct dvb_ringbuffer *rbuff = &mpq_feed->sdmx_buf;
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	u8 buf[TS_PACKET_HEADER_LENGTH + MAX_TSP_ADAPTATION_LENGTH +
+	       TIMESTAMP_LEN];
+	size_t stc_len = 0;
+	ssize_t bytes_avail;
+
+	if (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)
+		MPQ_DVB_ERR_PRINT("%s: internal PCR buffer overflowed!\n",
+			__func__);
+
+	if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+		goto pcr_filter_check_eos;
+
+	if (mpq_demux->demux.tsp_format == DMX_TSP_FORMAT_192_TAIL)
+		stc_len = 4;
+
+	mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
+	rbuff->pwrite = sts->data_write_offset;
+
+	while (sts->metadata_fill_count) {
+		bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+		if (bytes_avail < sizeof(header)) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: metadata_fill_count is %d less than required %zu bytes\n",
+				__func__,
+				sts->metadata_fill_count,
+				sizeof(header));
+
+			/* clean up remaining bytes to try to recover */
+			DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+				bytes_avail);
+			sts->metadata_fill_count = 0;
+			break;
+		}
+
+		dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *) &header,
+			sizeof(header));
+		MPQ_DVB_DBG_PRINT(
+			"%s: metadata header: start=%u, length=%u\n",
+			__func__, header.payload_start, header.payload_length);
+		sts->metadata_fill_count -= sizeof(header);
+
+		dvb_ringbuffer_read(rbuff, buf, header.payload_length);
+
+		if (mpq_dmx_extract_pcr_and_dci(buf, &data.pcr.pcr,
+			&data.pcr.disc_indicator_set)) {
+
+			if (stc_len) {
+				data.pcr.stc =
+					buf[header.payload_length-2] << 16;
+				data.pcr.stc +=
+					buf[header.payload_length-3] << 8;
+				data.pcr.stc += buf[header.payload_length-4];
+				/* convert from 105.47 kHz to 27 MHz */
+				data.pcr.stc *= 256;
+			} else {
+				data.pcr.stc = 0;
+			}
+
+			data.data_length = 0;
+			data.status = DMX_OK_PCR;
+			ret = mpq_sdmx_check_ts_stall(
+				mpq_demux, mpq_feed, sts, 0, 1);
+			if (ret) {
+				MPQ_DVB_DBG_PRINT(
+					"%s: mpq_sdmx_check_ts_stall aborted\n",
+					__func__);
+				return;
+			}
+			feed->data_ready_cb.ts(&feed->feed.ts, &data);
+		}
+	}
+
+pcr_filter_check_eos:
+	if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+		data.data_length = 0;
+		data.status = DMX_OK_EOS;
+		feed->data_ready_cb.ts(&feed->feed.ts, &data);
+	}
+}
+
+static void mpq_sdmx_raw_filter_results(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed,
+	struct sdmx_filter_status *sts)
+{
+	int ret;
+	ssize_t new_data;
+	struct dmx_data_ready data_event;
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	struct dvb_ringbuffer *buf = (struct dvb_ringbuffer *)
+					feed->feed.ts.buffer.ringbuff;
+
+	if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+		goto raw_filter_check_flags;
+
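+	/*
+	 * Amount of new data produced since our last known write pointer,
+	 * accounting for ring-buffer wrap-around.
+	 */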
+	new_data = sts->data_write_offset -
+		buf->pwrite;
+	if (new_data < 0)
+		new_data += buf->size;
+
+	ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts,
+		new_data + feed->demux->ts_packet_size, 0);
+	if (ret) {
+		MPQ_DVB_DBG_PRINT(
+			"%s: mpq_sdmx_check_ts_stall aborted\n",
+			__func__);
+		return;
+	}
+
+	data_event.status = DMX_OK;
+	data_event.data_length = new_data;
+	feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+	MPQ_DVB_DBG_PRINT("%s: Callback DMX_OK, size=%d\n",
+		__func__, data_event.data_length);
+
+raw_filter_check_flags:
+	if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) &&
+		(sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)) {
+		MPQ_DVB_DBG_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__);
+		mpq_dmx_notify_overflow(feed);
+	}
+
+	if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+		data_event.data_length = 0;
+		data_event.status = DMX_OK_EOS;
+		feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+	}
+}
+
+static void mpq_sdmx_process_results(struct mpq_demux *mpq_demux)
+{
+	int i;
+	int sdmx_filters;
+	struct sdmx_filter_status *sts;
+	struct mpq_feed *mpq_feed;
+	u8 mpq_feed_idx;
+
+	sdmx_filters = mpq_demux->sdmx_filter_count;
+	for (i = 0; i < sdmx_filters; i++) {
+		sts = &mpq_demux->sdmx_filters_state.status[i];
+		MPQ_DVB_DBG_PRINT(
+			"%s: Filter: handle=%d, status=0x%x, errors=0x%x\n",
+			__func__, sts->filter_handle, sts->status_indicators,
+			sts->error_indicators);
+		MPQ_DVB_DBG_PRINT("%s: Metadata fill count=%d (write=%d)\n",
+			__func__, sts->metadata_fill_count,
+			sts->metadata_write_offset);
+		MPQ_DVB_DBG_PRINT("%s: Data fill count=%d (write=%d)\n",
+			__func__, sts->data_fill_count, sts->data_write_offset);
+
+		mpq_feed_idx = mpq_demux->sdmx_filters_state.mpq_feed_idx[i];
+		mpq_feed = &mpq_demux->feeds[mpq_feed_idx];
+		if ((mpq_feed->dvb_demux_feed->state != DMX_STATE_GO) ||
+			(sts->filter_handle != mpq_feed->sdmx_filter_handle) ||
+			mpq_feed->secondary_feed ||
+			(mpq_demux->sdmx_filters_state.session_id[i] !=
+			 mpq_feed->session_id))
+			continue;
+
+		/* Invalidate output buffer before processing the results */
+		mpq_sdmx_invalidate_buffer(mpq_feed);
+
+		if (sts->error_indicators & SDMX_FILTER_ERR_MD_BUF_FULL)
+			MPQ_DVB_ERR_PRINT(
+				"%s: meta-data buff for pid %d overflowed!\n",
+				__func__, mpq_feed->dvb_demux_feed->pid);
+
+		switch (mpq_feed->filter_type) {
+		case SDMX_PCR_FILTER:
+			mpq_sdmx_pcr_filter_results(mpq_demux, mpq_feed, sts);
+			break;
+		case SDMX_PES_FILTER:
+			mpq_sdmx_pes_filter_results(mpq_demux, mpq_feed,
+				sts);
+			break;
+		case SDMX_SEPARATED_PES_FILTER:
+			mpq_sdmx_decoder_filter_results(mpq_demux, mpq_feed,
+				sts);
+			break;
+		case SDMX_SECTION_FILTER:
+			mpq_sdmx_section_filter_results(mpq_demux, mpq_feed,
+				sts);
+			break;
+		case SDMX_RAW_FILTER:
+			mpq_sdmx_raw_filter_results(mpq_demux, mpq_feed, sts);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+static int mpq_sdmx_process_buffer(struct mpq_demux *mpq_demux,
+	struct sdmx_buff_descr *input,
+	u32 fill_count,
+	u32 read_offset)
+{
+	struct sdmx_filter_status *sts;
+	struct mpq_feed *mpq_feed;
+	u8 flags = 0;
+	u32 errors;
+	u32 status;
+	u32 prev_read_offset;
+	u32 prev_fill_count;
+	enum sdmx_status sdmx_res;
+	int i;
+	int filter_index = 0;
+	int bytes_read;
+	ktime_t process_start_time;
+	ktime_t process_end_time;
+
+	mutex_lock(&mpq_demux->mutex);
+
+	/*
+	 * All active filters may have been closed and the sdmx session
+	 * terminated; in that case there is nothing to process.
+	 */
+	if (mpq_demux->sdmx_session_handle == SDMX_INVALID_SESSION_HANDLE) {
+		MPQ_DVB_DBG_PRINT(
+			"%s: sdmx filters aborted, filter-count %d, session %d\n",
+			__func__, mpq_demux->sdmx_filter_count,
+			mpq_demux->sdmx_session_handle);
+		mutex_unlock(&mpq_demux->mutex);
+		return 0;
+	}
+
+	/* Set input flags */
+	if (mpq_demux->sdmx_eos)
+		flags |= SDMX_INPUT_FLAG_EOS;
+	if (mpq_sdmx_debug)
+		flags |= SDMX_INPUT_FLAG_DBG_ENABLE;
+
+	/* Build up to date filter status array */
+	for (i = 0; i < MPQ_MAX_DMX_FILES; i++) {
+		mpq_feed = &mpq_demux->feeds[i];
+		if ((mpq_feed->sdmx_filter_handle != SDMX_INVALID_FILTER_HANDLE)
+			&& (!mpq_feed->secondary_feed)) {
+			sts = mpq_demux->sdmx_filters_state.status +
+				filter_index;
+			mpq_sdmx_prepare_filter_status(mpq_demux, sts,
+				mpq_feed);
+			mpq_demux->sdmx_filters_state.mpq_feed_idx[filter_index]
+				 = i;
+			mpq_demux->sdmx_filters_state.session_id[filter_index] =
+				mpq_feed->session_id;
+			filter_index++;
+		}
+	}
+
+	/* Sanity check */
+	if (filter_index != mpq_demux->sdmx_filter_count) {
+		mutex_unlock(&mpq_demux->mutex);
+		MPQ_DVB_ERR_PRINT(
+			"%s: Updated %d SDMX filters status but should be %d\n",
+			__func__, filter_index, mpq_demux->sdmx_filter_count);
+		return -ERESTART;
+	}
+
+	MPQ_DVB_DBG_PRINT(
+		"%s: Before SDMX_process: input read_offset=%u, fill count=%u\n",
+		__func__, read_offset, fill_count);
+
+	process_start_time = ktime_get();
+
+	prev_read_offset = read_offset;
+	prev_fill_count = fill_count;
+	sdmx_res = sdmx_process(mpq_demux->sdmx_session_handle, flags, input,
+		&fill_count, &read_offset, &errors, &status,
+		mpq_demux->sdmx_filter_count,
+		mpq_demux->sdmx_filters_state.status);
+
+	process_end_time = ktime_get();
+	bytes_read = prev_fill_count - fill_count;
+
+	mpq_dmx_update_sdmx_stat(mpq_demux, bytes_read,
+			process_start_time, process_end_time);
+
+	MPQ_DVB_DBG_PRINT(
+		"%s: SDMX result=%d, input_fill_count=%u, read_offset=%u, read %d bytes from input, status=0x%X, errors=0x%X\n",
+		__func__, sdmx_res, fill_count, read_offset, bytes_read,
+		status, errors);
+
+	if ((sdmx_res == SDMX_SUCCESS) ||
+		(sdmx_res == SDMX_STATUS_STALLED_IN_PULL_MODE)) {
+		if (sdmx_res == SDMX_STATUS_STALLED_IN_PULL_MODE)
+			MPQ_DVB_DBG_PRINT("%s: SDMX stalled for PULL mode\n",
+				__func__);
+
+		mpq_sdmx_process_results(mpq_demux);
+	} else {
+		MPQ_DVB_ERR_PRINT(
+			"%s: SDMX Process returned %d\n",
+			__func__, sdmx_res);
+	}
+
+	mutex_unlock(&mpq_demux->mutex);
+
+	return bytes_read;
+}
+
+int mpq_sdmx_process(struct mpq_demux *mpq_demux,
+	struct sdmx_buff_descr *input,
+	u32 fill_count,
+	u32 read_offset,
+	size_t tsp_size)
+{
+	int ret;
+	int todo;
+	int total_bytes_read = 0;
+	int limit = mpq_sdmx_proc_limit * tsp_size;
+
+	MPQ_DVB_DBG_PRINT(
+		"\n\n%s: read_offset=%u, fill_count=%u, tsp_size=%zu\n",
+		__func__, read_offset, fill_count, tsp_size);
+
+	while (fill_count >= tsp_size) {
+		todo = fill_count > limit ? limit : fill_count;
+		ret = mpq_sdmx_process_buffer(mpq_demux, input, todo,
+			read_offset);
+
+		if (mpq_demux->demux.sw_filter_abort) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: Demuxing from DVR was aborted\n",
+				__func__);
+			return -ENODEV;
+		}
+
+		if (ret > 0) {
+			total_bytes_read += ret;
+			fill_count -= ret;
+			read_offset += ret;
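+			/* input is a ring buffer; wrap the read offset */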
+			if (read_offset >= input->size)
+				read_offset -= input->size;
+		} else {
+			/*
+			 * ret < 0:	some error occurred
+			 * ret == 0:	not enough data (less than 1 TS packet)
+			 */
+			if (ret < 0)
+				MPQ_DVB_ERR_PRINT(
+					"%s: mpq_sdmx_process_buffer failed, returned %d\n",
+					__func__, ret);
+			break;
+		}
+	}
+
+	return total_bytes_read;
+}
+
+static int mpq_sdmx_write(struct mpq_demux *mpq_demux,
+	struct ion_handle *input_handle,
+	const char *buf,
+	size_t count)
+{
+	struct ion_handle *ion_handle;
+	struct dvb_ringbuffer *rbuf;
+	struct sdmx_buff_descr buf_desc;
+	u32 read_offset;
+	int ret;
+
+	if (mpq_demux == NULL || input_handle == NULL) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	ion_handle = mpq_demux->demux.dmx.dvr_input.priv_handle;
+	rbuf = (struct dvb_ringbuffer *)mpq_demux->demux.dmx.dvr_input.ringbuff;
+
+	ret = mpq_sdmx_dvr_buffer_desc(mpq_demux, &buf_desc);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+		"%s: Failed to init input buffer descriptor. ret = %d\n",
+			__func__, ret);
+		return ret;
+	}
+	read_offset = mpq_demux->demux.dmx.dvr_input.ringbuff->pread;
+
+	/*
+	 * We must flush the buffer before SDMX starts reading from it
+	 * so that it reads valid data from memory.
+	 */
+	ret = msm_ion_do_cache_op(mpq_demux->ion_client,
+		ion_handle, rbuf->data,
+		rbuf->size, ION_IOC_CLEAN_CACHES);
+	if (ret)
+		MPQ_DVB_ERR_PRINT(
+			"%s: msm_ion_do_cache_op failed, ret = %d\n",
+			__func__, ret);
+
+	return mpq_sdmx_process(mpq_demux, &buf_desc, count,
+				read_offset, mpq_demux->demux.ts_packet_size);
+}
+
+int mpq_dmx_write(struct dmx_demux *demux, const char *buf, size_t count)
+{
+	struct dvb_demux *dvb_demux;
+	struct mpq_demux *mpq_demux;
+	int ret = count;
+
+	if (demux == NULL)
+		return -EINVAL;
+
+	dvb_demux = demux->priv;
+	mpq_demux = dvb_demux->priv;
+
+	/* Route through secure demux - process secure feeds if any exist */
+	if (mpq_sdmx_is_loaded() && mpq_demux->sdmx_filter_count) {
+		ret = mpq_sdmx_write(mpq_demux,
+			demux->dvr_input.priv_handle,
+			buf,
+			count);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_sdmx_write failed. ret = %d\n",
+				__func__, ret);
+			ret = count;
+		}
+	}
+
+	/*
+	 * Route through sw filter - process non-secure feeds if any exist.
+	 * The sw filter should process the same number of bytes the sdmx
+	 * process managed to consume, unless an sdmx error occurred, in
+	 * which case the whole buffer should be processed.
+	 */
+	if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds)
+		dvb_dmx_swfilter_format(dvb_demux, buf, ret,
+			dvb_demux->tsp_format);
+
+	if (signal_pending(current))
+		return -EINTR;
+
+	return ret;
+}
+
+int mpq_sdmx_is_loaded(void)
+{
+	static int sdmx_load_checked;
+
+	if (!sdmx_load_checked) {
+		mpq_sdmx_check_app_loaded();
+		sdmx_load_checked = 1;
+	}
+
+	return mpq_dmx_info.secure_demux_app_loaded;
+}
+
+int mpq_dmx_oob_command(struct dvb_demux_feed *feed,
+	struct dmx_oob_command *cmd)
+{
+	struct mpq_feed *mpq_feed = feed->priv;
+	struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+	struct dmx_data_ready event;
+	int ret = 0;
+
+	mutex_lock(&mpq_demux->mutex);
+	mpq_feed = feed->priv;
+
+	if (!dvb_dmx_is_video_feed(feed) && !dvb_dmx_is_pcr_feed(feed) &&
+		!feed->secure_mode.is_secured) {
+		mutex_unlock(&mpq_demux->mutex);
+		return 0;
+	}
+
+	event.data_length = 0;
+
+	switch (cmd->type) {
+	case DMX_OOB_CMD_EOS:
+		event.status = DMX_OK_EOS;
+		if (!feed->secure_mode.is_secured) {
+			if (dvb_dmx_is_video_feed(feed)) {
+				if (!video_framing)
+					mpq_dmx_decoder_pes_closure(mpq_demux,
+						mpq_feed);
+				else
+					mpq_dmx_decoder_frame_closure(mpq_demux,
+						mpq_feed);
+				ret = mpq_dmx_decoder_eos_cmd(mpq_feed, 1);
+				if (ret)
+					MPQ_DVB_ERR_PRINT(
+						"%s: Couldn't write oob eos packet\n",
+						__func__);
+			} else if (dvb_dmx_is_audio_feed(feed)) {
+				mpq_dmx_decoder_audio_pes_closure(mpq_demux,
+								  mpq_feed);
+				ret = mpq_dmx_decoder_eos_cmd(mpq_feed, 2);
+				if (ret)
+					MPQ_DVB_ERR_PRINT(
+							  "%s: Couldn't write oob eos packet\n",
+							  __func__);
+			}
+			ret = feed->data_ready_cb.ts(&feed->feed.ts, &event);
+		} else if (!mpq_demux->sdmx_eos) {
+			struct sdmx_buff_descr buf_desc;
+
+			mpq_demux->sdmx_eos = 1;
+			ret = mpq_sdmx_dvr_buffer_desc(mpq_demux, &buf_desc);
+			if (!ret) {
+				mutex_unlock(&mpq_demux->mutex);
+				mpq_sdmx_process_buffer(mpq_demux, &buf_desc,
+					0, 0);
+				return 0;
+			}
+		}
+		break;
+	case DMX_OOB_CMD_MARKER:
+		event.status = DMX_OK_MARKER;
+		event.marker.id = cmd->params.marker.id;
+
+		if (feed->type == DMX_TYPE_SEC)
+			ret = dvb_dmx_notify_section_event(feed, &event, 1);
+		else
+			/* MPQ_TODO: Notify decoder via the stream buffer */
+			ret = feed->data_ready_cb.ts(&feed->feed.ts, &event);
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&mpq_demux->mutex);
+	return ret;
+}
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
new file mode 100644
index 0000000..0c20a89
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
@@ -0,0 +1,1116 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_DMX_PLUGIN_COMMON_H
+#define _MPQ_DMX_PLUGIN_COMMON_H
+
+#include <linux/msm_ion.h>
+
+#include "dvbdev.h"
+#include "dmxdev.h"
+#include "demux.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "mpq_adapter.h"
+#include "mpq_sdmx.h"
+
+#define TS_PACKET_SYNC_BYTE	(0x47)
+#define TS_PACKET_SIZE		(188)
+#define TS_PACKET_HEADER_LENGTH (4)
+
+/* Length of mandatory fields that must exist in header of video PES */
+#define PES_MANDATORY_FIELDS_LEN			9
+
+/*
+ * 500 PES header packets in the meta-data buffer,
+ * should be more than enough
+ */
+#define VIDEO_NUM_OF_PES_PACKETS			500
+
+#define VIDEO_META_DATA_PACKET_SIZE	\
+	(DVB_RINGBUFFER_PKTHDRSIZE +	\
+		sizeof(struct mpq_streambuffer_packet_header) + \
+		sizeof(struct mpq_adapter_video_meta_data))
+
+#define VIDEO_META_DATA_BUFFER_SIZE	\
+	(VIDEO_NUM_OF_PES_PACKETS * VIDEO_META_DATA_PACKET_SIZE)
+
+#define AUDIO_NUM_OF_PES_PACKETS			100
+
+#define AUDIO_META_DATA_PACKET_SIZE	\
+	(DVB_RINGBUFFER_PKTHDRSIZE +	\
+		sizeof(struct mpq_streambuffer_packet_header) + \
+		sizeof(struct mpq_adapter_audio_meta_data))
+
+#define AUDIO_META_DATA_BUFFER_SIZE	\
+	(AUDIO_NUM_OF_PES_PACKETS * AUDIO_META_DATA_PACKET_SIZE)
+
+/* Max number of open() requests that can be made on a demux device */
+#define MPQ_MAX_DMX_FILES				128
+
+/* TSIF alias name length */
+#define TSIF_NAME_LENGTH				20
+
+/**
+ * struct ts_packet_header - Transport packet header
+ * as defined in MPEG2 transport stream standard.
+ */
+struct ts_packet_header {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	unsigned sync_byte:8;
+	unsigned transport_error_indicator:1;
+	unsigned payload_unit_start_indicator:1;
+	unsigned transport_priority:1;
+	unsigned pid_msb:5;
+	unsigned pid_lsb:8;
+	unsigned transport_scrambling_control:2;
+	unsigned adaptation_field_control:2;
+	unsigned continuity_counter:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	unsigned sync_byte:8;
+	unsigned pid_msb:5;
+	unsigned transport_priority:1;
+	unsigned payload_unit_start_indicator:1;
+	unsigned transport_error_indicator:1;
+	unsigned pid_lsb:8;
+	unsigned continuity_counter:4;
+	unsigned adaptation_field_control:2;
+	unsigned transport_scrambling_control:2;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
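+
+/*
+ * Illustrative sketch (not part of the driver API): the 13-bit PID is split
+ * across pid_msb (5 bits) and pid_lsb (8 bits), so a hypothetical helper to
+ * recover it from a parsed header could look like:
+ *
+ *	static inline u16 ts_header_pid(const struct ts_packet_header *hdr)
+ *	{
+ *		return ((u16)hdr->pid_msb << 8) | hdr->pid_lsb;
+ *	}
+ */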
+
+/**
+ * struct ts_adaptation_field - Adaptation field prefix
+ * as defined in MPEG2 transport stream standard.
+ */
+struct ts_adaptation_field {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	unsigned adaptation_field_length:8;
+	unsigned discontinuity_indicator:1;
+	unsigned random_access_indicator:1;
+	unsigned elementary_stream_priority_indicator:1;
+	unsigned PCR_flag:1;
+	unsigned OPCR_flag:1;
+	unsigned splicing_point_flag:1;
+	unsigned transport_private_data_flag:1;
+	unsigned adaptation_field_extension_flag:1;
+	unsigned program_clock_reference_base_1:8;
+	unsigned program_clock_reference_base_2:8;
+	unsigned program_clock_reference_base_3:8;
+	unsigned program_clock_reference_base_4:8;
+	unsigned program_clock_reference_base_5:1;
+	unsigned reserved:6;
+	unsigned program_clock_reference_ext_1:1;
+	unsigned program_clock_reference_ext_2:8;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	unsigned adaptation_field_length:8;
+	unsigned adaptation_field_extension_flag:1;
+	unsigned transport_private_data_flag:1;
+	unsigned splicing_point_flag:1;
+	unsigned OPCR_flag:1;
+	unsigned PCR_flag:1;
+	unsigned elementary_stream_priority_indicator:1;
+	unsigned random_access_indicator:1;
+	unsigned discontinuity_indicator:1;
+	unsigned program_clock_reference_base_1:8;
+	unsigned program_clock_reference_base_2:8;
+	unsigned program_clock_reference_base_3:8;
+	unsigned program_clock_reference_base_4:8;
+	unsigned program_clock_reference_ext_1:1;
+	unsigned reserved:6;
+	unsigned program_clock_reference_base_5:1;
+	unsigned program_clock_reference_ext_2:8;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
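+
+/*
+ * Illustrative note (standard MPEG-2 arithmetic, not driver-specific):
+ * the 33-bit PCR base is spread over program_clock_reference_base_1..5 and
+ * the 9-bit extension over program_clock_reference_ext_1..2. Once the two
+ * values are reassembled, the full 27 MHz PCR is
+ *
+ *	pcr = pcr_base * 300 + pcr_ext;
+ *
+ * since the base ticks at 90 kHz and the extension is a modulo-300 counter
+ * at 27 MHz.
+ */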
+
+
+/*
+ * PES packet header containing dts and/or pts values
+ * as defined in MPEG2 transport stream standard.
+ */
+struct pes_packet_header {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	unsigned packet_start_code_prefix_1:8;
+	unsigned packet_start_code_prefix_2:8;
+	unsigned packet_start_code_prefix_3:8;
+	unsigned stream_id:8;
+	unsigned pes_packet_length_msb:8;
+	unsigned pes_packet_length_lsb:8;
+	unsigned reserved_bits0:2;
+	unsigned pes_scrambling_control:2;
+	unsigned pes_priority:1;
+	unsigned data_alignment_indicator:1;
+	unsigned copyright:1;
+	unsigned original_or_copy:1;
+	unsigned pts_dts_flag:2;
+	unsigned escr_flag:1;
+	unsigned es_rate_flag:1;
+	unsigned dsm_trick_mode_flag:1;
+	unsigned additional_copy_info_flag:1;
+	unsigned pes_crc_flag:1;
+	unsigned pes_extension_flag:1;
+	unsigned pes_header_data_length:8;
+	unsigned reserved_bits1:4;
+	unsigned pts_1:3;
+	unsigned marker_bit0:1;
+	unsigned pts_2:8;
+	unsigned pts_3:7;
+	unsigned marker_bit1:1;
+	unsigned pts_4:8;
+	unsigned pts_5:7;
+	unsigned marker_bit2:1;
+	unsigned reserved_bits2:4;
+	unsigned dts_1:3;
+	unsigned marker_bit3:1;
+	unsigned dts_2:8;
+	unsigned dts_3:7;
+	unsigned marker_bit4:1;
+	unsigned dts_4:8;
+	unsigned dts_5:7;
+	unsigned marker_bit5:1;
+	unsigned reserved_bits3:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	unsigned packet_start_code_prefix_1:8;
+	unsigned packet_start_code_prefix_2:8;
+	unsigned packet_start_code_prefix_3:8;
+	unsigned stream_id:8;
+	unsigned pes_packet_length_lsb:8;
+	unsigned pes_packet_length_msb:8;
+	unsigned original_or_copy:1;
+	unsigned copyright:1;
+	unsigned data_alignment_indicator:1;
+	unsigned pes_priority:1;
+	unsigned pes_scrambling_control:2;
+	unsigned reserved_bits0:2;
+	unsigned pes_extension_flag:1;
+	unsigned pes_crc_flag:1;
+	unsigned additional_copy_info_flag:1;
+	unsigned dsm_trick_mode_flag:1;
+	unsigned es_rate_flag:1;
+	unsigned escr_flag:1;
+	unsigned pts_dts_flag:2;
+	unsigned pes_header_data_length:8;
+	unsigned marker_bit0:1;
+	unsigned pts_1:3;
+	unsigned reserved_bits1:4;
+	unsigned pts_2:8;
+	unsigned marker_bit1:1;
+	unsigned pts_3:7;
+	unsigned pts_4:8;
+	unsigned marker_bit2:1;
+	unsigned pts_5:7;
+	unsigned marker_bit3:1;
+	unsigned dts_1:3;
+	unsigned reserved_bits2:4;
+	unsigned dts_2:8;
+	unsigned marker_bit4:1;
+	unsigned dts_3:7;
+	unsigned dts_4:8;
+	unsigned marker_bit5:1;
+	unsigned dts_5:7;
+	unsigned reserved_bits3:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
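+
+/*
+ * Illustrative sketch (standard PES header layout, not a driver API): the
+ * 33-bit PTS is split over pts_1 (bits 32..30), pts_2 (29..22), pts_3
+ * (21..15), pts_4 (14..7) and pts_5 (6..0), so a hypothetical helper to
+ * reassemble it could look like:
+ *
+ *	static inline u64 pes_header_pts(const struct pes_packet_header *p)
+ *	{
+ *		return ((u64)p->pts_1 << 30) | ((u64)p->pts_2 << 22) |
+ *			((u64)p->pts_3 << 15) | ((u64)p->pts_4 << 7) |
+ *			p->pts_5;
+ *	}
+ *
+ * The DTS fields (dts_1..dts_5) follow the same pattern.
+ */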
+
+/**
+ * mpq_decoder_buffers_desc - decoder buffer(s) management information.
+ *
+ * @desc: Array of buffer descriptors as they are passed to mpq_streambuffer
+ * upon its initialization. These descriptors must remain valid as long as
+ * the mpq_streambuffer object is used.
+ * @ion_handle: Array of ION handles, one for each decoder buffer, used for
+ * kernel memory mapping or allocation. Handles are saved in order to release
+ * resources properly later on.
+ * @decoder_buffers_num: number of buffers that are managed, either externally
+ * or internally by the mpq_streambuffer object
+ * @shared_file: File handle of internally allocated video buffer shared
+ * with video consumer.
+ */
+struct mpq_decoder_buffers_desc {
+	struct mpq_streambuffer_buffer_desc desc[DMX_MAX_DECODER_BUFFER_NUM];
+	struct ion_handle *ion_handle[DMX_MAX_DECODER_BUFFER_NUM];
+	u32 decoder_buffers_num;
+	struct file *shared_file;
+};
+
+/*
+ * mpq_video_feed_info - private data used for video feed.
+ *
+ * @video_buffer: Holds the streamer buffer shared with
+ * the decoder for feeds having the data going to the decoder.
+ * @video_buffer_lock: Lock protecting the video output buffer.
+ * The lock protects against API calls to manipulate the output buffer
+ * (initialize, free, re-use buffers) and dvb-sw demux parsing the video
+ * data through mpq_dmx_process_video_packet().
+ * @buffer_desc: Holds decoder buffer(s) information used for stream buffer.
+ * @pes_header: Used for feeds that output data to decoder,
+ * holds PES header of current processed PES.
+ * @pes_header_left_bytes: Used for feeds that output data to decoder,
+ * holds remaining PES header bytes of current processed PES.
+ * @pes_header_offset: Holds the offset within the current processed
+ * pes header.
+ * @fullness_wait_cancel: Flag used to signal to abort waiting for
+ * decoder's fullness.
+ * @stream_interface: The ID of the video stream interface registered
+ * with this stream buffer.
+ * @patterns: pointer to the framing patterns to look for.
+ * @patterns_num: number of framing patterns.
+ * @prev_pattern: holds the trailing data of the last processed video packet.
+ * @frame_offset: Saves data buffer offset to which a new frame will be written
+ * @last_pattern_offset: Holds the previous pattern offset
+ * @pending_pattern_len: Accumulated number of data bytes that will be
+ * reported for this frame.
+ * @last_framing_match_type: Used for saving the type of
+ * the previous pattern match found in this video feed.
+ * @last_framing_match_stc: Used for saving the STC attached to TS packet
+ * of the previous pattern match found in this video feed.
+ * @found_sequence_header_pattern: Flag used to note that an MPEG-2
+ * Sequence Header, H.264 SPS or VC-1 Sequence Header pattern
+ * (whichever is relevant according to the video standard) had already
+ * been found.
+ * @prefix_size: a bit mask representing the size(s) of possible prefixes
+ * to the pattern, already found in the previous buffer. If bit 0 is set,
+ * a prefix of size 1 was found. If bit 1 is set, a prefix of size 2 was
+ * found, etc. This supports a prefix size of up to 32, which is more
+ * than we need. The search function updates prefix_size as needed
+ * for the next buffer search.
+ * @first_prefix_size: used to save the prefix size used to find the first
+ * pattern written to the stream buffer.
+ * @saved_pts_dts_info: used to save PTS/DTS information until it is written.
+ * @new_pts_dts_info: used to store PTS/DTS information from current PES header.
+ * @saved_info_used: indicates if saved PTS/DTS information was used.
+ * @new_info_exists: indicates if new PTS/DTS information exists in
+ * new_pts_dts_info that should be saved to saved_pts_dts_info.
+ * @first_pts_dts_copy: a flag used to indicate if PTS/DTS information needs
+ * to be copied from the currently parsed PES header to the saved_pts_dts_info.
+ * @tei_errs: Transport stream Transport Error Indicator (TEI) counter.
+ * @last_continuity: last continuity counter value found in TS packet header.
+ * Initialized to -1.
+ * @continuity_errs: Transport stream continuity error counter.
+ * @ts_packets_num: TS packets counter.
+ * @ts_dropped_bytes: counts the number of bytes dropped due to insufficient
+ * buffer space.
+ * @prev_stc: STC attached to the previous video TS packet
+ */
+struct mpq_video_feed_info {
+	struct mpq_streambuffer *video_buffer;
+	spinlock_t video_buffer_lock;
+	struct mpq_decoder_buffers_desc buffer_desc;
+	struct pes_packet_header pes_header;
+	u32 pes_header_left_bytes;
+	u32 pes_header_offset;
+	int fullness_wait_cancel;
+	enum mpq_adapter_stream_if stream_interface;
+const struct dvb_dmx_video_patterns *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM];
+	int patterns_num;
+	char prev_pattern[DVB_DMX_MAX_PATTERN_LEN];
+	u32 frame_offset;
+	u32 last_pattern_offset;
+	u32 pending_pattern_len;
+	u64 last_framing_match_type;
+	u64 last_framing_match_stc;
+	int found_sequence_header_pattern;
+	struct dvb_dmx_video_prefix_size_masks prefix_size;
+	u32 first_prefix_size;
+	struct dmx_pts_dts_info saved_pts_dts_info;
+	struct dmx_pts_dts_info new_pts_dts_info;
+	int saved_info_used;
+	int new_info_exists;
+	int first_pts_dts_copy;
+	u32 tei_errs;
+	int last_continuity;
+	u32 continuity_errs;
+	u32 ts_packets_num;
+	u32 ts_dropped_bytes;
+	u64 prev_stc;
+};
+
+/* Only a bare-minimum mpq_audio_feed_info struct is required */
+struct mpq_audio_feed_info {
+	struct mpq_streambuffer *audio_buffer;
+	spinlock_t audio_buffer_lock;
+	struct mpq_decoder_buffers_desc buffer_desc;
+	struct pes_packet_header pes_header;
+	u32 pes_header_left_bytes;
+	u32 pes_header_offset;
+	int fullness_wait_cancel;
+	enum mpq_adapter_stream_if stream_interface;
+	u32 frame_offset; /* pes frame offset */
+	struct dmx_pts_dts_info saved_pts_dts_info;
+	struct dmx_pts_dts_info new_pts_dts_info;
+	int saved_info_used;
+	int new_info_exists;
+	int first_pts_dts_copy;
+	u32 tei_errs;
+	int last_continuity;
+	u32 continuity_errs;
+	u32 ts_packets_num;
+	u32 ts_dropped_bytes;
+	u64 prev_stc;
+};
+
+/**
+ * mpq feed object - mpq common plugin feed information
+ *
+ * @dvb_demux_feed: Back pointer to dvb demux level feed object
+ * @mpq_demux: Pointer to common mpq demux object
+ * @plugin_priv: Plugin specific private data
+ * @sdmx_filter_handle: Secure demux filter handle. Recording feeds may share
+ * the same filter handle
+ * @secondary_feed: Specifies if this feed shares a filter handle with
+ * other feeds
+ * @filter_type: Type of the secure demux filter (PES, section, PCR,
+ * raw or separated PES)
+ * @metadata_buf: Ring buffer object for managing the metadata buffer
+ * @metadata_buf_handle: Allocation handle for the metadata buffer
+ * @session_id: Counter that is incremented every time feed is initialized
+ * through mpq_dmx_init_mpq_feed
+ * @sdmx_buf: Ring buffer object for intermediate output data from the sdmx
+ * @sdmx_buf_handle: Allocation handle for the sdmx intermediate data buffer
+ * @video_info: Video feed specific information
+ * @audio_info: Audio feed specific information
+ */
+struct mpq_feed {
+	struct dvb_demux_feed *dvb_demux_feed;
+	struct mpq_demux *mpq_demux;
+	void *plugin_priv;
+
+	/* Secure demux related */
+	int sdmx_filter_handle;
+	int secondary_feed;
+	enum sdmx_filter filter_type;
+	struct dvb_ringbuffer metadata_buf;
+	struct ion_handle *metadata_buf_handle;
+
+	u8 session_id;
+	struct dvb_ringbuffer sdmx_buf;
+	struct ion_handle *sdmx_buf_handle;
+
+	struct mpq_video_feed_info video_info;
+	struct mpq_audio_feed_info audio_info;
+};
+
+/**
+ * struct mpq_demux - mpq demux information
+ * @idx: Instance index
+ * @demux: The dvb_demux instance used by mpq_demux
+ * @dmxdev: The dmxdev instance used by mpq_demux
+ * @fe_memory: Handle of front-end memory source to mpq_demux
+ * @source: The current source connected to the demux
+ * @is_initialized: Indicates whether this demux device was
+ *                  initialized or not.
+ * @ion_client: ION demux client used to allocate memory from ION.
+ * @mutex: Lock used to protect private feed data
+ * @feeds: mpq common feed object pool
+ * @num_active_feeds: Number of active mpq feeds
+ * @num_secure_feeds: Number of secure feeds (having an sdmx filter
+ * associated) currently allocated.
+ * Used before each call to sdmx_process() to build an up-to-date state.
+ * @sdmx_session_handle: Secure demux open session handle
+ * @sdmx_filter_count: Number of active secure demux filters
+ * @sdmx_eos: End-of-stream indication flag for current sdmx session
+ * @sdmx_filters_state: Array holding buffers status for each secure
+ * demux filter.
+ * @decoder_alloc_flags: ION flags to be used when allocating internally
+ * @plugin_priv: Underlying plugin's own private data
+ * @mpq_dmx_plugin_release: Underlying plugin's release function
+ * @hw_notification_interval: Notification interval in msec,
+ * exposed in debugfs.
+ * @hw_notification_min_interval: Minimum notification interval in msec,
+ * exposed in debugfs.
+ * @hw_notification_count: Notification count, exposed in debugfs.
+ * @hw_notification_size: Notification size in bytes, exposed in debugfs.
+ * @hw_notification_min_size: Minimum notification size in bytes,
+ * exposed in debugfs.
+ * @decoder_stat: Decoder output statistics, exposed in debugfs.
+ * @sdmx_process_count: Total number of times sdmx_process is called.
+ * @sdmx_process_time_sum: Total time sdmx_process takes.
+ * @sdmx_process_time_average: Average time sdmx_process takes.
+ * @sdmx_process_time_max: Max time sdmx_process takes.
+ * @sdmx_process_packets_sum: Total packets number sdmx_process handled.
+ * @sdmx_process_packets_average: Average packets number sdmx_process handled.
+ * @sdmx_process_packets_min: Minimum packets number sdmx_process handled.
+ * @last_notification_time: Time of last HW notification.
+ */
+struct mpq_demux {
+	int idx;
+	struct dvb_demux demux;
+	struct dmxdev dmxdev;
+	struct dmx_frontend fe_memory;
+	dmx_source_t source;
+	int is_initialized;
+	struct ion_client *ion_client;
+	struct mutex mutex;
+	struct mpq_feed feeds[MPQ_MAX_DMX_FILES];
+	u32 num_active_feeds;
+	u32 num_secure_feeds;
+	int sdmx_session_handle;
+	int sdmx_session_ref_count;
+	int sdmx_filter_count;
+	int sdmx_eos;
+	struct {
+		/* SDMX filters status */
+		struct sdmx_filter_status status[MPQ_MAX_DMX_FILES];
+
+		/* Index of the feed respective to SDMX filter */
+		u8 mpq_feed_idx[MPQ_MAX_DMX_FILES];
+
+		/*
+		 * Snapshot of session_id of the feed
+		 * when SDMX process was called. This is used
+		 * to identify whether the feed has been
+		 * restarted when processing SDMX results.
+		 * May happen when demux is stalled in playback
+		 * from memory with PULL mode.
+		 */
+		u8 session_id[MPQ_MAX_DMX_FILES];
+	} sdmx_filters_state;
+
+	unsigned int decoder_alloc_flags;
+
+	/* HW plugin specific */
+	void *plugin_priv;
+	int (*mpq_dmx_plugin_release)(struct mpq_demux *mpq_demux);
+
+	/* debug-fs */
+	u32 hw_notification_interval;
+	u32 hw_notification_min_interval;
+	u32 hw_notification_count;
+	u32 hw_notification_size;
+	u32 hw_notification_min_size;
+
+	struct {
+		/*
+		 * Accumulated number of bytes
+		 * dropped due to decoder buffer fullness.
+		 */
+		u32 drop_count;
+
+		/* Counter incremented for each video frame output by demux */
+		u32 out_count;
+
+		/*
+		 * Sum of intervals (msec) holding the time
+		 * between two successive video frames output.
+		 */
+		u32 out_interval_sum;
+
+		/*
+		 * Average interval (msec) between two
+		 * successive video frames output.
+		 */
+		u32 out_interval_average;
+
+		/*
+		 * Max interval (msec) between two
+		 * successive video frames output.
+		 */
+		u32 out_interval_max;
+
+		/* Counter for number of decoder packets with TEI bit set */
+		u32 ts_errors;
+
+		/*
+		 * Counter for number of decoder packets
+		 * with continuity counter errors.
+		 */
+		u32 cc_errors;
+
+		/* Time of last video frame output */
+		ktime_t out_last_time;
+	} decoder_stat[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES];
+
+	u32 sdmx_process_count;
+	u32 sdmx_process_time_sum;
+	u32 sdmx_process_time_average;
+	u32 sdmx_process_time_max;
+	u32 sdmx_process_packets_sum;
+	u32 sdmx_process_packets_average;
+	u32 sdmx_process_packets_min;
+	enum sdmx_log_level sdmx_log_level;
+
+	ktime_t last_notification_time;
+	int ts_packet_timestamp_source;
+};
+
+/**
+ * mpq_dmx_init - initialization and registration function of
+ * single MPQ demux device
+ *
+ * @adapter: The adapter to register mpq_demux to
+ * @mpq_demux: The mpq demux to initialize
+ *
+ * Every HW plug-in needs to provide an implementation of such a
+ * function, which is called for each demux device during module
+ * initialization. The function mpq_dmx_plugin_init should be
+ * called during the HW plug-in module initialization.
+ */
+typedef int (*mpq_dmx_init)(struct dvb_adapter *mpq_adapter,
+	struct mpq_demux *demux);
+
+/**
+ * mpq_dmx_plugin_init - Initialize demux devices and register
+ * them to the dvb adapter.
+ *
+ * @dmx_init_func: Pointer to the function to be used
+ *  to initialize demux of the underlying HW plugin.
+ *
+ * Return     error code
+ *
+ * Should be called at the HW plugin module initialization.
+ */
+int mpq_dmx_plugin_init(mpq_dmx_init dmx_init_func);
+
+/**
+ * mpq_dmx_plugin_exit - Terminate demux devices.
+ *
+ * Should be called at the HW plugin module termination.
+ */
+void mpq_dmx_plugin_exit(void);
+
+/**
+ * mpq_dmx_set_source - implementation of the set_source routine.
+ *
+ * @demux: The demux device to set its source.
+ * @src: The source to be set.
+ *
+ * Return     error code
+ *
+ * Can be used by the underlying plugins to implement kernel
+ * demux API set_source routine.
+ */
+int mpq_dmx_set_source(struct dmx_demux *demux, const dmx_source_t *src);
+
+/**
+ * mpq_dmx_map_buffer - map user-space buffer into kernel space.
+ *
+ * @demux: The demux device.
+ * @dmx_buffer: The demux buffer from user-space, assumes that
+ * buffer handle is ION file-handle.
+ * @priv_handle: Saves ION-handle of the buffer imported by this function.
+ * @kernel_mem: Saves kernel mapped address of the buffer.
+ *
+ * Return     error code
+ *
+ * The function maps the buffer into kernel memory only if the buffer
+ * was not allocated with secure flag, otherwise the returned kernel
+ * memory address is set to NULL.
+ */
+int mpq_dmx_map_buffer(struct dmx_demux *demux, struct dmx_buffer *dmx_buffer,
+		void **priv_handle, void **kernel_mem);
+
+/**
+ * mpq_dmx_unmap_buffer - unmap user-space buffer from kernel space memory.
+ *
+ * @demux: The demux device.
+ * @priv_handle: ION-handle of the buffer returned from mpq_dmx_map_buffer.
+ *
+ * Return     error code
+ *
+ * The function unmaps the buffer from kernel memory only if the buffer
+ * was not allocated with secure flag.
+ */
+int mpq_dmx_unmap_buffer(struct dmx_demux *demux, void *priv_handle);
+
+/**
+ * mpq_dmx_decoder_fullness_init - Initialize waiting
+ * mechanism on decoder's buffer fullness.
+ *
+ * @feed: The decoder's feed
+ *
+ * Return     error code.
+ */
+int mpq_dmx_decoder_fullness_init(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_decoder_fullness_wait - Checks whether the decoder buffer
+ * has the required free space and, if not, waits for it.
+ *
+ * @feed: The decoder's feed
+ * @required_space: the required free space to wait for
+ *
+ * Return     error code.
+ */
+int mpq_dmx_decoder_fullness_wait(struct dvb_demux_feed *feed,
+		size_t required_space);
+
+/**
+ * mpq_dmx_decoder_fullness_abort - Aborts any wait currently done
+ * on the decoder's buffer fullness. After calling this, the user
+ * must call mpq_dmx_decoder_fullness_init before waiting again.
+ *
+ * @feed: The decoder's feed
+ *
+ * Return     error code.
+ */
+int mpq_dmx_decoder_fullness_abort(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_decoder_buffer_status - Returns the
+ * status of the decoder's buffer.
+ *
+ * @feed: The decoder's feed
+ * @dmx_buffer_status: Status of decoder's buffer
+ *
+ * Return     error code.
+ */
+int mpq_dmx_decoder_buffer_status(struct dvb_demux_feed *feed,
+		struct dmx_buffer_status *dmx_buffer_status);
+
+/**
+ * mpq_dmx_reuse_decoder_buffer - release buffer passed to decoder for reuse
+ * by the stream-buffer.
+ *
+ * @feed: The decoder's feed.
+ * @cookie: stream-buffer handle of the buffer.
+ *
+ * Return     error code
+ *
+ * The function releases the buffer provided by the stream-buffer
+ * connected to the decoder back to the stream-buffer for reuse.
+ */
+int mpq_dmx_reuse_decoder_buffer(struct dvb_demux_feed *feed, int cookie);
+
+/**
+ * mpq_dmx_process_video_packet - Assemble PES data and output it
+ * to the stream-buffer connected to the decoder.
+ *
+ * @feed: The feed used for the video TS packets
+ * @buf: The buffer holding video TS packet.
+ *
+ * Return     error code.
+ *
+ * The function assumes it receives a buffer with a single TS packet
+ * of the relevant PID.
+ * If the output buffer is full during assembly, the function drops
+ * the packet and does not write it to the output buffer.
+ * Scrambled packets are bypassed.
+ */
+int mpq_dmx_process_video_packet(struct dvb_demux_feed *feed, const u8 *buf);
+
+/**
+ * mpq_dmx_process_pcr_packet - Extract PCR/STC pairs from
+ * a 192 bytes packet.
+ *
+ * @feed: The feed used for the PCR TS packets
+ * @buf: The buffer holding pcr/stc packet.
+ *
+ * Return     error code.
+ *
+ * The function assumes it receives a buffer with a single TS packet
+ * of the relevant PID, carrying a 4-byte suffix with an extra
+ * timestamp in the following format:
+ *
+ * Byte3: TSIF flags
+ * Byte0-2: TTS, 0..2^24-1 at 105.47 kHz (27*10^6/256).
+ *
+ * The function calls back into dmxdev after extracting the PCR/STC
+ * pair.
+ */
+int mpq_dmx_process_pcr_packet(struct dvb_demux_feed *feed, const u8 *buf);
+
+/**
+ * mpq_dmx_extract_pcr_and_dci() - Extract the PCR field and discontinuity
+ * indicator from a TS packet buffer.
+ *
+ * @buf: TS packet buffer
+ * @pcr: returned PCR value
+ * @dci: returned discontinuity indicator
+ *
+ * Returns 1 if PCR was extracted, 0 otherwise.
+ */
+int mpq_dmx_extract_pcr_and_dci(const u8 *buf, u64 *pcr, int *dci);
+
+/**
+ * mpq_dmx_init_debugfs_entries -
+ * Extend dvb-demux debugfs with mpq related entries (HW statistics and secure
+ * demux log level).
+ *
+ * @mpq_demux: The mpq_demux device to initialize.
+ */
+void mpq_dmx_init_debugfs_entries(struct mpq_demux *mpq_demux);
+
+/**
+ * mpq_dmx_update_hw_statistics -
+ * Update dvb-demux debugfs with HW notification statistics.
+ *
+ * @mpq_demux: The mpq_demux device to update.
+ */
+void mpq_dmx_update_hw_statistics(struct mpq_demux *mpq_demux);
+
+/**
+ * mpq_dmx_set_cipher_ops - Handles setting of cipher operations
+ *
+ * @feed: The feed to set its cipher operations
+ * @cipher_ops: Cipher operations to be set
+ *
+ * This common function handles only the case when working with
+ * secure-demux. When working with secure demux a single decrypt cipher
+ * operation is allowed.
+ *
+ * Return error code
+ */
+int mpq_dmx_set_cipher_ops(struct dvb_demux_feed *feed,
+		struct dmx_cipher_operations *cipher_ops);
+
+/**
+ * mpq_dmx_convert_tts - Convert timestamp attached by HW to each TS
+ * packet to 27MHz.
+ *
+ * @feed: The feed with TTS attached
+ * @timestamp: Buffer holding the timestamp attached by the HW
+ * @timestampIn27Mhz: Timestamp result in 27MHz
+ */
+void mpq_dmx_convert_tts(struct dvb_demux_feed *feed,
+		const u8 timestamp[TIMESTAMP_LEN],
+		u64 *timestampIn27Mhz);
+
+/**
+ * mpq_sdmx_open_session - Handle the details of opening a new secure demux
+ * session for the specified mpq demux instance. Multiple calls are
+ * allowed; reference counting ensures the session is opened only when needed.
+ *
+ * @mpq_demux: mpq demux instance
+ *
+ * Return error code
+ */
+int mpq_sdmx_open_session(struct mpq_demux *mpq_demux);
+
+/**
+ * mpq_sdmx_close_session - Closes secure demux session. The session
+ * is closed only if reference counter of the session reaches 0.
+ *
+ * @mpq_demux: mpq demux instance
+ *
+ * Return error code
+ */
+int mpq_sdmx_close_session(struct mpq_demux *mpq_demux);
+
+/**
+ * mpq_dmx_init_mpq_feed - Initialize an mpq feed object
+ * The function allocates an mpq_feed object and saves it in the
+ * dvb_demux_feed priv field.
+ *
+ * @feed: A dvb demux level feed parent object
+ *
+ * Return error code
+ */
+int mpq_dmx_init_mpq_feed(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_terminate_feed - Destroy an mpq feed object
+ *
+ * @feed: A dvb demux level feed parent object
+ *
+ * Return error code
+ */
+int mpq_dmx_terminate_feed(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_init_video_feed() - Initializes video related data structures
+ *
+ * @mpq_feed:	mpq_feed object to initialize
+ *
+ * Return error code
+ */
+int mpq_dmx_init_video_feed(struct mpq_feed *mpq_feed);
+
+/**
+ * mpq_dmx_terminate_video_feed() - Release video related feed resources
+ *
+ * @mpq_feed:	mpq_feed object to terminate
+ *
+ * Return error code
+ */
+int mpq_dmx_terminate_video_feed(struct mpq_feed *mpq_feed);
+
+/**
+ * mpq_dmx_write - demux write() function implementation.
+ *
+ * A wrapper function used for writing new data into the demux via DVR.
+ * It determines whether the new data should go to the secure demux or to
+ * the normal dvb-demux software demux.
+ *
+ * @demux: demux interface
+ * @buf: input buffer
+ * @count: number of data bytes in input buffer
+ *
+ * Return number of bytes processed or error code
+ */
+int mpq_dmx_write(struct dmx_demux *demux, const char *buf, size_t count);
+
+/**
+ * mpq_sdmx_process - Perform demuxing process on the specified input buffer
+ * in the secure demux instance
+ *
+ * @mpq_demux: mpq demux instance
+ * @input: input buffer descriptor
+ * @fill_count: number of data bytes in input buffer that can be read
+ * @read_offset: offset in buffer for reading
+ * @tsp_size: size of single TS packet
+ *
+ * Return number of bytes read or error code
+ */
+int mpq_sdmx_process(struct mpq_demux *mpq_demux,
+	struct sdmx_buff_descr *input,
+	u32 fill_count,
+	u32 read_offset,
+	size_t tsp_size);
+
+/**
+ * mpq_sdmx_is_loaded - Returns 1 if the secure demux application is loaded,
+ * 0 otherwise. This function should be used to determine whether or not
+ * processing should take place in the SDMX.
+ */
+int mpq_sdmx_is_loaded(void);
+
+/**
+ * mpq_dmx_oob_command - Handles OOB command from dvb-demux.
+ *
+ * OOB marker commands trigger callback to the dmxdev.
+ * Handling of EOS command may trigger current (last on stream) PES/Frame to
+ * be reported, in addition to callback to the dmxdev.
+ * In case secure demux is active for the feed, EOS command is passed to the
+ * secure demux for handling.
+ *
+ * @feed: dvb demux feed object
+ * @cmd: oob command data
+ *
+ * returns 0 on success or error
+ */
+int mpq_dmx_oob_command(struct dvb_demux_feed *feed,
+	struct dmx_oob_command *cmd);
+
+/**
+ * mpq_dmx_peer_rec_feed() - For a recording filter with multiple feed objects,
+ * search for a feed object that shares the same filter as the specified feed
+ * object, and return it.
+ * This can be used to test whether the specified feed object is the first feed
+ * allocated for the recording filter, in which case the return value is NULL.
+ *
+ * @feed: dvb demux feed object
+ *
+ * Return the dvb_demux_feed sharing the same filter's buffer or NULL if no
+ * such is found.
+ */
+struct dvb_demux_feed *mpq_dmx_peer_rec_feed(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_decoder_eos_cmd() - Report EOS event to the mpq_streambuffer
+ *
+ * @mpq_feed: Audio/Video mpq_feed object for notification
+ * @feed_type: Feed type (Audio or Video)
+ *
+ * Return error code
+ */
+int mpq_dmx_decoder_eos_cmd(struct mpq_feed *mpq_feed, int feed_type);
+
+/**
+ * mpq_dmx_parse_mandatory_pes_header() - Parse non-optional PES header fields
+ * from TS packet buffer and save results in the feed object.
+ *
+ * @feed:		Video dvb demux feed object
+ * @feed_data:		Structure where results will be saved
+ * @pes_header:		Saved PES header
+ * @buf:		Input buffer containing TS packet with the PES header
+ * @ts_payload_offset:	Offset in 'buf' where payload begins
+ * @bytes_avail:	Length of actual payload
+ *
+ * Return error code
+ */
+int mpq_dmx_parse_mandatory_pes_header(
+				struct dvb_demux_feed *feed,
+				struct mpq_video_feed_info *feed_data,
+				struct pes_packet_header *pes_header,
+				const u8 *buf,
+				u32 *ts_payload_offset,
+				int *bytes_avail);
+
+/**
+ * mpq_dmx_parse_remaining_pes_header() - Parse optional PES header fields
+ * from TS packet buffer and save results in the feed object.
+ * This function depends on mpq_dmx_parse_mandatory_pes_header being called
+ * first for state to be valid.
+ *
+ * @feed:		Video dvb demux feed object
+ * @feed_data:		Structure where results will be saved
+ * @pes_header:		Saved PES header
+ * @buf:		Input buffer containing TS packet with the PES header
+ * @ts_payload_offset:	Offset in 'buf' where payload begins
+ * @bytes_avail:	Length of actual payload
+ *
+ * Return error code
+ */
+int mpq_dmx_parse_remaining_pes_header(
+				struct dvb_demux_feed *feed,
+				struct mpq_video_feed_info *feed_data,
+				struct pes_packet_header *pes_header,
+				const u8 *buf,
+				u32 *ts_payload_offset,
+				int *bytes_avail);
+
+/**
+ * mpq_dmx_flush_stream_buffer() - Flush video stream buffer object of the
+ * specific video feed, both meta-data packets and data.
+ *
+ * @feed:	dvb demux video feed object
+ *
+ * Return error code
+ */
+int mpq_dmx_flush_stream_buffer(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_save_pts_dts() - Save the current PTS/DTS data
+ *
+ * @feed_data: Video feed structure where PTS/DTS is saved
+ */
+static inline void mpq_dmx_save_pts_dts(struct mpq_video_feed_info *feed_data)
+{
+	if (feed_data->new_info_exists) {
+		feed_data->saved_pts_dts_info.pts_exist =
+			feed_data->new_pts_dts_info.pts_exist;
+		feed_data->saved_pts_dts_info.pts =
+			feed_data->new_pts_dts_info.pts;
+		feed_data->saved_pts_dts_info.dts_exist =
+			feed_data->new_pts_dts_info.dts_exist;
+		feed_data->saved_pts_dts_info.dts =
+			feed_data->new_pts_dts_info.dts;
+
+		feed_data->new_info_exists = 0;
+		feed_data->saved_info_used = 0;
+	}
+}
+
+/**
+ * mpq_dmx_write_pts_dts() - Write out the saved PTS/DTS data and mark as used
+ *
+ * @feed_data:	Video feed structure where PTS/DTS was saved
+ * @info:	PTS/DTS structure to write to
+ */
+static inline void mpq_dmx_write_pts_dts(struct mpq_video_feed_info *feed_data,
+					struct dmx_pts_dts_info *info)
+{
+	if (!feed_data->saved_info_used) {
+		info->pts_exist = feed_data->saved_pts_dts_info.pts_exist;
+		info->pts = feed_data->saved_pts_dts_info.pts;
+		info->dts_exist = feed_data->saved_pts_dts_info.dts_exist;
+		info->dts = feed_data->saved_pts_dts_info.dts;
+
+		feed_data->saved_info_used = 1;
+	} else {
+		info->pts_exist = 0;
+		info->dts_exist = 0;
+	}
+}
+
+/*
+ * mpq_dmx_calc_time_delta -
+ * Calculate delta in msec between two time snapshots.
+ *
+ * @curr_time: value of current time
+ * @prev_time: value of previous time
+ *
+ * Return	time-delta in msec
+ */
+static inline u32 mpq_dmx_calc_time_delta(ktime_t curr_time, ktime_t prev_time)
+{
+	s64 delta_time_ms = ktime_ms_delta(curr_time, prev_time);
+
+	return (u32)delta_time_ms;
+}
+
+void mpq_dmx_update_decoder_stat(struct mpq_feed *mpq_feed);
+
+/* Return the common module parameter tsif_mode */
+int mpq_dmx_get_param_tsif_mode(void);
+
+/* Return the common module parameter clock_inv */
+int mpq_dmx_get_param_clock_inv(void);
+
+/* Return the common module parameter mpq_sdmx_scramble_odd */
+int mpq_dmx_get_param_scramble_odd(void);
+
+/* Return the common module parameter mpq_sdmx_scramble_even */
+int mpq_dmx_get_param_scramble_even(void);
+
+/* Return the common module parameter mpq_sdmx_scramble_default_discard */
+int mpq_dmx_get_param_scramble_default_discard(void);
+
+/* APIs for Audio stream buffers interface -- Added for broadcast use case */
+/*
+ * The Audio/Video drivers (or consumers) require the stream_buffer information
+ * for consuming packet headers and compressed AV data from the
+ * ring buffer filled by the demux driver, which is the producer
+ */
+struct mpq_streambuffer *consumer_audio_streambuffer(int dmx_ts_pes_audio);
+struct mpq_streambuffer *consumer_video_streambuffer(int dmx_ts_pes_video);
+
+int mpq_dmx_init_audio_feed(struct mpq_feed *mpq_feed);
+
+int mpq_dmx_terminate_audio_feed(struct mpq_feed *mpq_feed);
+
+int mpq_dmx_parse_remaining_audio_pes_header(
+				struct dvb_demux_feed *feed,
+				struct mpq_audio_feed_info *feed_data,
+				struct pes_packet_header *pes_header,
+				const u8 *buf,
+				u32 *ts_payload_offset,
+				int *bytes_avail);
+
+static inline void mpq_dmx_save_audio_pts_dts(
+				struct mpq_audio_feed_info *feed_data)
+{
+	if (feed_data->new_info_exists) {
+		feed_data->saved_pts_dts_info.pts_exist =
+			feed_data->new_pts_dts_info.pts_exist;
+		feed_data->saved_pts_dts_info.pts =
+			feed_data->new_pts_dts_info.pts;
+		feed_data->saved_pts_dts_info.dts_exist =
+			feed_data->new_pts_dts_info.dts_exist;
+		feed_data->saved_pts_dts_info.dts =
+			feed_data->new_pts_dts_info.dts;
+
+		feed_data->new_info_exists = 0;
+		feed_data->saved_info_used = 0;
+	}
+}
+
+/*
+ * mpq_dmx_process_audio_packet - Assemble Audio PES data and output to
+ * stream buffer connected to decoder.
+ */
+int mpq_dmx_process_audio_packet(struct dvb_demux_feed *feed, const u8 *buf);
+
+static inline void mpq_dmx_write_audio_pts_dts(
+					struct mpq_audio_feed_info *feed_data,
+					struct dmx_pts_dts_info *info)
+{
+	if (!feed_data->saved_info_used) {
+		info->pts_exist = feed_data->saved_pts_dts_info.pts_exist;
+		info->pts = feed_data->saved_pts_dts_info.pts;
+		info->dts_exist = feed_data->saved_pts_dts_info.dts_exist;
+		info->dts = feed_data->saved_pts_dts_info.dts;
+
+		feed_data->saved_info_used = 1;
+	} else {
+		info->pts_exist = 0;
+		info->dts_exist = 0;
+	}
+}
+
+#endif /* _MPQ_DMX_PLUGIN_COMMON_H */
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
new file mode 100644
index 0000000..16e1ba4
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
@@ -0,0 +1,280 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "mpq_dvb_debug.h"
+#include "mpq_dmx_plugin_common.h"
+
+
+static int mpq_sw_dmx_start_filtering(struct dvb_demux_feed *feed)
+{
+	int ret = -EINVAL;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	MPQ_DVB_DBG_PRINT("%s(pid=%d) executed\n", __func__, feed->pid);
+
+	if (mpq_demux == NULL) {
+		MPQ_DVB_ERR_PRINT("%s: invalid mpq_demux handle\n", __func__);
+		goto out;
+	}
+
+	if (mpq_demux->source < DMX_SOURCE_DVR0) {
+		MPQ_DVB_ERR_PRINT("%s: only DVR source is supported (%d)\n",
+			__func__, mpq_demux->source);
+		goto out;
+	}
+
+	/*
+	 * Always feed sections/PES starting from a new one and
+	 * do not partially transfer data from an older one
+	 */
+	feed->pusi_seen = 0;
+
+	ret = mpq_dmx_init_mpq_feed(feed);
+	if (ret)
+		MPQ_DVB_ERR_PRINT("%s: mpq_dmx_init_mpq_feed failed(%d)\n",
+			__func__, ret);
+out:
+	return ret;
+}
+
+static int mpq_sw_dmx_stop_filtering(struct dvb_demux_feed *feed)
+{
+	int ret;
+
+	MPQ_DVB_DBG_PRINT("%s(%d) executed\n", __func__, feed->pid);
+
+	ret = mpq_dmx_terminate_feed(feed);
+	if (ret)
+		MPQ_DVB_ERR_PRINT("%s: mpq_dmx_terminate_feed failed(%d)\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static int mpq_sw_dmx_write_to_decoder(struct dvb_demux_feed *feed,
+		const u8 *buf, size_t len)
+{
+	/*
+	 * It is assumed that this function is called once for each
+	 * TS packet of the relevant feed.
+	 */
+	if (len > (TIMESTAMP_LEN + TS_PACKET_SIZE))
+		MPQ_DVB_DBG_PRINT(
+				"%s: warning - len larger than one packet\n",
+				__func__);
+
+	if (dvb_dmx_is_video_feed(feed))
+		return mpq_dmx_process_video_packet(feed, buf);
+
+	if (dvb_dmx_is_pcr_feed(feed))
+		return mpq_dmx_process_pcr_packet(feed, buf);
+
+	return 0;
+}
+
+static int mpq_sw_dmx_set_source(struct dmx_demux *demux,
+		const dmx_source_t *src)
+{
+	int ret = -EINVAL;
+
+	if (demux == NULL || demux->priv == NULL || src == NULL) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		goto out;
+	}
+
+	if (*src >= DMX_SOURCE_DVR0 && *src <= DMX_SOURCE_DVR3) {
+		ret = mpq_dmx_set_source(demux, src);
+		if (ret)
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_dmx_set_source(%d) failed, ret=%d\n",
+				__func__, *src, ret);
+	} else {
+		MPQ_DVB_ERR_PRINT("%s: not a DVR source\n", __func__);
+	}
+
+out:
+	return ret;
+}
+
+static int mpq_sw_dmx_get_caps(struct dmx_demux *demux, struct dmx_caps *caps)
+{
+	struct dvb_demux *dvb_demux = demux->priv;
+
+	if (dvb_demux == NULL || caps == NULL) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	caps->caps = DMX_CAP_PULL_MODE | DMX_CAP_VIDEO_DECODER_DATA |
+		DMX_CAP_TS_INSERTION | DMX_CAP_VIDEO_INDEXING |
+		DMX_CAP_AUTO_BUFFER_FLUSH;
+	caps->recording_max_video_pids_indexed = 0;
+	caps->num_decoders = MPQ_ADAPTER_MAX_NUM_OF_INTERFACES;
+	caps->num_demux_devices = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+	caps->num_pid_filters = MPQ_MAX_DMX_FILES;
+	caps->num_section_filters = dvb_demux->filternum;
+	caps->num_section_filters_per_pid = dvb_demux->filternum;
+	caps->section_filter_length = DMX_FILTER_SIZE;
+	caps->num_demod_inputs = 0;
+	caps->num_memory_inputs = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+	caps->max_bitrate = 192;
+	caps->demod_input_max_bitrate = 96;
+	caps->memory_input_max_bitrate = 96;
+	caps->num_cipher_ops = 1;
+
+	/* No STC support */
+	caps->max_stc = 0;
+
+	/* Buffer requirements */
+	caps->section.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT |
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->section.max_buffer_num = 1;
+	caps->section.max_size = 0xFFFFFFFF;
+	caps->section.size_alignment = 0;
+	caps->pes.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT |
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->pes.max_buffer_num = 1;
+	caps->pes.max_size = 0xFFFFFFFF;
+	caps->pes.size_alignment = 0;
+	caps->recording_188_tsp.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT |
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->recording_188_tsp.max_buffer_num = 1;
+	caps->recording_188_tsp.max_size = 0xFFFFFFFF;
+	caps->recording_188_tsp.size_alignment = 0;
+	caps->recording_192_tsp.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT |
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->recording_192_tsp.max_buffer_num = 1;
+	caps->recording_192_tsp.max_size = 0xFFFFFFFF;
+	caps->recording_192_tsp.size_alignment = 0;
+	caps->playback_188_tsp.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT |
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->playback_188_tsp.max_buffer_num = 1;
+	caps->playback_188_tsp.max_size = 0xFFFFFFFF;
+	caps->playback_188_tsp.size_alignment = 188;
+	caps->playback_192_tsp.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT |
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->playback_192_tsp.max_buffer_num = 1;
+	caps->playback_192_tsp.max_size = 0xFFFFFFFF;
+	caps->playback_192_tsp.size_alignment = 192;
+	caps->decoder.flags =
+		DMX_BUFFER_SECURED_IF_DECRYPTED	|
+		DMX_BUFFER_EXTERNAL_SUPPORT	|
+		DMX_BUFFER_INTERNAL_SUPPORT	|
+		DMX_BUFFER_LINEAR_GROUP_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->decoder.max_buffer_num = DMX_MAX_DECODER_BUFFER_NUM;
+	caps->decoder.max_size = 0xFFFFFFFF;
+	caps->decoder.size_alignment = SZ_4K;
+
+	return 0;
+}
+
+static int mpq_sw_dmx_init(struct dvb_adapter *mpq_adapter,
+		struct mpq_demux *mpq_demux)
+{
+	int ret;
+	struct dvb_demux *dvb_demux = &mpq_demux->demux;
+
+	/* Set the kernel-demux object capabilities */
+	mpq_demux->demux.dmx.capabilities =
+		DMX_TS_FILTERING			|
+		DMX_PES_FILTERING			|
+		DMX_SECTION_FILTERING			|
+		DMX_MEMORY_BASED_FILTERING		|
+		DMX_CRC_CHECKING			|
+		DMX_TS_DESCRAMBLING;
+
+	mpq_demux->decoder_alloc_flags = ION_FLAG_CACHED;
+
+	/* Set dvb-demux "virtual" function pointers */
+	dvb_demux->priv = (void *)mpq_demux;
+	dvb_demux->filternum = MPQ_MAX_DMX_FILES;
+	dvb_demux->feednum = MPQ_MAX_DMX_FILES;
+	dvb_demux->start_feed = mpq_sw_dmx_start_filtering;
+	dvb_demux->stop_feed = mpq_sw_dmx_stop_filtering;
+	dvb_demux->write_to_decoder = mpq_sw_dmx_write_to_decoder;
+	dvb_demux->decoder_fullness_init = mpq_dmx_decoder_fullness_init;
+	dvb_demux->decoder_fullness_wait = mpq_dmx_decoder_fullness_wait;
+	dvb_demux->decoder_fullness_abort = mpq_dmx_decoder_fullness_abort;
+	dvb_demux->decoder_buffer_status = mpq_dmx_decoder_buffer_status;
+	dvb_demux->reuse_decoder_buffer = mpq_dmx_reuse_decoder_buffer;
+	dvb_demux->set_cipher_op = mpq_dmx_set_cipher_ops;
+	dvb_demux->oob_command = mpq_dmx_oob_command;
+	dvb_demux->convert_ts = mpq_dmx_convert_tts;
+	dvb_demux->flush_decoder_buffer = NULL;
+
+	/* Initialize dvb_demux object */
+	ret = dvb_dmx_init(dvb_demux);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT("%s: dvb_dmx_init failed, ret=%d\n",
+			__func__, ret);
+		goto init_failed;
+	}
+
+	/* Now initialize the dmx-dev object */
+	mpq_demux->dmxdev.filternum = MPQ_MAX_DMX_FILES;
+	mpq_demux->dmxdev.demux = &mpq_demux->demux.dmx;
+	mpq_demux->dmxdev.capabilities = DMXDEV_CAP_DUPLEX;
+
+	mpq_demux->dmxdev.demux->set_source = mpq_sw_dmx_set_source;
+	mpq_demux->dmxdev.demux->get_stc = NULL;
+	mpq_demux->dmxdev.demux->get_caps = mpq_sw_dmx_get_caps;
+	mpq_demux->dmxdev.demux->map_buffer = mpq_dmx_map_buffer;
+	mpq_demux->dmxdev.demux->unmap_buffer = mpq_dmx_unmap_buffer;
+	mpq_demux->dmxdev.demux->write = mpq_dmx_write;
+	ret = dvb_dmxdev_init(&mpq_demux->dmxdev, mpq_adapter);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT("%s: dvb_dmxdev_init failed, ret=%d\n",
+			__func__, ret);
+		goto init_failed_dmx_release;
+	}
+
+	/* Extend dvb-demux debugfs with mpq demux statistics. */
+	mpq_dmx_init_debugfs_entries(mpq_demux);
+
+	return 0;
+
+init_failed_dmx_release:
+	dvb_dmx_release(dvb_demux);
+init_failed:
+	return ret;
+}
+
+static int __init mpq_dmx_sw_plugin_init(void)
+{
+	return mpq_dmx_plugin_init(mpq_sw_dmx_init);
+}
+
+static void __exit mpq_dmx_sw_plugin_exit(void)
+{
+	mpq_dmx_plugin_exit();
+}
+
+
+module_init(mpq_dmx_sw_plugin_init);
+module_exit(mpq_dmx_sw_plugin_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies Inc. demux software plugin");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
new file mode 100644
index 0000000..da7ecce
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
@@ -0,0 +1,1984 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/qcom_tspp.h>
+#include "mpq_dvb_debug.h"
+#include "mpq_dmx_plugin_common.h"
+
+#define TSIF_COUNT			2
+
+/* Max number of PID filters */
+#define TSPP_MAX_PID_FILTER_NUM		128
+
+/* Max number of user-defined HW PID filters */
+#define TSPP_MAX_HW_PID_FILTER_NUM	15
+
+/* HW index of the last entry in the TSPP HW filter table */
+#define TSPP_LAST_HW_FILTER_INDEX	15
+
+/* Number of filters required to accept all packets except NULL packets */
+#define TSPP_BLOCK_NULLS_FILTERS_NUM	13
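+/*
+ * 13 filters are enough because the 13-bit PID space, excluding the single
+ * NULL PID (0x1FFF), can be covered by 13 binary-prefix filters; see
+ * mpq_tspp_add_null_blocking_filters() below for the exact scheme.
+ */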
+
+/* Max number of section filters */
+#define TSPP_MAX_SECTION_FILTER_NUM	128
+
+/* For each TSIF we use a single pipe holding the data after PID filtering */
+#define TSPP_CHANNEL			0
+
+/* The channel_id passed to the TSPP driver, based on TSIF number and channel type */
+#define TSPP_CHANNEL_ID(tsif, ch)		((tsif << 1) + ch)
+#define TSPP_GET_TSIF_NUM(ch_id)		(ch_id >> 1)
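+/*
+ * For illustration: with the single TSPP_CHANNEL (0) per TSIF,
+ * TSIF0 maps to channel_id 0 and TSIF1 maps to channel_id 2, and
+ * TSPP_GET_TSIF_NUM() recovers the TSIF number from the channel_id.
+ */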
+
+/* Mask set to match all bits of the PID in a filter */
+#define TSPP_PID_MASK			0x1FFF
+
+/* dvb-demux defines pid 0x2000 as full capture pid */
+#define TSPP_PASS_THROUGH_PID		0x2000
+
+/* NULL packets pid */
+#define TSPP_NULL_PACKETS_PID		0x1FFF
+
+#define TSPP_RAW_TTS_SIZE		192
+#define TSPP_RAW_SIZE			188
+
+#define MAX_BAM_DESCRIPTOR_SIZE	(32 * 1024 - 1)
+
+#define MAX_BAM_DESCRIPTOR_COUNT	(8 * 1024 - 2)
+
+#define TSPP_BUFFER_SIZE		(500 * 1024) /* 500KB */
+
+#define TSPP_DEFAULT_DESCRIPTOR_SIZE	(TSPP_RAW_TTS_SIZE)
+
+#define TSPP_BUFFER_COUNT(buffer_size)	\
+	((buffer_size) / tspp_desc_size)
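+/*
+ * Worked example with the defaults defined here: a 500KB output buffer and
+ * 192-byte descriptors give TSPP_BUFFER_COUNT = (500 * 1024) / 192 = 2666
+ * descriptors per TSIF pipe (later clamped to MAX_BAM_DESCRIPTOR_COUNT).
+ */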
+
+/* Number of descriptors after which TSPP notifies the demux that new
+ * packets were received, using the maximum descriptor size (170 packets
+ * per descriptor). Assuming a 20MBit/sec stream with 170 packets per
+ * descriptor, about 82 descriptors are filled per second, meaning about
+ * 82 notifications per second.
+ */
+#define TSPP_NOTIFICATION_SIZE(desc_size)		\
+	(MAX_BAM_DESCRIPTOR_SIZE / (desc_size))
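+/*
+ * Worked example, assuming the default 192-byte (TTS) descriptor size:
+ * TSPP_NOTIFICATION_SIZE(192) = 32767 / 192 = 170, i.e. the demux is
+ * notified once every 170 filled descriptors.
+ */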
+
+/* Channel timeout in msec */
+#define TSPP_CHANNEL_TIMEOUT			100
+
+enum mem_buffer_allocation_mode {
+	MPQ_DMX_TSPP_INTERNAL_ALLOC = 0,
+	MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC = 1
+};
+
+/* module parameters for load time configuration */
+static int allocation_mode = MPQ_DMX_TSPP_INTERNAL_ALLOC;
+static int tspp_out_buffer_size = TSPP_BUFFER_SIZE;
+static int tspp_desc_size = TSPP_DEFAULT_DESCRIPTOR_SIZE;
+static int tspp_notification_size =
+	TSPP_NOTIFICATION_SIZE(TSPP_DEFAULT_DESCRIPTOR_SIZE);
+static int tspp_channel_timeout = TSPP_CHANNEL_TIMEOUT;
+static int tspp_out_ion_heap = ION_QSECOM_HEAP_ID;
+
+module_param(allocation_mode, int, 0644);
+module_param(tspp_out_buffer_size, int, 0644);
+module_param(tspp_desc_size, int, 0644);
+module_param(tspp_notification_size, int, 0644);
+module_param(tspp_channel_timeout, int, 0644);
+module_param(tspp_out_ion_heap, int, 0644);
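+/*
+ * Illustrative usage only (the module object name below is an assumption
+ * based on this file name): the parameters can be set at load time, e.g.
+ *   insmod mpq_dmx_plugin_tspp_v1.ko allocation_mode=1 tspp_desc_size=192
+ * or changed later through /sys/module/<module>/parameters/ since they are
+ * registered with 0644 permissions.
+ */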
+
+/* The following structure holds singleton information
+ * required for the demux implementation on top of TSPP.
+ */
+static struct
+{
+	/* Information for each TSIF input processing */
+	struct {
+		/*
+		 * TSPP pipe holding all TS packets after PID filtering.
+		 * The following is reference count for number of feeds
+		 * allocated on that pipe.
+		 */
+		int channel_ref;
+
+		/* Counter for data notifications on the pipe */
+		atomic_t data_cnt;
+
+		/* flag to indicate control operation is in progress */
+		atomic_t control_op;
+
+		/* ION handle used for TSPP data buffer allocation */
+		struct ion_handle *ch_mem_heap_handle;
+
+		/* TSPP data buffer heap virtual base address */
+		void *ch_mem_heap_virt_base;
+
+		/* TSPP data buffer heap physical base address */
+		ion_phys_addr_t ch_mem_heap_phys_base;
+
+		/* Buffer allocation index */
+		int buff_index;
+
+		/* Number of buffers */
+		u32 buffer_count;
+
+		/*
+		 * Array holding the IDs of the TSPP buffer descriptors in the
+		 * current aggregate, in order to release these descriptors at
+		 * the end of processing.
+		 */
+		int *aggregate_ids;
+
+		/*
+		 * Holds PIDs of allocated filters along with
+		 * how many feeds are opened on the same PID. For
+		 * TSPP HW filters, holds also the filter table index.
+		 * When pid == -1, the entry is free.
+		 */
+		struct {
+			int pid;
+			int ref_count;
+			int hw_index;
+		} filters[TSPP_MAX_PID_FILTER_NUM];
+
+		/* Indicates available/allocated filter table indexes */
+		int hw_indexes[TSPP_MAX_HW_PID_FILTER_NUM];
+
+		/* Number of currently allocated PID filters */
+		u16 current_filter_count;
+
+		/*
+		 * Flag to indicate whether the user added a filter to accept
+		 * NULL packets (PID = 0x1FFF)
+		 */
+		int pass_nulls_flag;
+
+		/*
+		 * Flag to indicate whether the user added a filter to accept
+		 * all packets (PID = 0x2000)
+		 */
+		int pass_all_flag;
+
+		/*
+		 * Flag to indicate whether the filter that accepts
+		 * all packets has already been added and is
+		 * currently enabled
+		 */
+		int accept_all_filter_exists_flag;
+
+		/* Thread processing TS packets from TSPP */
+		struct task_struct *thread;
+		wait_queue_head_t wait_queue;
+
+		/* TSIF alias */
+		char name[TSIF_NAME_LENGTH];
+
+		/* Pointer to the demux connected to this TSIF */
+		struct mpq_demux *mpq_demux;
+
+		/* Mutex protecting the data-structure */
+		struct mutex mutex;
+	} tsif[TSIF_COUNT];
+
+	/* ION client used for TSPP data buffer allocation */
+	struct ion_client *ion_client;
+} mpq_dmx_tspp_info;
+
+static void *tspp_mem_allocator(int channel_id, u32 size,
+				phys_addr_t *phys_base, void *user)
+{
+	void *virt_addr = NULL;
+	int i = TSPP_GET_TSIF_NUM(channel_id);
+
+	if (mpq_dmx_tspp_info.tsif[i].buff_index ==
+		mpq_dmx_tspp_info.tsif[i].buffer_count)
+		return NULL;
+
+	virt_addr =
+		(mpq_dmx_tspp_info.tsif[i].ch_mem_heap_virt_base +
+		(mpq_dmx_tspp_info.tsif[i].buff_index * size));
+
+	*phys_base =
+		(mpq_dmx_tspp_info.tsif[i].ch_mem_heap_phys_base +
+		(mpq_dmx_tspp_info.tsif[i].buff_index * size));
+
+	mpq_dmx_tspp_info.tsif[i].buff_index++;
+
+	return virt_addr;
+}
+
+static void tspp_mem_free(int channel_id, u32 size,
+			void *virt_base, phys_addr_t phys_base, void *user)
+{
+	int i = TSPP_GET_TSIF_NUM(channel_id);
+
+	/*
+	 * The actual buffer heap free is done in mpq_dmx_tspp_plugin_exit().
+	 * We only update the index here, so if this function is called
+	 * repeatedly for all the buffers, tspp_mem_allocator() can be
+	 * called again afterwards.
+	 * Note: it would be incorrect to call tspp_mem_allocator()
+	 * a few times, then call tspp_mem_free(), then call
+	 * tspp_mem_allocator() again.
+	 */
+	if (mpq_dmx_tspp_info.tsif[i].buff_index > 0)
+		mpq_dmx_tspp_info.tsif[i].buff_index--;
+}
+
+/**
+ * Returns a free HW filter index that can be used.
+ *
+ * @tsif: The TSIF to allocate filter from
+ *
+ * Return  HW filter index or -ENOMEM if no filters available
+ */
+static int mpq_tspp_allocate_hw_filter_index(int tsif)
+{
+	int i;
+
+	for (i = 0; i < TSPP_MAX_HW_PID_FILTER_NUM; i++) {
+		if (mpq_dmx_tspp_info.tsif[tsif].hw_indexes[i] == 0) {
+			mpq_dmx_tspp_info.tsif[tsif].hw_indexes[i] = 1;
+			return i;
+		}
+	}
+
+	return -ENOMEM;
+}
+
+/**
+ * Releases a HW filter index for future reuse.
+ *
+ * @tsif: The TSIF from which the filter should be released
+ * @hw_index: The HW index to release
+ *
+ */
+static inline void mpq_tspp_release_hw_filter_index(int tsif, int hw_index)
+{
+	if ((hw_index >= 0) && (hw_index < TSPP_MAX_HW_PID_FILTER_NUM))
+		mpq_dmx_tspp_info.tsif[tsif].hw_indexes[hw_index] = 0;
+}
+
+
+/**
+ * Returns a free filter slot that can be used.
+ *
+ * @tsif: The TSIF to allocate filter from
+ *
+ * Return  filter index or -ENOMEM if no filters available
+ */
+static int mpq_tspp_get_free_filter_slot(int tsif)
+{
+	int slot;
+
+	for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++)
+		if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == -1)
+			return slot;
+
+	return -ENOMEM;
+}
+
+/**
+ * Returns filter index of specific pid.
+ *
+ * @tsif: The TSIF to which the pid is allocated
+ * @pid: The pid to search for
+ *
+ * Return  filter index or -EINVAL if the PID has no allocated filter
+ */
+static int mpq_tspp_get_filter_slot(int tsif, int pid)
+{
+	int slot;
+
+	for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++)
+		if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == pid)
+			return slot;
+
+	return -EINVAL;
+}
+
+/**
+ * mpq_dmx_tspp_swfilter_desc - helper function
+ *
+ * Takes a tspp buffer descriptor and sends it to the SW filter for demuxing,
+ * one TS packet at a time.
+ *
+ * @mpq_demux - mpq demux object
+ * @tspp_data_desc - tspp buffer descriptor
+ */
+static inline void mpq_dmx_tspp_swfilter_desc(struct mpq_demux *mpq_demux,
+	const struct tspp_data_descriptor *tspp_data_desc)
+{
+	u32 notif_size;
+	int i;
+
+	notif_size = tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
+	for (i = 0; i < notif_size; i++)
+		dvb_dmx_swfilter_packet(&mpq_demux->demux,
+			((u8 *)tspp_data_desc->virt_base) +
+			i * TSPP_RAW_TTS_SIZE,
+			((u8 *)tspp_data_desc->virt_base) +
+			i * TSPP_RAW_TTS_SIZE + TSPP_RAW_SIZE);
+}
+
+/**
+ * Demux TS packets from TSPP using the secure demux (SDMX).
+ * The function assumes the buffer is physically contiguous
+ * and that the TSPP descriptors are contiguous in memory.
+ *
+ * @tsif: The TSIF interface to process its packets
+ * @channel_id: the TSPP output pipe with the TS packets
+ */
+static void mpq_dmx_tspp_aggregated_process(int tsif, int channel_id)
+{
+	const struct tspp_data_descriptor *tspp_data_desc;
+	struct mpq_demux *mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
+	struct sdmx_buff_descr input;
+	size_t aggregate_len = 0;
+	size_t aggregate_count = 0;
+	phys_addr_t buff_start_addr_phys;
+	phys_addr_t buff_current_addr_phys = 0;
+	u32 notif_size;
+	int i;
+
+	while ((tspp_data_desc = tspp_get_buffer(0, channel_id)) != NULL) {
+		if (aggregate_count == 0)
+			buff_current_addr_phys = tspp_data_desc->phys_base;
+		notif_size = tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
+		mpq_dmx_tspp_info.tsif[tsif].aggregate_ids[aggregate_count] =
+			tspp_data_desc->id;
+		aggregate_len += tspp_data_desc->size;
+		aggregate_count++;
+		mpq_demux->hw_notification_size += notif_size;
+
+		/* Let SW filter process only if it might be relevant */
+		if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds)
+			mpq_dmx_tspp_swfilter_desc(mpq_demux, tspp_data_desc);
+
+	}
+
+	if (!aggregate_count)
+		return;
+
+	buff_start_addr_phys =
+		mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base;
+
+	input.base_addr = (u64)buff_start_addr_phys;
+	input.size = mpq_dmx_tspp_info.tsif[tsif].buffer_count * tspp_desc_size;
+
+	if (mpq_sdmx_is_loaded() && mpq_demux->sdmx_filter_count) {
+		MPQ_DVB_DBG_PRINT(
+			"%s: SDMX Processing %zu descriptors: %zu bytes at start address 0x%llx, read offset %d\n",
+			__func__, aggregate_count, aggregate_len,
+			input.base_addr,
+			(int)(buff_current_addr_phys - buff_start_addr_phys));
+
+		mpq_sdmx_process(mpq_demux, &input, aggregate_len,
+			buff_current_addr_phys - buff_start_addr_phys,
+			TSPP_RAW_TTS_SIZE);
+	}
+
+	for (i = 0; i < aggregate_count; i++)
+		tspp_release_buffer(0, channel_id,
+			mpq_dmx_tspp_info.tsif[tsif].aggregate_ids[i]);
+}
+
+
+/**
+ * Demux thread function handling data from specific TSIF.
+ *
+ * @arg: TSIF number
+ */
+static int mpq_dmx_tspp_thread(void *arg)
+{
+	int tsif = (int)(uintptr_t)arg;
+	struct mpq_demux *mpq_demux;
+	const struct tspp_data_descriptor *tspp_data_desc;
+	atomic_t *data_cnt;
+	u32 notif_size;
+	int channel_id;
+	int ref_count;
+	int ret;
+
+	do {
+		ret = wait_event_interruptible(
+			mpq_dmx_tspp_info.tsif[tsif].wait_queue,
+			(atomic_read(&mpq_dmx_tspp_info.tsif[tsif].data_cnt) &&
+			!atomic_read(&mpq_dmx_tspp_info.tsif[tsif].control_op))
+			|| kthread_should_stop());
+
+		if ((ret < 0) || kthread_should_stop()) {
+			MPQ_DVB_ERR_PRINT("%s: exit\n", __func__);
+			break;
+		}
+
+		/* Lock against the TSPP filters data-structure */
+		if (mutex_lock_interruptible(
+			&mpq_dmx_tspp_info.tsif[tsif].mutex))
+			return -ERESTARTSYS;
+
+		channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
+
+		ref_count = mpq_dmx_tspp_info.tsif[tsif].channel_ref;
+		data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt;
+
+		/* Make sure channel is still active */
+		if (ref_count == 0) {
+			mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+			continue;
+		}
+
+		atomic_dec(data_cnt);
+
+		mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
+		mpq_demux->hw_notification_size = 0;
+
+		if (allocation_mode != MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC &&
+			mpq_sdmx_is_loaded())
+			pr_err_once(
+				"%s: TSPP Allocation mode does not support secure demux.\n",
+				__func__);
+
+		if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC &&
+			mpq_sdmx_is_loaded()) {
+			mpq_dmx_tspp_aggregated_process(tsif, channel_id);
+		} else {
+			/*
+			 * Go through all filled descriptors
+			 * and perform demuxing on them
+			 */
+			do {
+				if (atomic_read(&mpq_dmx_tspp_info.tsif[tsif].
+						control_op)) {
+					/* restore for next iteration */
+					atomic_inc(data_cnt);
+					break;
+				}
+				tspp_data_desc = tspp_get_buffer(0, channel_id);
+				if (!tspp_data_desc)
+					break;
+
+				notif_size = tspp_data_desc->size /
+					TSPP_RAW_TTS_SIZE;
+				mpq_demux->hw_notification_size += notif_size;
+
+				mpq_dmx_tspp_swfilter_desc(mpq_demux,
+					tspp_data_desc);
+				/*
+				 * Notify TSPP that the buffer
+				 * is no longer needed
+				 */
+				tspp_release_buffer(0, channel_id,
+					tspp_data_desc->id);
+			} while (1);
+		}
+
+		if (mpq_demux->hw_notification_size &&
+			(mpq_demux->hw_notification_size <
+			mpq_demux->hw_notification_min_size))
+			mpq_demux->hw_notification_min_size =
+				mpq_demux->hw_notification_size;
+
+		mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+	} while (1);
+
+	return 0;
+}
+
+/**
+ * Callback function from TSPP when new data is ready.
+ *
+ * @channel_id: Channel with new TS packets
+ * @user: user-data holding TSIF number
+ */
+static void mpq_tspp_callback(int channel_id, void *user)
+{
+	int tsif = (int)(uintptr_t)user;
+	struct mpq_demux *mpq_demux;
+
+	/* Save statistics on TSPP notifications */
+	mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
+	mpq_dmx_update_hw_statistics(mpq_demux);
+
+	atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].data_cnt);
+	wake_up(&mpq_dmx_tspp_info.tsif[tsif].wait_queue);
+}
+
+/**
+ * Free memory of channel output of specific TSIF.
+ *
+ * @tsif: The TSIF id to which memory should be freed.
+ */
+static void mpq_dmx_channel_mem_free(int tsif)
+{
+	MPQ_DVB_DBG_PRINT("%s(%d)\n", __func__, tsif);
+
+	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base = 0;
+
+	if (!IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle)) {
+		if (!IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].
+				ch_mem_heap_virt_base))
+			ion_unmap_kernel(mpq_dmx_tspp_info.ion_client,
+				mpq_dmx_tspp_info.tsif[tsif].
+					ch_mem_heap_handle);
+
+		ion_free(mpq_dmx_tspp_info.ion_client,
+			mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle);
+	}
+
+	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base = NULL;
+	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle = NULL;
+}
+
+/**
+ * Allocate memory for channel output of specific TSIF.
+ *
+ * @tsif: The TSIF id to which memory should be allocated.
+ *
+ * Return  error status
+ */
+static int mpq_dmx_channel_mem_alloc(int tsif)
+{
+	int result;
+	size_t len;
+
+	MPQ_DVB_DBG_PRINT("%s(%d)\n", __func__, tsif);
+
+	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle =
+		ion_alloc(mpq_dmx_tspp_info.ion_client,
+		 (mpq_dmx_tspp_info.tsif[tsif].buffer_count * tspp_desc_size),
+		 SZ_4K,
+		 ION_HEAP(tspp_out_ion_heap),
+		 0); /* non-cached */
+
+	if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle)) {
+		MPQ_DVB_ERR_PRINT("%s: ion_alloc() failed\n", __func__);
+		mpq_dmx_channel_mem_free(tsif);
+		return -ENOMEM;
+	}
+
+	/* save virtual base address of heap */
+	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base =
+		ion_map_kernel(mpq_dmx_tspp_info.ion_client,
+			mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle);
+	if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].
+				ch_mem_heap_virt_base)) {
+		MPQ_DVB_ERR_PRINT("%s: ion_map_kernel() failed\n", __func__);
+		mpq_dmx_channel_mem_free(tsif);
+		return -ENOMEM;
+	}
+
+	/* save physical base address of heap */
+	result = ion_phys(mpq_dmx_tspp_info.ion_client,
+		mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle,
+		&(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base), &len);
+	if (result < 0) {
+		MPQ_DVB_ERR_PRINT("%s: ion_phys() failed\n", __func__);
+		mpq_dmx_channel_mem_free(tsif);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * Add a filter to accept all packets as the last entry
+ * of the TSPP HW filter table.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return  error status
+ */
+static int mpq_tspp_add_accept_all_filter(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int ret;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+		__func__, channel_id, source);
+
+	if (mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag) {
+		MPQ_DVB_DBG_PRINT("%s: accept all filter already exists\n",
+				__func__);
+		return 0;
+	}
+
+	/* This filter will be the last entry in the table */
+	tspp_filter.priority = TSPP_LAST_HW_FILTER_INDEX;
+	/* Pass all pids - set mask to 0 */
+	tspp_filter.pid = 0;
+	tspp_filter.mask = 0;
+	/*
+	 * Include TTS in RAW packets; if you change this to
+	 * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+	 * accordingly.
+	 */
+	tspp_filter.mode = TSPP_MODE_RAW;
+	tspp_filter.source = source;
+	tspp_filter.decrypt = 0;
+
+	ret = tspp_add_filter(0, channel_id, &tspp_filter);
+	if (!ret) {
+		mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag = 1;
+		MPQ_DVB_DBG_PRINT(
+				"%s: accept all filter added successfully\n",
+				__func__);
+	}
+
+	return ret;
+}
+
+/**
+ * Remove the filter that accepts all packets from the last entry
+ * of the TSPP HW filter table.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return  error status
+ */
+static int mpq_tspp_remove_accept_all_filter(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int ret;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+		__func__, channel_id, source);
+
+	if (mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag == 0) {
+		MPQ_DVB_DBG_PRINT("%s: accept all filter doesn't exist\n",
+				__func__);
+		return 0;
+	}
+
+	tspp_filter.priority = TSPP_LAST_HW_FILTER_INDEX;
+
+	ret = tspp_remove_filter(0, channel_id, &tspp_filter);
+	if (!ret) {
+		mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag = 0;
+		MPQ_DVB_DBG_PRINT(
+			"%s: accept all filter removed successfully\n",
+			__func__);
+	}
+
+	return ret;
+}
+
+/**
+ * Add filters designed to accept all packets except NULL packets, i.e.
+ * packets with PID = 0x1FFF.
+ * This function is called after user-defined filters were removed,
+ * so it assumes that the first 13 HW filters in the TSPP filter
+ * table are free for use.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return  0 on success, -1 otherwise
+ */
+static int mpq_tspp_add_null_blocking_filters(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int ret = 0;
+	int i, j;
+	u16 full_pid_mask = 0x1FFF;
+	u8 mask_shift;
+	u8 pid_shift;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+
+	MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+		__func__, channel_id, source);
+
+	/*
+	 * Add a total of 13 filters that will accept packets with
+	 * every PID other than 0x1FFF, which is the NULL PID.
+	 *
+	 * Filter 0: accept all PIDs with bit 12 clear, i.e.
+	 * PID = 0x0000 .. 0x0FFF (4096 PIDs in total):
+	 * Mask = 0x1000, PID = 0x0000.
+	 *
+	 * Filter 12: Accept PID 0x1FFE:
+	 * Mask = 0x1FFF, PID = 0x1FFE.
+	 *
+	 * In general, for N = 0 .. 12:
+	 * Filter <N>: accept all PIDs whose <N> MSBits are set and whose
+	 * next bit is clear.
+	 * Filter <N> Mask = <N+1> MSBits set, others clear.
+	 * Filter <N> PID = <N> MSBits set, others clear.
+	 */
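+	/*
+	 * Worked example of the mask/pid computation in the loop below:
+	 * for i = 1, mask_shift = 11 and pid_shift = 12, giving
+	 * mask = 0x1800 and pid = 0x1000, i.e. PIDs 0x1000..0x17FF
+	 * (bit 12 set, bit 11 clear) are accepted by filter 1.
+	 */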
+
+	/*
+	 * Include TTS in RAW packets; if you change this to
+	 * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+	 * accordingly.
+	 */
+	tspp_filter.mode = TSPP_MODE_RAW;
+	tspp_filter.source = source;
+	tspp_filter.decrypt = 0;
+
+	for (i = 0; i < TSPP_BLOCK_NULLS_FILTERS_NUM; i++) {
+		tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+		if (tspp_filter.priority != i) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: got unexpected HW index %d, expected %d\n",
+				__func__, tspp_filter.priority, i);
+			ret = -1;
+			break;
+		}
+		mask_shift = (TSPP_BLOCK_NULLS_FILTERS_NUM - 1 - i);
+		pid_shift = (TSPP_BLOCK_NULLS_FILTERS_NUM - i);
+		tspp_filter.mask =
+			((full_pid_mask >> mask_shift) << mask_shift);
+		tspp_filter.pid = ((full_pid_mask >> pid_shift) << pid_shift);
+
+		if (tspp_add_filter(0, channel_id, &tspp_filter)) {
+			ret = -1;
+			break;
+		}
+	}
+
+	if (ret) {
+		/* cleanup on failure */
+		for (j = 0; j < i; j++) {
+			tspp_filter.priority = j;
+			mpq_tspp_release_hw_filter_index(tsif, j);
+			tspp_remove_filter(0, channel_id, &tspp_filter);
+		}
+	} else {
+		MPQ_DVB_DBG_PRINT(
+			"%s: NULL blocking filters added successfully\n",
+			__func__);
+	}
+
+	return ret;
+}
+
+/**
+ * Remove filters designed to accept all packets except NULL packets, i.e.
+ * packets with PID = 0x1FFF.
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return  0 on success, -1 otherwise
+ */
+static int mpq_tspp_remove_null_blocking_filters(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int ret = 0;
+	int i;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+		__func__, channel_id, source);
+
+	for (i = 0; i < TSPP_BLOCK_NULLS_FILTERS_NUM; i++) {
+		tspp_filter.priority = i;
+		if (tspp_remove_filter(0, channel_id, &tspp_filter)) {
+			MPQ_DVB_ERR_PRINT("%s: failed to remove filter %d\n",
+				__func__, i);
+			ret = -1;
+		}
+
+		mpq_tspp_release_hw_filter_index(tsif, i);
+	}
+
+	return ret;
+}
+
+/**
+ * Add all current user-defined filters (up to 15) as HW filters
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return  0 on success, a negative error code otherwise
+ */
+static int mpq_tspp_add_all_user_filters(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int slot;
+	u16 added_count = 0;
+	u16 total_filters_count = 0;
+
+	MPQ_DVB_DBG_PRINT("%s: executed\n", __func__);
+
+	/*
+	 * Include TTS in RAW packets; if you change this to
+	 * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+	 * accordingly.
+	 */
+	tspp_filter.mode = TSPP_MODE_RAW;
+	tspp_filter.source = source;
+	tspp_filter.decrypt = 0;
+
+	for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++) {
+		if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == -1)
+			continue;
+
+		/*
+		 * count total number of user filters to verify that it is
+		 * exactly TSPP_MAX_HW_PID_FILTER_NUM as expected.
+		 */
+		total_filters_count++;
+
+		if (added_count > TSPP_MAX_HW_PID_FILTER_NUM)
+			continue;
+
+		tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+
+		if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid ==
+				TSPP_PASS_THROUGH_PID) {
+			/* pass all pids */
+			tspp_filter.pid = 0;
+			tspp_filter.mask = 0;
+		} else {
+			tspp_filter.pid =
+				mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid;
+			tspp_filter.mask = TSPP_PID_MASK;
+		}
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: adding HW filter, PID = %d, mask = 0x%X, index = %d\n",
+				__func__, tspp_filter.pid, tspp_filter.mask,
+				tspp_filter.priority);
+
+		if (!tspp_add_filter(0, channel_id, &tspp_filter)) {
+			mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index =
+				tspp_filter.priority;
+			added_count++;
+		} else {
+			MPQ_DVB_ERR_PRINT("%s: tspp_add_filter failed\n",
+						__func__);
+		}
+	}
+
+	if ((added_count != TSPP_MAX_HW_PID_FILTER_NUM) ||
+		(added_count != total_filters_count))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * Remove all user-defined HW filters
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return  0 on success, -1 otherwise
+ */
+static int mpq_tspp_remove_all_user_filters(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int ret = 0;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int i;
+
+	MPQ_DVB_DBG_PRINT("%s: executed\n", __func__);
+
+	for (i = 0; i < TSPP_MAX_HW_PID_FILTER_NUM; i++) {
+		tspp_filter.priority = i;
+		MPQ_DVB_DBG_PRINT("%s: Removing HW filter %d\n",
+			__func__, tspp_filter.priority);
+		if (tspp_remove_filter(0, channel_id, &tspp_filter))
+			ret = -1;
+
+		mpq_tspp_release_hw_filter_index(tsif, i);
+		mpq_dmx_tspp_info.tsif[tsif].filters[i].hw_index = -1;
+	}
+
+	return ret;
+}
+
+/**
+ * Configure TSPP channel to filter the PID of new feed.
+ *
+ * @feed: The feed to configure the channel with
+ *
+ * Return  error status
+ *
+ * The function checks whether the new PID can be added to an already
+ * allocated channel; if not, a new channel is allocated and configured.
+ */
+static int mpq_tspp_dmx_add_channel(struct dvb_demux_feed *feed)
+{
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+	struct tspp_select_source tspp_source;
+	struct tspp_filter tspp_filter;
+	int tsif;
+	int tsif_mode = mpq_dmx_get_param_tsif_mode();
+	int ret = 0;
+	int slot;
+	int channel_id;
+	int *channel_ref_count;
+	u32 buffer_size;
+	int restore_user_filters = 0;
+	int remove_accept_all_filter = 0;
+	int remove_null_blocking_filters = 0;
+	size_t agg_size;
+
+	tspp_source.clk_inverse = mpq_dmx_get_param_clock_inv();
+	tspp_source.data_inverse = 0;
+	tspp_source.sync_inverse = 0;
+	tspp_source.enable_inverse = 0;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, PID = %d\n", __func__, feed->pid);
+
+	switch (tsif_mode) {
+	case 1:
+		tspp_source.mode = TSPP_TSIF_MODE_1;
+		break;
+	case 2:
+		tspp_source.mode = TSPP_TSIF_MODE_2;
+		break;
+	default:
+		tspp_source.mode = TSPP_TSIF_MODE_LOOPBACK;
+		break;
+	}
+
+	/* determine the TSIF we are reading from */
+	if (mpq_demux->source == DMX_SOURCE_FRONT0) {
+		tsif = 0;
+		tspp_source.source = TSPP_SOURCE_TSIF0;
+	} else if (mpq_demux->source == DMX_SOURCE_FRONT1) {
+		tsif = 1;
+		tspp_source.source = TSPP_SOURCE_TSIF1;
+	} else {
+		/* invalid source */
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid input source (%d)\n",
+			__func__,
+			mpq_demux->source);
+
+		return -EINVAL;
+	}
+
+	atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+	if (mutex_lock_interruptible(&mpq_dmx_tspp_info.tsif[tsif].mutex)) {
+		atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+		return -ERESTARTSYS;
+	}
+
+	/*
+	 * It is possible that this PID was already requested before.
+	 * This can happen if we play and record the same PES, or when a
+	 * PCR is piggybacked on a video packet.
+	 */
+	slot = mpq_tspp_get_filter_slot(tsif, feed->pid);
+	if (slot >= 0) {
+		/* PID already configured */
+		mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+		goto out;
+	}
+
+
+	channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
+	channel_ref_count = &mpq_dmx_tspp_info.tsif[tsif].channel_ref;
+
+	/*
+	 * Recalculate 'tspp_notification_size' and buffer count in case
+	 * 'tspp_desc_size' or 'tspp_out_buffer_size' parameters have changed.
+	 */
+	buffer_size = tspp_desc_size;
+	tspp_notification_size = TSPP_NOTIFICATION_SIZE(tspp_desc_size);
+	mpq_dmx_tspp_info.tsif[tsif].buffer_count =
+			TSPP_BUFFER_COUNT(tspp_out_buffer_size);
+	if (mpq_dmx_tspp_info.tsif[tsif].buffer_count >
+			MAX_BAM_DESCRIPTOR_COUNT)
+		mpq_dmx_tspp_info.tsif[tsif].buffer_count =
+			MAX_BAM_DESCRIPTOR_COUNT;
+
+	/* check if required TSPP pipe is already allocated or not */
+	if (*channel_ref_count == 0) {
+		if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+			agg_size = mpq_dmx_tspp_info.tsif[tsif].buffer_count *
+				sizeof(int);
+			mpq_dmx_tspp_info.tsif[tsif].aggregate_ids =
+					vzalloc(agg_size);
+			if (!mpq_dmx_tspp_info.tsif[tsif].aggregate_ids) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: Failed to allocate memory for buffer descriptors aggregation\n",
+					__func__);
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			ret = mpq_dmx_channel_mem_alloc(tsif);
+			if (ret < 0) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: mpq_dmx_channel_mem_alloc(%d) failed (%d)\n",
+					__func__,
+					channel_id,
+					ret);
+
+				goto add_channel_failed;
+			}
+		}
+
+		ret = tspp_open_channel(0, channel_id);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: tspp_open_channel(%d) failed (%d)\n",
+				__func__,
+				channel_id,
+				ret);
+
+			goto add_channel_failed;
+		}
+
+		/* set TSPP source */
+		ret = tspp_open_stream(0, channel_id, &tspp_source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: tspp_select_source(%d,%d) failed (%d)\n",
+				__func__,
+				channel_id,
+				tspp_source.source,
+				ret);
+
+			goto add_channel_close_ch;
+		}
+
+		/* register notification on TS packets */
+		tspp_register_notification(0,
+					   channel_id,
+					   mpq_tspp_callback,
+					   (void *)(uintptr_t)tsif,
+					   tspp_channel_timeout);
+
+		/*
+		 * Register an allocation function that allocates from
+		 * contiguous memory, so that we can use a big notification
+		 * size with the smallest descriptor size and still provide
+		 * TZ with a single large buffer based on the notification size.
+		 */
+		if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+			ret = tspp_allocate_buffers(0, channel_id,
+				   mpq_dmx_tspp_info.tsif[tsif].buffer_count,
+				   buffer_size, tspp_notification_size,
+				   tspp_mem_allocator, tspp_mem_free, NULL);
+		} else {
+			ret = tspp_allocate_buffers(0, channel_id,
+				   mpq_dmx_tspp_info.tsif[tsif].buffer_count,
+				   buffer_size, tspp_notification_size,
+				   NULL, NULL, NULL);
+		}
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: tspp_allocate_buffers(%d) failed (%d)\n",
+				__func__,
+				channel_id,
+				ret);
+
+			goto add_channel_unregister_notif;
+		}
+
+		mpq_dmx_tspp_info.tsif[tsif].mpq_demux = mpq_demux;
+	}
+
+	/* add new PID to the existing pipe */
+	slot = mpq_tspp_get_free_filter_slot(tsif);
+	if (slot < 0) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_tspp_get_free_filter_slot(%d) failed\n",
+			__func__, tsif);
+
+		goto add_channel_unregister_notif;
+	}
+
+	if (feed->pid == TSPP_PASS_THROUGH_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 1;
+	else if (feed->pid == TSPP_NULL_PACKETS_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 1;
+
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = feed->pid;
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+
+	tspp_filter.priority = -1;
+
+	if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count <
+					TSPP_MAX_HW_PID_FILTER_NUM) {
+		/* HW filtering mode */
+		tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+		if (tspp_filter.priority < 0)
+			goto add_channel_free_filter_slot;
+
+		if (feed->pid == TSPP_PASS_THROUGH_PID) {
+			/* pass all pids */
+			tspp_filter.pid = 0;
+			tspp_filter.mask = 0;
+		} else {
+			tspp_filter.pid = feed->pid;
+			tspp_filter.mask = TSPP_PID_MASK;
+		}
+
+		/*
+		 * Include TTS in RAW packets; if you change this to
+		 * TSPP_MODE_RAW_NO_SUFFIX you must also change
+		 * TSPP_RAW_TTS_SIZE accordingly.
+		 */
+		tspp_filter.mode = TSPP_MODE_RAW;
+		tspp_filter.source = tspp_source.source;
+		tspp_filter.decrypt = 0;
+		ret = tspp_add_filter(0, channel_id, &tspp_filter);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: tspp_add_filter(%d) failed (%d)\n",
+				__func__,
+				channel_id,
+				ret);
+
+			goto add_channel_free_filter_slot;
+		}
+		mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index =
+			tspp_filter.priority;
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: HW filtering mode: added TSPP HW filter, PID = %d, mask = 0x%X, index = %d\n",
+			__func__, tspp_filter.pid, tspp_filter.mask,
+			tspp_filter.priority);
+	} else if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count ==
+					TSPP_MAX_HW_PID_FILTER_NUM) {
+		/* Crossing the threshold - from HW to SW filtering mode */
+
+		/* Add a temporary filter to accept all packets */
+		ret = mpq_tspp_add_accept_all_filter(channel_id,
+					tspp_source.source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+				__func__, channel_id, tspp_source.source);
+
+			goto add_channel_free_filter_slot;
+		}
+
+		/* Remove all existing user filters */
+		ret = mpq_tspp_remove_all_user_filters(channel_id,
+					tspp_source.source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_remove_all_user_filters(%d, %d) failed\n",
+				__func__, channel_id, tspp_source.source);
+
+			restore_user_filters = 1;
+			remove_accept_all_filter = 1;
+
+			goto add_channel_free_filter_slot;
+		}
+
+		/* Add HW filters to block NULL packets */
+		ret = mpq_tspp_add_null_blocking_filters(channel_id,
+					tspp_source.source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_add_null_blocking_filters(%d, %d) failed\n",
+				__func__, channel_id, tspp_source.source);
+
+			restore_user_filters = 1;
+			remove_accept_all_filter = 1;
+
+			goto add_channel_free_filter_slot;
+		}
+
+		/* Remove the filter that accepts all packets, if necessary */
+		if ((mpq_dmx_tspp_info.tsif[tsif].pass_all_flag == 0) &&
+			(mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag == 0)) {
+
+			ret = mpq_tspp_remove_accept_all_filter(channel_id,
+						tspp_source.source);
+			if (ret < 0) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+					__func__, channel_id,
+					tspp_source.source);
+
+				remove_null_blocking_filters = 1;
+				restore_user_filters = 1;
+				remove_accept_all_filter = 1;
+
+				goto add_channel_free_filter_slot;
+			}
+		}
+	} else {
+		/* Already working in SW filtering mode */
+		if (mpq_dmx_tspp_info.tsif[tsif].pass_all_flag ||
+			mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag) {
+
+			ret = mpq_tspp_add_accept_all_filter(channel_id,
+						tspp_source.source);
+			if (ret < 0) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+					__func__, channel_id,
+					tspp_source.source);
+
+				goto add_channel_free_filter_slot;
+			}
+		}
+	}
+
+	(*channel_ref_count)++;
+	mpq_dmx_tspp_info.tsif[tsif].current_filter_count++;
+
+	MPQ_DVB_DBG_PRINT("%s: success, current_filter_count = %d\n",
+		__func__, mpq_dmx_tspp_info.tsif[tsif].current_filter_count);
+
+	goto out;
+
+add_channel_free_filter_slot:
+	/* restore internal database state */
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = -1;
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count--;
+
+	/* release HW index if we allocated one */
+	if (tspp_filter.priority >= 0) {
+		mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = -1;
+		mpq_tspp_release_hw_filter_index(tsif, tspp_filter.priority);
+	}
+
+	/* restore HW filter table state if necessary */
+	if (remove_null_blocking_filters)
+		mpq_tspp_remove_null_blocking_filters(channel_id,
+						tspp_source.source);
+
+	if (restore_user_filters)
+		mpq_tspp_add_all_user_filters(channel_id, tspp_source.source);
+
+	if (remove_accept_all_filter)
+		mpq_tspp_remove_accept_all_filter(channel_id,
+						tspp_source.source);
+
+	/* restore flags. we can only get here if we changed the flags. */
+	if (feed->pid == TSPP_PASS_THROUGH_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 0;
+	else if (feed->pid == TSPP_NULL_PACKETS_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 0;
+
+add_channel_unregister_notif:
+	if (*channel_ref_count == 0) {
+		tspp_unregister_notification(0, channel_id);
+		tspp_close_stream(0, channel_id);
+	}
+add_channel_close_ch:
+	if (*channel_ref_count == 0)
+		tspp_close_channel(0, channel_id);
+add_channel_failed:
+	if (*channel_ref_count == 0)
+		if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+			vfree(mpq_dmx_tspp_info.tsif[tsif].aggregate_ids);
+			mpq_dmx_tspp_info.tsif[tsif].aggregate_ids = NULL;
+			mpq_dmx_channel_mem_free(tsif);
+		}
+
+out:
+	mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+	atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+	return ret;
+}
+
+/**
+ * Removes filter from TSPP.
+ *
+ * @feed: The feed to remove
+ *
+ * Return  error status
+ *
+ * The function checks whether this is the only PID allocated within
+ * the channel; if so, the channel is closed as well.
+ */
+static int mpq_tspp_dmx_remove_channel(struct dvb_demux_feed *feed)
+{
+	int tsif;
+	int ret = 0;
+	int channel_id;
+	int slot;
+	atomic_t *data_cnt;
+	int *channel_ref_count;
+	enum tspp_source tspp_source;
+	struct tspp_filter tspp_filter;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+	int restore_null_blocking_filters = 0;
+	int remove_accept_all_filter = 0;
+	int remove_user_filters = 0;
+	int accept_all_filter_existed = 0;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, PID = %d\n", __func__, feed->pid);
+
+	/* determine the TSIF we are reading from */
+	if (mpq_demux->source == DMX_SOURCE_FRONT0) {
+		tsif = 0;
+		tspp_source = TSPP_SOURCE_TSIF0;
+	} else if (mpq_demux->source == DMX_SOURCE_FRONT1) {
+		tsif = 1;
+		tspp_source = TSPP_SOURCE_TSIF1;
+	} else {
+		/* invalid source */
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid input source (%d)\n",
+			__func__,
+			mpq_demux->source);
+
+		return -EINVAL;
+	}
+
+	atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+	if (mutex_lock_interruptible(&mpq_dmx_tspp_info.tsif[tsif].mutex)) {
+		atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+		return -ERESTARTSYS;
+	}
+
+	channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
+	channel_ref_count = &mpq_dmx_tspp_info.tsif[tsif].channel_ref;
+	data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt;
+
+	/* check if required TSPP pipe is already allocated or not */
+	if (*channel_ref_count == 0) {
+		/* invalid feed provided as the channel is not allocated */
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid feed (%d)\n",
+			__func__,
+			channel_id);
+
+		ret = -EINVAL;
+		goto out;
+	}
+
+	slot = mpq_tspp_get_filter_slot(tsif, feed->pid);
+
+	if (slot < 0) {
+		/* invalid feed provided as it has no filter allocated */
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_tspp_get_filter_slot failed (%d,%d)\n",
+			__func__,
+			feed->pid,
+			tsif);
+
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* since filter was found, ref_count > 0 so it's ok to decrement it */
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count--;
+
+	if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count) {
+		/*
+		 * there are still references to this pid, do not
+		 * remove the filter yet
+		 */
+		goto out;
+	}
+
+	if (feed->pid == TSPP_PASS_THROUGH_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 0;
+	else if (feed->pid == TSPP_NULL_PACKETS_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 0;
+
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = -1;
+
+	if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count <=
+					TSPP_MAX_HW_PID_FILTER_NUM) {
+		/* staying in HW filtering mode */
+		tspp_filter.priority =
+			mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index;
+		ret = tspp_remove_filter(0, channel_id, &tspp_filter);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: tspp_remove_filter failed (%d,%d)\n",
+				__func__,
+				channel_id,
+				tspp_filter.priority);
+
+			goto remove_channel_failed_restore_count;
+		}
+		mpq_tspp_release_hw_filter_index(tsif, tspp_filter.priority);
+		mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = -1;
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: HW filtering mode: Removed TSPP HW filter, PID = %d, index = %d\n",
+			__func__, feed->pid, tspp_filter.priority);
+	} else if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count ==
+					(TSPP_MAX_HW_PID_FILTER_NUM + 1)) {
+		/* Crossing the threshold - from SW to HW filtering mode */
+
+		accept_all_filter_existed =
+			mpq_dmx_tspp_info.tsif[tsif].
+				accept_all_filter_exists_flag;
+
+		/* Add a temporary filter to accept all packets */
+		ret = mpq_tspp_add_accept_all_filter(channel_id,
+					tspp_source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+				__func__, channel_id, tspp_source);
+
+			goto remove_channel_failed_restore_count;
+		}
+
+		ret = mpq_tspp_remove_null_blocking_filters(channel_id,
+					tspp_source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_remove_null_blocking_filters(%d, %d) failed\n",
+				__func__, channel_id, tspp_source);
+
+			restore_null_blocking_filters = 1;
+			if (!accept_all_filter_existed)
+				remove_accept_all_filter = 1;
+
+			goto remove_channel_failed_restore_count;
+		}
+
+		ret = mpq_tspp_add_all_user_filters(channel_id,
+					tspp_source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_add_all_user_filters(%d, %d) failed\n",
+				__func__, channel_id, tspp_source);
+
+			remove_user_filters = 1;
+			restore_null_blocking_filters = 1;
+			if (!accept_all_filter_existed)
+				remove_accept_all_filter = 1;
+
+			goto remove_channel_failed_restore_count;
+		}
+
+		ret = mpq_tspp_remove_accept_all_filter(channel_id,
+					tspp_source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+				__func__, channel_id, tspp_source);
+
+			remove_user_filters = 1;
+			restore_null_blocking_filters = 1;
+			if (!accept_all_filter_existed)
+				remove_accept_all_filter = 1;
+
+			goto remove_channel_failed_restore_count;
+		}
+	} else {
+		/* staying in SW filtering mode */
+		if ((mpq_dmx_tspp_info.tsif[tsif].pass_all_flag == 0) &&
+			(mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag == 0)) {
+
+			ret = mpq_tspp_remove_accept_all_filter(channel_id,
+						tspp_source);
+			if (ret < 0) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+					__func__, channel_id,
+					tspp_source);
+
+				goto remove_channel_failed_restore_count;
+			}
+		}
+	}
+
+	mpq_dmx_tspp_info.tsif[tsif].current_filter_count--;
+	(*channel_ref_count)--;
+
+	MPQ_DVB_DBG_PRINT("%s: success, current_filter_count = %d\n",
+		__func__, mpq_dmx_tspp_info.tsif[tsif].current_filter_count);
+
+	if (*channel_ref_count == 0) {
+		/* channel is not used any more, release it */
+		tspp_unregister_notification(0, channel_id);
+		tspp_close_stream(0, channel_id);
+		tspp_close_channel(0, channel_id);
+		atomic_set(data_cnt, 0);
+
+		if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+			vfree(mpq_dmx_tspp_info.tsif[tsif].aggregate_ids);
+			mpq_dmx_tspp_info.tsif[tsif].aggregate_ids = NULL;
+			mpq_dmx_channel_mem_free(tsif);
+		}
+	}
+
+	goto out;
+
+remove_channel_failed_restore_count:
+	/* restore internal database state */
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = feed->pid;
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+
+	if (remove_user_filters)
+		mpq_tspp_remove_all_user_filters(channel_id, tspp_source);
+
+	if (restore_null_blocking_filters)
+		mpq_tspp_add_null_blocking_filters(channel_id, tspp_source);
+
+	if (remove_accept_all_filter)
+		mpq_tspp_remove_accept_all_filter(channel_id, tspp_source);
+
+	/* restore flags. we can only get here if we changed the flags. */
+	if (feed->pid == TSPP_PASS_THROUGH_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 1;
+	else if (feed->pid == TSPP_NULL_PACKETS_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 1;
+
+out:
+	mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+	atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+	return ret;
+}
+
+static int mpq_tspp_dmx_start_filtering(struct dvb_demux_feed *feed)
+{
+	int ret;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	MPQ_DVB_DBG_PRINT(
+		"%s(pid=%d) executed\n",
+		__func__,
+		feed->pid);
+
+	if (mpq_demux == NULL) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid mpq_demux handle\n",
+			__func__);
+
+		return -EINVAL;
+	}
+
+	if (mpq_demux->source < DMX_SOURCE_DVR0) {
+		/* source from TSPP, need to configure tspp pipe */
+		ret = mpq_tspp_dmx_add_channel(feed);
+
+		if (ret < 0) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: mpq_tspp_dmx_add_channel failed(%d)\n",
+				__func__,
+				ret);
+			return ret;
+		}
+	}
+
+	/*
+	 * Always feed sections/PES starting from a new one, and
+	 * do not partially transfer data from an older one
+	 */
+	feed->pusi_seen = 0;
+
+	ret = mpq_dmx_init_mpq_feed(feed);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_dmx_init_mpq_feed failed(%d)\n",
+			__func__,
+			ret);
+		if (mpq_demux->source < DMX_SOURCE_DVR0)
+			mpq_tspp_dmx_remove_channel(feed);
+
+		return ret;
+	}
+
+	return 0;
+}
+
+static int mpq_tspp_dmx_stop_filtering(struct dvb_demux_feed *feed)
+{
+	int ret = 0;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	MPQ_DVB_DBG_PRINT("%s(%d) executed\n", __func__, feed->pid);
+
+	mpq_dmx_terminate_feed(feed);
+
+	if (mpq_demux->source < DMX_SOURCE_DVR0) {
+		/* source from TSPP, need to configure tspp pipe */
+		ret = mpq_tspp_dmx_remove_channel(feed);
+	}
+
+	return ret;
+}
+
+static int mpq_tspp_dmx_write_to_decoder(
+					struct dvb_demux_feed *feed,
+					const u8 *buf,
+					size_t len)
+{
+	/*
+	 * It is assumed that this function is called once for each
+	 * TS packet of the relevant feed.
+	 */
+	if (len > TSPP_RAW_TTS_SIZE)
+		MPQ_DVB_DBG_PRINT(
+				"%s: warnning - len larger than one packet\n",
+				__func__);
+
+	if (dvb_dmx_is_video_feed(feed))
+		return mpq_dmx_process_video_packet(feed, buf);
+
+	if (dvb_dmx_is_audio_feed(feed))
+		return mpq_dmx_process_audio_packet(feed, buf);
+
+	if (dvb_dmx_is_pcr_feed(feed))
+		return mpq_dmx_process_pcr_packet(feed, buf);
+
+	return 0;
+}
+
+/**
+ * Returns demux capabilities of TSPPv1 plugin
+ *
+ * @demux: demux device
+ * @caps: Returned capabilities
+ *
+ * Return     error code
+ */
+static int mpq_tspp_dmx_get_caps(struct dmx_demux *demux,
+				struct dmx_caps *caps)
+{
+	struct dvb_demux *dvb_demux = demux->priv;
+
+	if ((dvb_demux == NULL) || (caps == NULL)) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid parameters\n",
+			__func__);
+
+		return -EINVAL;
+	}
+
+	caps->caps = DMX_CAP_PULL_MODE | DMX_CAP_VIDEO_DECODER_DATA |
+		DMX_CAP_TS_INSERTION | DMX_CAP_VIDEO_INDEXING |
+		DMX_CAP_AUDIO_DECODER_DATA | DMX_CAP_AUTO_BUFFER_FLUSH;
+	caps->recording_max_video_pids_indexed = 0;
+	caps->num_decoders = MPQ_ADAPTER_MAX_NUM_OF_INTERFACES;
+	caps->num_demux_devices = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+	caps->num_pid_filters = TSPP_MAX_PID_FILTER_NUM;
+	caps->num_section_filters = dvb_demux->filternum;
+	caps->num_section_filters_per_pid = dvb_demux->filternum;
+	caps->section_filter_length = DMX_FILTER_SIZE;
+	caps->num_demod_inputs = TSIF_COUNT;
+	caps->num_memory_inputs = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+	caps->max_bitrate = 192;
+	caps->demod_input_max_bitrate = 96;
+	caps->memory_input_max_bitrate = 96;
+	caps->num_cipher_ops = 1;
+
+	/* TSIF reports a 3-byte STC in units of 27MHz/256 */
+	caps->max_stc = (u64)0xFFFFFF * 256;
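+	/*
+	 * Illustration: 0xFFFFFF * 256 = 4294967040 ticks of the 27MHz clock,
+	 * so the reported STC wraps around roughly every 159 seconds.
+	 */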
+
+	/* Buffer requirements */
+	caps->section.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT	|
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->section.max_buffer_num = 1;
+	caps->section.max_size = 0xFFFFFFFF;
+	caps->section.size_alignment = 0;
+	caps->pes.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT	|
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->pes.max_buffer_num = 1;
+	caps->pes.max_size = 0xFFFFFFFF;
+	caps->pes.size_alignment = 0;
+	caps->recording_188_tsp.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT	|
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->recording_188_tsp.max_buffer_num = 1;
+	caps->recording_188_tsp.max_size = 0xFFFFFFFF;
+	caps->recording_188_tsp.size_alignment = 0;
+	caps->recording_192_tsp.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT	|
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->recording_192_tsp.max_buffer_num = 1;
+	caps->recording_192_tsp.max_size = 0xFFFFFFFF;
+	caps->recording_192_tsp.size_alignment = 0;
+	caps->playback_188_tsp.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT	|
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->playback_188_tsp.max_buffer_num = 1;
+	caps->playback_188_tsp.max_size = 0xFFFFFFFF;
+	caps->playback_188_tsp.size_alignment = 188;
+	caps->playback_192_tsp.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT	|
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->playback_192_tsp.max_buffer_num = 1;
+	caps->playback_192_tsp.max_size = 0xFFFFFFFF;
+	caps->playback_192_tsp.size_alignment = 192;
+	caps->decoder.flags =
+		DMX_BUFFER_SECURED_IF_DECRYPTED	|
+		DMX_BUFFER_EXTERNAL_SUPPORT	|
+		DMX_BUFFER_INTERNAL_SUPPORT	|
+		DMX_BUFFER_LINEAR_GROUP_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->decoder.max_buffer_num = DMX_MAX_DECODER_BUFFER_NUM;
+	caps->decoder.max_size = 0xFFFFFFFF;
+	caps->decoder.size_alignment = SZ_4K;
+
+	return 0;
+}
+
+
+/**
+ * Reads TSIF STC from TSPP
+ *
+ * @demux: demux device
+ * @num: STC number. 0 for TSIF0 and 1 for TSIF1.
+ * @stc: STC value
+ * @base: divisor to get 90KHz value
+ *
+ * Return     error code
+ */
+static int mpq_tspp_dmx_get_stc(struct dmx_demux *demux, unsigned int num,
+		u64 *stc, unsigned int *base)
+{
+	enum tspp_source source;
+	u32 tcr_counter;
+	u64 avtimer_stc = 0;
+	int tts_source = 0;
+
+	if (!demux || !stc || !base)
+		return -EINVAL;
+
+	if (num == 0)
+		source = TSPP_SOURCE_TSIF0;
+	else if (num == 1)
+		source = TSPP_SOURCE_TSIF1;
+	else
+		return -EINVAL;
+
+	if (tspp_get_tts_source(0, &tts_source) < 0)
+		tts_source = TSIF_TTS_TCR;
+
+	if (tts_source != TSIF_TTS_LPASS_TIMER) {
+		tspp_get_ref_clk_counter(0, source, &tcr_counter);
+		*stc = ((u64)tcr_counter) * 256; /* conversion to 27MHz */
+		*base = 300; /* divisor to get 90KHz clock from stc value */
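+		/* 27MHz / 300 = 90KHz, the MPEG-2 system clock base */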
+	} else {
+		if (tspp_get_lpass_time_counter(0, source, &avtimer_stc) < 0)
+			return -EINVAL;
+		*stc = avtimer_stc;
+	}
+	return 0;
+}
+
+static int mpq_tspp_dmx_init(
+			struct dvb_adapter *mpq_adapter,
+			struct mpq_demux *mpq_demux)
+{
+	int result;
+
+	MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+	mpq_dmx_tspp_info.ion_client = mpq_demux->ion_client;
+
+	/* Set the kernel-demux object capabilities */
+	mpq_demux->demux.dmx.capabilities =
+		DMX_TS_FILTERING			|
+		DMX_PES_FILTERING			|
+		DMX_SECTION_FILTERING			|
+		DMX_MEMORY_BASED_FILTERING		|
+		DMX_CRC_CHECKING			|
+		DMX_TS_DESCRAMBLING;
+
+	mpq_demux->decoder_alloc_flags = ION_FLAG_CACHED;
+
+	/* Set dvb-demux "virtual" function pointers */
+	mpq_demux->demux.priv = (void *)mpq_demux;
+	mpq_demux->demux.filternum = TSPP_MAX_SECTION_FILTER_NUM;
+	mpq_demux->demux.feednum = MPQ_MAX_DMX_FILES;
+	mpq_demux->demux.start_feed = mpq_tspp_dmx_start_filtering;
+	mpq_demux->demux.stop_feed = mpq_tspp_dmx_stop_filtering;
+	mpq_demux->demux.write_to_decoder = mpq_tspp_dmx_write_to_decoder;
+	mpq_demux->demux.decoder_fullness_init = mpq_dmx_decoder_fullness_init;
+	mpq_demux->demux.decoder_fullness_wait = mpq_dmx_decoder_fullness_wait;
+	mpq_demux->demux.decoder_fullness_abort =
+		mpq_dmx_decoder_fullness_abort;
+	mpq_demux->demux.decoder_buffer_status = mpq_dmx_decoder_buffer_status;
+	mpq_demux->demux.reuse_decoder_buffer = mpq_dmx_reuse_decoder_buffer;
+	mpq_demux->demux.set_cipher_op = mpq_dmx_set_cipher_ops;
+	mpq_demux->demux.oob_command = mpq_dmx_oob_command;
+	mpq_demux->demux.convert_ts = mpq_dmx_convert_tts;
+	mpq_demux->demux.flush_decoder_buffer = NULL;
+
+	/* Initialize dvb_demux object */
+	result = dvb_dmx_init(&mpq_demux->demux);
+	if (result < 0) {
+		MPQ_DVB_ERR_PRINT("%s: dvb_dmx_init failed\n", __func__);
+		goto init_failed;
+	}
+
+	/* Now initialize the dmx-dev object */
+	mpq_demux->dmxdev.filternum = MPQ_MAX_DMX_FILES;
+	mpq_demux->dmxdev.demux = &mpq_demux->demux.dmx;
+	mpq_demux->dmxdev.capabilities = DMXDEV_CAP_DUPLEX;
+
+	mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
+	mpq_demux->dmxdev.demux->get_stc = mpq_tspp_dmx_get_stc;
+	mpq_demux->dmxdev.demux->get_caps = mpq_tspp_dmx_get_caps;
+	mpq_demux->dmxdev.demux->map_buffer = mpq_dmx_map_buffer;
+	mpq_demux->dmxdev.demux->unmap_buffer = mpq_dmx_unmap_buffer;
+	mpq_demux->dmxdev.demux->write = mpq_dmx_write;
+	result = dvb_dmxdev_init(&mpq_demux->dmxdev, mpq_adapter);
+	if (result < 0) {
+		MPQ_DVB_ERR_PRINT("%s: dvb_dmxdev_init failed (errno=%d)\n",
+						  __func__,
+						  result);
+		goto init_failed_dmx_release;
+	}
+
+	/* Extend dvb-demux debugfs with TSPP statistics. */
+	mpq_dmx_init_debugfs_entries(mpq_demux);
+
+	/* Get the TSIF TTS info */
+	if (tspp_get_tts_source(0, &mpq_demux->ts_packet_timestamp_source) < 0)
+		mpq_demux->ts_packet_timestamp_source = TSIF_TTS_TCR;
+
+	return 0;
+
+init_failed_dmx_release:
+	dvb_dmx_release(&mpq_demux->demux);
+init_failed:
+	return result;
+}
+
+static int __init mpq_dmx_tspp_plugin_init(void)
+{
+	int i;
+	int j;
+	int ret;
+
+	MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+	for (i = 0; i < TSIF_COUNT; i++) {
+		mpq_dmx_tspp_info.tsif[i].aggregate_ids = NULL;
+		mpq_dmx_tspp_info.tsif[i].channel_ref = 0;
+		mpq_dmx_tspp_info.tsif[i].buff_index = 0;
+		mpq_dmx_tspp_info.tsif[i].ch_mem_heap_handle = NULL;
+		mpq_dmx_tspp_info.tsif[i].ch_mem_heap_virt_base = NULL;
+		mpq_dmx_tspp_info.tsif[i].ch_mem_heap_phys_base = 0;
+		atomic_set(&mpq_dmx_tspp_info.tsif[i].data_cnt, 0);
+		atomic_set(&mpq_dmx_tspp_info.tsif[i].control_op, 0);
+
+		for (j = 0; j < TSPP_MAX_PID_FILTER_NUM; j++) {
+			mpq_dmx_tspp_info.tsif[i].filters[j].pid = -1;
+			mpq_dmx_tspp_info.tsif[i].filters[j].ref_count = 0;
+			mpq_dmx_tspp_info.tsif[i].filters[j].hw_index = -1;
+		}
+
+		for (j = 0; j < TSPP_MAX_HW_PID_FILTER_NUM; j++)
+			mpq_dmx_tspp_info.tsif[i].hw_indexes[j] = 0;
+
+		mpq_dmx_tspp_info.tsif[i].current_filter_count = 0;
+		mpq_dmx_tspp_info.tsif[i].pass_nulls_flag = 0;
+		mpq_dmx_tspp_info.tsif[i].pass_all_flag = 0;
+		mpq_dmx_tspp_info.tsif[i].accept_all_filter_exists_flag = 0;
+
+		snprintf(mpq_dmx_tspp_info.tsif[i].name,
+				TSIF_NAME_LENGTH,
+				"dmx_tsif%d",
+				i);
+
+		init_waitqueue_head(&mpq_dmx_tspp_info.tsif[i].wait_queue);
+		mpq_dmx_tspp_info.tsif[i].thread =
+			kthread_run(
+				mpq_dmx_tspp_thread, (void *)(uintptr_t)i,
+				mpq_dmx_tspp_info.tsif[i].name);
+
+		if (IS_ERR(mpq_dmx_tspp_info.tsif[i].thread)) {
+			for (j = 0; j < i; j++) {
+				kthread_stop(mpq_dmx_tspp_info.tsif[j].thread);
+				mutex_destroy(&mpq_dmx_tspp_info.tsif[j].mutex);
+			}
+
+			MPQ_DVB_ERR_PRINT(
+				"%s: kthread_run failed\n",
+				__func__);
+
+			return -ENOMEM;
+		}
+
+		mutex_init(&mpq_dmx_tspp_info.tsif[i].mutex);
+	}
+
+	ret = mpq_dmx_plugin_init(mpq_tspp_dmx_init);
+
+	if (ret < 0) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_dmx_plugin_init failed (errno=%d)\n",
+			__func__,
+			ret);
+
+		for (i = 0; i < TSIF_COUNT; i++) {
+			kthread_stop(mpq_dmx_tspp_info.tsif[i].thread);
+			mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex);
+		}
+	}
+
+	return ret;
+}
+
+static void __exit mpq_dmx_tspp_plugin_exit(void)
+{
+	int i;
+
+	MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+	for (i = 0; i < TSIF_COUNT; i++) {
+		mutex_lock(&mpq_dmx_tspp_info.tsif[i].mutex);
+
+		/*
+		 * Note: tspp_close_channel will also free the TSPP buffers
+		 * even if we allocated them ourselves,
+		 * using our free function.
+		 */
+		if (mpq_dmx_tspp_info.tsif[i].channel_ref) {
+			tspp_unregister_notification(0,
+				TSPP_CHANNEL_ID(i, TSPP_CHANNEL));
+			tspp_close_channel(0,
+				TSPP_CHANNEL_ID(i, TSPP_CHANNEL));
+
+			if (allocation_mode ==
+				MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+				vfree(mpq_dmx_tspp_info.tsif[i].aggregate_ids);
+				mpq_dmx_tspp_info.tsif[i].aggregate_ids = NULL;
+				mpq_dmx_channel_mem_free(i);
+			}
+		}
+
+		mutex_unlock(&mpq_dmx_tspp_info.tsif[i].mutex);
+		kthread_stop(mpq_dmx_tspp_info.tsif[i].thread);
+		mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex);
+	}
+
+	mpq_dmx_plugin_exit();
+}
+
+
+module_init(mpq_dmx_tspp_plugin_init);
+module_exit(mpq_dmx_tspp_plugin_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies Inc. demux TSPP version 1 HW Plugin");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c
new file mode 100644
index 0000000..860c365
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c
@@ -0,0 +1,1023 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include "qseecom_kernel.h"
+#include "mpq_sdmx.h"
+
+static struct qseecom_handle *sdmx_qseecom_handles[SDMX_MAX_SESSIONS];
+static struct mutex sdmx_lock[SDMX_MAX_SESSIONS];
+
+#define QSEECOM_SBUFF_SIZE	SZ_128K
+
+enum sdmx_cmd_id {
+	SDMX_OPEN_SESSION_CMD,
+	SDMX_CLOSE_SESSION_CMD,
+	SDMX_SET_SESSION_CFG_CMD,
+	SDMX_ADD_FILTER_CMD,
+	SDMX_REMOVE_FILTER_CMD,
+	SDMX_SET_KL_IDX_CMD,
+	SDMX_ADD_RAW_PID_CMD,
+	SDMX_REMOVE_RAW_PID_CMD,
+	SDMX_PROCESS_CMD,
+	SDMX_GET_DBG_COUNTERS_CMD,
+	SDMX_RESET_DBG_COUNTERS_CMD,
+	SDMX_GET_VERSION_CMD,
+	SDMX_INVALIDATE_KL_CMD,
+	SDMX_SET_LOG_LEVEL_CMD
+};
+
+#pragma pack(push, sdmx, 1)
+
+struct sdmx_proc_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+	u8 flags;
+	struct sdmx_buff_descr in_buf_descr;
+	u32 inp_fill_cnt;
+	u32 in_rd_offset;
+	u32 num_filters;
+	struct sdmx_filter_status filters_status[];
+};
+
+struct sdmx_proc_rsp {
+	enum sdmx_status ret;
+	u32 inp_fill_cnt;
+	u32 in_rd_offset;
+	u32 err_indicators;
+	u32 status_indicators;
+};
+
+struct sdmx_open_ses_req {
+	enum sdmx_cmd_id cmd_id;
+};
+
+struct sdmx_open_ses_rsp {
+	enum sdmx_status ret;
+	u32 session_handle;
+};
+
+struct sdmx_close_ses_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+};
+
+struct sdmx_close_ses_rsp {
+	enum sdmx_status ret;
+};
+
+struct sdmx_ses_cfg_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+	enum sdmx_proc_mode process_mode;
+	enum sdmx_inp_mode input_mode;
+	enum sdmx_pkt_format packet_len;
+	u8 odd_scramble_bits;
+	u8 even_scramble_bits;
+};
+
+struct sdmx_ses_cfg_rsp {
+	enum sdmx_status ret;
+};
+
+struct sdmx_set_kl_ind_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+	u32 pid;
+	u32 kl_index;
+};
+
+struct sdmx_set_kl_ind_rsp {
+	enum sdmx_status ret;
+};
+
+struct sdmx_add_filt_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+	u32 pid;
+	enum sdmx_filter filter_type;
+	struct sdmx_buff_descr meta_data_buf;
+	enum sdmx_buf_mode buffer_mode;
+	enum sdmx_raw_out_format ts_out_format;
+	u32 flags;
+	u32 num_data_bufs;
+	struct sdmx_data_buff_descr data_bufs[];
+};
+
+struct sdmx_add_filt_rsp {
+	enum sdmx_status ret;
+	u32 filter_handle;
+};
+
+struct sdmx_rem_filt_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+	u32 filter_handle;
+};
+
+struct sdmx_rem_filt_rsp {
+	enum sdmx_status ret;
+};
+
+struct sdmx_add_raw_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+	u32 filter_handle;
+	u32 pid;
+};
+
+struct sdmx_add_raw_rsp {
+	enum sdmx_status ret;
+};
+
+struct sdmx_rem_raw_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+	u32 filter_handle;
+	u32 pid;
+};
+
+struct sdmx_rem_raw_rsp {
+	enum sdmx_status ret;
+};
+
+struct sdmx_get_counters_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+	u32 num_filters;
+};
+
+struct sdmx_get_counters_rsp {
+	enum sdmx_status ret;
+	struct sdmx_session_dbg_counters session_counters;
+	u32 num_filters;
+	struct sdmx_filter_dbg_counters filter_counters[];
+};
+
+struct sdmx_rst_counters_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+};
+
+struct sdmx_rst_counters_rsp {
+	enum sdmx_status ret;
+};
+
+struct sdmx_get_version_req {
+	enum sdmx_cmd_id cmd_id;
+};
+
+struct sdmx_get_version_rsp {
+	enum sdmx_status ret;
+	int32_t version;
+};
+
+struct sdmx_set_log_level_req {
+	enum sdmx_cmd_id cmd_id;
+	enum sdmx_log_level level;
+	u32 session_handle;
+};
+
+struct sdmx_set_log_level_rsp {
+	enum sdmx_status ret;
+};
+
+#pragma pack(pop, sdmx)
+
+static int get_cmd_rsp_buffers(int handle_index,
+	void **cmd,
+	int *cmd_len,
+	void **rsp,
+	int *rsp_len)
+{
+	if (*cmd_len & QSEECOM_ALIGN_MASK)
+		*cmd_len = QSEECOM_ALIGN(*cmd_len);
+
+	if (*rsp_len & QSEECOM_ALIGN_MASK)
+		*rsp_len = QSEECOM_ALIGN(*rsp_len);
+
+	if ((*rsp_len + *cmd_len) > QSEECOM_SBUFF_SIZE) {
+		pr_err("%s: shared buffer too small to hold cmd=%d and rsp=%d\n",
+			__func__, *cmd_len, *rsp_len);
+		return SDMX_STATUS_OUT_OF_MEM;
+	}
+
+	*cmd = sdmx_qseecom_handles[handle_index]->sbuf;
+	*rsp = sdmx_qseecom_handles[handle_index]->sbuf + *cmd_len;
+	return SDMX_SUCCESS;
+}
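+
+/*
+ * Layout sketch (illustrative): the QSEECom shared buffer is carved so that
+ * the command starts at the beginning of sbuf and the response immediately
+ * follows the aligned command:
+ *
+ *	sbuf: | cmd (QSEECOM_ALIGN'ed cmd_len) | rsp (rsp_len) | unused |
+ */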
+
+/*
+ * Returns version of secure-demux app.
+ *
+ * @session_handle: Handle of the secure demux instance to query.
+ * @version: Returned application version. Must not be NULL.
+ * Return error code
+ */
+int sdmx_get_version(int session_handle, int32_t *version)
+{
+	int res, cmd_len, rsp_len;
+	struct sdmx_get_version_req *cmd;
+	struct sdmx_get_version_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
+		(version == NULL))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	cmd_len = sizeof(struct sdmx_get_version_req);
+	rsp_len = sizeof(struct sdmx_get_version_rsp);
+
+	/* Lock shared memory */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	/* Get command and response buffers */
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto out;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_GET_VERSION_CMD;
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	ret = rsp->ret;
+	*version = rsp->version;
+out:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_get_version);
+
+/*
+ * Initializes a new secure demux instance and returns a handle of the instance.
+ *
+ * @session_handle: Returned instance handle. Must not be NULL.
+ * Return error code
+ */
+int sdmx_open_session(int *session_handle)
+{
+	int res, cmd_len, rsp_len;
+	enum sdmx_status ret, version_ret;
+	struct sdmx_open_ses_req *cmd;
+	struct sdmx_open_ses_rsp *rsp;
+	struct qseecom_handle *qseecom_handle = NULL;
+	int32_t version;
+
+	/* Input validation */
+	if (session_handle == NULL)
+		return SDMX_STATUS_GENERAL_FAILURE;
+
+	/* Start the TZ app */
+	res = qseecom_start_app(&qseecom_handle, "securemm",
+		QSEECOM_SBUFF_SIZE);
+
+	if (res < 0)
+		return SDMX_STATUS_GENERAL_FAILURE;
+
+	cmd_len = sizeof(struct sdmx_open_ses_req);
+	rsp_len = sizeof(struct sdmx_open_ses_rsp);
+
+	/* Get command and response buffers */
+	cmd = (struct sdmx_open_ses_req *)qseecom_handle->sbuf;
+
+	if (cmd_len & QSEECOM_ALIGN_MASK)
+		cmd_len = QSEECOM_ALIGN(cmd_len);
+
+	rsp = (struct sdmx_open_ses_rsp *)(qseecom_handle->sbuf + cmd_len);
+
+	if (rsp_len & QSEECOM_ALIGN_MASK)
+		rsp_len = QSEECOM_ALIGN(rsp_len);
+
+	/* Will be later overridden by SDMX response */
+	*session_handle = SDMX_INVALID_SESSION_HANDLE;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_OPEN_SESSION_CMD;
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(qseecom_handle, (void *)cmd, cmd_len,
+		(void *)rsp, rsp_len);
+
+	if (res < 0) {
+		qseecom_shutdown_app(&qseecom_handle);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	/* Parse response struct */
+	*session_handle = rsp->session_handle;
+
+	/* Guard against an invalid handle returned by the secure app */
+	if ((*session_handle < 0) || (*session_handle >= SDMX_MAX_SESSIONS)) {
+		qseecom_shutdown_app(&qseecom_handle);
+		return (rsp->ret != SDMX_SUCCESS) ?
+			rsp->ret : SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	/* Initialize handle and mutex */
+	sdmx_qseecom_handles[*session_handle] = qseecom_handle;
+	mutex_init(&sdmx_lock[*session_handle]);
+	ret = rsp->ret;
+
+	/* Get and print the app version */
+	version_ret = sdmx_get_version(*session_handle, &version);
+	if (version_ret == SDMX_SUCCESS)
+		pr_info("TZ SDMX version is %x.%x\n", version >> 8,
+			version & 0xFF);
+	else
+		pr_err("Error reading TZ SDMX version\n");
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_open_session);
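+
+/*
+ * Illustrative call sketch (an assumption about typical usage, not part of
+ * this driver): a caller opens a session once and keeps the returned handle
+ * for all subsequent sdmx_* calls. Error handling is abbreviated.
+ *
+ *	int session;
+ *
+ *	if (sdmx_open_session(&session) != SDMX_SUCCESS ||
+ *		session == SDMX_INVALID_SESSION_HANDLE)
+ *		return -ENODEV;
+ */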
+
+/*
+ * Closes a secure demux instance.
+ *
+ * @session_handle: handle of a secure demux instance to close.
+ * Return error code
+ */
+int sdmx_close_session(int session_handle)
+{
+	int res, cmd_len, rsp_len;
+	struct sdmx_close_ses_req *cmd;
+	struct sdmx_close_ses_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	cmd_len = sizeof(struct sdmx_close_ses_req);
+	rsp_len = sizeof(struct sdmx_close_ses_rsp);
+
+	/* Lock shared memory */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	/* Get command and response buffers */
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto out;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_CLOSE_SESSION_CMD;
+	cmd->session_handle = session_handle;
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	ret = rsp->ret;
+
+	/* Shutdown the TZ app (or at least free the current handle) */
+	res = qseecom_shutdown_app(&sdmx_qseecom_handles[session_handle]);
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	sdmx_qseecom_handles[session_handle] = NULL;
+out:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_close_session);
+
+/*
+ * Configures an open secure demux instance.
+ *
+ * @session_handle: secure demux instance
+ * @proc_mode: Defines secure demux's behavior in case of output
+ *             buffer overflow.
+ * @inp_mode: Defines the input encryption settings.
+ * @pkt_format: TS packet length in input buffer.
+ * @odd_scramble_bits: Value of the scramble bits indicating the ODD key.
+ * @even_scramble_bits: Value of the scramble bits indicating the EVEN key.
+ * Return error code
+ */
+int sdmx_set_session_cfg(int session_handle,
+	enum sdmx_proc_mode proc_mode,
+	enum sdmx_inp_mode inp_mode,
+	enum sdmx_pkt_format pkt_format,
+	u8 odd_scramble_bits,
+	u8 even_scramble_bits)
+{
+	int res, cmd_len, rsp_len;
+	struct sdmx_ses_cfg_req *cmd;
+	struct sdmx_ses_cfg_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	cmd_len = sizeof(struct sdmx_ses_cfg_req);
+	rsp_len = sizeof(struct sdmx_ses_cfg_rsp);
+
+	/* Lock shared memory */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	/* Get command and response buffers */
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto out;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_SET_SESSION_CFG_CMD;
+	cmd->session_handle = session_handle;
+	cmd->process_mode = proc_mode;
+	cmd->input_mode = inp_mode;
+	cmd->packet_len = pkt_format;
+	cmd->odd_scramble_bits = odd_scramble_bits;
+	cmd->even_scramble_bits = even_scramble_bits;
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	ret = rsp->ret;
+out:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_set_session_cfg);
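+
+/*
+ * Illustrative sketch (assumed values, not a mandated sequence): configure a
+ * freshly opened session for clear, 188-byte TS input in push mode. The odd
+ * and even scramble-bit values follow the usual TS scrambling-control
+ * convention (0x3 for odd, 0x2 for even).
+ *
+ *	ret = sdmx_set_session_cfg(session, SDMX_PUSH_MODE, SDMX_CLEAR_MODE,
+ *		SDMX_188_BYTE_PKT, 0x3, 0x2);
+ *	if (ret != SDMX_SUCCESS)
+ *		goto err_close_session;
+ */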
+
+/*
+ * Creates a new secure demux filter and returns a filter handle
+ *
+ * @session_handle: secure demux instance
+ * @pid: pid to filter
+ * @filter_type: type of filtering
+ * @meta_data_buf: meta data buffer descriptor
+ * @data_buf_mode: data buffer mode (ring/linear)
+ * @num_data_bufs: number of data buffers (use 1 for a ring buffer)
+ * @data_bufs: data buffers descriptors array
+ * @filter_handle: returned filter handle
+ * @ts_out_format: output format for raw filters
+ * @flags: optional flags for filter
+ *	   (currently only clear section CRC verification is supported)
+ *
+ * Return error code
+ */
+int sdmx_add_filter(int session_handle,
+	u16 pid,
+	enum sdmx_filter filter_type,
+	struct sdmx_buff_descr *meta_data_buf,
+	enum sdmx_buf_mode data_buf_mode,
+	u32 num_data_bufs,
+	struct sdmx_data_buff_descr *data_bufs,
+	int *filter_handle,
+	enum sdmx_raw_out_format ts_out_format,
+	u32 flags)
+{
+	int res, cmd_len, rsp_len;
+	struct sdmx_add_filt_req *cmd;
+	struct sdmx_add_filt_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
+		(filter_handle == NULL))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	cmd_len = sizeof(struct sdmx_add_filt_req)
+		+ num_data_bufs * sizeof(struct sdmx_data_buff_descr);
+	rsp_len = sizeof(struct sdmx_add_filt_rsp);
+
+	/* Will be later overridden by SDMX response */
+	*filter_handle = SDMX_INVALID_FILTER_HANDLE;
+
+	/* Lock shared memory */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	/* Get command and response buffers */
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto out;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_ADD_FILTER_CMD;
+	cmd->session_handle = session_handle;
+	cmd->pid = (u32)pid;
+	cmd->filter_type = filter_type;
+	cmd->ts_out_format = ts_out_format;
+	cmd->flags = flags;
+	if (meta_data_buf != NULL)
+		memcpy(&(cmd->meta_data_buf), meta_data_buf,
+				sizeof(struct sdmx_buff_descr));
+	else
+		memset(&(cmd->meta_data_buf), 0, sizeof(cmd->meta_data_buf));
+
+	cmd->buffer_mode = data_buf_mode;
+	cmd->num_data_bufs = num_data_bufs;
+	memcpy(cmd->data_bufs, data_bufs,
+			num_data_bufs * sizeof(struct sdmx_data_buff_descr));
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	/* Parse response struct */
+	*filter_handle = rsp->filter_handle;
+	ret = rsp->ret;
+out:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_add_filter);
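+
+/*
+ * Illustrative sketch (hypothetical buffer setup): add a raw (recording)
+ * filter that outputs to a single ring data buffer. meta_phys/meta_size and
+ * data_phys/data_size stand for buffers the caller has already allocated and
+ * made accessible to the secure app; they are not defined by this driver.
+ *
+ *	struct sdmx_buff_descr meta = {
+ *		.base_addr = meta_phys,
+ *		.size = meta_size,
+ *	};
+ *	struct sdmx_data_buff_descr data = { .length = data_size };
+ *	int raw_filter;
+ *
+ *	data.buff_chunks[0].base_addr = data_phys;
+ *	data.buff_chunks[0].size = data_size;
+ *
+ *	ret = sdmx_add_filter(session, 0x100, SDMX_RAW_FILTER, &meta,
+ *		SDMX_RING_BUF, 1, &data, &raw_filter, SDMX_188_OUTPUT, 0);
+ */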
+
+/*
+ * Removes a secure demux filter
+ *
+ * @session_handle: secure demux instance
+ * @filter_handle: filter handle to remove
+ *
+ * Return error code
+ */
+int sdmx_remove_filter(int session_handle, int filter_handle)
+{
+	int res, cmd_len, rsp_len;
+	struct sdmx_rem_filt_req *cmd;
+	struct sdmx_rem_filt_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	cmd_len = sizeof(struct sdmx_rem_filt_req);
+	rsp_len = sizeof(struct sdmx_rem_filt_rsp);
+
+	/* Lock shared memory */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	/* Get command and response buffers */
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto out;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_REMOVE_FILTER_CMD;
+	cmd->session_handle = session_handle;
+	cmd->filter_handle = filter_handle;
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	ret = rsp->ret;
+out:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_remove_filter);
+
+/*
+ * Associates a key ladder index for the specified pid
+ *
+ * @session_handle: secure demux instance
+ * @pid: pid
+ * @key_ladder_index: key ladder index to associate to the pid
+ *
+ * Return error code
+ *
+ * Note: if pid already has some key ladder index associated, it will be
+ * overridden.
+ */
+int sdmx_set_kl_ind(int session_handle, u16 pid, u32 key_ladder_index)
+{
+	int res, cmd_len, rsp_len;
+	struct sdmx_set_kl_ind_req *cmd;
+	struct sdmx_set_kl_ind_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	cmd_len = sizeof(struct sdmx_set_kl_ind_req);
+	rsp_len = sizeof(struct sdmx_set_kl_ind_rsp);
+
+	/* Lock shared memory */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	/* Get command and response buffers */
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto out;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_SET_KL_IDX_CMD;
+	cmd->session_handle = session_handle;
+	cmd->pid = (u32)pid;
+	cmd->kl_index = key_ladder_index;
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	ret = rsp->ret;
+out:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_set_kl_ind);
+
+/*
+ * Adds the specified pid to an existing raw (recording) filter
+ *
+ * @session_handle: secure demux instance
+ * @filter_handle: raw filter handle
+ * @pid: pid
+ *
+ * Return error code
+ */
+int sdmx_add_raw_pid(int session_handle, int filter_handle, u16 pid)
+{
+	int res, cmd_len, rsp_len;
+	struct sdmx_add_raw_req *cmd;
+	struct sdmx_add_raw_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	cmd_len = sizeof(struct sdmx_add_raw_req);
+	rsp_len = sizeof(struct sdmx_add_raw_rsp);
+
+	/* Lock shared memory */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	/* Get command and response buffers */
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto out;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_ADD_RAW_PID_CMD;
+	cmd->session_handle = session_handle;
+	cmd->filter_handle = filter_handle;
+	cmd->pid = (u32)pid;
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	ret = rsp->ret;
+out:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_add_raw_pid);
+
+/*
+ * Removes the specified pid from a raw (recording) filter
+ *
+ * @session_handle: secure demux instance
+ * @filter_handle: raw filter handle
+ * @pid: pid
+ *
+ * Return error code
+ */
+int sdmx_remove_raw_pid(int session_handle, int filter_handle, u16 pid)
+{
+	int res, cmd_len, rsp_len;
+	struct sdmx_rem_raw_req *cmd;
+	struct sdmx_rem_raw_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	cmd_len = sizeof(struct sdmx_rem_raw_req);
+	rsp_len = sizeof(struct sdmx_rem_raw_rsp);
+
+	/* Lock shared memory */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	/* Get command and response buffers */
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto out;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_REMOVE_RAW_PID_CMD;
+	cmd->session_handle = session_handle;
+	cmd->filter_handle = filter_handle;
+	cmd->pid = (u32)pid;
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	ret = rsp->ret;
+out:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_remove_raw_pid);
+
+/*
+ * Call secure demux to perform processing on the specified input buffer
+ *
+ * @session_handle: secure demux instance
+ * @flags: input flags. Currently only EOS marking is supported.
+ * @input_buf_desc: input buffer descriptor
+ * @input_fill_count: number of bytes available in input buffer
+ * @input_read_offset: offset inside input buffer where data starts
+ * @error_indicators: returned general error indicators
+ * @status_indicators: returned general status indicators
+ * @num_filters: number of filters in filter status array
+ * @filter_status: filter status descriptor array
+ *
+ * Return error code
+ */
+int sdmx_process(int session_handle, u8 flags,
+	struct sdmx_buff_descr *input_buf_desc,
+	u32 *input_fill_count,
+	u32 *input_read_offset,
+	u32 *error_indicators,
+	u32 *status_indicators,
+	u32 num_filters,
+	struct sdmx_filter_status *filter_status)
+{
+	int res, cmd_len, rsp_len;
+	struct sdmx_proc_req *cmd;
+	struct sdmx_proc_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
+		(input_buf_desc == NULL) ||
+		(input_fill_count == NULL) || (input_read_offset == NULL) ||
+		(error_indicators == NULL) || (status_indicators == NULL) ||
+		(filter_status == NULL))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	cmd_len = sizeof(struct sdmx_proc_req)
+		+ num_filters * sizeof(struct sdmx_filter_status);
+	rsp_len = sizeof(struct sdmx_proc_rsp);
+
+	/* Lock shared memory */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	/* Get command and response buffers */
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto out;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_PROCESS_CMD;
+	cmd->session_handle = session_handle;
+	cmd->flags = flags;
+	cmd->in_buf_descr.base_addr = input_buf_desc->base_addr;
+	cmd->in_buf_descr.size = input_buf_desc->size;
+	cmd->inp_fill_cnt = *input_fill_count;
+	cmd->in_rd_offset = *input_read_offset;
+	cmd->num_filters = num_filters;
+	memcpy(cmd->filters_status, filter_status,
+		num_filters * sizeof(struct sdmx_filter_status));
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	/* Parse response struct */
+	*input_fill_count = rsp->inp_fill_cnt;
+	*input_read_offset = rsp->in_rd_offset;
+	*error_indicators = rsp->err_indicators;
+	*status_indicators = rsp->status_indicators;
+	memcpy(filter_status, cmd->filters_status,
+		num_filters * sizeof(struct sdmx_filter_status));
+	ret = rsp->ret;
+out:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_process);
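+
+/*
+ * Illustrative processing sketch (assumes a session, a filter and an input
+ * buffer descriptor set up as above): feed the input buffer to the secure app
+ * and inspect the per-filter status, which is returned in place in the
+ * filter-status array.
+ *
+ *	struct sdmx_filter_status filters[1] = {
+ *		{ .filter_handle = raw_filter },
+ *	};
+ *	u32 fill = bytes_in_input, rd_offset = 0, err = 0, status = 0;
+ *
+ *	ret = sdmx_process(session, 0, &input_desc, &fill, &rd_offset,
+ *		&err, &status, 1, filters);
+ *	if (ret == SDMX_SUCCESS && filters[0].error_indicators)
+ *		pr_debug("sdmx filter errors: 0x%x\n",
+ *			filters[0].error_indicators);
+ */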
+
+/*
+ * Returns session-level & filter-level debug counters
+ *
+ * @session_handle: secure demux instance
+ * @session_counters: returned session-level debug counters
+ * @num_filters: returned number of filters reported in filter_counters
+ * @filter_counters: returned filter-level debug counters array
+ *
+ * Return error code
+ */
+int sdmx_get_dbg_counters(int session_handle,
+	struct sdmx_session_dbg_counters *session_counters,
+	u32 *num_filters,
+	struct sdmx_filter_dbg_counters *filter_counters)
+{
+	int res, cmd_len, rsp_len;
+	struct sdmx_get_counters_req *cmd;
+	struct sdmx_get_counters_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
+		(session_counters == NULL) || (num_filters == NULL) ||
+		(filter_counters == NULL))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	cmd_len = sizeof(struct sdmx_get_counters_req);
+	rsp_len = sizeof(struct sdmx_get_counters_rsp)
+		+ *num_filters * sizeof(struct sdmx_filter_dbg_counters);
+
+	/* Lock shared memory */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	/* Get command and response buffers */
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto out;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_GET_DBG_COUNTERS_CMD;
+	cmd->session_handle = session_handle;
+	cmd->num_filters = *num_filters;
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	/* Parse response struct */
+	*session_counters = rsp->session_counters;
+	*num_filters = rsp->num_filters;
+	memcpy(filter_counters, rsp->filter_counters,
+		*num_filters * sizeof(struct sdmx_filter_dbg_counters));
+	ret = rsp->ret;
+out:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_get_dbg_counters);
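+
+/*
+ * Illustrative sketch (assumed usage): query the session and per-filter debug
+ * counters for a single filter.
+ *
+ *	struct sdmx_session_dbg_counters ses_cnt;
+ *	struct sdmx_filter_dbg_counters flt_cnt[1];
+ *	u32 num = 1;
+ *
+ *	if (sdmx_get_dbg_counters(session, &ses_cnt, &num, flt_cnt) ==
+ *		SDMX_SUCCESS)
+ *		pr_debug("sdmx ts in=%u out=%u\n",
+ *			ses_cnt.ts_pkt_in, ses_cnt.ts_pkt_out);
+ */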
+
+/*
+ * Reset debug counters
+ *
+ * @session_handle: secure demux instance
+ *
+ * Return error code
+ */
+int sdmx_reset_dbg_counters(int session_handle)
+{
+	int res, cmd_len, rsp_len;
+	struct sdmx_rst_counters_req *cmd;
+	struct sdmx_rst_counters_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	cmd_len = sizeof(struct sdmx_rst_counters_req);
+	rsp_len = sizeof(struct sdmx_rst_counters_rsp);
+
+	/* Lock shared memory */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	/* Get command and response buffers */
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto out;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_RESET_DBG_COUNTERS_CMD;
+	cmd->session_handle = session_handle;
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	ret = rsp->ret;
+out:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_reset_dbg_counters);
+
+/*
+ * Set debug log verbosity level
+ *
+ * @session_handle: secure demux instance
+ * @level: requested log level
+ *
+ * Return error code
+ */
+int sdmx_set_log_level(int session_handle, enum sdmx_log_level level)
+{
+	int res, cmd_len, rsp_len;
+	struct sdmx_set_log_level_req *cmd;
+	struct sdmx_set_log_level_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	cmd_len = sizeof(struct sdmx_set_log_level_req);
+	rsp_len = sizeof(struct sdmx_set_log_level_rsp);
+
+	/* Lock shared memory */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	/* Get command and response buffers */
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto out;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_SET_LOG_LEVEL_CMD;
+	cmd->session_handle = session_handle;
+	cmd->level = level;
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+	ret = rsp->ret;
+out:
+	/* Unlock */
+	mutex_unlock(&sdmx_lock[session_handle]);
+	return ret;
+}
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
new file mode 100644
index 0000000..9be26ae5
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
@@ -0,0 +1,368 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_SDMX_H
+#define _MPQ_SDMX_H
+
+#include <linux/types.h>
+
+/* Constant declarations */
+#define SDMX_MAX_SESSIONS  (4)
+#define SDMX_LOOPBACK_PID  (0x2000)
+
+#define SDMX_MAX_PHYSICAL_CHUNKS (256)
+
+/* Filter-level error indicators */
+#define SDMX_FILTER_SUCCESS                       (0)
+#define SDMX_FILTER_ERR_MD_BUF_FULL               BIT(0)
+#define SDMX_FILTER_ERR_D_BUF_FULL                BIT(1)
+#define SDMX_FILTER_ERR_D_LIN_BUFS_FULL           BIT(2)
+#define SDMX_FILTER_ERR_INVALID_SCRAMBLE_BITS     BIT(3)
+#define SDMX_FILTER_ERR_KL_IND_NOT_SET            BIT(4)
+#define SDMX_FILTER_ERR_CAS_DECRYPT_ERROR         BIT(5)
+#define SDMX_FILTER_ERR_SEC_VERIF_CRC32_FAIL      BIT(6)
+#define SDMX_FILTER_ERR_SEC_INTERNAL_MALLOC_FAIL  BIT(7)
+#define SDMX_FILTER_ERR_SEC_LEN_INVALID           BIT(8)
+#define SDMX_FILTER_ERR_SEC_PUSI_PTR_INVALID      BIT(9)
+#define SDMX_FILTER_ERR_TS_SYNC_BYTE_INVALID      BIT(10)
+#define SDMX_FILTER_ERR_TS_TRANSPORT_ERR          BIT(11)
+#define SDMX_FILTER_ERR_CONT_CNT_INVALID          BIT(12)
+#define SDMX_FILTER_ERR_CONT_CNT_DUPLICATE        BIT(13)
+#define SDMX_FILTER_ERR_INVALID_PES_HDR           BIT(14)
+#define SDMX_FILTER_ERR_INVALID_PES_LEN           BIT(15)
+#define SDMX_FILTER_ERR_INVALID_PES_ENCRYPTION    BIT(16)
+#define SDMX_FILTER_ERR_SECURITY_FAULT            BIT(17)
+#define SDMX_FILTER_ERR_IN_NS_BUFFER              BIT(18)
+
+/* Filter-level status indicators */
+#define SDMX_FILTER_STATUS_EOS                    BIT(0)
+#define SDMX_FILTER_STATUS_WR_PTR_CHANGED         BIT(1)
+
+/* Filter-level flags */
+#define SDMX_FILTER_FLAG_VERIFY_SECTION_CRC	BIT(0)
+
+#define SDMX_INVALID_SESSION_HANDLE		(-1)
+#define SDMX_INVALID_FILTER_HANDLE		(-1)
+
+/* Input flags */
+#define SDMX_INPUT_FLAG_EOS		BIT(0)
+#define SDMX_INPUT_FLAG_DBG_ENABLE	BIT(1)
+
+
+enum sdmx_buf_mode {
+	SDMX_RING_BUF,
+	SDMX_LINEAR_GROUP_BUF,
+};
+
+enum sdmx_proc_mode {
+	SDMX_PUSH_MODE,
+	SDMX_PULL_MODE,
+};
+
+enum sdmx_inp_mode {
+	SDMX_PKT_ENC_MODE,
+	SDMX_BULK_ENC_MODE,
+	SDMX_CLEAR_MODE,
+};
+
+enum sdmx_pkt_format {
+	SDMX_188_BYTE_PKT = 188,
+	SDMX_192_BYTE_PKT = 192,
+	SDMX_195_BYTE_PKT = 195,
+};
+
+enum sdmx_log_level {
+	SDMX_LOG_NO_PRINT,
+	SDMX_LOG_MSG_ERROR,
+	SDMX_LOG_DEBUG,
+	SDMX_LOG_VERBOSE
+};
+
+enum sdmx_status {
+	SDMX_SUCCESS = 0,
+	SDMX_STATUS_GENERAL_FAILURE = -1,
+	SDMX_STATUS_MAX_OPEN_SESSIONS_REACHED = -2,
+	SDMX_STATUS_INVALID_SESSION_HANDLE = -3,
+	SDMX_STATUS_INVALID_INPUT_PARAMS = -4,
+	SDMX_STATUS_UNSUPPORTED_MODE = -5,
+	SDMX_STATUS_INVALID_PID = -6,
+	SDMX_STATUS_OUT_OF_MEM = -7,
+	SDMX_STATUS_FILTER_EXISTS = -8,
+	SDMX_STATUS_INVALID_FILTER_HANDLE = -9,
+	SDMX_STATUS_MAX_RAW_PIDS_REACHED = -10,
+	SDMX_STATUS_SINGLE_PID_RAW_FILTER = -11,
+	SDMX_STATUS_INP_BUF_INVALID_PARAMS = -12,
+	SDMX_STATUS_INVALID_FILTER_CFG = -13,
+	SDMX_STATUS_STALLED_IN_PULL_MODE = -14,
+	SDMX_STATUS_SECURITY_FAULT = -15,
+	SDMX_STATUS_NS_BUFFER_ERROR = -16,
+};
+
+enum sdmx_filter {
+	SDMX_PES_FILTER,		/* Other PES */
+	SDMX_SEPARATED_PES_FILTER,	/* Separated PES (for decoder) */
+	SDMX_SECTION_FILTER,		/* Section */
+	SDMX_PCR_FILTER,		/* PCR */
+	SDMX_RAW_FILTER,		/* Recording */
+};
+
+enum sdmx_raw_out_format {
+	SDMX_188_OUTPUT,
+	SDMX_192_HEAD_OUTPUT,
+	SDMX_192_TAIL_OUTPUT
+};
+
+#pragma pack(push, sdmx, 1)
+
+struct sdmx_session_dbg_counters {
+	/* Total number of TS-packets input to SDMX. */
+	u32 ts_pkt_in;
+
+	/* Total number of TS-packets filtered out by SDMX. */
+	u32 ts_pkt_out;
+};
+
+struct sdmx_filter_dbg_counters {
+	int filter_handle;
+
+	/* Number of TS-packets filtered. */
+	u32 ts_pkt_count;
+
+	/* Number of TS-packets with adaptation field only (no payload). */
+	u32 ts_pkt_no_payload;
+
+	/* Number of TS-packets with the discontinuity indicator set. */
+	u32 ts_pkt_discont;
+
+	/* Number of duplicate TS-packets detected. */
+	u32 ts_pkt_dup;
+
+	/* Number of packets not decrypted because the key wasn't ready. */
+	u32 ts_pkt_key_not_ready;
+};
+
+struct sdmx_pes_counters {
+	/* Number of TS packets with the TEI flag set */
+	u32 transport_err_count;
+
+	/* Number of TS packets with continuity counter errors */
+	u32 continuity_err_count;
+
+	/* Number of TS packets composing this PES frame */
+	u32 pes_ts_count;
+
+	/* Number of TS packets dropped due to full buffer */
+	u32 drop_count;
+};
+
+struct sdmx_buff_descr {
+	/* Physical address where buffer starts */
+	u64 base_addr;
+
+	/* Size of buffer */
+	u32 size;
+};
+
+struct sdmx_data_buff_descr {
+	/* Physical chunks of the buffer */
+	struct sdmx_buff_descr buff_chunks[SDMX_MAX_PHYSICAL_CHUNKS];
+
+	/* Length of buffer */
+	u32 length;
+};
+
+/*
+ * Data payload residing in the data buffers is described using this meta-data
+ * header. The meta data header specifies where the payload is located in the
+ * data buffer and how big it is.
+ * The meta data header optionally carries additional relevant meta data
+ * immediately following the meta-data header.
+ */
+struct sdmx_metadata_header {
+	/*
+	 * Payload start offset inside data buffer. In case data is managed
+	 * as a linear buffer group, this specifies buffer index.
+	 */
+	u32 payload_start;
+
+	/* Payload length */
+	u32 payload_length;
+
+	/* Number of meta data bytes immediately following this header */
+	u32 metadata_length;
+};
+
+
+struct sdmx_filter_status {
+	/* Secure demux filter handle */
+	int filter_handle;
+
+	/*
+	 * Number of pending bytes in filter's output data buffer.
+	 * For linear buffer mode, this is number of buffers pending.
+	 */
+	u32 data_fill_count;
+
+	/*
+	 * Offset in data buffer for next data payload to be written.
+	 * For linear buffer mode, this is a buffer index.
+	 */
+	u32 data_write_offset;
+
+	/* Number of pending bytes in filter's output meta data buffer */
+	u32 metadata_fill_count;
+
+	/* Offset in meta data buffer for next metadata header to be written */
+	u32 metadata_write_offset;
+
+	/* Errors (bitmap) reported by secure demux for this filter */
+	u32 error_indicators;
+
+	/* General status (bitmap) reported by secure demux for this filter */
+	u32 status_indicators;
+};
+#pragma pack(pop, sdmx)
+
+#ifdef CONFIG_QSEECOM
+
+int sdmx_open_session(int *session_handle);
+
+int sdmx_close_session(int session_handle);
+
+int sdmx_get_version(int session_handle, int32_t *version);
+
+int sdmx_set_session_cfg(int session_handle, enum sdmx_proc_mode proc_mode,
+	enum sdmx_inp_mode inp_mode, enum sdmx_pkt_format pkt_format,
+	u8 odd_scramble_bits, u8 even_scramble_bits);
+
+int sdmx_add_filter(int session_handle, u16 pid, enum sdmx_filter filter_type,
+	struct sdmx_buff_descr *meta_data_buf, enum sdmx_buf_mode data_buf_mode,
+	u32 num_data_bufs, struct sdmx_data_buff_descr *data_bufs,
+	int *filter_handle, enum sdmx_raw_out_format ts_out_format, u32 flags);
+
+int sdmx_remove_filter(int session_handle, int filter_handle);
+
+int sdmx_set_kl_ind(int session_handle, u16 pid, u32 key_ladder_index);
+
+int sdmx_add_raw_pid(int session_handle, int filter_handle, u16 pid);
+
+int sdmx_remove_raw_pid(int session_handle, int filter_handle, u16 pid);
+
+int sdmx_process(int session_handle, u8 flags,
+	struct sdmx_buff_descr *input_buf_desc,
+	u32 *input_fill_count, u32 *input_read_offset,
+	u32 *error_indicators,
+	u32 *status_indicators,
+	u32 num_filters,
+	struct sdmx_filter_status *filter_status);
+
+int sdmx_get_dbg_counters(int session_handle,
+	struct sdmx_session_dbg_counters *session_counters,
+	u32 *num_filters,
+	struct sdmx_filter_dbg_counters *filter_counters);
+
+int sdmx_reset_dbg_counters(int session_handle);
+
+int sdmx_set_log_level(int session_handle, enum sdmx_log_level level);
+
+#else
+
+static inline int sdmx_open_session(int *session_handle)
+{
+	return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_close_session(int session_handle)
+{
+	return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_get_version(int session_handle, int32_t *version)
+{
+	return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_set_session_cfg(int session_handle,
+	enum sdmx_proc_mode proc_mode,
+	enum sdmx_inp_mode inp_mode, enum sdmx_pkt_format pkt_format,
+	u8 odd_scramble_bits, u8 even_scramble_bits)
+{
+	return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_add_filter(int session_handle, u16 pid,
+	enum sdmx_filter filter_type,
+	struct sdmx_buff_descr *meta_data_buf, enum sdmx_buf_mode data_buf_mode,
+	u32 num_data_bufs, struct sdmx_data_buff_descr *data_bufs,
+	int *filter_handle, enum sdmx_raw_out_format ts_out_format, u32 flags)
+{
+	return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_remove_filter(int session_handle, int filter_handle)
+{
+	return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_set_kl_ind(int session_handle, u16 pid,
+	u32 key_ladder_index)
+{
+	return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_add_raw_pid(int session_handle, int filter_handle,
+	u16 pid)
+{
+	return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_remove_raw_pid(int session_handle, int filter_handle,
+	u16 pid)
+{
+	return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_process(int session_handle, u8 flags,
+	struct sdmx_buff_descr *input_buf_desc,
+	u32 *input_fill_count, u32 *input_read_offset,
+	u32 *error_indicators,
+	u32 *status_indicators,
+	u32 num_filters,
+	struct sdmx_filter_status *filter_status)
+{
+	*status_indicators = 0;
+	*error_indicators = 0;
+	return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_get_dbg_counters(int session_handle,
+	struct sdmx_session_dbg_counters *session_counters,
+	u32 *num_filters,
+	struct sdmx_filter_dbg_counters *filter_counters)
+{
+	return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_reset_dbg_counters(int session_handle)
+{
+	return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_set_log_level(int session_handle,
+	enum sdmx_log_level level)
+{
+	return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+#endif
+
+#endif /* _MPQ_SDMX_H */
diff --git a/drivers/media/platform/msm/dvb/include/mpq_adapter.h b/drivers/media/platform/msm/dvb/include/mpq_adapter.h
new file mode 100644
index 0000000..c55a5aa
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/include/mpq_adapter.h
@@ -0,0 +1,222 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_ADAPTER_H
+#define _MPQ_ADAPTER_H
+
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "mpq_stream_buffer.h"
+
+
+
+/** IDs of interfaces holding stream-buffers */
+enum mpq_adapter_stream_if {
+	/** Interface holding stream-buffer for video0 stream */
+	MPQ_ADAPTER_VIDEO0_STREAM_IF = 0,
+
+	/** Interface holding stream-buffer for video1 stream */
+	MPQ_ADAPTER_VIDEO1_STREAM_IF = 1,
+
+	/** Interface holding stream-buffer for video2 stream */
+	MPQ_ADAPTER_VIDEO2_STREAM_IF = 2,
+
+	/** Interface holding stream-buffer for video3 stream */
+	MPQ_ADAPTER_VIDEO3_STREAM_IF = 3,
+
+	/** Interface holding stream-buffer for audio0 stream */
+	MPQ_ADAPTER_AUDIO0_STREAM_IF = 4,
+
+	/** Interface holding stream-buffer for audio1 stream */
+	MPQ_ADAPTER_AUDIO1_STREAM_IF = 5,
+
+	/** Interface holding stream-buffer for audio2 stream */
+	MPQ_ADAPTER_AUDIO2_STREAM_IF = 6,
+
+	/** Interface holding stream-buffer for audio3 stream */
+	MPQ_ADAPTER_AUDIO3_STREAM_IF = 7,
+
+	/** Maximum number of interfaces holding stream-buffers */
+	MPQ_ADAPTER_MAX_NUM_OF_INTERFACES,
+};
+
+enum dmx_packet_type {
+	DMX_PES_PACKET,
+	DMX_FRAMING_INFO_PACKET,
+	DMX_EOS_PACKET,
+	DMX_MARKER_PACKET
+};
+
+struct dmx_pts_dts_info {
+	/** Indication whether PTS exist */
+	int pts_exist;
+
+	/** Indication whether DTS exist */
+	int dts_exist;
+
+	/** PTS value associated with the PES data if any */
+	u64 pts;
+
+	/** DTS value associated with the PES data if any */
+	u64 dts;
+};
+
+struct dmx_framing_packet_info {
+	/** framing pattern type, one of DMX_IDX_* definitions */
+	u64 pattern_type;
+
+	/** PTS/DTS information */
+	struct dmx_pts_dts_info pts_dts_info;
+
+	/** STC value attached to first TS packet holding the pattern */
+	u64 stc;
+
+	/*
+	 * Number of TS packets with Transport Error Indicator (TEI)
+	 * found while constructing the frame.
+	 */
+	__u32 transport_error_indicator_counter;
+
+	/* Number of continuity errors found while constructing the frame */
+	__u32 continuity_error_counter;
+
+	/*
+	 * Number of dropped bytes due to insufficient buffer space,
+	 * since last reported frame.
+	 */
+	__u32 ts_dropped_bytes;
+
+	/* Total number of TS packets holding the frame */
+	__u32 ts_packets_num;
+};
+
+struct dmx_pes_packet_info {
+	/** PTS/DTS information */
+	struct dmx_pts_dts_info pts_dts_info;
+
+	/** STC value attached to first TS packet holding the PES */
+	u64 stc;
+};
+
+struct dmx_marker_info {
+	/* marker id */
+	u64 id;
+};
+
+/** The meta-data used for video interface */
+struct mpq_adapter_video_meta_data {
+	/** meta-data packet type */
+	enum dmx_packet_type packet_type;
+
+	/** packet-type specific information */
+	union {
+		struct dmx_framing_packet_info framing;
+		struct dmx_pes_packet_info pes;
+		struct dmx_marker_info marker;
+	} info;
+} __packed;
+
+/** The meta-data used for audio interface */
+struct mpq_adapter_audio_meta_data {
+	/** meta-data packet type */
+	enum dmx_packet_type packet_type;
+
+	/** packet-type specific information */
+	union {
+		struct dmx_pes_packet_info pes;
+		struct dmx_marker_info marker;
+	} info;
+} __packed;
+
+/** Callback function to notify on registrations of specific interfaces */
+typedef void (*mpq_adapter_stream_if_callback)(
+				enum mpq_adapter_stream_if interface_id,
+				void *user_param);
+
+
+/**
+ * mpq_adapter_get - Returns pointer to Qualcomm Technologies Inc. DVB adapter
+ *
+ * Return     dvb adapter or NULL if it does not exist.
+ */
+struct dvb_adapter *mpq_adapter_get(void);
+
+
+/**
+ * mpq_adapter_register_stream_if - Register a stream interface.
+ *
+ * @interface_id: The interface id
+ * @stream_buffer: The buffer used for the interface
+ *
+ * Return     error status
+ *
+ * A stream interface is used to connect two units in tunneling
+ * mode using the mpq_streambuffer implementation.
+ * The producer of the interface should register the new interface;
+ * the consumer may get the interface using mpq_adapter_get_stream_if.
+ *
+ * Note that the function holds a pointer to this interface; the
+ * stream_buffer pointer is assumed to be valid as long as the interface
+ * is active.
+ */
+int mpq_adapter_register_stream_if(
+		enum mpq_adapter_stream_if interface_id,
+		struct mpq_streambuffer *stream_buffer);
+
+
+/**
+ * mpq_adapter_unregister_stream_if - Un-register a stream interface.
+ *
+ * @interface_id: The interface id
+ *
+ * Return     error status
+ */
+int mpq_adapter_unregister_stream_if(
+		enum mpq_adapter_stream_if interface_id);
+
+
+/**
+ * mpq_adapter_get_stream_if - Get buffer used for a stream interface.
+ *
+ * @interface_id: The interface id
+ * @stream_buffer: The returned stream buffer
+ *
+ * Return     error status
+ */
+int mpq_adapter_get_stream_if(
+		enum mpq_adapter_stream_if interface_id,
+		struct mpq_streambuffer **stream_buffer);
+
+
+/**
+ * mpq_adapter_notify_stream_if - Register notification
+ * to be triggered when a stream interface is registered.
+ *
+ * @interface_id: The interface id
+ * @callback: The callback to be triggered when the interface is registered
+ * @user_param: A parameter that is passed back to the callback function
+ *				when triggered.
+ *
+ * Return     error status
+ *
+ * A producer may use this to register for notification when the desired
+ * interface is registered in the system, and query its information
+ * afterwards using mpq_adapter_get_stream_if.
+ * To remove the callback, this function should be called with a NULL
+ * value in the callback parameter.
+ */
+int mpq_adapter_notify_stream_if(
+		enum mpq_adapter_stream_if interface_id,
+		mpq_adapter_stream_if_callback callback,
+		void *user_param);
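+
+/*
+ * Illustrative sketch (assumed usage, not part of this API): a consumer of the
+ * video0 interface may ask to be notified when the producer registers it and
+ * then fetch the stream buffer. What is done with the returned buffer is left
+ * to the caller.
+ *
+ *	static void on_video0_ready(enum mpq_adapter_stream_if id, void *priv)
+ *	{
+ *		struct mpq_streambuffer *sbuf;
+ *
+ *		if (!mpq_adapter_get_stream_if(id, &sbuf))
+ *			pr_debug("video0 stream buffer is ready\n");
+ *	}
+ *
+ *	mpq_adapter_notify_stream_if(MPQ_ADAPTER_VIDEO0_STREAM_IF,
+ *		on_video0_ready, NULL);
+ */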
+
+#endif /* _MPQ_ADAPTER_H */
diff --git a/drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h b/drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h
new file mode 100644
index 0000000..6550ddd
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_DVB_DEBUG_H
+#define _MPQ_DVB_DEBUG_H
+
+/* Enable this line if you want to output debug printouts */
+#define MPG_DVB_DEBUG_ENABLE
+
+#undef MPQ_DVB_DBG_PRINT		/* undef it, just in case */
+
+#ifdef MPG_DVB_DEBUG_ENABLE
+#define MPQ_DVB_ERR_PRINT(fmt, args...) pr_err(fmt, ## args)
+#define MPQ_DVB_WARN_PRINT(fmt, args...) pr_warn(fmt, ## args)
+#define MPQ_DVB_NOTICE_PRINT(fmt, args...) pr_notice(fmt, ## args)
+#define MPQ_DVB_DBG_PRINT(fmt, args...) pr_debug(fmt, ## args)
+#else  /* MPG_DVB_DEBUG_ENABLE */
+#define MPQ_DVB_ERR_PRINT(fmt, args...)
+#define MPQ_DVB_WARN_PRINT(fmt, args...)
+#define MPQ_DVB_NOTICE_PRINT(fmt, args...)
+#define MPQ_DVB_DBG_PRINT(fmt, args...)
+#endif /* MPG_DVB_DEBUG_ENABLE */
+
+
+/*
+ * The following can be used to disable a specific printout
+ * by adding a letter to the end of MPQ_DVB_DBG_PRINT
+ */
+#undef MPQ_DVB_DBG_PRINTT
+#define MPQ_DVB_DBG_PRINTT(fmt, args...)
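+
+/*
+ * Example (illustrative): to silence one specific noisy printout without
+ * disabling debug output globally, append the extra letter at that call
+ * site only:
+ *
+ *	MPQ_DVB_DBG_PRINTT("%s: tsif %d polled\n", __func__, i);
+ */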
+
+#endif /* _MPQ_DVB_DEBUG_H */
diff --git a/drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h b/drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h
new file mode 100644
index 0000000..6240451
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h
@@ -0,0 +1,494 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_STREAM_BUFFER_H
+#define _MPQ_STREAM_BUFFER_H
+
+#include "dvb_ringbuffer.h"
+
+/**
+ * DOC: MPQ Stream Buffer
+ *
+ * A stream buffer implementation is used to transfer data between two units
+ * such as demux and decoders. The implementation relies on dvb_ringbuffer
+ * implementation. Refer to dvb_ringbuffer.h for details.
+ *
+ * The implementation uses two dvb_ringbuffers, one to pass the
+ * raw-data (PES payload for example) and the other to pass
+ * meta-data (information from PES header for example).
+ *
+ * The meta-data uses dvb_ringbuffer packet interface. Each meta-data
+ * packet points to the data buffer, and includes the offset to the data in the
+ * buffer, the size of raw-data described by the meta-data packet, and also the
+ * size of user's own parameters if any required.
+ *
+ * Data can be managed in two ways: ring-buffer & linear buffers, as specified
+ * in initialization when calling the mpq_streambuffer_init function.
+ * For managing data as a ring buffer exactly 1 data buffer descriptor must be
+ * specified in initialization. For this mode, dvb_ringbuffer is used "as-is".
+ * For managing data in several linear buffers, an array of buffer descriptors
+ * must be passed.
+ * For both modes, data descriptor(s) must remain valid throughout the life
+ * span of the mpq_streambuffer object.
+ * Apart from initialization, the API remains the same for both modes.
+ *
+ * Contrary to dvb_ringbuffer implementation, this API makes sure there's
+ * enough data to read/write when making read/write operations.
+ * Users interested in flushing/resetting a specific buffer, checking for bytes
+ * ready or for space available for write should use the respective services
+ * in dvb_ringbuffer (dvb_ringbuffer_avail, dvb_ringbuffer_free,
+ * dvb_ringbuffer_reset, dvb_ringbuffer_flush,
+ * dvb_ringbuffer_flush_spinlock_wakeup).
+ *
+ * Concurrency protection is handled in the same manner as in
+ * dvb_ringbuffer implementation.
+ *
+ * Typical call flow from producer:
+ *
+ * - Start writing the raw-data of a new packet; the following call is
+ *   repeated until the end of the specific packet's data:
+ *
+ *      mpq_streambuffer_data_write(...)
+ *
+ * - Now write a new packet describing the new available raw-data
+ *      mpq_streambuffer_pkt_write(...)
+ *
+ *   For linear buffer mode, writing a new packet with data size > 0 causes the
+ *   current buffer to be marked as pending for reading, and triggers moving to
+ *   the next available buffer, which then becomes the current write buffer.
+ *
+ * Typical call flow from consumer:
+ *
+ * - Poll for next available packet:
+ *      mpq_streambuffer_pkt_next(&streambuff,-1,&len)
+ *
+ *   Alternatively, the consumer can wait on an event for new data and then
+ *   call mpq_streambuffer_pkt_next; waiting for data can be done as follows:
+ *
+ *      wait_event_interruptible(
+ *			streambuff->packet_data.queue,
+ *			!dvb_ringbuffer_empty(&streambuff->packet_data) ||
+ *			(streambuff->packet_data.error != 0));
+ *
+ * - Get the new packet information:
+ *      mpq_streambuffer_pkt_read(..)
+ *
+ * - Read the raw-data of the new packet. Here you can use two methods:
+ *
+ *   1. Read the data to a user supplied buffer:
+ *         mpq_streambuffer_data_read()
+ *
+ *      In this case a memory copy is done, the read pointer is updated in the
+ *      raw data buffer, and the amount of raw-data is provided as part of the
+ *      packet's information. The user should then call
+ *      mpq_streambuffer_pkt_dispose with dispose_data set to 0 as the raw-data
+ *      was already disposed.
+ *      Note that a secure buffer cannot be accessed directly and an error will
+ *      occur.
+ *
+ *   2. Access the data directly using the raw-data address. The address
+ *      of the raw data is provided as part of the packet's information. The
+ *      user should then call mpq_streambuffer_pkt_dispose with dispose_data
+ *      set to 1 to dispose of the packet along with its raw-data.
+ *
+ * - Disposal of packets:
+ *      mpq_streambuffer_pkt_dispose(...)
+ *
+ *   For linear buffer mode, disposing of a packet with data size > 0,
+ *   regardless of the 'dispose_data' parameter, causes the current buffer's
+ *   data to be disposed and marked as free for writing, and triggers moving to
+ *   the next available buffer, which then becomes the current read buffer.
+ */
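+
+/*
+ * A condensed producer/consumer sketch of the flow above (illustrative only;
+ * buffer allocation, header bookkeeping such as raw_data_handle/offset, and
+ * error handling are assumed to be handled elsewhere by the caller):
+ *
+ *	Producer:
+ *		struct mpq_streambuffer_packet_header hdr = {
+ *			.user_data_len = 0,
+ *			.raw_data_len = payload_len,
+ *		};
+ *
+ *		mpq_streambuffer_data_write(sbuff, payload, payload_len);
+ *		mpq_streambuffer_pkt_write(sbuff, &hdr, NULL);
+ *
+ *	Consumer:
+ *		size_t len;
+ *		ssize_t idx = mpq_streambuffer_pkt_next(sbuff, -1, &len);
+ *
+ *		if (idx >= 0) {
+ *			mpq_streambuffer_pkt_read(sbuff, idx, &hdr, NULL);
+ *			mpq_streambuffer_data_read(sbuff, buf, hdr.raw_data_len);
+ *			mpq_streambuffer_pkt_dispose(sbuff, idx, 0);
+ *		}
+ */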
+
+struct mpq_streambuffer;
+struct mpq_streambuffer_packet_header;
+
+typedef void (*mpq_streambuffer_dispose_cb) (
+	struct mpq_streambuffer *sbuff,
+	u32 offset,
+	size_t len,
+	void *user_data);
+
+enum mpq_streambuffer_mode {
+	MPQ_STREAMBUFFER_BUFFER_MODE_RING,
+	MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR
+};
+
+/**
+ * struct mpq_streambuffer - mpq stream buffer representation
+ *
+ * @raw_data: The buffer used to hold raw-data, or linear buffer descriptors
+ * @packet_data: The buffer used to hold the meta-data
+ * @buffers: array of buffer descriptor(s) holding buffer initial & dynamic
+ *	     buffer information
+ * @mode: mpq_streambuffer buffer management work mode - Ring-buffer or Linear
+ *	  buffers
+ * @buffers_num: number of data buffers to manage
+ * @pending_buffers_count: for linear buffer management, counts the number of
+ *	buffers that have been written to and are pending to be read
+ * @cb: user callback to notify when data is disposed, if registered
+ * @cb_user_data: user data passed back to the disposal callback
+ */
+struct mpq_streambuffer {
+	struct dvb_ringbuffer raw_data;
+	struct dvb_ringbuffer packet_data;
+	struct mpq_streambuffer_buffer_desc *buffers;
+	enum mpq_streambuffer_mode mode;
+	u32 buffers_num;
+	u32 pending_buffers_count;
+	mpq_streambuffer_dispose_cb cb;
+	void *cb_user_data;
+};
+
+/**
+ * struct mpq_streambuffer_buffer_desc - data buffer descriptor
+ * @handle:	ION handle's file descriptor of buffer
+ * @base:	kernel mapped address to start of buffer.
+ *		Can be NULL for secured buffers
+ * @size:	size of buffer
+ * @read_ptr:	initial read pointer value (should normally be 0)
+ * @write_ptr:	initial write pointer value (should normally be 0)
+ */
+struct mpq_streambuffer_buffer_desc {
+	int	handle;
+	void	*base;
+	u32	size;
+	u32	read_ptr;
+	u32	write_ptr;
+};
+
+/**
+ * struct mpq_streambuffer_packet_header - packet header saved in packet buffer
+ * @user_data_len: length of private user (meta) data
+ * @raw_data_handle: ION handle's file descriptor of raw-data buffer
+ * @raw_data_offset: offset of raw-data from start of buffer (0 for linear)
+ * @raw_data_len: size of raw-data in the raw-data buffer (can be 0)
+ *
+ * The packet structure that is saved in each packet-buffer:
+ * user_data_len
+ * raw_data_handle
+ * raw_data_offset
+ * raw_data_len
+ * private user-data bytes
+ */
+struct mpq_streambuffer_packet_header {
+	u32 user_data_len;
+	int raw_data_handle;
+	u32 raw_data_offset;
+	u32 raw_data_len;
+} __packed;
+
+/**
+ * mpq_streambuffer_init - Initialize a new stream buffer
+ *
+ * @sbuff: The buffer to initialize
+ * @data_buffers: array of data buffer descriptor(s).
+ *		  Data descriptor(s) must remain valid throughout the life
+ *		  span of the mpq_streambuffer object
+ * @data_buff_num: number of data buffer in array
+ * @packet_buff: The buffer holding meta-data
+ * @packet_buff_size: Size of meta-data buffer
+ *
+ * Return	Error status, -EINVAL if any of the arguments are invalid
+ *
+ * Note:
+ * for data_buff_num > 1, the mpq_streambuffer object manages these buffers as
+ * a separate set of linear buffers. A linear buffer cannot wrap-around and one
+ * can only write as many data bytes as the buffer's size. Data will not be
+ * written to the next free buffer.
+ */
+int mpq_streambuffer_init(
+		struct mpq_streambuffer *sbuff,
+		enum mpq_streambuffer_mode mode,
+		struct mpq_streambuffer_buffer_desc *data_buffers,
+		u32 data_buff_num,
+		void *packet_buff,
+		size_t packet_buff_size);
+
+/**
+ * mpq_streambuffer_terminate - Terminate stream buffer
+ *
+ * @sbuff: The buffer to terminate
+ *
+ * The function sets the buffers' error flags to ENODEV
+ * and wakes up any threads waiting on the buffer queues.
+ * Threads waiting on the buffer queues should check if
+ * error was set.
+ */
+void mpq_streambuffer_terminate(struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_packet_next - Returns index of next available packet.
+ *
+ * @sbuff: The stream buffer
+ * @idx: Previous packet index or -1 to return the index of the first
+ *       available packet.
+ * @pktlen: The length of the ready packet
+ *
+ * Return index to the packet-buffer, -1 if buffer is empty
+ *
+ * After getting the index, the user of this function can either
+ * access the packet buffer directly using the returned index
+ * or ask to read the data back from the buffer using mpq_streambuffer_pkt_read
+ */
+ssize_t mpq_streambuffer_pkt_next(
+		struct mpq_streambuffer *sbuff,
+		ssize_t idx, size_t *pktlen);
+
+/**
+ * mpq_streambuffer_pkt_read - Reads out the packet from the provided index.
+ *
+ * @sbuff: The stream buffer
+ * @idx: The index of the packet to be read
+ * @packet: The read packet's header
+ * @user_data: The read private user data
+ *
+ * Return  The actual number of bytes read, -EINVAL if the packet is
+ * already disposed or the packet-data is invalid.
+ *
+ * The packet is not disposed after this function is called, to dispose it
+ * along with the raw-data it points to use mpq_streambuffer_pkt_dispose.
+ * If there is no private user-data, the user-data pointer can be NULL.
+ * The caller of this function must make sure that the private user-data
+ * buffer has enough space for the private user-data length
+ */
+ssize_t mpq_streambuffer_pkt_read(
+		struct mpq_streambuffer *sbuff,
+		size_t idx,
+		struct mpq_streambuffer_packet_header *packet,
+		u8 *user_data);
+
+/**
+ * mpq_streambuffer_pkt_dispose - Disposes a packet from the packet buffer
+ *
+ * @sbuff: The stream buffer
+ * @idx: The index of the packet to be disposed
+ * @dispose_data: Indicates whether to update the read pointer inside the
+ * raw-data buffer for the respective data pointed to by the packet.
+ *
+ * Return  error status, -EINVAL if the packet-data is invalid
+ *
+ * The function updates the read pointer inside the raw-data buffer
+ * for the respective data pointed to by the packet if dispose_data is set.
+ */
+int mpq_streambuffer_pkt_dispose(
+		struct mpq_streambuffer *sbuff,
+		size_t idx,
+		int dispose_data);
+
+/**
+ * mpq_streambuffer_pkt_write - Write a new packet to the packet buffer.
+ *
+ * @sbuff: The stream buffer
+ * @packet: The packet header to write
+ * @user_data: The private user-data to be written
+ *
+ * Return  error status, -ENOSPC if there's no space to write the packet
+ */
+int mpq_streambuffer_pkt_write(
+		struct mpq_streambuffer *sbuff,
+		struct mpq_streambuffer_packet_header *packet,
+		u8 *user_data);
+
+/**
+ * mpq_streambuffer_data_write - Write data to raw-data buffer
+ *
+ * @sbuff: The stream buffer
+ * @buf: The buffer holding the data to be written
+ * @len: The length of the data buffer
+ *
+ * Return  The actual number of bytes written or -ENOSPC if there is
+ *			no space to write the data
+ */
+ssize_t mpq_streambuffer_data_write(
+		struct mpq_streambuffer *sbuff,
+		const u8 *buf, size_t len);
+
+/**
+ * mpq_streambuffer_data_write_deposit - Advances the raw-buffer write pointer.
+ * Assumes the raw-data was written by the user directly
+ *
+ * @sbuff: The stream buffer
+ * @len: The length of the raw-data that was already written
+ *
+ * Return  error status
+ */
+int mpq_streambuffer_data_write_deposit(
+		struct mpq_streambuffer *sbuff,
+		size_t len);
+
+/**
+ * mpq_streambuffer_data_read - Reads out raw-data to the provided buffer.
+ *
+ * @sbuff: The stream buffer
+ * @buf: The buffer to read the raw-data into
+ * @len: The length of the buffer that will hold the raw-data
+ *
+ * Return  The actual number of bytes read or error code
+ *
+ * This function copies the data from the ring-buffer to the
+ * provided buf parameter. The user can save the extra copy by accessing
+ * the data pointer directly and reading from it, then updating the
+ * read pointer by the amount of data that was read using
+ * mpq_streambuffer_data_read_dispose
+ */
+ssize_t mpq_streambuffer_data_read(
+		struct mpq_streambuffer *sbuff,
+		u8 *buf, size_t len);
+
+/**
+ * mpq_streambuffer_data_read_user
+ *
+ * Same as mpq_streambuffer_data_read except data can be copied to user-space
+ * buffer.
+ */
+ssize_t mpq_streambuffer_data_read_user(
+		struct mpq_streambuffer *sbuff,
+		u8 __user *buf, size_t len);
+
+/**
+ * mpq_streambuffer_data_read_dispose - Advances the raw-buffer read pointer.
+ * Assumes the raw-data was read by the user directly.
+ *
+ * @sbuff:	The stream buffer
+ * @len:	The length of the raw-data to be disposed
+ *
+ * Return  error status, -EINVAL if there is not enough data in the buffer
+ *			to be disposed
+ *
+ * The user can instead dispose a packet along with the data in the
+ * raw-data buffer using mpq_streambuffer_pkt_dispose.
+ */
+int mpq_streambuffer_data_read_dispose(
+		struct mpq_streambuffer *sbuff,
+		size_t len);
+/**
+ * mpq_streambuffer_get_buffer_handle - Returns the current linear buffer
+ * ION handle.
+ * @sbuff: The stream buffer
+ * @read_buffer: specifies if a read buffer handle is requested (when set),
+ *		 or a write buffer handle is requested.
+ *		 For linear buffer mode read & write buffers may be different
+ *		 buffers. For ring buffer mode, the same (single) buffer handle
+ *		 is returned.
+ * @handle: returned handle
+ *
+ * Return error status
+ * -EINVAL if arguments are invalid.
+ * -EPERM if stream buffer specified was not initialized with linear support.
+ */
+int mpq_streambuffer_get_buffer_handle(
+	struct mpq_streambuffer *sbuff,
+	int read_buffer,
+	int *handle);
+
+/**
+ * mpq_streambuffer_data_free - Returns number of free bytes in data buffer.
+ * @sbuff: The stream buffer object
+ *
+ * Note: for linear buffer management this returns the number of free bytes
+ * in the current write buffer only.
+ */
+ssize_t mpq_streambuffer_data_free(
+	struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_data_avail - Returns number of bytes in data buffer that
+ * can be read.
+ * @sbuff: The stream buffer object
+ *
+ * Note: for linear buffer management this returns the number of data bytes
+ * in the current read buffer only.
+ */
+ssize_t mpq_streambuffer_data_avail(
+	struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_register_data_dispose - Registers a callback to notify on
+ * data disposal events.
+ * @sbuff: The stream buffer object
+ * @cb_func: user callback function
+ * @user_data: user data to be passed to callback function.
+ *
+ * Returns error status
+ * -EINVAL if arguments are invalid
+ */
+int mpq_streambuffer_register_data_dispose(
+	struct mpq_streambuffer *sbuff,
+	mpq_streambuffer_dispose_cb cb_func,
+	void *user_data);
+
+/**
+ * mpq_streambuffer_get_data_rw_offset - returns read/write offsets of current
+ * data buffer.
+ * @sbuff: The stream buffer object
+ * @read_offset: returned read offset
+ * @write_offset: returned write offset
+ *
+ * Note: read offset or write offset may be NULL if not required.
+ * Returns error status
+ * -EINVAL if arguments are invalid
+ */
+int mpq_streambuffer_get_data_rw_offset(
+	struct mpq_streambuffer *sbuff,
+	u32 *read_offset,
+	u32 *write_offset);
+
+/**
+ * mpq_streambuffer_metadata_free - returns number of free bytes in the meta
+ * data buffer, or error status.
+ * @sbuff: the stream buffer object
+ */
+ssize_t mpq_streambuffer_metadata_free(struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_flush - flush both pending packets and data in buffer
+ *
+ * @sbuff: the stream buffer object
+ *
+ * Returns error status
+ */
+int mpq_streambuffer_flush(struct mpq_streambuffer *sbuff);
+
+/*
+ *  ------------------------------------------------------
+ *  Consumer or AV Decoder Stream Interface to Ring Buffer
+ *  ------------------------------------------------------
+ *  Producer is Demux Driver
+ *  ------------------------
+ *
+ *  call from Audio/Video Decoder Driver to find Audio/Video
+ *  streambuffer AV handles, "DMX_PES_AUDIO0 through 3" or
+ *  "DMX_PES_VIDEO0 through 3" interfaces corresponding to 4 programs.
+ */
+
+/* Call from the Audio/Video Decoder Driver via POLLING to consume
+ * headers and compressed data from the ring buffer using the streambuffer
+ * handle. The hdrdata[] and cdata[] buffers have to be malloc'd by the consumer.
+ *
+ *  --------------------------
+ *  Consumer Calling Sequence
+ *  --------------------------
+ *  Find the streambuffer corresponding to a DMX TS PES stream instance.
+ *  1. consumer_audio_streambuffer() or consumer_video_streambuffer()
+ *  Process the packet headers if required.
+ *  2. mpq_read_new_packet_hdr_data()
+ *  Process the compressed data by forwarding to AV decoder.
+ *  3. mpq_read_new_packet_compressed_data()
+ *  Dispose the packet.
+ *  4. mpq_dispose_new_packet_read()
+ *
+ *  The Audio/Video drivers (or consumers) require the stream_buffer information
+ *  for consuming packet headers and compressed AV data from the
+ *  ring buffer filled by the demux driver, which is the producer.
+ */
+
+#endif /* _MPQ_STREAM_BUFFER_H */
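The consumer calling sequence above maps directly onto the packet API declared
in this header. The following is a minimal consumer-side sketch (not part of the
patch itself), assuming a valid struct mpq_streambuffer pointer obtained
elsewhere, e.g. through the consumer_video_streambuffer() lookup mentioned
above; the size of the private user-data buffer and the error handling are
illustrative only.

	/* Drain all packets currently queued in a stream buffer. */
	static void drain_packets(struct mpq_streambuffer *sbuff)
	{
		struct mpq_streambuffer_packet_header pkt;
		u8 user_data[32];	/* assumed big enough for private user-data */
		ssize_t idx;
		size_t pktlen;

		/* -1 asks for the index of the first available packet */
		while ((idx = mpq_streambuffer_pkt_next(sbuff, -1, &pktlen)) >= 0) {
			/* Read the packet header and its private user-data */
			if (mpq_streambuffer_pkt_read(sbuff, idx, &pkt, user_data) < 0)
				break;

			/* ... forward pkt and its raw-data to the decoder here ... */

			/* Dispose the packet and advance the raw-data read pointer */
			if (mpq_streambuffer_pkt_dispose(sbuff, idx, 1))
				break;
		}
	}
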
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index a850bc0..a195c15 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -2958,7 +2958,7 @@
 	*pmgr = mgr;
 	ret = sde_rotator_footswitch_ctrl(mgr, true);
 	if (ret) {
-		SDEROT_ERR("res_init failed %d\n", ret);
+		SDEROT_INFO("res_init failed %d, use probe defer\n", ret);
 		ret = -EPROBE_DEFER;
 		goto error_fs_en_fail;
 	}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index f2778b0..d300de2 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -3291,7 +3291,10 @@
 
 	ret = sde_rotator_core_init(&rot_dev->mgr, pdev);
 	if (ret < 0) {
-		SDEDEV_ERR(&pdev->dev, "fail init core %d\n", ret);
+		if (ret == -EPROBE_DEFER)
+			SDEDEV_INFO(&pdev->dev, "probe defer for core init\n");
+		else
+			SDEDEV_ERR(&pdev->dev, "fail init core %d\n", ret);
 		goto error_rotator_core_init;
 	}
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index aa6c522..743d2f7 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -57,6 +57,9 @@
 
 #define DEFAULT_MAXLINEWIDTH	4096
 
+/* stride alignment requirement for avoiding partial writes */
+#define PARTIAL_WRITE_ALIGNMENT	0x1F
+
 /* Macro for constructing the REGDMA command */
 #define SDE_REGDMA_WRITE(p, off, data) \
 	do { \
@@ -869,6 +872,8 @@
 	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
 	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
 	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
+	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
+			(ctx->rot->highest_bank & 0x3) << 8);
 	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
 	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
 	SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
@@ -1270,7 +1275,7 @@
 	u32 *wrptr;
 	u32 pack = 0;
 	u32 dst_format = 0;
-	u32 partial_write = 0;
+	u32 no_partial_writes = 0;
 	int i;
 
 	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
@@ -1355,12 +1360,34 @@
 			(cfg->h_downscale_factor << 16));
 
 	/* partial write check */
-	if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map) &&
-			!sde_mdp_is_ubwc_format(fmt))
-		partial_write = BIT(10);
+	if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map)) {
+		no_partial_writes = BIT(10);
+
+		/*
+		 * For simplicity, don't disable partial writes if
+		 * the ROI does not span the entire width of the
+		 * output image, and require the total stride to
+		 * also be properly aligned.
+		 *
+		 * This avoids having to determine the memory access
+		 * alignment of the actual horizontal ROI on a per
+		 * color format basis.
+		 */
+		if (sde_mdp_is_ubwc_format(fmt)) {
+			no_partial_writes = 0x0;
+		} else if (cfg->dst_rect->x ||
+				cfg->dst_rect->w != cfg->img_width) {
+			no_partial_writes = 0x0;
+		} else {
+			for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
+				if (cfg->dst_plane.ystride[i] &
+						PARTIAL_WRITE_ALIGNMENT)
+					no_partial_writes = 0x0;
+		}
+	}
 
 	/* write config setup for bank configuration */
-	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, partial_write |
+	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, no_partial_writes |
 			(ctx->rot->highest_bank & 0x3) << 8);
 
 	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
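PARTIAL_WRITE_ALIGNMENT introduced above is the mask 0x1F, so the per-plane test
ystride[i] & PARTIAL_WRITE_ALIGNMENT is non-zero (and leaves partial writes
enabled) whenever a stride is not a multiple of 32 bytes. A standalone sketch of
the same mask test, with hypothetical stride values and kernel bool/u32 types
assumed:

	#define PARTIAL_WRITE_ALIGNMENT	0x1F	/* strides must be 32-byte aligned */

	/*
	 * Return true only if every plane stride is 32-byte aligned;
	 * e.g. a stride of 1920 is aligned while 1922 is not.
	 */
	static bool strides_aligned(const u32 *ystride, int num_planes)
	{
		int i;

		for (i = 0; i < num_planes; i++)
			if (ystride[i] & PARTIAL_WRITE_ALIGNMENT)
				return false;

		return true;
	}
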
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
index e209192..9e47187 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -519,7 +519,8 @@
 	char name[MAX_CLIENT_NAME_LEN];
 
 	if (!mdata) {
-		SDEROT_ERR("probe failed as mdata is not initialized\n");
+		SDEROT_INFO(
+			"probe failed as mdata is not initializedi, probe defer\n");
 		return -EPROBE_DEFER;
 	}
 
diff --git a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
index d329a8b..9daf053 100644
--- a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
+++ b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
@@ -387,13 +387,23 @@
 
 	integer_part = d->compression_ratio >> 16;
 	frac_part =
-		((d->compression_ratio - (integer_part * 65536)) * 100) >> 16;
+		((d->compression_ratio - (integer_part << 16)) * 100) >> 16;
 
 	dpb_read_compression_factor = FP(integer_part, frac_part, 100);
 
+	integer_part = d->complexity_factor >> 16;
+	frac_part =
+		((d->complexity_factor - (integer_part << 16)) * 100) >> 16;
+
+	motion_vector_complexity = FP(integer_part, frac_part, 100);
+
 	dpb_write_compression_factor = !dpb_compression_enabled ? FP_ONE :
 		__compression_ratio(__lut(width, height, fps), opb_bpp);
 
+	dpb_write_compression_factor = d->use_dpb_read ?
+		dpb_read_compression_factor :
+		dpb_write_compression_factor;
+
 	opb_compression_factor = !opb_compression_enabled ? FP_ONE :
 		__compression_ratio(__lut(width, height, fps), opb_bpp);
 
@@ -437,8 +447,6 @@
 			lcu_per_frame * fps / bps(1));
 	ddr.line_buffer_write = ddr.line_buffer_read;
 
-	motion_vector_complexity = FP_INT(4);
-
 	bw_for_1x_8bpc = fp_div(FP_INT(width * height), FP_INT(32 * 8));
 
 	bw_for_1x_8bpc = fp_mult(bw_for_1x_8bpc,
@@ -871,6 +879,7 @@
 	switch (event) {
 	case DEVFREQ_GOV_START:
 	case DEVFREQ_GOV_RESUME:
+	case DEVFREQ_GOV_SUSPEND:
 		mutex_lock(&devfreq->lock);
 		rc = update_devfreq(devfreq);
 		mutex_unlock(&devfreq->lock);
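In the governor hunk above, compression_ratio and complexity_factor are treated
as unsigned Q16.16 fixed-point values: the code splits each into an integer part
and a fractional part expressed in hundredths before rebuilding a fixed-point
operand with FP(). A small self-contained sketch of that split (FP() itself
belongs to the existing fixed-point helpers and is not reproduced here):

	/*
	 * Split a Q16.16 value into an integer part and hundredths.
	 * Example: 0x00018000 (1.5 in Q16.16) gives integer_part = 1,
	 * frac_part = 50, i.e. a ratio of 1.50.
	 */
	static void q16_16_split(u32 q, u32 *integer_part, u32 *frac_part)
	{
		*integer_part = q >> 16;
		*frac_part = ((q - (*integer_part << 16)) * 100) >> 16;
	}
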
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 40c306d..1991a34 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -616,6 +616,9 @@
 	case HAL_EXTRADATA_VPX_COLORSPACE:
 		ret = HFI_PROPERTY_PARAM_VDEC_VPX_COLORSPACE_EXTRADATA;
 		break;
+	case HAL_EXTRADATA_UBWC_CR_STATS_INFO:
+		ret = HFI_PROPERTY_PARAM_VDEC_UBWC_CR_STAT_INFO_EXTRADATA;
+		break;
 	default:
 		dprintk(VIDC_WARN, "Extradata index not found: %d\n", index);
 		break;
@@ -1290,7 +1293,7 @@
 		struct hal_quantization *hal_quant =
 			(struct hal_quantization *) pdata;
 		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_SESSION_QP;
+			HFI_PROPERTY_CONFIG_VENC_FRAME_QP;
 		hfi = (struct hfi_quantization *) &pkt->rg_property_data[1];
 		hfi->qp_packed = hal_quant->qpi | hal_quant->qpp << 8 |
 			hal_quant->qpb << 16;
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 554e89a..acca9f4 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -152,7 +152,7 @@
 		.name = "Extradata Type",
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
-		.maximum = V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE,
+		.maximum = V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO,
 		.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
 		.menu_skip_mask = ~(
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
@@ -179,7 +179,8 @@
 			(1 <<
 			V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_VUI_DISPLAY) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE)
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO)
 			),
 		.qmenu = mpeg_video_vidc_extradata,
 	},
@@ -877,6 +878,7 @@
 		case V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI:
 		case V4L2_MPEG_VIDC_EXTRADATA_VUI_DISPLAY:
 		case V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE:
+		case V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO:
 			inst->bufq[CAPTURE_PORT].num_planes = 2;
 			inst->bufq[CAPTURE_PORT].plane_sizes[EXTRADATA_IDX(2)] =
 				VENUS_EXTRADATA_SIZE(
@@ -1115,6 +1117,13 @@
 							__func__, rc);
 						break;
 					}
+					rc = msm_comm_try_get_bufreqs(inst);
+					if (rc) {
+						dprintk(VIDC_ERR,
+							"%s Failed to get buffer requirements : %d\n",
+							__func__, rc);
+						break;
+					}
 				}
 				inst->clk_data.dpb_fourcc = fourcc;
 				break;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 2ca3e8d..21ad17a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -709,8 +709,8 @@
 			sizes[i] = inst->bufq[OUTPUT_PORT].plane_sizes[i];
 
 		bufreq->buffer_count_actual = *num_buffers;
-		rc = set_buffer_count(inst, bufreq->buffer_count_actual,
-			*num_buffers, HAL_BUFFER_INPUT);
+		rc = set_buffer_count(inst, bufreq->buffer_count_min_host,
+			bufreq->buffer_count_actual, HAL_BUFFER_INPUT);
 		}
 
 		break;
@@ -743,8 +743,8 @@
 			sizes[i] = inst->bufq[CAPTURE_PORT].plane_sizes[i];
 
 		bufreq->buffer_count_actual = *num_buffers;
-		rc = set_buffer_count(inst, bufreq->buffer_count_actual,
-			*num_buffers, buffer_type);
+		rc = set_buffer_count(inst, bufreq->buffer_count_min_host,
+			bufreq->buffer_count_actual, buffer_type);
 		}
 		break;
 	default:
@@ -837,13 +837,13 @@
 		b.buffer_type = HAL_BUFFER_OUTPUT;
 	}
 
+	rc = msm_comm_try_get_bufreqs(inst);
+
 	b.buffer_size = inst->bufq[CAPTURE_PORT].plane_sizes[0];
 	rc = call_hfi_op(hdev, session_set_property,
 			inst->session, HAL_PARAM_BUFFER_SIZE_MINIMUM,
 			&b);
 
-	rc = msm_comm_try_get_bufreqs(inst);
-
 	/* Verify if buffer counts are correct */
 	rc = msm_vidc_verify_buffer_counts(inst);
 	if (rc) {
@@ -918,24 +918,6 @@
 	}
 
 fail_start:
-	if (rc) {
-		struct msm_vidc_buffer *temp, *next;
-
-		mutex_lock(&inst->registeredbufs.lock);
-		list_for_each_entry_safe(temp, next,
-				&inst->registeredbufs.list, list) {
-			struct vb2_buffer *vb;
-
-			print_vidc_buffer(VIDC_ERR, "return buf", inst, temp);
-			vb = msm_comm_get_vb_using_vidc_buffer(inst, temp);
-			if (vb)
-				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
-			msm_comm_unmap_vidc_buffer(inst, temp);
-			list_del(&temp->list);
-			kfree(temp);
-		}
-		mutex_unlock(&inst->registeredbufs.lock);
-	}
 	return rc;
 }
 
@@ -987,6 +969,35 @@
 	}
 
 stream_start_failed:
+	if (rc) {
+		struct msm_vidc_buffer *temp, *next;
+		struct vb2_buffer *vb;
+
+		mutex_lock(&inst->registeredbufs.lock);
+		list_for_each_entry_safe(temp, next, &inst->registeredbufs.list,
+					list) {
+			if (temp->vvb.vb2_buf.type != q->type)
+				continue;
+			/*
+			 * queued_list lock is already acquired before
+			 * vb2_stream so no need to acquire it again.
+			 */
+			list_for_each_entry(vb, &q->queued_list, queued_entry) {
+				if (msm_comm_compare_vb2_planes(inst, temp,
+						vb)) {
+					print_vb2_buffer(VIDC_ERR, "return vb",
+						inst, vb);
+					vb2_buffer_done(vb,
+						VB2_BUF_STATE_QUEUED);
+					break;
+				}
+			}
+			msm_comm_unmap_vidc_buffer(inst, temp);
+			list_del(&temp->list);
+			kfree(temp);
+		}
+		mutex_unlock(&inst->registeredbufs.lock);
+	}
 	return rc;
 }
 
@@ -1223,29 +1234,6 @@
 	return rc;
 }
 
-static int set_actual_buffer_count(struct msm_vidc_inst *inst,
-	int count, enum hal_buffer type)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct hal_buffer_count_actual buf_count;
-
-	hdev = inst->core->device;
-
-	buf_count.buffer_type = type;
-	buf_count.buffer_count_min_host = count;
-	buf_count.buffer_count_actual = count;
-	rc = call_hfi_op(hdev, session_set_property,
-		inst->session, HAL_PARAM_BUFFER_COUNT_ACTUAL,
-		&buf_count);
-	if (rc)
-		dprintk(VIDC_ERR,
-			"Failed to set actual count %d for buffer type %d\n",
-			count, type);
-	return rc;
-}
-
-
 static int msm_vidc_get_count(struct msm_vidc_inst *inst,
 	struct v4l2_ctrl *ctrl)
 {
@@ -1270,13 +1258,20 @@
 				"Buffer count Host changed from %d to %d\n",
 					bufreq->buffer_count_min_host,
 					ctrl->val);
-			bufreq->buffer_count_min_host = ctrl->val;
+			bufreq->buffer_count_actual =
+			bufreq->buffer_count_min =
+			bufreq->buffer_count_min_host =
+				ctrl->val;
 		} else {
 			ctrl->val = bufreq->buffer_count_min_host;
 		}
-		rc = set_actual_buffer_count(inst,
-				bufreq->buffer_count_min_host,
+		rc = set_buffer_count(inst,
+			bufreq->buffer_count_min_host,
+			bufreq->buffer_count_actual,
 			HAL_BUFFER_INPUT);
+
+		msm_vidc_update_host_buff_counts(inst);
+		ctrl->val = bufreq->buffer_count_min_host;
 		return rc;
 
 	} else if (ctrl->id == V4L2_CID_MIN_BUFFERS_FOR_CAPTURE) {
@@ -1297,31 +1292,37 @@
 				return 0;
 		}
 
-
-		if (inst->in_reconfig) {
-			ctrl->val = bufreq->buffer_count_min;
-		}
 		if (inst->session_type == MSM_VIDC_DECODER &&
 				!inst->in_reconfig &&
 			inst->state < MSM_VIDC_LOAD_RESOURCES_DONE) {
 			dprintk(VIDC_DBG,
 				"Clients updates Buffer count from %d to %d\n",
 				bufreq->buffer_count_min_host, ctrl->val);
-			bufreq->buffer_count_min_host = ctrl->val;
+			bufreq->buffer_count_actual =
+			bufreq->buffer_count_min =
+			bufreq->buffer_count_min_host =
+				ctrl->val;
 		}
 		if (ctrl->val > bufreq->buffer_count_min_host) {
 			dprintk(VIDC_DBG,
 				"Buffer count Host changed from %d to %d\n",
 				bufreq->buffer_count_min_host,
 				ctrl->val);
-			bufreq->buffer_count_min_host = ctrl->val;
+			bufreq->buffer_count_actual =
+			bufreq->buffer_count_min =
+			bufreq->buffer_count_min_host =
+				ctrl->val;
 		} else {
 			ctrl->val = bufreq->buffer_count_min_host;
 		}
-		rc = set_actual_buffer_count(inst,
-				bufreq->buffer_count_min_host,
+		rc = set_buffer_count(inst,
+			bufreq->buffer_count_min_host,
+			bufreq->buffer_count_actual,
 			HAL_BUFFER_OUTPUT);
 
+		msm_vidc_update_host_buff_counts(inst);
+		ctrl->val = bufreq->buffer_count_min_host;
+
 		return rc;
 	}
 	return -EINVAL;
@@ -1367,6 +1368,8 @@
 		break;
 
 	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+		if (inst->in_reconfig)
+			msm_vidc_update_host_buff_counts(inst);
 		buffer_type = msm_comm_get_hal_output_buffer(inst);
 		bufreq = get_buff_req_buffer(inst,
 			buffer_type);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 5e366d0..86dc973 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -16,6 +16,9 @@
 #include "msm_vidc_debug.h"
 #include "msm_vidc_clocks.h"
 
+#define MSM_VIDC_MIN_UBWC_COMPLEXITY_FACTOR (1 << 16)
+#define MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR (4 << 16)
+
 static inline unsigned long int get_ubwc_compression_ratio(
 	struct ubwc_cr_stats_info_type ubwc_stats_info)
 {
@@ -91,16 +94,34 @@
 	struct vidc_bus_vote_data *vote_data)
 {
 	struct recon_buf *binfo;
-	u32 CR = 0, CF = 0;
+	u32 CR = 0, min_cf = MSM_VIDC_MIN_UBWC_COMPLEXITY_FACTOR,
+		max_cf = MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR;
 
 	mutex_lock(&inst->reconbufs.lock);
 	list_for_each_entry(binfo, &inst->reconbufs.list, list) {
 		CR = max(CR, binfo->CR);
-		CF = max(CF, binfo->CF);
+		min_cf = min(min_cf, binfo->CF);
+		max_cf = max(max_cf, binfo->CF);
 	}
 	mutex_unlock(&inst->reconbufs.lock);
-	vote_data->complexity_factor = CF;
+
+	/* Sanitize CF values from HW . */
+	max_cf = min_t(u32, max_cf, MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR);
+	min_cf = max_t(u32, min_cf, MSM_VIDC_MIN_UBWC_COMPLEXITY_FACTOR);
+
 	vote_data->compression_ratio = CR;
+	vote_data->complexity_factor = max_cf;
+	vote_data->use_dpb_read = false;
+	if (inst->clk_data.load <= inst->clk_data.load_norm) {
+		vote_data->complexity_factor = min_cf;
+		vote_data->use_dpb_read = true;
+	}
+
+	dprintk(VIDC_DBG,
+		"Compression Ratio = %d Complexity Factor = %d\n",
+			vote_data->compression_ratio,
+			vote_data->complexity_factor);
+
 	return 0;
 }
 
@@ -129,6 +150,33 @@
 	mutex_lock(&core->lock);
 	list_for_each_entry(inst, &core->instances, list) {
 		int codec = 0;
+		struct msm_vidc_buffer *temp, *next;
+		u32 filled_len = 0;
+		u32 device_addr = 0;
+
+		if (!inst) {
+			dprintk(VIDC_ERR, "%s Invalid args\n",
+				__func__);
+			return -EINVAL;
+		}
+
+		mutex_lock(&inst->registeredbufs.lock);
+		list_for_each_entry_safe(temp, next,
+				&inst->registeredbufs.list, list) {
+			if (temp->vvb.vb2_buf.type ==
+				V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+					temp->deferred) {
+				filled_len = max(filled_len,
+					temp->vvb.vb2_buf.planes[0].bytesused);
+				device_addr = temp->smem[0].device_addr;
+			}
+		}
+		mutex_unlock(&inst->registeredbufs.lock);
+
+		if (!filled_len || !device_addr) {
+			dprintk(VIDC_DBG, "%s No ETBs\n", __func__);
+			continue;
+		}
 
 		++vote_data_count;
 
@@ -178,6 +226,10 @@
 		}
 		vote_data[i].work_mode = inst->clk_data.work_mode;
 		fill_recon_stats(inst, &vote_data[i]);
+
+		if (core->resources.sys_cache_enabled)
+			vote_data[i].use_sys_cache = true;
+
 		i++;
 	}
 	mutex_unlock(&core->lock);
@@ -213,6 +265,7 @@
 	int rc = 0;
 	int fw_pending_bufs = 0;
 	int total_output_buf = 0;
+	int min_output_buf = 0;
 	int buffers_outside_fw = 0;
 	struct msm_vidc_core *core;
 	struct hal_buffer_requirements *output_buf_req;
@@ -247,19 +300,40 @@
 	/* Total number of output buffers */
 	total_output_buf = output_buf_req->buffer_count_actual;
 
+	min_output_buf = output_buf_req->buffer_count_min;
+
 	/* Buffers outside FW are with display */
 	buffers_outside_fw = total_output_buf - fw_pending_bufs;
-	dprintk(VIDC_DBG,
-		"Counts : total_output_buf = %d fw_pending_bufs = %d buffers_outside_fw = %d\n",
-		total_output_buf, fw_pending_bufs, buffers_outside_fw);
+	dprintk(VIDC_PROF,
+		"Counts : total_output_buf = %d Min buffers = %d fw_pending_bufs = %d buffers_outside_fw = %d\n",
+		total_output_buf, min_output_buf, fw_pending_bufs,
+			buffers_outside_fw);
 
-	if (buffers_outside_fw >=  dcvs->min_threshold &&
-			dcvs->load > dcvs->load_low) {
-		dcvs->load = dcvs->load_low;
-	} else if (buffers_outside_fw < dcvs->min_threshold &&
-			dcvs->load == dcvs->load_low) {
+	/*
+	 * PMS decides clock level based on below algo
+
+	 * Limits :
+	 * max_threshold : Client extra allocated buffers. Client
+	 * reserves these buffers for it's smooth flow.
+	 * min_output_buf : HW requested buffers for it's smooth
+	 * flow of buffers.
+	 * min_threshold : Driver requested extra buffers for PMS.
+
+	 * 1) When buffers outside FW are reaching client's extra buffers,
+	 *    FW is slow and will impact pipeline, Increase clock.
+	 * 2) When pending buffers with FW are same as FW requested,
+	 *    pipeline has cushion to absorb FW slowness, Decrease clocks.
+	 * 3) When none of 1) or 2) FW is just fast enough to maintain
+	 *    pipeline, request Right Clocks.
+	 */
+
+	if (buffers_outside_fw <= dcvs->max_threshold)
 		dcvs->load = dcvs->load_high;
-	}
+	else if (fw_pending_bufs <= min_output_buf)
+		dcvs->load = dcvs->load_low;
+	else
+		dcvs->load = dcvs->load_norm;
+
 	return rc;
 }
 
@@ -287,8 +361,6 @@
 	mutex_unlock(&inst->freqs.lock);
 }
 
-// TODO this needs to be removed later and use queued_list
-
 void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
 	u32 device_addr)
 {
@@ -318,9 +390,8 @@
 
 	/* If current requirement is within DCVS limits, try DCVS. */
 
-	if (freq < inst->clk_data.load_high) {
+	if (freq < inst->clk_data.load_norm) {
 		dprintk(VIDC_DBG, "Calling DCVS now\n");
-		// TODO calling DCVS here may reduce the residency. Re-visit.
 		msm_dcvs_scale_clocks(inst);
 		freq = inst->clk_data.load;
 	}
@@ -342,6 +413,18 @@
 	mutex_unlock(&inst->freqs.lock);
 }
 
+static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core)
+{
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	unsigned long freq = 0;
+
+	allowed_clks_tbl = core->resources.allowed_clks_tbl;
+	freq = allowed_clks_tbl[0].clock_rate;
+	dprintk(VIDC_PROF, "Max rate = %lu\n", freq);
+
+	return freq;
+}
+
 static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst,
 	u32 filled_len)
 {
@@ -373,17 +456,17 @@
 
 		vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles;
 		/* 10 / 7 is overhead factor */
-		vsp_cycles += (inst->prop.fps * filled_len * 8 * 10) / 7;
+		vsp_cycles += ((inst->prop.fps * filled_len * 8) / 7) * 10;
 
 	} else {
-		// TODO return Min or Max ?
 		dprintk(VIDC_ERR, "Unknown session type = %s\n", __func__);
-		return freq;
+		return msm_vidc_max_freq(inst->core);
 	}
 
 	freq = max(vpp_cycles, vsp_cycles);
 
-	dprintk(VIDC_PROF, "%s Inst %pK : Freq = %lu\n", __func__, inst, freq);
+	dprintk(VIDC_PROF, "%s Inst %pK : Filled Len = %d Freq = %lu\n",
+		__func__, inst, filled_len, freq);
 
 	return freq;
 }
@@ -425,18 +508,6 @@
 	return rc;
 }
 
-static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core)
-{
-	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
-	unsigned long freq = 0;
-
-	allowed_clks_tbl = core->resources.allowed_clks_tbl;
-	freq = allowed_clks_tbl[0].clock_rate;
-	dprintk(VIDC_PROF, "Max rate = %lu", freq);
-
-	return freq;
-}
-
 int msm_vidc_update_operating_rate(struct msm_vidc_inst *inst)
 {
 	struct v4l2_ctrl *ctrl = NULL;
@@ -528,7 +599,7 @@
 	list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) {
 		if (temp->vvb.vb2_buf.type ==
 				V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
-				temp->deferred) {
+					temp->deferred) {
 			filled_len = max(filled_len,
 				temp->vvb.vb2_buf.planes[0].bytesused);
 			device_addr = temp->smem[0].device_addr;
@@ -537,8 +608,8 @@
 	mutex_unlock(&inst->registeredbufs.lock);
 
 	if (!filled_len || !device_addr) {
-		dprintk(VIDC_PROF, "No Change in frequency\n");
-		goto decision_done;
+		dprintk(VIDC_DBG, "%s No ETBs\n", __func__);
+		goto no_clock_change;
 	}
 
 	freq = msm_vidc_calc_freq(inst, filled_len);
@@ -555,8 +626,9 @@
 	else
 		inst->clk_data.curr_freq = freq;
 
-decision_done:
 	msm_vidc_set_clocks(inst->core);
+
+no_clock_change:
 	return 0;
 }
 
@@ -603,7 +675,6 @@
 	}
 	inst->clk_data.dcvs_mode = true;
 
-	// TODO : Update with proper number based on on-target tuning.
 	inst->clk_data.extra_capture_buffer_count =
 		DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
 	inst->clk_data.extra_output_buffer_count =
@@ -641,12 +712,13 @@
 
 static inline void msm_dcvs_print_dcvs_stats(struct clock_data *dcvs)
 {
-	dprintk(VIDC_DBG,
-		"DCVS: Load_Low %d, Load High %d\n",
+	dprintk(VIDC_PROF,
+		"DCVS: Load_Low %d, Load Norm %d, Load High %d\n",
 		dcvs->load_low,
+		dcvs->load_norm,
 		dcvs->load_high);
 
-	dprintk(VIDC_DBG,
+	dprintk(VIDC_PROF,
 		"DCVS: min_threshold %d, max_threshold %d\n",
 		dcvs->min_threshold, dcvs->max_threshold);
 }
@@ -659,6 +731,7 @@
 	u64 total_freq = 0, rate = 0, load;
 	int cycles;
 	struct clock_data *dcvs;
+	struct hal_buffer_requirements *output_buf_req;
 
 	dprintk(VIDC_DBG, "Init DCVS Load\n");
 
@@ -679,12 +752,22 @@
 			cycles;
 
 		dcvs->buffer_type = HAL_BUFFER_INPUT;
-		// TODO : Update with proper no based on Buffer counts change.
-		dcvs->min_threshold = 7;
+		dcvs->min_threshold =
+			msm_vidc_get_extra_buff_count(inst, HAL_BUFFER_INPUT);
 	} else if (inst->session_type == MSM_VIDC_DECODER) {
 		dcvs->buffer_type = msm_comm_get_hal_output_buffer(inst);
-		// TODO : Update with proper no based on Buffer counts change.
-		dcvs->min_threshold = 4;
+		output_buf_req = get_buff_req_buffer(inst,
+				dcvs->buffer_type);
+		if (!output_buf_req) {
+			dprintk(VIDC_ERR,
+				"%s: No bufer req for buffer type %x\n",
+				__func__, dcvs->buffer_type);
+			return;
+		}
+		dcvs->max_threshold = output_buf_req->buffer_count_actual -
+			output_buf_req->buffer_count_min_host + 1;
+		dcvs->min_threshold =
+			msm_vidc_get_extra_buff_count(inst, dcvs->buffer_type);
 	} else {
 		return;
 	}
@@ -697,8 +780,12 @@
 			break;
 	}
 
-	dcvs->load = dcvs->load_high = rate;
-	dcvs->load_low = allowed_clks_tbl[i+1].clock_rate;
+	dcvs->load = dcvs->load_norm = rate;
+
+	dcvs->load_low = i < (core->resources.allowed_clks_tbl_size - 1) ?
+		allowed_clks_tbl[i+1].clock_rate : dcvs->load_norm;
+	dcvs->load_high = i > 0 ? allowed_clks_tbl[i-1].clock_rate :
+		dcvs->load_norm;
 
 	inst->clk_data.buffer_counter = 0;
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
index e1226e4..705cb7c 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
@@ -15,21 +15,11 @@
 #define _MSM_VIDC_CLOCKS_H_
 #include "msm_vidc_internal.h"
 
-/* Low threshold for encoder dcvs */
-#define DCVS_ENC_LOW_THR 4
-/* High threshold for encoder dcvs */
-#define DCVS_ENC_HIGH_THR 9
 /* extra o/p buffers in case of encoder dcvs */
 #define DCVS_ENC_EXTRA_OUTPUT_BUFFERS 2
+
 /* extra o/p buffers in case of decoder dcvs */
 #define DCVS_DEC_EXTRA_OUTPUT_BUFFERS 4
-/* Default threshold to reduce the core frequency */
-#define DCVS_NOMINAL_THRESHOLD 8
-/* Default threshold to increase the core frequency */
-#define DCVS_TURBO_THRESHOLD 4
-
-/* Considering one safeguard buffer */
-#define DCVS_BUFFER_SAFEGUARD (DCVS_DEC_EXTRA_OUTPUT_BUFFERS - 1)
 
 void msm_clock_data_reset(struct msm_vidc_inst *inst);
 int msm_vidc_update_operating_rate(struct msm_vidc_inst *inst);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index ac69ab8..ec9777b 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -36,7 +36,6 @@
 #define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
 
 #define MAX_SUPPORTED_INSTANCES 16
-static int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst);
 
 const char *const mpeg_video_vidc_extradata[] = {
 	"Extradata none",
@@ -70,6 +69,7 @@
 	"Extradata PQ Info",
 	"Extradata display VUI",
 	"Extradata vpx color space",
+	"Extradata UBWC CR stats info",
 };
 
 struct getprop_buf {
@@ -1604,10 +1604,8 @@
 			return;
 		}
 		bufreq->buffer_count_min = event_notify->capture_buf_count;
-
 	}
 
-	msm_vidc_update_host_buff_counts(inst);
 	mutex_unlock(&inst->lock);
 
 	if (event == V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT) {
@@ -4044,7 +4042,7 @@
 	return rc;
 }
 
-static int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst)
+int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst)
 {
 	int extra_buffers;
 	struct hal_buffer_requirements *bufreq;
@@ -4075,7 +4073,6 @@
 		}
 
 		/* For DPB buffers, no need to add Extra buffers */
-
 		bufreq->buffer_count_min_host =	bufreq->buffer_count_actual =
 			bufreq->buffer_count_min;
 
@@ -4093,6 +4090,20 @@
 
 		bufreq->buffer_count_min_host =	bufreq->buffer_count_actual =
 			bufreq->buffer_count_min + extra_buffers;
+
+		bufreq = get_buff_req_buffer(inst,
+				HAL_BUFFER_EXTRADATA_OUTPUT2);
+		if (!bufreq) {
+			dprintk(VIDC_DBG,
+				"No buffer requirements : %x\n",
+					HAL_BUFFER_EXTRADATA_OUTPUT2);
+		} else {
+			if (bufreq->buffer_count_min) {
+				bufreq->buffer_count_min_host =
+				bufreq->buffer_count_actual =
+				bufreq->buffer_count_min + extra_buffers;
+			}
+		}
 	} else {
 
 		bufreq = get_buff_req_buffer(inst,
@@ -4109,6 +4120,20 @@
 
 		bufreq->buffer_count_min_host =	bufreq->buffer_count_actual =
 			bufreq->buffer_count_min + extra_buffers;
+
+		bufreq = get_buff_req_buffer(inst,
+				HAL_BUFFER_EXTRADATA_OUTPUT);
+		if (!bufreq) {
+			dprintk(VIDC_DBG,
+				"No buffer requirements : %x\n",
+				HAL_BUFFER_EXTRADATA_OUTPUT);
+		} else {
+			if (bufreq->buffer_count_min) {
+				bufreq->buffer_count_min_host =
+				bufreq->buffer_count_actual =
+				bufreq->buffer_count_min + extra_buffers;
+			}
+		}
 	}
 
 	return 0;
@@ -4143,8 +4168,8 @@
 				req.buffer_count_min, req.buffer_size);
 		}
 	}
-
-	rc = msm_vidc_update_host_buff_counts(inst);
+	if (inst->session_type == MSM_VIDC_ENCODER)
+		rc = msm_vidc_update_host_buff_counts(inst);
 
 	dprintk(VIDC_DBG, "Buffer requirements host adjusted:\n");
 	dprintk(VIDC_DBG, "%15s %8s %8s %8s %8s\n",
@@ -4893,6 +4918,9 @@
 	case V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE:
 		ret = HAL_EXTRADATA_VPX_COLORSPACE;
 		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO:
+		ret = HAL_EXTRADATA_UBWC_CR_STATS_INFO;
+		break;
 	default:
 		dprintk(VIDC_WARN, "Extradata not found: %d\n", index);
 		break;
@@ -5048,7 +5076,6 @@
 	rc = msm_vidc_load_supported(inst);
 	if (rc) {
 		change_inst_state(inst, MSM_VIDC_CORE_INVALID);
-		msm_comm_kill_session(inst);
 		dprintk(VIDC_WARN,
 			"%s: Hardware is overloaded\n", __func__);
 		return rc;
@@ -5098,7 +5125,6 @@
 	}
 	if (rc) {
 		change_inst_state(inst, MSM_VIDC_CORE_INVALID);
-		msm_comm_kill_session(inst);
 		dprintk(VIDC_ERR,
 			"%s: Resolution unsupported\n", __func__);
 	}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index 5c653f5..bc881a0 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -61,6 +61,7 @@
 void msm_comm_validate_output_buffers(struct msm_vidc_inst *inst);
 int msm_comm_force_cleanup(struct msm_vidc_inst *inst);
 int msm_comm_suspend(int core_id);
+int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst);
 enum hal_extradata_id msm_comm_get_hal_extradata_index(
 	enum v4l2_mpeg_vidc_extradata index);
 struct hal_buffer_requirements *get_buff_req_buffer(
@@ -107,7 +108,6 @@
 u32 get_frame_size_rgba(int plane, u32 height, u32 width);
 u32 get_frame_size_nv21(int plane, u32 height, u32 width);
 u32 get_frame_size_tp10_ubwc(int plane, u32 height, u32 width);
-void msm_comm_set_use_sys_cache(struct msm_vidc_inst *inst);
 struct vb2_buffer *msm_comm_get_vb_using_vidc_buffer(
 		struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf);
 struct msm_vidc_buffer *msm_comm_get_buffer_using_device_planes(
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 22772ef..373dbba 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -243,6 +243,7 @@
 	int buffer_counter;
 	int load;
 	int load_low;
+	int load_norm;
 	int load_high;
 	int min_threshold;
 	int max_threshold;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
index 25f22c7..8a701cb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -53,14 +53,14 @@
 
 static struct msm_vidc_common_data default_common_data[] = {
 	{
-		.key = "qcon,never-unload-fw",
+		.key = "qcom,never-unload-fw",
 		.value = 1,
 	},
 };
 
 static struct msm_vidc_common_data sdm845_common_data[] = {
 	{
-		.key = "qcon,never-unload-fw",
+		.key = "qcom,never-unload-fw",
 		.value = 1,
 	},
 	{
@@ -97,7 +97,7 @@
 	},
 	{
 		.key = "qcom,hw-resp-timeout",
-		.value = 2000,
+		.value = 250,
 	},
 };
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 062795f..afb8893 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -27,6 +27,8 @@
 	CLOCK_PROP_HAS_MEM_RETENTION    = 1 << 1,
 };
 
+#define PERF_GOV "performance"
+
 static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
 {
 	return NULL;
@@ -426,6 +428,8 @@
 	buses->bus_tbl = temp_table;
 	bus = &buses->bus_tbl[buses->count];
 
+	memset(bus, 0x0, sizeof(struct bus_info));
+
 	rc = of_property_read_string(dev->of_node, "label", &temp_name);
 	if (rc) {
 		dprintk(VIDC_ERR, "'label' not found in node\n");
@@ -457,9 +461,12 @@
 		rc = 0;
 		dprintk(VIDC_DBG,
 				"'qcom,bus-governor' not found, default to performance governor\n");
-		bus->governor = "performance";
+		bus->governor = PERF_GOV;
 	}
 
+	if (!strcmp(bus->governor, PERF_GOV))
+		bus->is_prfm_gov_used = true;
+
 	rc = of_property_read_u32_array(dev->of_node, "qcom,bus-range-kbps",
 			range, ARRAY_SIZE(range));
 	if (rc) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index dda5e80..755f0c86 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -112,6 +112,7 @@
 	struct devfreq_dev_profile devfreq_prof;
 	struct devfreq *devfreq;
 	struct msm_bus_client_handle *client;
+	bool is_prfm_gov_used;
 };
 
 struct bus_set {
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index f8d8842..dad4b60 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -809,21 +809,22 @@
 	int rc = 0;
 	struct bus_info *bus = NULL;
 
+	kfree(device->bus_vote.data);
+	device->bus_vote.data = NULL;
+	device->bus_vote.data_count = 0;
+
 	venus_hfi_for_each_bus(device, bus) {
-		int local_rc = 0;
 		unsigned long zero = 0;
 
-		rc = devfreq_suspend_device(bus->devfreq);
+		if (!bus->is_prfm_gov_used)
+			rc = devfreq_suspend_device(bus->devfreq);
+		else
+			rc = __devfreq_target(bus->dev, &zero, 0);
+
 		if (rc)
 			goto err_unknown_device;
-
-		local_rc = __devfreq_target(bus->dev, &zero, 0);
-		rc = rc ?: local_rc;
 	}
 
-	if (rc)
-		dprintk(VIDC_WARN, "Failed to unvote some buses\n");
-
 err_unknown_device:
 	return rc;
 }
@@ -857,15 +858,14 @@
 
 	venus_hfi_for_each_bus(device, bus) {
 		if (bus && bus->devfreq) {
-			/* NOP if already resume */
-			rc = devfreq_resume_device(bus->devfreq);
-			if (rc)
-				goto err_no_mem;
-
-			/* Kick devfreq awake incase _resume() didn't do it */
-
-			bus->devfreq->nb.notifier_call(
-				&bus->devfreq->nb, 0, NULL);
+			if (!bus->is_prfm_gov_used) {
+				rc = devfreq_resume_device(bus->devfreq);
+				if (rc)
+					goto err_no_mem;
+			} else {
+				bus->devfreq->nb.notifier_call(
+					&bus->devfreq->nb, 0, NULL);
+			}
 		}
 	}
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 5601f1b..8e9e51f 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -188,6 +188,12 @@
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001E)
 #define HFI_PROPERTY_PARAM_VDEC_CONTENT_LIGHT_LEVEL_SEI_EXTRADATA \
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001F)
+#define HFI_PROPERTY_PARAM_VDEC_COLOUR_REMAPPING_INFO_SEI_EXTRADATA	\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0020)
+#define HFI_PROPERTY_PARAM_VDEC_DOWN_SCALAR	\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0021)
+#define HFI_PROPERTY_PARAM_VDEC_UBWC_CR_STAT_INFO_EXTRADATA	\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0022)
 
 #define HFI_PROPERTY_CONFIG_VDEC_OX_START				\
 	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x4000)
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 47ce0ba..79ce858 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -125,6 +125,7 @@
 	HAL_EXTRADATA_PQ_INFO,
 	HAL_EXTRADATA_VUI_DISPLAY_INFO,
 	HAL_EXTRADATA_VPX_COLORSPACE,
+	HAL_EXTRADATA_UBWC_CR_STATS_INFO,
 };
 
 enum hal_property {
@@ -1338,12 +1339,11 @@
 	int output_height, output_width;
 	int compression_ratio;
 	int complexity_factor;
+	bool use_dpb_read;
 	unsigned int lcu_size;
 	enum msm_vidc_power_mode power_mode;
-	struct imem_ab_table *imem_ab_tbl;
 	enum hal_work_mode work_mode;
-	unsigned long bitrate;
-	u32 imem_ab_tbl_size;
+	bool use_sys_cache;
 };
 
 struct vidc_clk_scale_data {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 616fc09..8169a9b 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -339,7 +339,7 @@
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00E)
 #define HFI_PROPERTY_CONFIG_VENC_BASELAYER_PRIORITYID		\
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00F)
-#define HFI_PROPERTY_CONFIG_VENC_SESSION_QP			\
+#define HFI_PROPERTY_CONFIG_VENC_FRAME_QP			\
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x012)
 
 #define HFI_PROPERTY_PARAM_VPE_COMMON_START				\
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index e6e5e90..b3659448 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -931,7 +931,7 @@
 static int xc5000_set_tv_freq(struct dvb_frontend *fe)
 {
 	struct xc5000_priv *priv = fe->tuner_priv;
-	u16 pll_lock_status;
+	u16 pll_lock_status = 0;
 	int ret;
 
 tune_channel:
@@ -1040,7 +1040,6 @@
 
 	return 0;
 }
-
 static int xc5000_set_params(struct dvb_frontend *fe)
 {
 	struct xc5000_priv *priv = fe->tuner_priv;
@@ -1133,7 +1132,7 @@
 	const struct xc5000_fw_cfg *desired_fw = xc5000_assign_firmware(priv->chip_id);
 	const struct firmware *fw;
 	int ret, i;
-	u16 pll_lock_status;
+	u16 pll_lock_status = 0;
 	u16 fw_ck;
 
 	cancel_delayed_work(&priv->timer_sleep);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
index e1907cd0c..7613d1f 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
@@ -123,15 +123,10 @@
 	memset(&tvdata,0,sizeof(tvdata));
 
 	eeprom = pvr2_eeprom_fetch(hdw);
-	if (!eeprom) return -EINVAL;
+	if (!eeprom)
+		return -EINVAL;
 
-	{
-		struct i2c_client fake_client;
-		/* Newer version expects a useless client interface */
-		fake_client.addr = hdw->eeprom_addr;
-		fake_client.adapter = &hdw->i2c_adap;
-		tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom);
-	}
+	tveeprom_hauppauge_analog(NULL, &tvdata, eeprom);
 
 	trace_eeprom("eeprom assumed v4l tveeprom module");
 	trace_eeprom("eeprom direct call results:");
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 21900202..9ccf7f5 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -868,7 +868,7 @@
 
 void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
 {
-	if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
+	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
 		return NULL;
 
 	return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 1239e68..71341a7 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -8,7 +8,7 @@
 config MFD_CORE
 	tristate
 	select IRQ_DOMAIN
-	default n
+	default y
 
 config MFD_CS5535
 	tristate "AMD CS5535 and CS5536 southbridge core functions"
@@ -1636,61 +1636,19 @@
 	  power supply enable or disable. This driver acts as interface
 	  between codec and regulator framework.
 
-config WCD9XXX_CODEC_UTIL
-	tristate "WCD9XXX Codec Utils"
-	select MFD_CORE
-	help
-	  WCD9XXX Util driver provides APIs for WCD drivers to reset,
-	  suspend/resume, regmap bus callback functions and read/write
-	  functions. This driver also hides the underlying bus related
-	  functionalities.
-
-config WCD9330_CODEC
-	tristate "WCD9330 Codec"
-	select SLIMBUS
-	select MFD_CORE
-	select WCD9XXX_CODEC_UTIL
-	select MSM_CDC_SUPPLY
-	select REGMAP_ALLOW_WRITE_DEBUGFS
-	help
-	  Enables the WCD9xxx codec core driver. The core driver provides
-	  read/write capability to registers which are part of the
-	  WCD9330 core and gives the ability to use the WCD9330 codec.
-	  The WCD9330 codec support either I2C/I2S or Slimbus for
-	  control and data exchnage with master processor.
-
-config WCD9335_CODEC
-	tristate "WCD9335 Codec"
+config WCD9XXX_CODEC_CORE
+	tristate "WCD9XXX Codec Core"
 	select SLIMBUS
 	select SOUNDWIRE_WCD_CTRL
-	select MFD_CORE
-	select WCD9XXX_CODEC_UTIL
-	select MSM_CDC_SUPPLY
-	select MSM_CDC_PINCTRL
-	select REGMAP_ALLOW_WRITE_DEBUGFS
-	help
-	  Enables the WCD9xxx codec core driver. The core driver provides
-	  read/write capability to registers which are part of the
-	  WCD9335 core and gives the ability to use the WCD9335 codec.
-	  The WCD9335 codec support either I2C/I2S or Slimbus for
-	  control and data exchnage with master processor.
-
-config WCD934X_CODEC
-	tristate "WCD934X Codec"
-	depends on SLIMBUS
-	select SOUNDWIRE_WCD_CTRL
-	select MFD_CORE
-	select WCD9XXX_CODEC_UTIL
 	select MSM_CDC_SUPPLY
 	select MSM_CDC_PINCTRL
 	select REGMAP_ALLOW_WRITE_DEBUGFS
 	select PINCTRL_WCD
 	help
-	  Enables the WCD9xxx codec core driver. The core driver provides
-	  read/write capability to registers which are part of the
-	  WCD934X core and gives the ability to use the WCD934X codec.
-	  The WCD934X codec supports either I2C/I2S or Slimbus for
-	  control and data exchange with master processor.
+	  WCD9XXX Core driver provides APIs for WCD drivers to reset,
+	  suspend/resume, regmap bus callback functions and read/write
+	  functions. This driver also hides the underlying bus related
+	  functionalities.
 
 menu "Multimedia Capabilities Port drivers"
 	depends on ARCH_SA1100
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index b2fe74b..0ce70f3 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -205,16 +205,13 @@
 obj-$(CONFIG_MFD_DLN2)		+= dln2.o
 obj-$(CONFIG_MFD_RT5033)	+= rt5033.o
 obj-$(CONFIG_MFD_SKY81452)	+= sky81452.o
-obj-$(CONFIG_MSM_CDC_PINCTRL)	+= msm-cdc-pinctrl.o
-obj-$(CONFIG_MSM_CDC_SUPPLY) += msm-cdc-supply.o
-obj-$(CONFIG_WCD9XXX_CODEC_UTIL) += wcd9xxx-utils.o
-obj-$(CONFIG_WCD9330_CODEC)	+= wcd9xxx-core.o wcd9xxx-irq.o wcd9xxx-slimslave.o\
-						wcd9330-regmap.o
-obj-$(CONFIG_WCD9335_CODEC)	+= wcd9xxx-core.o wcd9xxx-irq.o wcd9xxx-slimslave.o\
-					wcd9335-regmap.o wcd9335-tables.o
-obj-$(CONFIG_WCD934X_CODEC)	+= wcd9xxx-core.o wcd9xxx-irq.o wcd9xxx-slimslave.o\
-					wcd934x-regmap.o wcd934x-tables.o
-
+wcd-core-objs			:= wcd9xxx-rst.o wcd9xxx-core-init.o \
+				   wcd9xxx-core.o wcd9xxx-irq.o \
+				   wcd9xxx-slimslave.o wcd9xxx-utils.o \
+				   wcd934x-regmap.o wcd934x-tables.o \
+				   wcd9335-regmap.o wcd9335-tables.o \
+				   msm-cdc-pinctrl.o msm-cdc-supply.o
+obj-$(CONFIG_WCD9XXX_CODEC_CORE) += wcd-core.o
 intel-soc-pmic-objs		:= intel_soc_pmic_core.o intel_soc_pmic_crc.o
 intel-soc-pmic-$(CONFIG_INTEL_PMC_IPC)	+= intel_soc_pmic_bxtwc.o
 obj-$(CONFIG_INTEL_SOC_PMIC)	+= intel-soc-pmic.o
diff --git a/drivers/mfd/msm-cdc-pinctrl.c b/drivers/mfd/msm-cdc-pinctrl.c
index 9622256..859a75f 100644
--- a/drivers/mfd/msm-cdc-pinctrl.c
+++ b/drivers/mfd/msm-cdc-pinctrl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -239,7 +239,15 @@
 	.probe = msm_cdc_pinctrl_probe,
 	.remove = msm_cdc_pinctrl_remove,
 };
-module_platform_driver(msm_cdc_pinctrl_driver);
 
+int msm_cdc_pinctrl_drv_init(void)
+{
+	return platform_driver_register(&msm_cdc_pinctrl_driver);
+}
+
+void msm_cdc_pinctrl_drv_exit(void)
+{
+	platform_driver_unregister(&msm_cdc_pinctrl_driver);
+}
 MODULE_DESCRIPTION("MSM CODEC pin control platform driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 1aa74c4..9d167c9 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -377,8 +377,8 @@
 				 * and use SDR Mode
 				 */
 				reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
-					| OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
 					| OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
+				reg |= OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF;
 			} else if (pdata->port_mode[i] ==
 					OMAP_EHCI_PORT_MODE_HSIC) {
 				/*
diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
index 2280770..0977563 100644
--- a/drivers/mfd/qcom-spmi-pmic.c
+++ b/drivers/mfd/qcom-spmi-pmic.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -118,12 +118,23 @@
 	.fast_io	= true,
 };
 
+static const struct regmap_config spmi_regmap_can_sleep_config = {
+	.reg_bits	= 16,
+	.val_bits	= 8,
+	.max_register	= 0xffff,
+	.fast_io	= false,
+};
+
 static int pmic_spmi_probe(struct spmi_device *sdev)
 {
 	struct device_node *root = sdev->dev.of_node;
 	struct regmap *regmap;
 
-	regmap = devm_regmap_init_spmi_ext(sdev, &spmi_regmap_config);
+	if (of_property_read_bool(root, "qcom,can-sleep"))
+		regmap = devm_regmap_init_spmi_ext(sdev,
+						&spmi_regmap_can_sleep_config);
+	else
+		regmap = devm_regmap_init_spmi_ext(sdev, &spmi_regmap_config);
 	if (IS_ERR(regmap))
 		return PTR_ERR(regmap);
 
diff --git a/drivers/mfd/wcd9330-regmap.c b/drivers/mfd/wcd9330-regmap.c
deleted file mode 100644
index 878ea72..0000000
--- a/drivers/mfd/wcd9330-regmap.c
+++ /dev/null
@@ -1,990 +0,0 @@
-/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/mfd/wcd9xxx/core.h>
-#include <linux/mfd/wcd9xxx/wcd9330_registers.h>
-#include <linux/regmap.h>
-#include <linux/device.h>
-#include "wcd9xxx-regmap.h"
-
-static struct reg_default wcd9330_defaults[] = {
-	{ TOMTOM_A_CHIP_CTL, TOMTOM_A_CHIP_CTL__POR },
-	{ TOMTOM_A_CHIP_STATUS, TOMTOM_A_CHIP_STATUS__POR },
-	{ TOMTOM_A_CHIP_ID_BYTE_0, TOMTOM_A_CHIP_ID_BYTE_0__POR },
-	{ TOMTOM_A_CHIP_ID_BYTE_1, TOMTOM_A_CHIP_ID_BYTE_1__POR },
-	{ TOMTOM_A_CHIP_ID_BYTE_2, TOMTOM_A_CHIP_ID_BYTE_2__POR },
-	{ TOMTOM_A_CHIP_ID_BYTE_3, TOMTOM_A_CHIP_ID_BYTE_3__POR },
-	{ TOMTOM_A_CHIP_I2C_SLAVE_ID, TOMTOM_A_CHIP_I2C_SLAVE_ID__POR },
-	{ TOMTOM_A_SLAVE_ID_1, TOMTOM_A_SLAVE_ID_1__POR },
-	{ TOMTOM_A_SLAVE_ID_2, TOMTOM_A_SLAVE_ID_2__POR },
-	{ TOMTOM_A_SLAVE_ID_3, TOMTOM_A_SLAVE_ID_3__POR },
-	{ TOMTOM_A_PIN_CTL_OE0, TOMTOM_A_PIN_CTL_OE0__POR },
-	{ TOMTOM_A_PIN_CTL_OE1, TOMTOM_A_PIN_CTL_OE1__POR },
-	{ TOMTOM_A_PIN_CTL_OE2, TOMTOM_A_PIN_CTL_OE2__POR },
-	{ TOMTOM_A_PIN_CTL_DATA0, TOMTOM_A_PIN_CTL_DATA0__POR },
-	{ TOMTOM_A_PIN_CTL_DATA1, TOMTOM_A_PIN_CTL_DATA1__POR },
-	{ TOMTOM_A_PIN_CTL_DATA2, TOMTOM_A_PIN_CTL_DATA2__POR },
-	{ TOMTOM_A_HDRIVE_GENERIC, TOMTOM_A_HDRIVE_GENERIC__POR },
-	{ TOMTOM_A_HDRIVE_OVERRIDE, TOMTOM_A_HDRIVE_OVERRIDE__POR },
-	{ TOMTOM_A_ANA_CSR_WAIT_STATE, TOMTOM_A_ANA_CSR_WAIT_STATE__POR },
-	{ TOMTOM_A_PROCESS_MONITOR_CTL0, TOMTOM_A_PROCESS_MONITOR_CTL0__POR },
-	{ TOMTOM_A_PROCESS_MONITOR_CTL1, TOMTOM_A_PROCESS_MONITOR_CTL1__POR },
-	{ TOMTOM_A_PROCESS_MONITOR_CTL2, TOMTOM_A_PROCESS_MONITOR_CTL2__POR },
-	{ TOMTOM_A_PROCESS_MONITOR_CTL3, TOMTOM_A_PROCESS_MONITOR_CTL3__POR },
-	{ TOMTOM_A_QFUSE_CTL, TOMTOM_A_QFUSE_CTL__POR },
-	{ TOMTOM_A_QFUSE_STATUS, TOMTOM_A_QFUSE_STATUS__POR },
-	{ TOMTOM_A_QFUSE_DATA_OUT0, TOMTOM_A_QFUSE_DATA_OUT0__POR },
-	{ TOMTOM_A_QFUSE_DATA_OUT1, TOMTOM_A_QFUSE_DATA_OUT1__POR },
-	{ TOMTOM_A_QFUSE_DATA_OUT2, TOMTOM_A_QFUSE_DATA_OUT2__POR },
-	{ TOMTOM_A_QFUSE_DATA_OUT3, TOMTOM_A_QFUSE_DATA_OUT3__POR },
-	{ TOMTOM_A_QFUSE_DATA_OUT4, TOMTOM_A_QFUSE_DATA_OUT4__POR },
-	{ TOMTOM_A_QFUSE_DATA_OUT5, TOMTOM_A_QFUSE_DATA_OUT5__POR },
-	{ TOMTOM_A_QFUSE_DATA_OUT6, TOMTOM_A_QFUSE_DATA_OUT6__POR },
-	{ TOMTOM_A_QFUSE_DATA_OUT7, TOMTOM_A_QFUSE_DATA_OUT7__POR },
-	{ TOMTOM_A_CDC_CTL, TOMTOM_A_CDC_CTL__POR },
-	{ TOMTOM_A_LEAKAGE_CTL, TOMTOM_A_LEAKAGE_CTL__POR },
-	{ TOMTOM_A_SVASS_MEM_PTR0, TOMTOM_A_SVASS_MEM_PTR0__POR },
-	{ TOMTOM_A_SVASS_MEM_PTR1, TOMTOM_A_SVASS_MEM_PTR1__POR },
-	{ TOMTOM_A_SVASS_MEM_PTR2, TOMTOM_A_SVASS_MEM_PTR2__POR },
-	{ TOMTOM_A_SVASS_MEM_CTL, TOMTOM_A_SVASS_MEM_CTL__POR },
-	{ TOMTOM_A_SVASS_MEM_BANK, TOMTOM_A_SVASS_MEM_BANK__POR },
-	{ TOMTOM_A_DMIC_B1_CTL, TOMTOM_A_DMIC_B1_CTL__POR },
-	{ TOMTOM_A_DMIC_B2_CTL, TOMTOM_A_DMIC_B2_CTL__POR },
-	{ TOMTOM_A_SVASS_CLKRST_CTL, TOMTOM_A_SVASS_CLKRST_CTL__POR },
-	{ TOMTOM_A_SVASS_CPAR_CFG, TOMTOM_A_SVASS_CPAR_CFG__POR },
-	{ TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD,
-				TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD__POR },
-	{ TOMTOM_A_SVASS_CPAR_WDOG_CFG, TOMTOM_A_SVASS_CPAR_WDOG_CFG__POR },
-	{ TOMTOM_A_SVASS_CFG, TOMTOM_A_SVASS_CFG__POR },
-	{ TOMTOM_A_SVASS_SPE_CFG, TOMTOM_A_SVASS_SPE_CFG__POR },
-	{ TOMTOM_A_SVASS_STATUS, TOMTOM_A_SVASS_STATUS__POR },
-	{ TOMTOM_A_SVASS_INT_MASK, TOMTOM_A_SVASS_INT_MASK__POR },
-	{ TOMTOM_A_SVASS_INT_STATUS, TOMTOM_A_SVASS_INT_STATUS__POR },
-	{ TOMTOM_A_SVASS_INT_CLR, TOMTOM_A_SVASS_INT_CLR__POR },
-	{ TOMTOM_A_SVASS_DEBUG, TOMTOM_A_SVASS_DEBUG__POR },
-	{ TOMTOM_A_SVASS_SPE_BKUP_INT, TOMTOM_A_SVASS_SPE_BKUP_INT__POR },
-	{ TOMTOM_A_SVASS_MEM_ACC, TOMTOM_A_SVASS_MEM_ACC__POR },
-	{ TOMTOM_A_MEM_LEAKAGE_CTL, TOMTOM_A_MEM_LEAKAGE_CTL__POR },
-	{ TOMTOM_A_SVASS_SPE_INBOX_TRG, TOMTOM_A_SVASS_SPE_INBOX_TRG__POR },
-	{ TOMTOM_A_SVASS_SPE_INBOX_0, TOMTOM_A_SVASS_SPE_INBOX_0__POR },
-	{ TOMTOM_A_SVASS_SPE_INBOX_1, TOMTOM_A_SVASS_SPE_INBOX_1__POR },
-	{ TOMTOM_A_SVASS_SPE_INBOX_2, TOMTOM_A_SVASS_SPE_INBOX_2__POR },
-	{ TOMTOM_A_SVASS_SPE_INBOX_3, TOMTOM_A_SVASS_SPE_INBOX_3__POR },
-	{ TOMTOM_A_SVASS_SPE_INBOX_4, TOMTOM_A_SVASS_SPE_INBOX_4__POR },
-	{ TOMTOM_A_SVASS_SPE_INBOX_5, TOMTOM_A_SVASS_SPE_INBOX_5__POR },
-	{ TOMTOM_A_SVASS_SPE_INBOX_6, TOMTOM_A_SVASS_SPE_INBOX_6__POR },
-	{ TOMTOM_A_SVASS_SPE_INBOX_7, TOMTOM_A_SVASS_SPE_INBOX_7__POR },
-	{ TOMTOM_A_SVASS_SPE_INBOX_8, TOMTOM_A_SVASS_SPE_INBOX_8__POR },
-	{ TOMTOM_A_SVASS_SPE_INBOX_9, TOMTOM_A_SVASS_SPE_INBOX_9__POR },
-	{ TOMTOM_A_SVASS_SPE_INBOX_10, TOMTOM_A_SVASS_SPE_INBOX_10__POR },
-	{ TOMTOM_A_SVASS_SPE_INBOX_11, TOMTOM_A_SVASS_SPE_INBOX_11__POR },
-	{ TOMTOM_A_SVASS_SPE_OUTBOX_0, TOMTOM_A_SVASS_SPE_OUTBOX_0__POR },
-	{ TOMTOM_A_SVASS_SPE_OUTBOX_1, TOMTOM_A_SVASS_SPE_OUTBOX_1__POR },
-	{ TOMTOM_A_SVASS_SPE_OUTBOX_2, TOMTOM_A_SVASS_SPE_OUTBOX_2__POR },
-	{ TOMTOM_A_SVASS_SPE_OUTBOX_3, TOMTOM_A_SVASS_SPE_OUTBOX_3__POR },
-	{ TOMTOM_A_SVASS_SPE_OUTBOX_4, TOMTOM_A_SVASS_SPE_OUTBOX_4__POR },
-	{ TOMTOM_A_SVASS_SPE_OUTBOX_5, TOMTOM_A_SVASS_SPE_OUTBOX_5__POR },
-	{ TOMTOM_A_SVASS_SPE_OUTBOX_6, TOMTOM_A_SVASS_SPE_OUTBOX_6__POR },
-	{ TOMTOM_A_SVASS_SPE_OUTBOX_7, TOMTOM_A_SVASS_SPE_OUTBOX_7__POR },
-	{ TOMTOM_A_SVASS_SPE_OUTBOX_8, TOMTOM_A_SVASS_SPE_OUTBOX_8__POR },
-	{ TOMTOM_A_SVASS_SPE_OUTBOX_9, TOMTOM_A_SVASS_SPE_OUTBOX_9__POR },
-	{ TOMTOM_A_SVASS_SPE_OUTBOX_10, TOMTOM_A_SVASS_SPE_OUTBOX_10__POR },
-	{ TOMTOM_A_SVASS_SPE_OUTBOX_11, TOMTOM_A_SVASS_SPE_OUTBOX_11__POR },
-	{ TOMTOM_A_INTR_MODE, TOMTOM_A_INTR_MODE__POR },
-	{ TOMTOM_A_INTR1_MASK0, TOMTOM_A_INTR1_MASK0__POR },
-	{ TOMTOM_A_INTR1_MASK1, TOMTOM_A_INTR1_MASK1__POR },
-	{ TOMTOM_A_INTR1_MASK2, TOMTOM_A_INTR1_MASK2__POR },
-	{ TOMTOM_A_INTR1_MASK3, TOMTOM_A_INTR1_MASK3__POR },
-	{ TOMTOM_A_INTR1_STATUS0, TOMTOM_A_INTR1_STATUS0__POR },
-	{ TOMTOM_A_INTR1_STATUS1, TOMTOM_A_INTR1_STATUS1__POR },
-	{ TOMTOM_A_INTR1_STATUS2, TOMTOM_A_INTR1_STATUS2__POR },
-	{ TOMTOM_A_INTR1_STATUS3, TOMTOM_A_INTR1_STATUS3__POR },
-	{ TOMTOM_A_INTR1_CLEAR0, TOMTOM_A_INTR1_CLEAR0__POR },
-	{ TOMTOM_A_INTR1_CLEAR1, TOMTOM_A_INTR1_CLEAR1__POR },
-	{ TOMTOM_A_INTR1_CLEAR2, TOMTOM_A_INTR1_CLEAR2__POR },
-	{ TOMTOM_A_INTR1_CLEAR3, TOMTOM_A_INTR1_CLEAR3__POR },
-	{ TOMTOM_A_INTR1_LEVEL0, TOMTOM_A_INTR1_LEVEL0__POR },
-	{ TOMTOM_A_INTR1_LEVEL1, TOMTOM_A_INTR1_LEVEL1__POR },
-	{ TOMTOM_A_INTR1_LEVEL2, TOMTOM_A_INTR1_LEVEL2__POR },
-	{ TOMTOM_A_INTR1_LEVEL3, TOMTOM_A_INTR1_LEVEL3__POR },
-	{ TOMTOM_A_INTR1_TEST0, TOMTOM_A_INTR1_TEST0__POR },
-	{ TOMTOM_A_INTR1_TEST1, TOMTOM_A_INTR1_TEST1__POR },
-	{ TOMTOM_A_INTR1_TEST2, TOMTOM_A_INTR1_TEST2__POR },
-	{ TOMTOM_A_INTR1_TEST3, TOMTOM_A_INTR1_TEST3__POR },
-	{ TOMTOM_A_INTR1_SET0, TOMTOM_A_INTR1_SET0__POR },
-	{ TOMTOM_A_INTR1_SET1, TOMTOM_A_INTR1_SET1__POR },
-	{ TOMTOM_A_INTR1_SET2, TOMTOM_A_INTR1_SET2__POR },
-	{ TOMTOM_A_INTR1_SET3, TOMTOM_A_INTR1_SET3__POR },
-	{ TOMTOM_A_INTR2_MASK0, TOMTOM_A_INTR2_MASK0__POR },
-	{ TOMTOM_A_INTR2_STATUS0, TOMTOM_A_INTR2_STATUS0__POR },
-	{ TOMTOM_A_INTR2_CLEAR0, TOMTOM_A_INTR2_CLEAR0__POR },
-	{ TOMTOM_A_INTR2_LEVEL0, TOMTOM_A_INTR2_LEVEL0__POR },
-	{ TOMTOM_A_INTR2_TEST0, TOMTOM_A_INTR2_TEST0__POR },
-	{ TOMTOM_A_INTR2_SET0, TOMTOM_A_INTR2_SET0__POR },
-	{ TOMTOM_A_CDC_TX_I2S_SCK_MODE, TOMTOM_A_CDC_TX_I2S_SCK_MODE__POR },
-	{ TOMTOM_A_CDC_TX_I2S_WS_MODE, TOMTOM_A_CDC_TX_I2S_WS_MODE__POR },
-	{ TOMTOM_A_CDC_DMIC_DATA0_MODE, TOMTOM_A_CDC_DMIC_DATA0_MODE__POR },
-	{ TOMTOM_A_CDC_DMIC_CLK0_MODE, TOMTOM_A_CDC_DMIC_CLK0_MODE__POR },
-	{ TOMTOM_A_CDC_DMIC_DATA1_MODE, TOMTOM_A_CDC_DMIC_DATA1_MODE__POR },
-	{ TOMTOM_A_CDC_DMIC_CLK1_MODE, TOMTOM_A_CDC_DMIC_CLK1_MODE__POR },
-	{ TOMTOM_A_CDC_RX_I2S_SCK_MODE, TOMTOM_A_CDC_RX_I2S_SCK_MODE__POR },
-	{ TOMTOM_A_CDC_RX_I2S_WS_MODE, TOMTOM_A_CDC_RX_I2S_WS_MODE__POR },
-	{ TOMTOM_A_CDC_DMIC_DATA2_MODE, TOMTOM_A_CDC_DMIC_DATA2_MODE__POR },
-	{ TOMTOM_A_CDC_DMIC_CLK2_MODE, TOMTOM_A_CDC_DMIC_CLK2_MODE__POR },
-	{ TOMTOM_A_CDC_INTR1_MODE, TOMTOM_A_CDC_INTR1_MODE__POR },
-	{ TOMTOM_A_CDC_SB_NRZ_SEL_MODE, TOMTOM_A_CDC_SB_NRZ_SEL_MODE__POR },
-	{ TOMTOM_A_CDC_INTR2_MODE, TOMTOM_A_CDC_INTR2_MODE__POR },
-	{ TOMTOM_A_CDC_RF_PA_ON_MODE, TOMTOM_A_CDC_RF_PA_ON_MODE__POR },
-	{ TOMTOM_A_CDC_BOOST_MODE, TOMTOM_A_CDC_BOOST_MODE__POR },
-	{ TOMTOM_A_CDC_JTCK_MODE, TOMTOM_A_CDC_JTCK_MODE__POR },
-	{ TOMTOM_A_CDC_JTDI_MODE, TOMTOM_A_CDC_JTDI_MODE__POR },
-	{ TOMTOM_A_CDC_JTMS_MODE, TOMTOM_A_CDC_JTMS_MODE__POR },
-	{ TOMTOM_A_CDC_JTDO_MODE, TOMTOM_A_CDC_JTDO_MODE__POR },
-	{ TOMTOM_A_CDC_JTRST_MODE, TOMTOM_A_CDC_JTRST_MODE__POR },
-	{ TOMTOM_A_CDC_BIST_MODE_MODE, TOMTOM_A_CDC_BIST_MODE_MODE__POR },
-	{ TOMTOM_A_CDC_MAD_MAIN_CTL_1, TOMTOM_A_CDC_MAD_MAIN_CTL_1__POR },
-	{ TOMTOM_A_CDC_MAD_MAIN_CTL_2, TOMTOM_A_CDC_MAD_MAIN_CTL_2__POR },
-	{ TOMTOM_A_CDC_MAD_AUDIO_CTL_1, TOMTOM_A_CDC_MAD_AUDIO_CTL_1__POR },
-	{ TOMTOM_A_CDC_MAD_AUDIO_CTL_2, TOMTOM_A_CDC_MAD_AUDIO_CTL_2__POR },
-	{ TOMTOM_A_CDC_MAD_AUDIO_CTL_3, TOMTOM_A_CDC_MAD_AUDIO_CTL_3__POR },
-	{ TOMTOM_A_CDC_MAD_AUDIO_CTL_4, TOMTOM_A_CDC_MAD_AUDIO_CTL_4__POR },
-	{ TOMTOM_A_CDC_MAD_AUDIO_CTL_5, TOMTOM_A_CDC_MAD_AUDIO_CTL_5__POR },
-	{ TOMTOM_A_CDC_MAD_AUDIO_CTL_6, TOMTOM_A_CDC_MAD_AUDIO_CTL_6__POR },
-	{ TOMTOM_A_CDC_MAD_AUDIO_CTL_7, TOMTOM_A_CDC_MAD_AUDIO_CTL_7__POR },
-	{ TOMTOM_A_CDC_MAD_AUDIO_CTL_8, TOMTOM_A_CDC_MAD_AUDIO_CTL_8__POR },
-	{ TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR,
-				TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR__POR },
-	{ TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL,
-				TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL__POR },
-	{ TOMTOM_A_CDC_MAD_ULTR_CTL_1, TOMTOM_A_CDC_MAD_ULTR_CTL_1__POR },
-	{ TOMTOM_A_CDC_MAD_ULTR_CTL_2, TOMTOM_A_CDC_MAD_ULTR_CTL_2__POR },
-	{ TOMTOM_A_CDC_MAD_ULTR_CTL_3, TOMTOM_A_CDC_MAD_ULTR_CTL_3__POR },
-	{ TOMTOM_A_CDC_MAD_ULTR_CTL_4, TOMTOM_A_CDC_MAD_ULTR_CTL_4__POR },
-	{ TOMTOM_A_CDC_MAD_ULTR_CTL_5, TOMTOM_A_CDC_MAD_ULTR_CTL_5__POR },
-	{ TOMTOM_A_CDC_MAD_ULTR_CTL_6, TOMTOM_A_CDC_MAD_ULTR_CTL_6__POR },
-	{ TOMTOM_A_CDC_MAD_ULTR_CTL_7, TOMTOM_A_CDC_MAD_ULTR_CTL_7__POR },
-	{ TOMTOM_A_CDC_MAD_BEACON_CTL_1, TOMTOM_A_CDC_MAD_BEACON_CTL_1__POR },
-	{ TOMTOM_A_CDC_MAD_BEACON_CTL_2, TOMTOM_A_CDC_MAD_BEACON_CTL_2__POR },
-	{ TOMTOM_A_CDC_MAD_BEACON_CTL_3, TOMTOM_A_CDC_MAD_BEACON_CTL_3__POR },
-	{ TOMTOM_A_CDC_MAD_BEACON_CTL_4, TOMTOM_A_CDC_MAD_BEACON_CTL_4__POR },
-	{ TOMTOM_A_CDC_MAD_BEACON_CTL_5, TOMTOM_A_CDC_MAD_BEACON_CTL_5__POR },
-	{ TOMTOM_A_CDC_MAD_BEACON_CTL_6, TOMTOM_A_CDC_MAD_BEACON_CTL_6__POR },
-	{ TOMTOM_A_CDC_MAD_BEACON_CTL_7, TOMTOM_A_CDC_MAD_BEACON_CTL_7__POR },
-	{ TOMTOM_A_CDC_MAD_BEACON_CTL_8, TOMTOM_A_CDC_MAD_BEACON_CTL_8__POR },
-	{ TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR,
-				TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR__POR },
-	{ TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL,
-				TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL__POR },
-	{ TOMTOM_A_CDC_MAD_INP_SEL, TOMTOM_A_CDC_MAD_INP_SEL__POR },
-	{ TOMTOM_A_BIAS_REF_CTL, TOMTOM_A_BIAS_REF_CTL__POR },
-	{ TOMTOM_A_BIAS_CENTRAL_BG_CTL, TOMTOM_A_BIAS_CENTRAL_BG_CTL__POR },
-	{ TOMTOM_A_BIAS_PRECHRG_CTL, TOMTOM_A_BIAS_PRECHRG_CTL__POR },
-	{ TOMTOM_A_BIAS_CURR_CTL_1, TOMTOM_A_BIAS_CURR_CTL_1__POR },
-	{ TOMTOM_A_BIAS_CURR_CTL_2, TOMTOM_A_BIAS_CURR_CTL_2__POR },
-	{ TOMTOM_A_BIAS_OSC_BG_CTL, TOMTOM_A_BIAS_OSC_BG_CTL__POR },
-	{ TOMTOM_A_CLK_BUFF_EN1, TOMTOM_A_CLK_BUFF_EN1__POR },
-	{ TOMTOM_A_CLK_BUFF_EN2, TOMTOM_A_CLK_BUFF_EN2__POR },
-	{ TOMTOM_A_LDO_L_MODE_1, TOMTOM_A_LDO_L_MODE_1__POR },
-	{ TOMTOM_A_LDO_L_MODE_2, TOMTOM_A_LDO_L_MODE_2__POR },
-	{ TOMTOM_A_LDO_L_CTRL_1, TOMTOM_A_LDO_L_CTRL_1__POR },
-	{ TOMTOM_A_LDO_L_CTRL_2, TOMTOM_A_LDO_L_CTRL_2__POR },
-	{ TOMTOM_A_LDO_L_CTRL_3, TOMTOM_A_LDO_L_CTRL_3__POR },
-	{ TOMTOM_A_LDO_L_CTRL_4, TOMTOM_A_LDO_L_CTRL_4__POR },
-	{ TOMTOM_A_LDO_H_MODE_1, TOMTOM_A_LDO_H_MODE_1__POR },
-	{ TOMTOM_A_LDO_H_MODE_2, TOMTOM_A_LDO_H_MODE_2__POR },
-	{ TOMTOM_A_LDO_H_LOOP_CTL, TOMTOM_A_LDO_H_LOOP_CTL__POR },
-	{ TOMTOM_A_LDO_H_COMP_1, TOMTOM_A_LDO_H_COMP_1__POR },
-	{ TOMTOM_A_LDO_H_COMP_2, TOMTOM_A_LDO_H_COMP_2__POR },
-	{ TOMTOM_A_LDO_H_BIAS_1, TOMTOM_A_LDO_H_BIAS_1__POR },
-	{ TOMTOM_A_LDO_H_BIAS_2, TOMTOM_A_LDO_H_BIAS_2__POR },
-	{ TOMTOM_A_LDO_H_BIAS_3, TOMTOM_A_LDO_H_BIAS_3__POR },
-	{ TOMTOM_A_VBAT_CLK, TOMTOM_A_VBAT_CLK__POR },
-	{ TOMTOM_A_VBAT_LOOP, TOMTOM_A_VBAT_LOOP__POR },
-	{ TOMTOM_A_VBAT_REF, TOMTOM_A_VBAT_REF__POR },
-	{ TOMTOM_A_VBAT_ADC_TEST, TOMTOM_A_VBAT_ADC_TEST__POR },
-	{ TOMTOM_A_VBAT_FE, TOMTOM_A_VBAT_FE__POR },
-	{ TOMTOM_A_VBAT_BIAS_1, TOMTOM_A_VBAT_BIAS_1__POR },
-	{ TOMTOM_A_VBAT_BIAS_2, TOMTOM_A_VBAT_BIAS_2__POR },
-	{ TOMTOM_A_VBAT_ADC_DATA_MSB, TOMTOM_A_VBAT_ADC_DATA_MSB__POR },
-	{ TOMTOM_A_VBAT_ADC_DATA_LSB, TOMTOM_A_VBAT_ADC_DATA_LSB__POR },
-	{ TOMTOM_A_FLL_NREF, TOMTOM_A_FLL_NREF__POR },
-	{ TOMTOM_A_FLL_KDCO_TUNE, TOMTOM_A_FLL_KDCO_TUNE__POR },
-	{ TOMTOM_A_FLL_LOCK_THRESH, TOMTOM_A_FLL_LOCK_THRESH__POR },
-	{ TOMTOM_A_FLL_LOCK_DET_COUNT, TOMTOM_A_FLL_LOCK_DET_COUNT__POR },
-	{ TOMTOM_A_FLL_DAC_THRESHOLD, TOMTOM_A_FLL_DAC_THRESHOLD__POR },
-	{ TOMTOM_A_FLL_TEST_DCO_FREERUN, TOMTOM_A_FLL_TEST_DCO_FREERUN__POR },
-	{ TOMTOM_A_FLL_TEST_ENABLE, TOMTOM_A_FLL_TEST_ENABLE__POR },
-	{ TOMTOM_A_MICB_CFILT_1_CTL, TOMTOM_A_MICB_CFILT_1_CTL__POR },
-	{ TOMTOM_A_MICB_CFILT_1_VAL, TOMTOM_A_MICB_CFILT_1_VAL__POR },
-	{ TOMTOM_A_MICB_CFILT_1_PRECHRG, TOMTOM_A_MICB_CFILT_1_PRECHRG__POR },
-	{ TOMTOM_A_MICB_1_CTL, TOMTOM_A_MICB_1_CTL__POR },
-	{ TOMTOM_A_MICB_1_INT_RBIAS, TOMTOM_A_MICB_1_INT_RBIAS__POR },
-	{ TOMTOM_A_MICB_1_MBHC, TOMTOM_A_MICB_1_MBHC__POR },
-	{ TOMTOM_A_MICB_CFILT_2_CTL, TOMTOM_A_MICB_CFILT_2_CTL__POR },
-	{ TOMTOM_A_MICB_CFILT_2_VAL, TOMTOM_A_MICB_CFILT_2_VAL__POR },
-	{ TOMTOM_A_MICB_CFILT_2_PRECHRG, TOMTOM_A_MICB_CFILT_2_PRECHRG__POR },
-	{ TOMTOM_A_MICB_2_CTL, TOMTOM_A_MICB_2_CTL__POR },
-	{ TOMTOM_A_MICB_2_INT_RBIAS, TOMTOM_A_MICB_2_INT_RBIAS__POR },
-	{ TOMTOM_A_MICB_2_MBHC, TOMTOM_A_MICB_2_MBHC__POR },
-	{ TOMTOM_A_MICB_CFILT_3_CTL, TOMTOM_A_MICB_CFILT_3_CTL__POR },
-	{ TOMTOM_A_MICB_CFILT_3_VAL, TOMTOM_A_MICB_CFILT_3_VAL__POR },
-	{ TOMTOM_A_MICB_CFILT_3_PRECHRG, TOMTOM_A_MICB_CFILT_3_PRECHRG__POR },
-	{ TOMTOM_A_MICB_3_CTL, TOMTOM_A_MICB_3_CTL__POR },
-	{ TOMTOM_A_MICB_3_INT_RBIAS, TOMTOM_A_MICB_3_INT_RBIAS__POR },
-	{ TOMTOM_A_MICB_3_MBHC, TOMTOM_A_MICB_3_MBHC__POR },
-	{ TOMTOM_A_MICB_4_CTL, TOMTOM_A_MICB_4_CTL__POR },
-	{ TOMTOM_A_MICB_4_INT_RBIAS, TOMTOM_A_MICB_4_INT_RBIAS__POR },
-	{ TOMTOM_A_MICB_4_MBHC, TOMTOM_A_MICB_4_MBHC__POR },
-	{ TOMTOM_A_SPKR_DRV2_EN, TOMTOM_A_SPKR_DRV2_EN__POR },
-	{ TOMTOM_A_SPKR_DRV2_GAIN, TOMTOM_A_SPKR_DRV2_GAIN__POR },
-	{ TOMTOM_A_SPKR_DRV2_DAC_CTL, TOMTOM_A_SPKR_DRV2_DAC_CTL__POR },
-	{ TOMTOM_A_SPKR_DRV2_OCP_CTL, TOMTOM_A_SPKR_DRV2_OCP_CTL__POR },
-	{ TOMTOM_A_SPKR_DRV2_CLIP_DET, TOMTOM_A_SPKR_DRV2_CLIP_DET__POR },
-	{ TOMTOM_A_SPKR_DRV2_DBG_DAC, TOMTOM_A_SPKR_DRV2_DBG_DAC__POR },
-	{ TOMTOM_A_SPKR_DRV2_DBG_PA, TOMTOM_A_SPKR_DRV2_DBG_PA__POR },
-	{ TOMTOM_A_SPKR_DRV2_DBG_PWRSTG, TOMTOM_A_SPKR_DRV2_DBG_PWRSTG__POR },
-	{ TOMTOM_A_SPKR_DRV2_BIAS_LDO, TOMTOM_A_SPKR_DRV2_BIAS_LDO__POR },
-	{ TOMTOM_A_SPKR_DRV2_BIAS_INT, TOMTOM_A_SPKR_DRV2_BIAS_INT__POR },
-	{ TOMTOM_A_SPKR_DRV2_BIAS_PA, TOMTOM_A_SPKR_DRV2_BIAS_PA__POR },
-	{ TOMTOM_A_SPKR_DRV2_STATUS_OCP, TOMTOM_A_SPKR_DRV2_STATUS_OCP__POR },
-	{ TOMTOM_A_SPKR_DRV2_STATUS_PA, TOMTOM_A_SPKR_DRV2_STATUS_PA__POR },
-	{ TOMTOM_A_MBHC_INSERT_DETECT, TOMTOM_A_MBHC_INSERT_DETECT__POR },
-	{ TOMTOM_A_MBHC_INSERT_DET_STATUS,
-				TOMTOM_A_MBHC_INSERT_DET_STATUS__POR },
-	{ TOMTOM_A_TX_COM_BIAS, TOMTOM_A_TX_COM_BIAS__POR },
-	{ TOMTOM_A_MBHC_INSERT_DETECT2, TOMTOM_A_MBHC_INSERT_DETECT2__POR },
-	{ TOMTOM_A_MBHC_SCALING_MUX_1, TOMTOM_A_MBHC_SCALING_MUX_1__POR },
-	{ TOMTOM_A_MBHC_SCALING_MUX_2, TOMTOM_A_MBHC_SCALING_MUX_2__POR },
-	{ TOMTOM_A_MAD_ANA_CTRL, TOMTOM_A_MAD_ANA_CTRL__POR },
-	{ TOMTOM_A_TX_SUP_SWITCH_CTRL_1, TOMTOM_A_TX_SUP_SWITCH_CTRL_1__POR },
-	{ TOMTOM_A_TX_SUP_SWITCH_CTRL_2, TOMTOM_A_TX_SUP_SWITCH_CTRL_2__POR },
-	{ TOMTOM_A_TX_1_GAIN, TOMTOM_A_TX_1_GAIN__POR },
-	{ TOMTOM_A_TX_1_2_TEST_EN, TOMTOM_A_TX_1_2_TEST_EN__POR },
-	{ TOMTOM_A_TX_2_GAIN, TOMTOM_A_TX_2_GAIN__POR },
-	{ TOMTOM_A_TX_1_2_ADC_IB, TOMTOM_A_TX_1_2_ADC_IB__POR },
-	{ TOMTOM_A_TX_1_2_ATEST_REFCTRL, TOMTOM_A_TX_1_2_ATEST_REFCTRL__POR },
-	{ TOMTOM_A_TX_1_2_TEST_CTL, TOMTOM_A_TX_1_2_TEST_CTL__POR },
-	{ TOMTOM_A_TX_1_2_TEST_BLOCK_EN, TOMTOM_A_TX_1_2_TEST_BLOCK_EN__POR },
-	{ TOMTOM_A_TX_1_2_TXFE_CLKDIV, TOMTOM_A_TX_1_2_TXFE_CLKDIV__POR },
-	{ TOMTOM_A_TX_1_2_SAR_ERR_CH1, TOMTOM_A_TX_1_2_SAR_ERR_CH1__POR },
-	{ TOMTOM_A_TX_1_2_SAR_ERR_CH2, TOMTOM_A_TX_1_2_SAR_ERR_CH2__POR },
-	{ TOMTOM_A_TX_3_GAIN, TOMTOM_A_TX_3_GAIN__POR },
-	{ TOMTOM_A_TX_3_4_TEST_EN, TOMTOM_A_TX_3_4_TEST_EN__POR },
-	{ TOMTOM_A_TX_4_GAIN, TOMTOM_A_TX_4_GAIN__POR },
-	{ TOMTOM_A_TX_3_4_ADC_IB, TOMTOM_A_TX_3_4_ADC_IB__POR },
-	{ TOMTOM_A_TX_3_4_ATEST_REFCTRL, TOMTOM_A_TX_3_4_ATEST_REFCTRL__POR },
-	{ TOMTOM_A_TX_3_4_TEST_CTL, TOMTOM_A_TX_3_4_TEST_CTL__POR },
-	{ TOMTOM_A_TX_3_4_TEST_BLOCK_EN, TOMTOM_A_TX_3_4_TEST_BLOCK_EN__POR },
-	{ TOMTOM_A_TX_3_4_TXFE_CKDIV, TOMTOM_A_TX_3_4_TXFE_CKDIV__POR },
-	{ TOMTOM_A_TX_3_4_SAR_ERR_CH3, TOMTOM_A_TX_3_4_SAR_ERR_CH3__POR },
-	{ TOMTOM_A_TX_3_4_SAR_ERR_CH4, TOMTOM_A_TX_3_4_SAR_ERR_CH4__POR },
-	{ TOMTOM_A_TX_5_GAIN, TOMTOM_A_TX_5_GAIN__POR },
-	{ TOMTOM_A_TX_5_6_TEST_EN, TOMTOM_A_TX_5_6_TEST_EN__POR },
-	{ TOMTOM_A_TX_6_GAIN, TOMTOM_A_TX_6_GAIN__POR },
-	{ TOMTOM_A_TX_5_6_ADC_IB, TOMTOM_A_TX_5_6_ADC_IB__POR },
-	{ TOMTOM_A_TX_5_6_ATEST_REFCTRL, TOMTOM_A_TX_5_6_ATEST_REFCTRL__POR },
-	{ TOMTOM_A_TX_5_6_TEST_CTL, TOMTOM_A_TX_5_6_TEST_CTL__POR },
-	{ TOMTOM_A_TX_5_6_TEST_BLOCK_EN, TOMTOM_A_TX_5_6_TEST_BLOCK_EN__POR },
-	{ TOMTOM_A_TX_5_6_TXFE_CKDIV, TOMTOM_A_TX_5_6_TXFE_CKDIV__POR },
-	{ TOMTOM_A_TX_5_6_SAR_ERR_CH5, TOMTOM_A_TX_5_6_SAR_ERR_CH5__POR },
-	{ TOMTOM_A_TX_5_6_SAR_ERR_CH6, TOMTOM_A_TX_5_6_SAR_ERR_CH6__POR },
-	{ TOMTOM_A_TX_7_MBHC_EN, TOMTOM_A_TX_7_MBHC_EN__POR },
-	{ TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL,
-				TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL__POR },
-	{ TOMTOM_A_TX_7_MBHC_ADC, TOMTOM_A_TX_7_MBHC_ADC__POR },
-	{ TOMTOM_A_TX_7_MBHC_TEST_CTL, TOMTOM_A_TX_7_MBHC_TEST_CTL__POR },
-	{ TOMTOM_A_TX_7_MBHC_SAR_ERR, TOMTOM_A_TX_7_MBHC_SAR_ERR__POR },
-	{ TOMTOM_A_TX_7_TXFE_CLKDIV, TOMTOM_A_TX_7_TXFE_CLKDIV__POR },
-	{ TOMTOM_A_RCO_CTRL, TOMTOM_A_RCO_CTRL__POR },
-	{ TOMTOM_A_RCO_CALIBRATION_CTRL1, TOMTOM_A_RCO_CALIBRATION_CTRL1__POR },
-	{ TOMTOM_A_RCO_CALIBRATION_CTRL2, TOMTOM_A_RCO_CALIBRATION_CTRL2__POR },
-	{ TOMTOM_A_RCO_CALIBRATION_CTRL3, TOMTOM_A_RCO_CALIBRATION_CTRL3__POR },
-	{ TOMTOM_A_RCO_TEST_CTRL, TOMTOM_A_RCO_TEST_CTRL__POR },
-	{ TOMTOM_A_RCO_CALIBRATION_RESULT1,
-				TOMTOM_A_RCO_CALIBRATION_RESULT1__POR },
-	{ TOMTOM_A_RCO_CALIBRATION_RESULT2,
-				TOMTOM_A_RCO_CALIBRATION_RESULT2__POR },
-	{ TOMTOM_A_BUCK_MODE_1, TOMTOM_A_BUCK_MODE_1__POR },
-	{ TOMTOM_A_BUCK_MODE_2, TOMTOM_A_BUCK_MODE_2__POR },
-	{ TOMTOM_A_BUCK_MODE_3, TOMTOM_A_BUCK_MODE_3__POR },
-	{ TOMTOM_A_BUCK_MODE_4, TOMTOM_A_BUCK_MODE_4__POR },
-	{ TOMTOM_A_BUCK_MODE_5, TOMTOM_A_BUCK_MODE_5__POR },
-	{ TOMTOM_A_BUCK_CTRL_VCL_1, TOMTOM_A_BUCK_CTRL_VCL_1__POR },
-	{ TOMTOM_A_BUCK_CTRL_VCL_2, TOMTOM_A_BUCK_CTRL_VCL_2__POR },
-	{ TOMTOM_A_BUCK_CTRL_VCL_3, TOMTOM_A_BUCK_CTRL_VCL_3__POR },
-	{ TOMTOM_A_BUCK_CTRL_CCL_1, TOMTOM_A_BUCK_CTRL_CCL_1__POR },
-	{ TOMTOM_A_BUCK_CTRL_CCL_2, TOMTOM_A_BUCK_CTRL_CCL_2__POR },
-	{ TOMTOM_A_BUCK_CTRL_CCL_3, TOMTOM_A_BUCK_CTRL_CCL_3__POR },
-	{ TOMTOM_A_BUCK_CTRL_CCL_4, TOMTOM_A_BUCK_CTRL_CCL_4__POR },
-	{ TOMTOM_A_BUCK_CTRL_PWM_DRVR_1, TOMTOM_A_BUCK_CTRL_PWM_DRVR_1__POR },
-	{ TOMTOM_A_BUCK_CTRL_PWM_DRVR_2, TOMTOM_A_BUCK_CTRL_PWM_DRVR_2__POR },
-	{ TOMTOM_A_BUCK_CTRL_PWM_DRVR_3, TOMTOM_A_BUCK_CTRL_PWM_DRVR_3__POR },
-	{ TOMTOM_A_BUCK_TMUX_A_D, TOMTOM_A_BUCK_TMUX_A_D__POR },
-	{ TOMTOM_A_NCP_BUCKREF, TOMTOM_A_NCP_BUCKREF__POR },
-	{ TOMTOM_A_NCP_EN, TOMTOM_A_NCP_EN__POR },
-	{ TOMTOM_A_NCP_CLK, TOMTOM_A_NCP_CLK__POR },
-	{ TOMTOM_A_NCP_STATIC, TOMTOM_A_NCP_STATIC__POR },
-	{ TOMTOM_A_NCP_VTH_LOW, TOMTOM_A_NCP_VTH_LOW__POR },
-	{ TOMTOM_A_NCP_VTH_HIGH, TOMTOM_A_NCP_VTH_HIGH__POR },
-	{ TOMTOM_A_NCP_ATEST, TOMTOM_A_NCP_ATEST__POR },
-	{ TOMTOM_A_NCP_DTEST, TOMTOM_A_NCP_DTEST__POR },
-	{ TOMTOM_A_NCP_DLY1, TOMTOM_A_NCP_DLY1__POR },
-	{ TOMTOM_A_NCP_DLY2, TOMTOM_A_NCP_DLY2__POR },
-	{ TOMTOM_A_RX_AUX_SW_CTL, TOMTOM_A_RX_AUX_SW_CTL__POR },
-	{ TOMTOM_A_RX_PA_AUX_IN_CONN, TOMTOM_A_RX_PA_AUX_IN_CONN__POR },
-	{ TOMTOM_A_RX_COM_TIMER_DIV, TOMTOM_A_RX_COM_TIMER_DIV__POR },
-	{ TOMTOM_A_RX_COM_OCP_CTL, TOMTOM_A_RX_COM_OCP_CTL__POR },
-	{ TOMTOM_A_RX_COM_OCP_COUNT, TOMTOM_A_RX_COM_OCP_COUNT__POR },
-	{ TOMTOM_A_RX_COM_DAC_CTL, TOMTOM_A_RX_COM_DAC_CTL__POR },
-	{ TOMTOM_A_RX_COM_BIAS, TOMTOM_A_RX_COM_BIAS__POR },
-	{ TOMTOM_A_RX_HPH_AUTO_CHOP, TOMTOM_A_RX_HPH_AUTO_CHOP__POR },
-	{ TOMTOM_A_RX_HPH_CHOP_CTL, TOMTOM_A_RX_HPH_CHOP_CTL__POR },
-	{ TOMTOM_A_RX_HPH_BIAS_PA, TOMTOM_A_RX_HPH_BIAS_PA__POR },
-	{ TOMTOM_A_RX_HPH_BIAS_LDO, TOMTOM_A_RX_HPH_BIAS_LDO__POR },
-	{ TOMTOM_A_RX_HPH_BIAS_CNP, TOMTOM_A_RX_HPH_BIAS_CNP__POR },
-	{ TOMTOM_A_RX_HPH_BIAS_WG_OCP, TOMTOM_A_RX_HPH_BIAS_WG_OCP__POR },
-	{ TOMTOM_A_RX_HPH_OCP_CTL, TOMTOM_A_RX_HPH_OCP_CTL__POR },
-	{ TOMTOM_A_RX_HPH_CNP_EN, TOMTOM_A_RX_HPH_CNP_EN__POR },
-	{ TOMTOM_A_RX_HPH_CNP_WG_CTL, TOMTOM_A_RX_HPH_CNP_WG_CTL__POR },
-	{ TOMTOM_A_RX_HPH_CNP_WG_TIME, TOMTOM_A_RX_HPH_CNP_WG_TIME__POR },
-	{ TOMTOM_A_RX_HPH_L_GAIN, TOMTOM_A_RX_HPH_L_GAIN__POR },
-	{ TOMTOM_A_RX_HPH_L_TEST, TOMTOM_A_RX_HPH_L_TEST__POR },
-	{ TOMTOM_A_RX_HPH_L_PA_CTL, TOMTOM_A_RX_HPH_L_PA_CTL__POR },
-	{ TOMTOM_A_RX_HPH_L_DAC_CTL, TOMTOM_A_RX_HPH_L_DAC_CTL__POR },
-	{ TOMTOM_A_RX_HPH_L_ATEST, TOMTOM_A_RX_HPH_L_ATEST__POR },
-	{ TOMTOM_A_RX_HPH_L_STATUS, TOMTOM_A_RX_HPH_L_STATUS__POR },
-	{ TOMTOM_A_RX_HPH_R_GAIN, TOMTOM_A_RX_HPH_R_GAIN__POR },
-	{ TOMTOM_A_RX_HPH_R_TEST, TOMTOM_A_RX_HPH_R_TEST__POR },
-	{ TOMTOM_A_RX_HPH_R_PA_CTL, TOMTOM_A_RX_HPH_R_PA_CTL__POR },
-	{ TOMTOM_A_RX_HPH_R_DAC_CTL, TOMTOM_A_RX_HPH_R_DAC_CTL__POR },
-	{ TOMTOM_A_RX_HPH_R_ATEST, TOMTOM_A_RX_HPH_R_ATEST__POR },
-	{ TOMTOM_A_RX_HPH_R_STATUS, TOMTOM_A_RX_HPH_R_STATUS__POR },
-	{ TOMTOM_A_RX_EAR_BIAS_PA, TOMTOM_A_RX_EAR_BIAS_PA__POR },
-	{ TOMTOM_A_RX_EAR_BIAS_CMBUFF, TOMTOM_A_RX_EAR_BIAS_CMBUFF__POR },
-	{ TOMTOM_A_RX_EAR_EN, TOMTOM_A_RX_EAR_EN__POR },
-	{ TOMTOM_A_RX_EAR_GAIN, TOMTOM_A_RX_EAR_GAIN__POR },
-	{ TOMTOM_A_RX_EAR_CMBUFF, TOMTOM_A_RX_EAR_CMBUFF__POR },
-	{ TOMTOM_A_RX_EAR_ICTL, TOMTOM_A_RX_EAR_ICTL__POR },
-	{ TOMTOM_A_RX_EAR_CCOMP, TOMTOM_A_RX_EAR_CCOMP__POR },
-	{ TOMTOM_A_RX_EAR_VCM, TOMTOM_A_RX_EAR_VCM__POR },
-	{ TOMTOM_A_RX_EAR_CNP, TOMTOM_A_RX_EAR_CNP__POR },
-	{ TOMTOM_A_RX_EAR_DAC_CTL_ATEST, TOMTOM_A_RX_EAR_DAC_CTL_ATEST__POR },
-	{ TOMTOM_A_RX_EAR_STATUS, TOMTOM_A_RX_EAR_STATUS__POR },
-	{ TOMTOM_A_RX_LINE_BIAS_PA, TOMTOM_A_RX_LINE_BIAS_PA__POR },
-	{ TOMTOM_A_RX_BUCK_BIAS1, TOMTOM_A_RX_BUCK_BIAS1__POR },
-	{ TOMTOM_A_RX_BUCK_BIAS2, TOMTOM_A_RX_BUCK_BIAS2__POR },
-	{ TOMTOM_A_RX_LINE_COM, TOMTOM_A_RX_LINE_COM__POR },
-	{ TOMTOM_A_RX_LINE_CNP_EN, TOMTOM_A_RX_LINE_CNP_EN__POR },
-	{ TOMTOM_A_RX_LINE_CNP_WG_CTL, TOMTOM_A_RX_LINE_CNP_WG_CTL__POR },
-	{ TOMTOM_A_RX_LINE_CNP_WG_TIME, TOMTOM_A_RX_LINE_CNP_WG_TIME__POR },
-	{ TOMTOM_A_RX_LINE_1_GAIN, TOMTOM_A_RX_LINE_1_GAIN__POR },
-	{ TOMTOM_A_RX_LINE_1_TEST, TOMTOM_A_RX_LINE_1_TEST__POR },
-	{ TOMTOM_A_RX_LINE_1_DAC_CTL, TOMTOM_A_RX_LINE_1_DAC_CTL__POR },
-	{ TOMTOM_A_RX_LINE_1_STATUS, TOMTOM_A_RX_LINE_1_STATUS__POR },
-	{ TOMTOM_A_RX_LINE_2_GAIN, TOMTOM_A_RX_LINE_2_GAIN__POR },
-	{ TOMTOM_A_RX_LINE_2_TEST, TOMTOM_A_RX_LINE_2_TEST__POR },
-	{ TOMTOM_A_RX_LINE_2_DAC_CTL, TOMTOM_A_RX_LINE_2_DAC_CTL__POR },
-	{ TOMTOM_A_RX_LINE_2_STATUS, TOMTOM_A_RX_LINE_2_STATUS__POR },
-	{ TOMTOM_A_RX_LINE_3_GAIN, TOMTOM_A_RX_LINE_3_GAIN__POR },
-	{ TOMTOM_A_RX_LINE_3_TEST, TOMTOM_A_RX_LINE_3_TEST__POR },
-	{ TOMTOM_A_RX_LINE_3_DAC_CTL, TOMTOM_A_RX_LINE_3_DAC_CTL__POR },
-	{ TOMTOM_A_RX_LINE_3_STATUS, TOMTOM_A_RX_LINE_3_STATUS__POR },
-	{ TOMTOM_A_RX_LINE_4_GAIN, TOMTOM_A_RX_LINE_4_GAIN__POR },
-	{ TOMTOM_A_RX_LINE_4_TEST, TOMTOM_A_RX_LINE_4_TEST__POR },
-	{ TOMTOM_A_RX_LINE_4_DAC_CTL, TOMTOM_A_RX_LINE_4_DAC_CTL__POR },
-	{ TOMTOM_A_RX_LINE_4_STATUS, TOMTOM_A_RX_LINE_4_STATUS__POR },
-	{ TOMTOM_A_RX_LINE_CNP_DBG, TOMTOM_A_RX_LINE_CNP_DBG__POR },
-	{ TOMTOM_A_SPKR_DRV1_EN, TOMTOM_A_SPKR_DRV1_EN__POR },
-	{ TOMTOM_A_SPKR_DRV1_GAIN, TOMTOM_A_SPKR_DRV1_GAIN__POR },
-	{ TOMTOM_A_SPKR_DRV1_DAC_CTL, TOMTOM_A_SPKR_DRV1_DAC_CTL__POR },
-	{ TOMTOM_A_SPKR_DRV1_OCP_CTL, TOMTOM_A_SPKR_DRV1_OCP_CTL__POR },
-	{ TOMTOM_A_SPKR_DRV1_CLIP_DET, TOMTOM_A_SPKR_DRV1_CLIP_DET__POR },
-	{ TOMTOM_A_SPKR_DRV1_IEC, TOMTOM_A_SPKR_DRV1_IEC__POR },
-	{ TOMTOM_A_SPKR_DRV1_DBG_DAC, TOMTOM_A_SPKR_DRV1_DBG_DAC__POR },
-	{ TOMTOM_A_SPKR_DRV1_DBG_PA, TOMTOM_A_SPKR_DRV1_DBG_PA__POR },
-	{ TOMTOM_A_SPKR_DRV1_DBG_PWRSTG, TOMTOM_A_SPKR_DRV1_DBG_PWRSTG__POR },
-	{ TOMTOM_A_SPKR_DRV1_BIAS_LDO, TOMTOM_A_SPKR_DRV1_BIAS_LDO__POR },
-	{ TOMTOM_A_SPKR_DRV1_BIAS_INT, TOMTOM_A_SPKR_DRV1_BIAS_INT__POR },
-	{ TOMTOM_A_SPKR_DRV1_BIAS_PA, TOMTOM_A_SPKR_DRV1_BIAS_PA__POR },
-	{ TOMTOM_A_SPKR_DRV1_STATUS_OCP, TOMTOM_A_SPKR_DRV1_STATUS_OCP__POR },
-	{ TOMTOM_A_SPKR_DRV1_STATUS_PA, TOMTOM_A_SPKR_DRV1_STATUS_PA__POR },
-	{ TOMTOM_A_SPKR1_PROT_EN, TOMTOM_A_SPKR1_PROT_EN__POR },
-	{ TOMTOM_A_SPKR1_PROT_ADC_TEST_EN,
-				TOMTOM_A_SPKR1_PROT_ADC_TEST_EN__POR },
-	{ TOMTOM_A_SPKR1_PROT_ATEST, TOMTOM_A_SPKR1_PROT_ATEST__POR },
-	{ TOMTOM_A_SPKR1_PROT_LDO_CTRL, TOMTOM_A_SPKR1_PROT_LDO_CTRL__POR },
-	{ TOMTOM_A_SPKR1_PROT_ISENSE_CTRL,
-				TOMTOM_A_SPKR1_PROT_ISENSE_CTRL__POR },
-	{ TOMTOM_A_SPKR1_PROT_VSENSE_CTRL,
-				TOMTOM_A_SPKR1_PROT_VSENSE_CTRL__POR },
-	{ TOMTOM_A_SPKR2_PROT_EN, TOMTOM_A_SPKR2_PROT_EN__POR },
-	{ TOMTOM_A_SPKR2_PROT_ADC_TEST_EN,
-				TOMTOM_A_SPKR2_PROT_ADC_TEST_EN__POR },
-	{ TOMTOM_A_SPKR2_PROT_ATEST, TOMTOM_A_SPKR2_PROT_ATEST__POR },
-	{ TOMTOM_A_SPKR2_PROT_LDO_CTRL, TOMTOM_A_SPKR2_PROT_LDO_CTRL__POR },
-	{ TOMTOM_A_SPKR2_PROT_ISENSE_CTRL,
-				TOMTOM_A_SPKR2_PROT_ISENSE_CTRL__POR },
-	{ TOMTOM_A_SPKR2_PROT_VSENSE_CTRL,
-				TOMTOM_A_SPKR2_PROT_VSENSE_CTRL__POR },
-	{ TOMTOM_A_MBHC_HPH, TOMTOM_A_MBHC_HPH__POR },
-	{ TOMTOM_A_CDC_ANC1_B1_CTL, TOMTOM_A_CDC_ANC1_B1_CTL__POR },
-	{ TOMTOM_A_CDC_ANC2_B1_CTL, TOMTOM_A_CDC_ANC2_B1_CTL__POR },
-	{ TOMTOM_A_CDC_ANC1_SHIFT, TOMTOM_A_CDC_ANC1_SHIFT__POR },
-	{ TOMTOM_A_CDC_ANC2_SHIFT, TOMTOM_A_CDC_ANC2_SHIFT__POR },
-	{ TOMTOM_A_CDC_ANC1_IIR_B1_CTL, TOMTOM_A_CDC_ANC1_IIR_B1_CTL__POR },
-	{ TOMTOM_A_CDC_ANC2_IIR_B1_CTL, TOMTOM_A_CDC_ANC2_IIR_B1_CTL__POR },
-	{ TOMTOM_A_CDC_ANC1_IIR_B2_CTL, TOMTOM_A_CDC_ANC1_IIR_B2_CTL__POR },
-	{ TOMTOM_A_CDC_ANC2_IIR_B2_CTL, TOMTOM_A_CDC_ANC2_IIR_B2_CTL__POR },
-	{ TOMTOM_A_CDC_ANC1_IIR_B3_CTL, TOMTOM_A_CDC_ANC1_IIR_B3_CTL__POR },
-	{ TOMTOM_A_CDC_ANC2_IIR_B3_CTL, TOMTOM_A_CDC_ANC2_IIR_B3_CTL__POR },
-	{ TOMTOM_A_CDC_ANC1_LPF_B1_CTL, TOMTOM_A_CDC_ANC1_LPF_B1_CTL__POR },
-	{ TOMTOM_A_CDC_ANC2_LPF_B1_CTL, TOMTOM_A_CDC_ANC2_LPF_B1_CTL__POR },
-	{ TOMTOM_A_CDC_ANC1_LPF_B2_CTL, TOMTOM_A_CDC_ANC1_LPF_B2_CTL__POR },
-	{ TOMTOM_A_CDC_ANC2_LPF_B2_CTL, TOMTOM_A_CDC_ANC2_LPF_B2_CTL__POR },
-	{ TOMTOM_A_CDC_ANC1_SPARE, TOMTOM_A_CDC_ANC1_SPARE__POR },
-	{ TOMTOM_A_CDC_ANC2_SPARE, TOMTOM_A_CDC_ANC2_SPARE__POR },
-	{ TOMTOM_A_CDC_ANC1_SMLPF_CTL, TOMTOM_A_CDC_ANC1_SMLPF_CTL__POR },
-	{ TOMTOM_A_CDC_ANC2_SMLPF_CTL, TOMTOM_A_CDC_ANC2_SMLPF_CTL__POR },
-	{ TOMTOM_A_CDC_ANC1_DCFLT_CTL, TOMTOM_A_CDC_ANC1_DCFLT_CTL__POR },
-	{ TOMTOM_A_CDC_ANC2_DCFLT_CTL, TOMTOM_A_CDC_ANC2_DCFLT_CTL__POR },
-	{ TOMTOM_A_CDC_ANC1_GAIN_CTL, TOMTOM_A_CDC_ANC1_GAIN_CTL__POR },
-	{ TOMTOM_A_CDC_ANC2_GAIN_CTL, TOMTOM_A_CDC_ANC2_GAIN_CTL__POR },
-	{ TOMTOM_A_CDC_ANC1_B2_CTL, TOMTOM_A_CDC_ANC1_B2_CTL__POR },
-	{ TOMTOM_A_CDC_ANC2_B2_CTL, TOMTOM_A_CDC_ANC2_B2_CTL__POR },
-	{ TOMTOM_A_CDC_TX1_VOL_CTL_TIMER, TOMTOM_A_CDC_TX1_VOL_CTL_TIMER__POR },
-	{ TOMTOM_A_CDC_TX2_VOL_CTL_TIMER, TOMTOM_A_CDC_TX2_VOL_CTL_TIMER__POR },
-	{ TOMTOM_A_CDC_TX3_VOL_CTL_TIMER, TOMTOM_A_CDC_TX3_VOL_CTL_TIMER__POR },
-	{ TOMTOM_A_CDC_TX4_VOL_CTL_TIMER, TOMTOM_A_CDC_TX4_VOL_CTL_TIMER__POR },
-	{ TOMTOM_A_CDC_TX5_VOL_CTL_TIMER, TOMTOM_A_CDC_TX5_VOL_CTL_TIMER__POR },
-	{ TOMTOM_A_CDC_TX6_VOL_CTL_TIMER, TOMTOM_A_CDC_TX6_VOL_CTL_TIMER__POR },
-	{ TOMTOM_A_CDC_TX7_VOL_CTL_TIMER, TOMTOM_A_CDC_TX7_VOL_CTL_TIMER__POR },
-	{ TOMTOM_A_CDC_TX8_VOL_CTL_TIMER, TOMTOM_A_CDC_TX8_VOL_CTL_TIMER__POR },
-	{ TOMTOM_A_CDC_TX9_VOL_CTL_TIMER, TOMTOM_A_CDC_TX9_VOL_CTL_TIMER__POR },
-	{ TOMTOM_A_CDC_TX10_VOL_CTL_TIMER,
-				TOMTOM_A_CDC_TX10_VOL_CTL_TIMER__POR },
-	{ TOMTOM_A_CDC_TX1_VOL_CTL_GAIN, TOMTOM_A_CDC_TX1_VOL_CTL_GAIN__POR },
-	{ TOMTOM_A_CDC_TX2_VOL_CTL_GAIN, TOMTOM_A_CDC_TX2_VOL_CTL_GAIN__POR },
-	{ TOMTOM_A_CDC_TX3_VOL_CTL_GAIN, TOMTOM_A_CDC_TX3_VOL_CTL_GAIN__POR },
-	{ TOMTOM_A_CDC_TX4_VOL_CTL_GAIN, TOMTOM_A_CDC_TX4_VOL_CTL_GAIN__POR },
-	{ TOMTOM_A_CDC_TX5_VOL_CTL_GAIN, TOMTOM_A_CDC_TX5_VOL_CTL_GAIN__POR },
-	{ TOMTOM_A_CDC_TX6_VOL_CTL_GAIN, TOMTOM_A_CDC_TX6_VOL_CTL_GAIN__POR },
-	{ TOMTOM_A_CDC_TX7_VOL_CTL_GAIN, TOMTOM_A_CDC_TX7_VOL_CTL_GAIN__POR },
-	{ TOMTOM_A_CDC_TX8_VOL_CTL_GAIN, TOMTOM_A_CDC_TX8_VOL_CTL_GAIN__POR },
-	{ TOMTOM_A_CDC_TX9_VOL_CTL_GAIN, TOMTOM_A_CDC_TX9_VOL_CTL_GAIN__POR },
-	{ TOMTOM_A_CDC_TX10_VOL_CTL_GAIN, TOMTOM_A_CDC_TX10_VOL_CTL_GAIN__POR },
-	{ TOMTOM_A_CDC_TX1_VOL_CTL_CFG, TOMTOM_A_CDC_TX1_VOL_CTL_CFG__POR },
-	{ TOMTOM_A_CDC_TX2_VOL_CTL_CFG, TOMTOM_A_CDC_TX2_VOL_CTL_CFG__POR },
-	{ TOMTOM_A_CDC_TX3_VOL_CTL_CFG, TOMTOM_A_CDC_TX3_VOL_CTL_CFG__POR },
-	{ TOMTOM_A_CDC_TX4_VOL_CTL_CFG, TOMTOM_A_CDC_TX4_VOL_CTL_CFG__POR },
-	{ TOMTOM_A_CDC_TX5_VOL_CTL_CFG, TOMTOM_A_CDC_TX5_VOL_CTL_CFG__POR },
-	{ TOMTOM_A_CDC_TX6_VOL_CTL_CFG, TOMTOM_A_CDC_TX6_VOL_CTL_CFG__POR },
-	{ TOMTOM_A_CDC_TX7_VOL_CTL_CFG, TOMTOM_A_CDC_TX7_VOL_CTL_CFG__POR },
-	{ TOMTOM_A_CDC_TX8_VOL_CTL_CFG, TOMTOM_A_CDC_TX8_VOL_CTL_CFG__POR },
-	{ TOMTOM_A_CDC_TX9_VOL_CTL_CFG, TOMTOM_A_CDC_TX9_VOL_CTL_CFG__POR },
-	{ TOMTOM_A_CDC_TX10_VOL_CTL_CFG, TOMTOM_A_CDC_TX10_VOL_CTL_CFG__POR },
-	{ TOMTOM_A_CDC_TX1_MUX_CTL, TOMTOM_A_CDC_TX1_MUX_CTL__POR },
-	{ TOMTOM_A_CDC_TX2_MUX_CTL, TOMTOM_A_CDC_TX2_MUX_CTL__POR },
-	{ TOMTOM_A_CDC_TX3_MUX_CTL, TOMTOM_A_CDC_TX3_MUX_CTL__POR },
-	{ TOMTOM_A_CDC_TX4_MUX_CTL, TOMTOM_A_CDC_TX4_MUX_CTL__POR },
-	{ TOMTOM_A_CDC_TX5_MUX_CTL, TOMTOM_A_CDC_TX5_MUX_CTL__POR },
-	{ TOMTOM_A_CDC_TX6_MUX_CTL, TOMTOM_A_CDC_TX6_MUX_CTL__POR },
-	{ TOMTOM_A_CDC_TX7_MUX_CTL, TOMTOM_A_CDC_TX7_MUX_CTL__POR },
-	{ TOMTOM_A_CDC_TX8_MUX_CTL, TOMTOM_A_CDC_TX8_MUX_CTL__POR },
-	{ TOMTOM_A_CDC_TX9_MUX_CTL, TOMTOM_A_CDC_TX9_MUX_CTL__POR },
-	{ TOMTOM_A_CDC_TX10_MUX_CTL, TOMTOM_A_CDC_TX10_MUX_CTL__POR },
-	{ TOMTOM_A_CDC_TX1_CLK_FS_CTL, TOMTOM_A_CDC_TX1_CLK_FS_CTL__POR },
-	{ TOMTOM_A_CDC_TX2_CLK_FS_CTL, TOMTOM_A_CDC_TX2_CLK_FS_CTL__POR },
-	{ TOMTOM_A_CDC_TX3_CLK_FS_CTL, TOMTOM_A_CDC_TX3_CLK_FS_CTL__POR },
-	{ TOMTOM_A_CDC_TX4_CLK_FS_CTL, TOMTOM_A_CDC_TX4_CLK_FS_CTL__POR },
-	{ TOMTOM_A_CDC_TX5_CLK_FS_CTL, TOMTOM_A_CDC_TX5_CLK_FS_CTL__POR },
-	{ TOMTOM_A_CDC_TX6_CLK_FS_CTL, TOMTOM_A_CDC_TX6_CLK_FS_CTL__POR },
-	{ TOMTOM_A_CDC_TX7_CLK_FS_CTL, TOMTOM_A_CDC_TX7_CLK_FS_CTL__POR },
-	{ TOMTOM_A_CDC_TX8_CLK_FS_CTL, TOMTOM_A_CDC_TX8_CLK_FS_CTL__POR },
-	{ TOMTOM_A_CDC_TX9_CLK_FS_CTL, TOMTOM_A_CDC_TX9_CLK_FS_CTL__POR },
-	{ TOMTOM_A_CDC_TX10_CLK_FS_CTL, TOMTOM_A_CDC_TX10_CLK_FS_CTL__POR },
-	{ TOMTOM_A_CDC_TX1_DMIC_CTL, TOMTOM_A_CDC_TX1_DMIC_CTL__POR },
-	{ TOMTOM_A_CDC_TX2_DMIC_CTL, TOMTOM_A_CDC_TX2_DMIC_CTL__POR },
-	{ TOMTOM_A_CDC_TX3_DMIC_CTL, TOMTOM_A_CDC_TX3_DMIC_CTL__POR },
-	{ TOMTOM_A_CDC_TX4_DMIC_CTL, TOMTOM_A_CDC_TX4_DMIC_CTL__POR },
-	{ TOMTOM_A_CDC_TX5_DMIC_CTL, TOMTOM_A_CDC_TX5_DMIC_CTL__POR },
-	{ TOMTOM_A_CDC_TX6_DMIC_CTL, TOMTOM_A_CDC_TX6_DMIC_CTL__POR },
-	{ TOMTOM_A_CDC_TX7_DMIC_CTL, TOMTOM_A_CDC_TX7_DMIC_CTL__POR },
-	{ TOMTOM_A_CDC_TX8_DMIC_CTL, TOMTOM_A_CDC_TX8_DMIC_CTL__POR },
-	{ TOMTOM_A_CDC_TX9_DMIC_CTL, TOMTOM_A_CDC_TX9_DMIC_CTL__POR },
-	{ TOMTOM_A_CDC_TX10_DMIC_CTL, TOMTOM_A_CDC_TX10_DMIC_CTL__POR },
-	{ TOMTOM_A_CDC_SPKR_CLIPDET_VAL0, TOMTOM_A_CDC_SPKR_CLIPDET_VAL0__POR },
-	{ TOMTOM_A_CDC_SPKR_CLIPDET_VAL1, TOMTOM_A_CDC_SPKR_CLIPDET_VAL1__POR },
-	{ TOMTOM_A_CDC_SPKR_CLIPDET_VAL2, TOMTOM_A_CDC_SPKR_CLIPDET_VAL2__POR },
-	{ TOMTOM_A_CDC_SPKR_CLIPDET_VAL3, TOMTOM_A_CDC_SPKR_CLIPDET_VAL3__POR },
-	{ TOMTOM_A_CDC_SPKR_CLIPDET_VAL4, TOMTOM_A_CDC_SPKR_CLIPDET_VAL4__POR },
-	{ TOMTOM_A_CDC_SPKR_CLIPDET_VAL5, TOMTOM_A_CDC_SPKR_CLIPDET_VAL5__POR },
-	{ TOMTOM_A_CDC_SPKR_CLIPDET_VAL6, TOMTOM_A_CDC_SPKR_CLIPDET_VAL6__POR },
-	{ TOMTOM_A_CDC_SPKR_CLIPDET_VAL7, TOMTOM_A_CDC_SPKR_CLIPDET_VAL7__POR },
-	{ TOMTOM_A_CDC_DEBUG_B1_CTL, TOMTOM_A_CDC_DEBUG_B1_CTL__POR },
-	{ TOMTOM_A_CDC_DEBUG_B2_CTL, TOMTOM_A_CDC_DEBUG_B2_CTL__POR },
-	{ TOMTOM_A_CDC_DEBUG_B3_CTL, TOMTOM_A_CDC_DEBUG_B3_CTL__POR },
-	{ TOMTOM_A_CDC_DEBUG_B4_CTL, TOMTOM_A_CDC_DEBUG_B4_CTL__POR },
-	{ TOMTOM_A_CDC_DEBUG_B5_CTL, TOMTOM_A_CDC_DEBUG_B5_CTL__POR },
-	{ TOMTOM_A_CDC_DEBUG_B6_CTL, TOMTOM_A_CDC_DEBUG_B6_CTL__POR },
-	{ TOMTOM_A_CDC_DEBUG_B7_CTL, TOMTOM_A_CDC_DEBUG_B7_CTL__POR },
-	{ TOMTOM_A_CDC_SRC1_PDA_CFG, TOMTOM_A_CDC_SRC1_PDA_CFG__POR },
-	{ TOMTOM_A_CDC_SRC2_PDA_CFG, TOMTOM_A_CDC_SRC2_PDA_CFG__POR },
-	{ TOMTOM_A_CDC_SRC1_FS_CTL, TOMTOM_A_CDC_SRC1_FS_CTL__POR },
-	{ TOMTOM_A_CDC_SRC2_FS_CTL, TOMTOM_A_CDC_SRC2_FS_CTL__POR },
-	{ TOMTOM_A_CDC_RX1_B1_CTL, TOMTOM_A_CDC_RX1_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX2_B1_CTL, TOMTOM_A_CDC_RX2_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX3_B1_CTL, TOMTOM_A_CDC_RX3_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX4_B1_CTL, TOMTOM_A_CDC_RX4_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX5_B1_CTL, TOMTOM_A_CDC_RX5_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX6_B1_CTL, TOMTOM_A_CDC_RX6_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX7_B1_CTL, TOMTOM_A_CDC_RX7_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX1_B2_CTL, TOMTOM_A_CDC_RX1_B2_CTL__POR },
-	{ TOMTOM_A_CDC_RX2_B2_CTL, TOMTOM_A_CDC_RX2_B2_CTL__POR },
-	{ TOMTOM_A_CDC_RX3_B2_CTL, TOMTOM_A_CDC_RX3_B2_CTL__POR },
-	{ TOMTOM_A_CDC_RX4_B2_CTL, TOMTOM_A_CDC_RX4_B2_CTL__POR },
-	{ TOMTOM_A_CDC_RX5_B2_CTL, TOMTOM_A_CDC_RX5_B2_CTL__POR },
-	{ TOMTOM_A_CDC_RX6_B2_CTL, TOMTOM_A_CDC_RX6_B2_CTL__POR },
-	{ TOMTOM_A_CDC_RX7_B2_CTL, TOMTOM_A_CDC_RX7_B2_CTL__POR },
-	{ TOMTOM_A_CDC_RX1_B3_CTL, TOMTOM_A_CDC_RX1_B3_CTL__POR },
-	{ TOMTOM_A_CDC_RX2_B3_CTL, TOMTOM_A_CDC_RX2_B3_CTL__POR },
-	{ TOMTOM_A_CDC_RX3_B3_CTL, TOMTOM_A_CDC_RX3_B3_CTL__POR },
-	{ TOMTOM_A_CDC_RX4_B3_CTL, TOMTOM_A_CDC_RX4_B3_CTL__POR },
-	{ TOMTOM_A_CDC_RX5_B3_CTL, TOMTOM_A_CDC_RX5_B3_CTL__POR },
-	{ TOMTOM_A_CDC_RX6_B3_CTL, TOMTOM_A_CDC_RX6_B3_CTL__POR },
-	{ TOMTOM_A_CDC_RX7_B3_CTL, TOMTOM_A_CDC_RX7_B3_CTL__POR },
-	{ TOMTOM_A_CDC_RX1_B4_CTL, TOMTOM_A_CDC_RX1_B4_CTL__POR },
-	{ TOMTOM_A_CDC_RX2_B4_CTL, TOMTOM_A_CDC_RX2_B4_CTL__POR },
-	{ TOMTOM_A_CDC_RX3_B4_CTL, TOMTOM_A_CDC_RX3_B4_CTL__POR },
-	{ TOMTOM_A_CDC_RX4_B4_CTL, TOMTOM_A_CDC_RX4_B4_CTL__POR },
-	{ TOMTOM_A_CDC_RX5_B4_CTL, TOMTOM_A_CDC_RX5_B4_CTL__POR },
-	{ TOMTOM_A_CDC_RX6_B4_CTL, TOMTOM_A_CDC_RX6_B4_CTL__POR },
-	{ TOMTOM_A_CDC_RX7_B4_CTL, TOMTOM_A_CDC_RX7_B4_CTL__POR },
-	{ TOMTOM_A_CDC_RX1_B5_CTL, TOMTOM_A_CDC_RX1_B5_CTL__POR },
-	{ TOMTOM_A_CDC_RX2_B5_CTL, TOMTOM_A_CDC_RX2_B5_CTL__POR },
-	{ TOMTOM_A_CDC_RX3_B5_CTL, TOMTOM_A_CDC_RX3_B5_CTL__POR },
-	{ TOMTOM_A_CDC_RX4_B5_CTL, TOMTOM_A_CDC_RX4_B5_CTL__POR },
-	{ TOMTOM_A_CDC_RX5_B5_CTL, TOMTOM_A_CDC_RX5_B5_CTL__POR },
-	{ TOMTOM_A_CDC_RX6_B5_CTL, TOMTOM_A_CDC_RX6_B5_CTL__POR },
-	{ TOMTOM_A_CDC_RX7_B5_CTL, TOMTOM_A_CDC_RX7_B5_CTL__POR },
-	{ TOMTOM_A_CDC_RX1_B6_CTL, TOMTOM_A_CDC_RX1_B6_CTL__POR },
-	{ TOMTOM_A_CDC_RX2_B6_CTL, TOMTOM_A_CDC_RX2_B6_CTL__POR },
-	{ TOMTOM_A_CDC_RX3_B6_CTL, TOMTOM_A_CDC_RX3_B6_CTL__POR },
-	{ TOMTOM_A_CDC_RX4_B6_CTL, TOMTOM_A_CDC_RX4_B6_CTL__POR },
-	{ TOMTOM_A_CDC_RX5_B6_CTL, TOMTOM_A_CDC_RX5_B6_CTL__POR },
-	{ TOMTOM_A_CDC_RX6_B6_CTL, TOMTOM_A_CDC_RX6_B6_CTL__POR },
-	{ TOMTOM_A_CDC_RX7_B6_CTL, TOMTOM_A_CDC_RX7_B6_CTL__POR },
-	{ TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL,
-				TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL,
-				TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL,
-				TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL,
-				TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL,
-				TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL,
-				TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL,
-				TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL,
-				TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL__POR },
-	{ TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL,
-				TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL__POR },
-	{ TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL,
-				TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL__POR },
-	{ TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL,
-				TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL__POR },
-	{ TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL,
-				TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL__POR },
-	{ TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL,
-				TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL__POR },
-	{ TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL,
-				TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL__POR },
-	{ TOMTOM_A_CDC_VBAT_CFG, TOMTOM_A_CDC_VBAT_CFG__POR },
-	{ TOMTOM_A_CDC_VBAT_ADC_CAL1, TOMTOM_A_CDC_VBAT_ADC_CAL1__POR },
-	{ TOMTOM_A_CDC_VBAT_ADC_CAL2, TOMTOM_A_CDC_VBAT_ADC_CAL2__POR },
-	{ TOMTOM_A_CDC_VBAT_ADC_CAL3, TOMTOM_A_CDC_VBAT_ADC_CAL3__POR },
-	{ TOMTOM_A_CDC_VBAT_PK_EST1, TOMTOM_A_CDC_VBAT_PK_EST1__POR },
-	{ TOMTOM_A_CDC_VBAT_PK_EST2, TOMTOM_A_CDC_VBAT_PK_EST2__POR },
-	{ TOMTOM_A_CDC_VBAT_PK_EST3, TOMTOM_A_CDC_VBAT_PK_EST3__POR },
-	{ TOMTOM_A_CDC_VBAT_RF_PROC1, TOMTOM_A_CDC_VBAT_RF_PROC1__POR },
-	{ TOMTOM_A_CDC_VBAT_RF_PROC2, TOMTOM_A_CDC_VBAT_RF_PROC2__POR },
-	{ TOMTOM_A_CDC_VBAT_TAC1, TOMTOM_A_CDC_VBAT_TAC1__POR },
-	{ TOMTOM_A_CDC_VBAT_TAC2, TOMTOM_A_CDC_VBAT_TAC2__POR },
-	{ TOMTOM_A_CDC_VBAT_TAC3, TOMTOM_A_CDC_VBAT_TAC3__POR },
-	{ TOMTOM_A_CDC_VBAT_TAC4, TOMTOM_A_CDC_VBAT_TAC4__POR },
-	{ TOMTOM_A_CDC_VBAT_GAIN_UPD1, TOMTOM_A_CDC_VBAT_GAIN_UPD1__POR },
-	{ TOMTOM_A_CDC_VBAT_GAIN_UPD2, TOMTOM_A_CDC_VBAT_GAIN_UPD2__POR },
-	{ TOMTOM_A_CDC_VBAT_GAIN_UPD3, TOMTOM_A_CDC_VBAT_GAIN_UPD3__POR },
-	{ TOMTOM_A_CDC_VBAT_GAIN_UPD4, TOMTOM_A_CDC_VBAT_GAIN_UPD4__POR },
-	{ TOMTOM_A_CDC_VBAT_DEBUG1, TOMTOM_A_CDC_VBAT_DEBUG1__POR },
-	{ TOMTOM_A_CDC_VBAT_GAIN_UPD_MON, TOMTOM_A_CDC_VBAT_GAIN_UPD_MON__POR },
-	{ TOMTOM_A_CDC_VBAT_GAIN_MON_VAL, TOMTOM_A_CDC_VBAT_GAIN_MON_VAL__POR },
-	{ TOMTOM_A_CDC_CLK_ANC_RESET_CTL, TOMTOM_A_CDC_CLK_ANC_RESET_CTL__POR },
-	{ TOMTOM_A_CDC_CLK_RX_RESET_CTL, TOMTOM_A_CDC_CLK_RX_RESET_CTL__POR },
-	{ TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL,
-				TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL,
-				TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CLK_RX_I2S_CTL, TOMTOM_A_CDC_CLK_RX_I2S_CTL__POR },
-	{ TOMTOM_A_CDC_CLK_TX_I2S_CTL, TOMTOM_A_CDC_CLK_TX_I2S_CTL__POR },
-	{ TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL,
-				TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL,
-				TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL,
-				TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL,
-				TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CLK_OTHR_CTL, TOMTOM_A_CDC_CLK_OTHR_CTL__POR },
-	{ TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL,
-				TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL__POR },
-	{ TOMTOM_A_CDC_CLK_RX_B1_CTL, TOMTOM_A_CDC_CLK_RX_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CLK_RX_B2_CTL, TOMTOM_A_CDC_CLK_RX_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CLK_MCLK_CTL, TOMTOM_A_CDC_CLK_MCLK_CTL__POR },
-	{ TOMTOM_A_CDC_CLK_PDM_CTL, TOMTOM_A_CDC_CLK_PDM_CTL__POR },
-	{ TOMTOM_A_CDC_CLK_SD_CTL, TOMTOM_A_CDC_CLK_SD_CTL__POR },
-	{ TOMTOM_A_CDC_CLSH_B1_CTL, TOMTOM_A_CDC_CLSH_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CLSH_B2_CTL, TOMTOM_A_CDC_CLSH_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CLSH_B3_CTL, TOMTOM_A_CDC_CLSH_B3_CTL__POR },
-	{ TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS,
-				TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS__POR },
-	{ TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD,
-				TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD__POR },
-	{ TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD,
-				TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD__POR },
-	{ TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD,
-				TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD__POR },
-	{ TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD,
-				TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD__POR },
-	{ TOMTOM_A_CDC_CLSH_K_ADDR, TOMTOM_A_CDC_CLSH_K_ADDR__POR },
-	{ TOMTOM_A_CDC_CLSH_K_DATA, TOMTOM_A_CDC_CLSH_K_DATA__POR },
-	{ TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L,
-				TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L__POR },
-	{ TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U,
-				TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U__POR },
-	{ TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L,
-				TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L__POR },
-	{ TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U,
-				TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U__POR },
-	{ TOMTOM_A_CDC_CLSH_V_PA_HD_EAR, TOMTOM_A_CDC_CLSH_V_PA_HD_EAR__POR },
-	{ TOMTOM_A_CDC_CLSH_V_PA_HD_HPH, TOMTOM_A_CDC_CLSH_V_PA_HD_HPH__POR },
-	{ TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR, TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR__POR },
-	{ TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH, TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH__POR },
-	{ TOMTOM_A_CDC_IIR1_GAIN_B1_CTL, TOMTOM_A_CDC_IIR1_GAIN_B1_CTL__POR },
-	{ TOMTOM_A_CDC_IIR2_GAIN_B1_CTL, TOMTOM_A_CDC_IIR2_GAIN_B1_CTL__POR },
-	{ TOMTOM_A_CDC_IIR1_GAIN_B2_CTL, TOMTOM_A_CDC_IIR1_GAIN_B2_CTL__POR },
-	{ TOMTOM_A_CDC_IIR2_GAIN_B2_CTL, TOMTOM_A_CDC_IIR2_GAIN_B2_CTL__POR },
-	{ TOMTOM_A_CDC_IIR1_GAIN_B3_CTL, TOMTOM_A_CDC_IIR1_GAIN_B3_CTL__POR },
-	{ TOMTOM_A_CDC_IIR2_GAIN_B3_CTL, TOMTOM_A_CDC_IIR2_GAIN_B3_CTL__POR },
-	{ TOMTOM_A_CDC_IIR1_GAIN_B4_CTL, TOMTOM_A_CDC_IIR1_GAIN_B4_CTL__POR },
-	{ TOMTOM_A_CDC_IIR2_GAIN_B4_CTL, TOMTOM_A_CDC_IIR2_GAIN_B4_CTL__POR },
-	{ TOMTOM_A_CDC_IIR1_GAIN_B5_CTL, TOMTOM_A_CDC_IIR1_GAIN_B5_CTL__POR },
-	{ TOMTOM_A_CDC_IIR2_GAIN_B5_CTL, TOMTOM_A_CDC_IIR2_GAIN_B5_CTL__POR },
-	{ TOMTOM_A_CDC_IIR1_GAIN_B6_CTL, TOMTOM_A_CDC_IIR1_GAIN_B6_CTL__POR },
-	{ TOMTOM_A_CDC_IIR2_GAIN_B6_CTL, TOMTOM_A_CDC_IIR2_GAIN_B6_CTL__POR },
-	{ TOMTOM_A_CDC_IIR1_GAIN_B7_CTL, TOMTOM_A_CDC_IIR1_GAIN_B7_CTL__POR },
-	{ TOMTOM_A_CDC_IIR2_GAIN_B7_CTL, TOMTOM_A_CDC_IIR2_GAIN_B7_CTL__POR },
-	{ TOMTOM_A_CDC_IIR1_GAIN_B8_CTL, TOMTOM_A_CDC_IIR1_GAIN_B8_CTL__POR },
-	{ TOMTOM_A_CDC_IIR2_GAIN_B8_CTL, TOMTOM_A_CDC_IIR2_GAIN_B8_CTL__POR },
-	{ TOMTOM_A_CDC_IIR1_CTL, TOMTOM_A_CDC_IIR1_CTL__POR },
-	{ TOMTOM_A_CDC_IIR2_CTL, TOMTOM_A_CDC_IIR2_CTL__POR },
-	{ TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL,
-				TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL__POR },
-	{ TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL,
-				TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL__POR },
-	{ TOMTOM_A_CDC_IIR1_COEF_B1_CTL, TOMTOM_A_CDC_IIR1_COEF_B1_CTL__POR },
-	{ TOMTOM_A_CDC_IIR2_COEF_B1_CTL, TOMTOM_A_CDC_IIR2_COEF_B1_CTL__POR },
-	{ TOMTOM_A_CDC_IIR1_COEF_B2_CTL, TOMTOM_A_CDC_IIR1_COEF_B2_CTL__POR },
-	{ TOMTOM_A_CDC_IIR2_COEF_B2_CTL, TOMTOM_A_CDC_IIR2_COEF_B2_CTL__POR },
-	{ TOMTOM_A_CDC_TOP_GAIN_UPDATE, TOMTOM_A_CDC_TOP_GAIN_UPDATE__POR },
-	{ TOMTOM_A_CDC_PA_RAMP_B1_CTL, TOMTOM_A_CDC_PA_RAMP_B1_CTL__POR },
-	{ TOMTOM_A_CDC_PA_RAMP_B2_CTL, TOMTOM_A_CDC_PA_RAMP_B2_CTL__POR },
-	{ TOMTOM_A_CDC_PA_RAMP_B3_CTL, TOMTOM_A_CDC_PA_RAMP_B3_CTL__POR },
-	{ TOMTOM_A_CDC_PA_RAMP_B4_CTL, TOMTOM_A_CDC_PA_RAMP_B4_CTL__POR },
-	{ TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL,
-				TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL,
-				TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL__POR },
-	{ TOMTOM_A_CDC_COMP0_B1_CTL, TOMTOM_A_CDC_COMP0_B1_CTL__POR },
-	{ TOMTOM_A_CDC_COMP1_B1_CTL, TOMTOM_A_CDC_COMP1_B1_CTL__POR },
-	{ TOMTOM_A_CDC_COMP2_B1_CTL, TOMTOM_A_CDC_COMP2_B1_CTL__POR },
-	{ TOMTOM_A_CDC_COMP0_B2_CTL, TOMTOM_A_CDC_COMP0_B2_CTL__POR },
-	{ TOMTOM_A_CDC_COMP1_B2_CTL, TOMTOM_A_CDC_COMP1_B2_CTL__POR },
-	{ TOMTOM_A_CDC_COMP2_B2_CTL, TOMTOM_A_CDC_COMP2_B2_CTL__POR },
-	{ TOMTOM_A_CDC_COMP0_B3_CTL, TOMTOM_A_CDC_COMP0_B3_CTL__POR },
-	{ TOMTOM_A_CDC_COMP1_B3_CTL, TOMTOM_A_CDC_COMP1_B3_CTL__POR },
-	{ TOMTOM_A_CDC_COMP2_B3_CTL, TOMTOM_A_CDC_COMP2_B3_CTL__POR },
-	{ TOMTOM_A_CDC_COMP0_B4_CTL, TOMTOM_A_CDC_COMP0_B4_CTL__POR },
-	{ TOMTOM_A_CDC_COMP1_B4_CTL, TOMTOM_A_CDC_COMP1_B4_CTL__POR },
-	{ TOMTOM_A_CDC_COMP2_B4_CTL, TOMTOM_A_CDC_COMP2_B4_CTL__POR },
-	{ TOMTOM_A_CDC_COMP0_B5_CTL, TOMTOM_A_CDC_COMP0_B5_CTL__POR },
-	{ TOMTOM_A_CDC_COMP1_B5_CTL, TOMTOM_A_CDC_COMP1_B5_CTL__POR },
-	{ TOMTOM_A_CDC_COMP2_B5_CTL, TOMTOM_A_CDC_COMP2_B5_CTL__POR },
-	{ TOMTOM_A_CDC_COMP0_B6_CTL, TOMTOM_A_CDC_COMP0_B6_CTL__POR },
-	{ TOMTOM_A_CDC_COMP1_B6_CTL, TOMTOM_A_CDC_COMP1_B6_CTL__POR },
-	{ TOMTOM_A_CDC_COMP2_B6_CTL, TOMTOM_A_CDC_COMP2_B6_CTL__POR },
-	{ TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS,
-				TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS__POR },
-	{ TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS,
-				TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS__POR },
-	{ TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS,
-				TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS__POR },
-	{ TOMTOM_A_CDC_COMP0_FS_CFG, TOMTOM_A_CDC_COMP0_FS_CFG__POR },
-	{ TOMTOM_A_CDC_COMP1_FS_CFG, TOMTOM_A_CDC_COMP1_FS_CFG__POR },
-	{ TOMTOM_A_CDC_COMP2_FS_CFG, TOMTOM_A_CDC_COMP2_FS_CFG__POR },
-	{ TOMTOM_A_CDC_CONN_RX1_B1_CTL, TOMTOM_A_CDC_CONN_RX1_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX1_B2_CTL, TOMTOM_A_CDC_CONN_RX1_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX1_B3_CTL, TOMTOM_A_CDC_CONN_RX1_B3_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX2_B1_CTL, TOMTOM_A_CDC_CONN_RX2_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX2_B2_CTL, TOMTOM_A_CDC_CONN_RX2_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX2_B3_CTL, TOMTOM_A_CDC_CONN_RX2_B3_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX3_B1_CTL, TOMTOM_A_CDC_CONN_RX3_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX3_B2_CTL, TOMTOM_A_CDC_CONN_RX3_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX4_B1_CTL, TOMTOM_A_CDC_CONN_RX4_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX4_B2_CTL, TOMTOM_A_CDC_CONN_RX4_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX5_B1_CTL, TOMTOM_A_CDC_CONN_RX5_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX5_B2_CTL, TOMTOM_A_CDC_CONN_RX5_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX6_B1_CTL, TOMTOM_A_CDC_CONN_RX6_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX6_B2_CTL, TOMTOM_A_CDC_CONN_RX6_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX7_B1_CTL, TOMTOM_A_CDC_CONN_RX7_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX7_B2_CTL, TOMTOM_A_CDC_CONN_RX7_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX7_B3_CTL, TOMTOM_A_CDC_CONN_RX7_B3_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_ANC_B1_CTL, TOMTOM_A_CDC_CONN_ANC_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_ANC_B2_CTL, TOMTOM_A_CDC_CONN_ANC_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_TX_B1_CTL, TOMTOM_A_CDC_CONN_TX_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_TX_B2_CTL, TOMTOM_A_CDC_CONN_TX_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_TX_B3_CTL, TOMTOM_A_CDC_CONN_TX_B3_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_TX_B4_CTL, TOMTOM_A_CDC_CONN_TX_B4_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_EQ1_B1_CTL, TOMTOM_A_CDC_CONN_EQ1_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_EQ1_B2_CTL, TOMTOM_A_CDC_CONN_EQ1_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_EQ1_B3_CTL, TOMTOM_A_CDC_CONN_EQ1_B3_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_EQ1_B4_CTL, TOMTOM_A_CDC_CONN_EQ1_B4_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_EQ2_B1_CTL, TOMTOM_A_CDC_CONN_EQ2_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_EQ2_B2_CTL, TOMTOM_A_CDC_CONN_EQ2_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_EQ2_B3_CTL, TOMTOM_A_CDC_CONN_EQ2_B3_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_EQ2_B4_CTL, TOMTOM_A_CDC_CONN_EQ2_B4_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_SRC1_B1_CTL, TOMTOM_A_CDC_CONN_SRC1_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_SRC1_B2_CTL, TOMTOM_A_CDC_CONN_SRC1_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_SRC2_B1_CTL, TOMTOM_A_CDC_CONN_SRC2_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_SRC2_B2_CTL, TOMTOM_A_CDC_CONN_SRC2_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_TX_SB_B1_CTL, TOMTOM_A_CDC_CONN_TX_SB_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_TX_SB_B2_CTL, TOMTOM_A_CDC_CONN_TX_SB_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_TX_SB_B3_CTL, TOMTOM_A_CDC_CONN_TX_SB_B3_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_TX_SB_B4_CTL, TOMTOM_A_CDC_CONN_TX_SB_B4_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_TX_SB_B5_CTL, TOMTOM_A_CDC_CONN_TX_SB_B5_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_TX_SB_B6_CTL, TOMTOM_A_CDC_CONN_TX_SB_B6_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_TX_SB_B7_CTL, TOMTOM_A_CDC_CONN_TX_SB_B7_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_TX_SB_B8_CTL, TOMTOM_A_CDC_CONN_TX_SB_B8_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_TX_SB_B9_CTL, TOMTOM_A_CDC_CONN_TX_SB_B9_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_TX_SB_B10_CTL,
-				TOMTOM_A_CDC_CONN_TX_SB_B10_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_TX_SB_B11_CTL,
-				TOMTOM_A_CDC_CONN_TX_SB_B11_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX_SB_B1_CTL, TOMTOM_A_CDC_CONN_RX_SB_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_RX_SB_B2_CTL, TOMTOM_A_CDC_CONN_RX_SB_B2_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_CLSH_CTL, TOMTOM_A_CDC_CONN_CLSH_CTL__POR },
-	{ TOMTOM_A_CDC_CONN_MISC, TOMTOM_A_CDC_CONN_MISC__POR },
-	{ TOMTOM_A_CDC_CONN_RX8_B1_CTL, TOMTOM_A_CDC_CONN_RX8_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL,
-				TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST,
-			TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST__POR },
-	{ TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD,
-			TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD__POR },
-	{ TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS,
-			TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS__POR },
-	{ TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK,
-			TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK__POR },
-	{ TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING,
-			TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING__POR },
-	{ TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL,
-				TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL__POR },
-	{ TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST,
-			TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST__POR },
-	{ TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD,
-			TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD__POR },
-	{ TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS,
-			TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS__POR },
-	{ TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK,
-				TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK__POR },
-	{ TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING,
-				TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING__POR },
-	{ TOMTOM_A_CDC_MBHC_EN_CTL, TOMTOM_A_CDC_MBHC_EN_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_FIR_B1_CFG, TOMTOM_A_CDC_MBHC_FIR_B1_CFG__POR },
-	{ TOMTOM_A_CDC_MBHC_FIR_B2_CFG, TOMTOM_A_CDC_MBHC_FIR_B2_CFG__POR },
-	{ TOMTOM_A_CDC_MBHC_TIMER_B1_CTL, TOMTOM_A_CDC_MBHC_TIMER_B1_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_TIMER_B2_CTL, TOMTOM_A_CDC_MBHC_TIMER_B2_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_TIMER_B3_CTL, TOMTOM_A_CDC_MBHC_TIMER_B3_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_TIMER_B4_CTL, TOMTOM_A_CDC_MBHC_TIMER_B4_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_TIMER_B5_CTL, TOMTOM_A_CDC_MBHC_TIMER_B5_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_TIMER_B6_CTL, TOMTOM_A_CDC_MBHC_TIMER_B6_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_B1_STATUS, TOMTOM_A_CDC_MBHC_B1_STATUS__POR },
-	{ TOMTOM_A_CDC_MBHC_B2_STATUS, TOMTOM_A_CDC_MBHC_B2_STATUS__POR },
-	{ TOMTOM_A_CDC_MBHC_B3_STATUS, TOMTOM_A_CDC_MBHC_B3_STATUS__POR },
-	{ TOMTOM_A_CDC_MBHC_B4_STATUS, TOMTOM_A_CDC_MBHC_B4_STATUS__POR },
-	{ TOMTOM_A_CDC_MBHC_B5_STATUS, TOMTOM_A_CDC_MBHC_B5_STATUS__POR },
-	{ TOMTOM_A_CDC_MBHC_B1_CTL, TOMTOM_A_CDC_MBHC_B1_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_B2_CTL, TOMTOM_A_CDC_MBHC_B2_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_VOLT_B1_CTL, TOMTOM_A_CDC_MBHC_VOLT_B1_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_VOLT_B2_CTL, TOMTOM_A_CDC_MBHC_VOLT_B2_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_VOLT_B3_CTL, TOMTOM_A_CDC_MBHC_VOLT_B3_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_VOLT_B4_CTL, TOMTOM_A_CDC_MBHC_VOLT_B4_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_VOLT_B5_CTL, TOMTOM_A_CDC_MBHC_VOLT_B5_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_VOLT_B6_CTL, TOMTOM_A_CDC_MBHC_VOLT_B6_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_VOLT_B7_CTL, TOMTOM_A_CDC_MBHC_VOLT_B7_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_VOLT_B8_CTL, TOMTOM_A_CDC_MBHC_VOLT_B8_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_VOLT_B9_CTL, TOMTOM_A_CDC_MBHC_VOLT_B9_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_VOLT_B10_CTL, TOMTOM_A_CDC_MBHC_VOLT_B10_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_VOLT_B11_CTL, TOMTOM_A_CDC_MBHC_VOLT_B11_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_VOLT_B12_CTL, TOMTOM_A_CDC_MBHC_VOLT_B12_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_CLK_CTL, TOMTOM_A_CDC_MBHC_CLK_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_INT_CTL, TOMTOM_A_CDC_MBHC_INT_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_DEBUG_CTL, TOMTOM_A_CDC_MBHC_DEBUG_CTL__POR },
-	{ TOMTOM_A_CDC_MBHC_SPARE, TOMTOM_A_CDC_MBHC_SPARE__POR },
-	{ TOMTOM_A_CDC_RX8_B1_CTL, TOMTOM_A_CDC_RX8_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX8_B2_CTL, TOMTOM_A_CDC_RX8_B2_CTL__POR },
-	{ TOMTOM_A_CDC_RX8_B3_CTL, TOMTOM_A_CDC_RX8_B3_CTL__POR },
-	{ TOMTOM_A_CDC_RX8_B4_CTL, TOMTOM_A_CDC_RX8_B4_CTL__POR },
-	{ TOMTOM_A_CDC_RX8_B5_CTL, TOMTOM_A_CDC_RX8_B5_CTL__POR },
-	{ TOMTOM_A_CDC_RX8_B6_CTL, TOMTOM_A_CDC_RX8_B6_CTL__POR },
-	{ TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL,
-				TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL,
-				TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0,
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1,
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2,
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3,
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4,
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5,
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6,
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7,
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7__POR },
-	{ TOMTOM_A_CDC_BOOST_MODE_CTL, TOMTOM_A_CDC_BOOST_MODE_CTL__POR },
-	{ TOMTOM_A_CDC_BOOST_THRESHOLD, TOMTOM_A_CDC_BOOST_THRESHOLD__POR },
-	{ TOMTOM_A_CDC_BOOST_TAP_SEL, TOMTOM_A_CDC_BOOST_TAP_SEL__POR },
-	{ TOMTOM_A_CDC_BOOST_HOLD_TIME, TOMTOM_A_CDC_BOOST_HOLD_TIME__POR },
-	{ TOMTOM_A_CDC_BOOST_TRGR_EN, TOMTOM_A_CDC_BOOST_TRGR_EN__POR },
-};
-
-static bool wcd9330_is_readable_register(struct device *dev, unsigned int reg)
-{
-	return tomtom_reg_readable[reg];
-}
-
-static bool tomtom_is_digital_gain_register(unsigned int reg)
-{
-	bool rtn = false;
-
-	switch (reg) {
-	case TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL:
-	case TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL:
-	case TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL:
-	case TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL:
-	case TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL:
-	case TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL:
-	case TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL:
-	case TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL:
-	case TOMTOM_A_CDC_TX1_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX2_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX3_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX4_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX5_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX6_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX7_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX8_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX9_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX10_VOL_CTL_GAIN:
-		rtn = true;
-		break;
-	default:
-		break;
-	}
-	return rtn;
-}
-
-static bool wcd9330_is_volatile_register(struct device *dev, unsigned int reg)
-{
-
-	if ((reg >= TOMTOM_A_CDC_MBHC_EN_CTL) || (reg < 0x100))
-		return true;
-
-	/* IIR Coeff registers are not cacheable */
-	if ((reg >= TOMTOM_A_CDC_IIR1_COEF_B1_CTL) &&
-		(reg <= TOMTOM_A_CDC_IIR2_COEF_B2_CTL))
-		return true;
-
-	/* ANC filter registers are not cacheable */
-	if ((reg >= TOMTOM_A_CDC_ANC1_IIR_B1_CTL) &&
-		(reg <= TOMTOM_A_CDC_ANC1_LPF_B2_CTL))
-		return true;
-
-	if ((reg >= TOMTOM_A_CDC_ANC2_IIR_B1_CTL) &&
-		(reg <= TOMTOM_A_CDC_ANC2_LPF_B2_CTL))
-		return true;
-
-	if (((reg >= TOMTOM_A_CDC_SPKR_CLIPDET_VAL0 &&
-	    reg <= TOMTOM_A_CDC_SPKR_CLIPDET_VAL7)) ||
-	    ((reg >= TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0) &&
-	     (reg <= TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7)))
-		return true;
-
-	/* Digital gain register is not cacheable so we have to write
-	 * the setting even it is the same
-	 */
-	if (tomtom_is_digital_gain_register(reg))
-		return true;
-
-	switch (reg) {
-	case TOMTOM_A_RX_HPH_L_STATUS:
-	case TOMTOM_A_RX_HPH_R_STATUS:
-	case TOMTOM_A_MBHC_INSERT_DET_STATUS:
-	case TOMTOM_A_RX_HPH_CNP_EN:
-	case TOMTOM_A_CDC_VBAT_GAIN_MON_VAL:
-	case TOMTOM_A_CDC_MAD_MAIN_CTL_1:
-	case TOMTOM_A_CDC_MAD_AUDIO_CTL_3:
-	case TOMTOM_A_CDC_MAD_AUDIO_CTL_4:
-	case TOMTOM_A_INTR_MODE:
-	case TOMTOM_A_INTR2_MASK0:
-	case TOMTOM_A_INTR2_STATUS0:
-	case TOMTOM_A_INTR2_CLEAR0:
-	case TOMTOM_SB_PGD_PORT_TX_BASE:
-	case TOMTOM_SB_PGD_PORT_RX_BASE:
-	case TOMTOM_A_CDC_ANC1_IIR_B1_CTL:
-	case TOMTOM_A_CDC_ANC1_GAIN_CTL:
-	case TOMTOM_A_SVASS_SPE_INBOX_TRG:
-		return true;
-	default:
-		return false;
-	}
-}
-
-struct regmap_config wcd9330_regmap_config = {
-	.reg_bits = 16,
-	.val_bits = 8,
-	.cache_type = REGCACHE_RBTREE,
-	.reg_defaults = wcd9330_defaults,
-	.num_reg_defaults = ARRAY_SIZE(wcd9330_defaults),
-	.max_register = WCD9330_MAX_REGISTER,
-	.volatile_reg = wcd9330_is_volatile_register,
-	.readable_reg = wcd9330_is_readable_register,
-};
diff --git a/drivers/mfd/wcd9xxx-core-init.c b/drivers/mfd/wcd9xxx-core-init.c
new file mode 100644
index 0000000..7f93399
--- /dev/null
+++ b/drivers/mfd/wcd9xxx-core-init.c
@@ -0,0 +1,55 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/mfd/msm-cdc-pinctrl.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
+#include <linux/mfd/wcd9xxx/core.h>
+
+#define NUM_DRIVERS_REG_RET 3
+
+static int __init wcd9xxx_core_init(void)
+{
+	int ret[NUM_DRIVERS_REG_RET] = {0};
+	int i = 0;
+
+	ret[0] = msm_cdc_pinctrl_drv_init();
+	if (ret[0])
+		pr_err("%s: Failed init pinctrl drv: %d\n", __func__, ret[0]);
+
+	ret[1] = wcd9xxx_irq_drv_init();
+	if (ret[1])
+		pr_err("%s: Failed init irq drv: %d\n", __func__, ret[1]);
+
+	ret[2] = wcd9xxx_init();
+	if (ret[2])
+		pr_err("%s: Failed wcd core drv: %d\n", __func__, ret[2]);
+
+	for (i = 0; i < NUM_DRIVERS_REG_RET; i++) {
+		if (ret[i])
+			return ret[i];
+	}
+
+	return 0;
+}
+module_init(wcd9xxx_core_init);
+
+static void __exit wcd9xxx_core_exit(void)
+{
+	wcd9xxx_exit();
+	wcd9xxx_irq_drv_exit();
+	msm_cdc_pinctrl_drv_exit();
+}
+module_exit(wcd9xxx_core_exit);
+
+MODULE_DESCRIPTION("WCD9XXX CODEC core init driver");
+MODULE_LICENSE("GPL v2");
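The new wcd9xxx-core-init.c above replaces the separate initcalls that are removed from wcd9xxx-core.c and wcd9xxx-irq.c below, so the pinctrl, IRQ and core drivers now register in a fixed order from a single module_init. For this to link, the init/exit routines must be declared outside their own files; the sketch below shows the prototypes the new file relies on, inferred from the calls above and from the functions made non-static later in this patch. The actual header contents are not part of this diff, so treat the exact placement as an assumption.

	/* Sketch only: declarations assumed to live in the included headers */

	/* linux/mfd/msm-cdc-pinctrl.h */
	int msm_cdc_pinctrl_drv_init(void);
	void msm_cdc_pinctrl_drv_exit(void);

	/* linux/mfd/wcd9xxx/wcd9xxx-irq.h */
	int wcd9xxx_irq_drv_init(void);
	void wcd9xxx_irq_drv_exit(void);

	/* linux/mfd/wcd9xxx/core.h */
	int wcd9xxx_init(void);
	void wcd9xxx_exit(void);
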
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index d143536..b373acb 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -1665,7 +1665,7 @@
 	.remove                 =       wcd9xxx_i2c_remove,
 };
 
-static int __init wcd9xxx_init(void)
+int wcd9xxx_init(void)
 {
 	int ret[NUM_WCD9XXX_REG_RET] = {0};
 	int i = 0;
@@ -1699,9 +1699,8 @@
 
 	return 0;
 }
-module_init(wcd9xxx_init);
 
-static void __exit wcd9xxx_exit(void)
+void wcd9xxx_exit(void)
 {
 	wcd9xxx_set_intf_type(WCD9XXX_INTERFACE_TYPE_PROBING);
 
@@ -1710,7 +1709,6 @@
 	i2c_del_driver(&wcd9335_i2c_driver);
 	slim_driver_unregister(&wcd_slim_driver);
 }
-module_exit(wcd9xxx_exit);
 
 MODULE_DESCRIPTION("Codec core driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/wcd9xxx-irq.c b/drivers/mfd/wcd9xxx-irq.c
index 0502e39d..092f446 100644
--- a/drivers/mfd/wcd9xxx-irq.c
+++ b/drivers/mfd/wcd9xxx-irq.c
@@ -406,30 +406,63 @@
 		return IRQ_NONE;
 }
 
+/**
+ * wcd9xxx_free_irq
+ *
+ * @wcd9xxx_res: pointer to core resource
+ * @irq: irq number
+ * @data: data pointer
+ *
+ */
 void wcd9xxx_free_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
 			int irq, void *data)
 {
 	free_irq(phyirq_to_virq(wcd9xxx_res, irq), data);
 }
+EXPORT_SYMBOL(wcd9xxx_free_irq);
 
+/**
+ * wcd9xxx_enable_irq
+ *
+ * @wcd9xxx_res: pointer to core resource
+ * @irq: irq number
+ *
+ */
 void wcd9xxx_enable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
 {
 	if (wcd9xxx_res->irq)
 		enable_irq(phyirq_to_virq(wcd9xxx_res, irq));
 }
+EXPORT_SYMBOL(wcd9xxx_enable_irq);
 
+/**
+ * wcd9xxx_disable_irq
+ *
+ * @wcd9xxx_res: pointer to core resource
+ * @irq: irq number
+ *
+ */
 void wcd9xxx_disable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
 {
 	if (wcd9xxx_res->irq)
 		disable_irq_nosync(phyirq_to_virq(wcd9xxx_res, irq));
 }
+EXPORT_SYMBOL(wcd9xxx_disable_irq);
 
+/**
+ * wcd9xxx_disable_irq_sync
+ *
+ * @wcd9xxx_res: pointer to core resource
+ * @irq: irq number
+ *
+ */
 void wcd9xxx_disable_irq_sync(
 			struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
 {
 	if (wcd9xxx_res->irq)
 		disable_irq(phyirq_to_virq(wcd9xxx_res, irq));
 }
+EXPORT_SYMBOL(wcd9xxx_disable_irq_sync);
 
 static int wcd9xxx_irq_setup_downstream_irq(
 			struct wcd9xxx_core_resource *wcd9xxx_res)
@@ -470,6 +503,13 @@
 	return 0;
 }
 
+/**
+ * wcd9xxx_irq_init
+ *
+ * @wcd9xxx_res: pointer to core resource
+ *
+ * Returns 0 on success, appropriate error code otherwise
+ */
 int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res)
 {
 	int i, ret;
@@ -568,6 +608,7 @@
 	mutex_destroy(&wcd9xxx_res->nested_irq_lock);
 	return ret;
 }
+EXPORT_SYMBOL(wcd9xxx_irq_init);
 
 int wcd9xxx_request_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
 			int irq, irq_handler_t handler,
@@ -580,6 +621,7 @@
 	return request_threaded_irq(virq, NULL, handler, IRQF_TRIGGER_RISING,
 				    name, data);
 }
+EXPORT_SYMBOL(wcd9xxx_request_irq);
 
 void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *wcd9xxx_res)
 {
@@ -799,15 +841,13 @@
 	},
 };
 
-static int wcd9xxx_irq_drv_init(void)
+int wcd9xxx_irq_drv_init(void)
 {
 	return platform_driver_register(&wcd9xxx_irq_driver);
 }
-subsys_initcall(wcd9xxx_irq_drv_init);
 
-static void wcd9xxx_irq_drv_exit(void)
+void wcd9xxx_irq_drv_exit(void)
 {
 	platform_driver_unregister(&wcd9xxx_irq_driver);
 }
-module_exit(wcd9xxx_irq_drv_exit);
 #endif /* CONFIG_OF */
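Besides dropping the initcalls, the wcd9xxx-irq.c changes above add kernel-doc and EXPORT_SYMBOL for the IRQ helpers so that codec drivers built as separate modules can call them. A minimal consumer sketch follows; the demo_* names are hypothetical, and the trailing (name, data) parameters of wcd9xxx_request_irq() are assumed from its call to request_threaded_irq() shown in the hunk above.

	/* Hypothetical consumer of the newly exported helpers; the demo_*
	 * names are illustrative and not part of this patch.
	 */
	static irqreturn_t demo_mbhc_handler(int irq, void *data)
	{
		/* data is the wcd9xxx_core_resource passed at request time */
		pr_debug("%s: handling virtual irq %d\n", __func__, irq);
		return IRQ_HANDLED;
	}

	static int demo_codec_setup(struct wcd9xxx_core_resource *res)
	{
		/* irq is the codec-local interrupt index, e.g. MBHC switch detect */
		return wcd9xxx_request_irq(res, WCD9335_IRQ_MBHC_SW_DET,
					   demo_mbhc_handler, "MBHC SW detect", res);
	}

	static void demo_codec_teardown(struct wcd9xxx_core_resource *res)
	{
		wcd9xxx_disable_irq_sync(res, WCD9335_IRQ_MBHC_SW_DET);
		wcd9xxx_free_irq(res, WCD9335_IRQ_MBHC_SW_DET, res);
	}
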
diff --git a/drivers/mfd/wcd9xxx-regmap.h b/drivers/mfd/wcd9xxx-regmap.h
index 6db8fc5..f44e8b1 100644
--- a/drivers/mfd/wcd9xxx-regmap.h
+++ b/drivers/mfd/wcd9xxx-regmap.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,42 +19,25 @@
 
 typedef int (*regmap_patch_fptr)(struct regmap *, int);
 
-#ifdef CONFIG_WCD934X_CODEC
 extern struct regmap_config wcd934x_regmap_config;
 extern int wcd934x_regmap_register_patch(struct regmap *regmap,
 					 int version);
-#endif
 
-#ifdef CONFIG_WCD9335_CODEC
 extern struct regmap_config wcd9335_regmap_config;
 extern int wcd9335_regmap_register_patch(struct regmap *regmap,
 					 int version);
-#endif
-
-#ifdef CONFIG_WCD9330_CODEC
-extern struct regmap_config wcd9330_regmap_config;
-#endif
 
 static inline struct regmap_config *wcd9xxx_get_regmap_config(int type)
 {
 	struct regmap_config *regmap_config;
 
 	switch (type) {
-#ifdef CONFIG_WCD934X_CODEC
 	case WCD934X:
 		regmap_config = &wcd934x_regmap_config;
 		break;
-#endif
-#ifdef CONFIG_WCD9335_CODEC
 	case WCD9335:
 		regmap_config = &wcd9335_regmap_config;
 		break;
-#endif
-#ifdef CONFIG_WCD9330_CODEC
-	case WCD9330:
-		regmap_config = &wcd9330_regmap_config;
-		break;
-#endif
 	default:
 		regmap_config = NULL;
 		break;
@@ -68,16 +51,12 @@
 	regmap_patch_fptr apply_patch;
 
 	switch (type) {
-#ifdef CONFIG_WCD9335_CODEC
 	case WCD9335:
 		apply_patch = wcd9335_regmap_register_patch;
 		break;
-#endif
-#ifdef CONFIG_WCD934X_CODEC
 	case WCD934X:
 		apply_patch = wcd934x_regmap_register_patch;
 		break;
-#endif
 	default:
 		apply_patch = NULL;
 		break;
diff --git a/drivers/mfd/wcd9xxx-rst.c b/drivers/mfd/wcd9xxx-rst.c
new file mode 100644
index 0000000..c8e0b34
--- /dev/null
+++ b/drivers/mfd/wcd9xxx-rst.c
@@ -0,0 +1,443 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/mfd/wcd9xxx/pdata.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-utils.h>
+#include <linux/mfd/wcd9335/registers.h>
+#include <linux/mfd/wcd934x/registers.h>
+#include <linux/mfd/wcd9335/irq.h>
+#include <linux/mfd/wcd934x/irq.h>
+
+/* wcd9335 interrupt table */
+static const struct intr_data wcd9335_intr_table[] = {
+	{WCD9XXX_IRQ_SLIMBUS, false},
+	{WCD9335_IRQ_MBHC_SW_DET, true},
+	{WCD9335_IRQ_MBHC_BUTTON_PRESS_DET, true},
+	{WCD9335_IRQ_MBHC_BUTTON_RELEASE_DET, true},
+	{WCD9335_IRQ_MBHC_ELECT_INS_REM_DET, true},
+	{WCD9335_IRQ_MBHC_ELECT_INS_REM_LEG_DET, true},
+	{WCD9335_IRQ_FLL_LOCK_LOSS, false},
+	{WCD9335_IRQ_HPH_PA_CNPL_COMPLETE, false},
+	{WCD9335_IRQ_HPH_PA_CNPR_COMPLETE, false},
+	{WCD9335_IRQ_EAR_PA_CNP_COMPLETE, false},
+	{WCD9335_IRQ_LINE_PA1_CNP_COMPLETE, false},
+	{WCD9335_IRQ_LINE_PA2_CNP_COMPLETE, false},
+	{WCD9335_IRQ_LINE_PA3_CNP_COMPLETE, false},
+	{WCD9335_IRQ_LINE_PA4_CNP_COMPLETE, false},
+	{WCD9335_IRQ_HPH_PA_OCPL_FAULT, false},
+	{WCD9335_IRQ_HPH_PA_OCPR_FAULT, false},
+	{WCD9335_IRQ_EAR_PA_OCP_FAULT, false},
+	{WCD9335_IRQ_SOUNDWIRE, false},
+	{WCD9335_IRQ_VDD_DIG_RAMP_COMPLETE, false},
+	{WCD9335_IRQ_RCO_ERROR, false},
+	{WCD9335_IRQ_SVA_ERROR, false},
+	{WCD9335_IRQ_MAD_AUDIO, false},
+	{WCD9335_IRQ_MAD_BEACON, false},
+	{WCD9335_IRQ_SVA_OUTBOX1, true},
+	{WCD9335_IRQ_SVA_OUTBOX2, true},
+	{WCD9335_IRQ_MAD_ULTRASOUND, false},
+	{WCD9335_IRQ_VBAT_ATTACK, false},
+	{WCD9335_IRQ_VBAT_RESTORE, false},
+};
+
+static const struct intr_data wcd934x_intr_table[] = {
+	{WCD9XXX_IRQ_SLIMBUS, false},
+	{WCD934X_IRQ_MBHC_SW_DET, true},
+	{WCD934X_IRQ_MBHC_BUTTON_PRESS_DET, true},
+	{WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET, true},
+	{WCD934X_IRQ_MBHC_ELECT_INS_REM_DET, true},
+	{WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET, true},
+	{WCD934X_IRQ_MISC, false},
+	{WCD934X_IRQ_HPH_PA_CNPL_COMPLETE, false},
+	{WCD934X_IRQ_HPH_PA_CNPR_COMPLETE, false},
+	{WCD934X_IRQ_EAR_PA_CNP_COMPLETE, false},
+	{WCD934X_IRQ_LINE_PA1_CNP_COMPLETE, false},
+	{WCD934X_IRQ_LINE_PA2_CNP_COMPLETE, false},
+	{WCD934X_IRQ_SLNQ_ANALOG_ERROR, false},
+	{WCD934X_IRQ_RESERVED_3, false},
+	{WCD934X_IRQ_HPH_PA_OCPL_FAULT, false},
+	{WCD934X_IRQ_HPH_PA_OCPR_FAULT, false},
+	{WCD934X_IRQ_EAR_PA_OCP_FAULT, false},
+	{WCD934X_IRQ_SOUNDWIRE, false},
+	{WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE, false},
+	{WCD934X_IRQ_RCO_ERROR, false},
+	{WCD934X_IRQ_CPE_ERROR, false},
+	{WCD934X_IRQ_MAD_AUDIO, false},
+	{WCD934X_IRQ_MAD_BEACON, false},
+	{WCD934X_IRQ_CPE1_INTR, true},
+	{WCD934X_IRQ_RESERVED_4, false},
+	{WCD934X_IRQ_MAD_ULTRASOUND, false},
+	{WCD934X_IRQ_VBAT_ATTACK, false},
+	{WCD934X_IRQ_VBAT_RESTORE, false},
+};
+
+/*
+ * wcd9335_bring_down: Bringdown WCD Codec
+ *
+ * @wcd9xxx: Pointer to wcd9xxx structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+static int wcd9335_bring_down(struct wcd9xxx *wcd9xxx)
+{
+	if (!wcd9xxx || !wcd9xxx->regmap)
+		return -EINVAL;
+
+	regmap_write(wcd9xxx->regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+		     0x04);
+
+	return 0;
+}
+
+/*
+ * wcd9335_bring_up: Bringup WCD Codec
+ *
+ * @wcd9xxx: Pointer to the wcd9xxx structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+static int wcd9335_bring_up(struct wcd9xxx *wcd9xxx)
+{
+	int ret = 0;
+	int val, byte0;
+	struct regmap *wcd_regmap;
+
+	if (!wcd9xxx)
+		return -EINVAL;
+
+	if (!wcd9xxx->regmap) {
+		dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
+			__func__);
+		return -EINVAL;
+	}
+	wcd_regmap = wcd9xxx->regmap;
+
+	regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0, &val);
+	regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0, &byte0);
+
+	if ((val < 0) || (byte0 < 0)) {
+		dev_err(wcd9xxx->dev, "%s: tasha codec version detection fail!\n",
+			__func__);
+		return -EINVAL;
+	}
+	if ((val & 0x80) && (byte0 == 0x0)) {
+		dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v1.1\n",
+			 __func__);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01);
+		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_2, 0xFC);
+		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_4, 0x21);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			     0x5);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			     0x7);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			     0x3);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3);
+	} else if (byte0 == 0x1) {
+		dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v2.0\n",
+			 __func__);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01);
+		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_TEST_2, 0x00);
+		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_8, 0x6F);
+		regmap_write(wcd_regmap, WCD9335_BIAS_VBG_FINE_ADJ, 0x65);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			     0x5);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			     0x7);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			     0x3);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3);
+	} else if ((byte0 == 0) && (!(val & 0x80))) {
+		dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v1.0\n",
+			 __func__);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01);
+		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_2, 0xFC);
+		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_4, 0x21);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+			     0x3);
+		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3);
+	} else {
+		dev_err(wcd9xxx->dev, "%s: tasha codec version unknown\n",
+			__func__);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * wcd9335_get_cdc_info: Get codec specific information
+ *
+ * @wcd9xxx: pointer to wcd9xxx structure
+ * @wcd_type: pointer to wcd9xxx_codec_type structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+static int wcd9335_get_cdc_info(struct wcd9xxx *wcd9xxx,
+			   struct wcd9xxx_codec_type *wcd_type)
+{
+	u16 id_minor, id_major;
+	struct regmap *wcd_regmap;
+	int rc, val, version = 0;
+
+	if (!wcd9xxx || !wcd_type)
+		return -EINVAL;
+
+	if (!wcd9xxx->regmap) {
+		dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
+			__func__);
+		return -EINVAL;
+	}
+	wcd_regmap = wcd9xxx->regmap;
+
+	rc = regmap_bulk_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0,
+			(u8 *)&id_minor, sizeof(u16));
+	if (rc)
+		return -EINVAL;
+
+	rc = regmap_bulk_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE2,
+			      (u8 *)&id_major, sizeof(u16));
+	if (rc)
+		return -EINVAL;
+
+	dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n",
+		 __func__, id_major, id_minor);
+
+	/* Version detection */
+	if (id_major == TASHA_MAJOR) {
+		regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0,
+			    &val);
+		version = ((u8)val & 0x80) >> 7;
+	} else if (id_major == TASHA2P0_MAJOR)
+		version = 2;
+	else
+		dev_err(wcd9xxx->dev, "%s: wcd9335 version unknown (major 0x%x, minor 0x%x)\n",
+			__func__, id_major, id_minor);
+
+	/* Fill codec type info */
+	wcd_type->id_major = id_major;
+	wcd_type->id_minor = id_minor;
+	wcd_type->num_irqs = WCD9335_NUM_IRQS;
+	wcd_type->version = version;
+	wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1;
+	wcd_type->i2c_chip_status = 0x01;
+	wcd_type->intr_tbl = wcd9335_intr_table;
+	wcd_type->intr_tbl_size = ARRAY_SIZE(wcd9335_intr_table);
+
+	wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] =
+						WCD9335_INTR_PIN1_STATUS0;
+	wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] =
+						WCD9335_INTR_PIN1_CLEAR0;
+	wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] =
+						WCD9335_INTR_PIN1_MASK0;
+	wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] =
+						WCD9335_INTR_LEVEL0;
+	wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] =
+						WCD9335_INTR_CLR_COMMIT;
+
+	return rc;
+}
+
+/*
+ * wcd934x_bring_down: Bringdown WCD Codec
+ *
+ * @wcd9xxx: Pointer to wcd9xxx structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+static int wcd934x_bring_down(struct wcd9xxx *wcd9xxx)
+{
+	if (!wcd9xxx || !wcd9xxx->regmap)
+		return -EINVAL;
+
+	regmap_write(wcd9xxx->regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+		     0x04);
+
+	return 0;
+}
+
+/*
+ * wcd934x_bring_up: Bringup WCD Codec
+ *
+ * @wcd9xxx: Pointer to the wcd9xxx structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+static int wcd934x_bring_up(struct wcd9xxx *wcd9xxx)
+{
+	struct regmap *wcd_regmap;
+
+	if (!wcd9xxx)
+		return -EINVAL;
+
+	if (!wcd9xxx->regmap) {
+		dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
+			__func__);
+		return -EINVAL;
+	}
+	wcd_regmap = wcd9xxx->regmap;
+
+	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x01);
+	regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_A_STARTUP, 0x19);
+	regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_D_STARTUP, 0x15);
+	/* Add 1msec delay for VOUT to settle */
+	usleep_range(1000, 1100);
+	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5);
+	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7);
+	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x3);
+	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x7);
+	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
+
+	return 0;
+}
+
+/*
+ * wcd934x_get_cdc_info: Get codec specific information
+ *
+ * @wcd9xxx: pointer to wcd9xxx structure
+ * @wcd_type: pointer to wcd9xxx_codec_type structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+static int wcd934x_get_cdc_info(struct wcd9xxx *wcd9xxx,
+			   struct wcd9xxx_codec_type *wcd_type)
+{
+	u16 id_minor, id_major;
+	struct regmap *wcd_regmap;
+	int rc, version = -1;
+
+	if (!wcd9xxx || !wcd_type)
+		return -EINVAL;
+
+	if (!wcd9xxx->regmap) {
+		dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null\n", __func__);
+		return -EINVAL;
+	}
+	wcd_regmap = wcd9xxx->regmap;
+
+	rc = regmap_bulk_read(wcd_regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0,
+			      (u8 *)&id_minor, sizeof(u16));
+	if (rc)
+		return -EINVAL;
+
+	rc = regmap_bulk_read(wcd_regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2,
+			      (u8 *)&id_major, sizeof(u16));
+	if (rc)
+		return -EINVAL;
+
+	dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n",
+		 __func__, id_major, id_minor);
+
+	if (id_major != TAVIL_MAJOR)
+		goto version_unknown;
+
+	/*
+	 * Fine-grained version info cannot be retrieved before the tavil
+	 * probe, so assign a coarse version here for possible later use.
+	 */
+	if (id_minor == cpu_to_le16(0))
+		version = TAVIL_VERSION_1_0;
+	else if (id_minor == cpu_to_le16(0x01))
+		version = TAVIL_VERSION_1_1;
+
+version_unknown:
+	if (version < 0)
+		dev_err(wcd9xxx->dev, "%s: wcd934x version unknown\n",
+			__func__);
+
+	/* Fill codec type info */
+	wcd_type->id_major = id_major;
+	wcd_type->id_minor = id_minor;
+	wcd_type->num_irqs = WCD934X_NUM_IRQS;
+	wcd_type->version = version;
+	wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1;
+	wcd_type->i2c_chip_status = 0x01;
+	wcd_type->intr_tbl = wcd934x_intr_table;
+	wcd_type->intr_tbl_size = ARRAY_SIZE(wcd934x_intr_table);
+
+	wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] =
+						WCD934X_INTR_PIN1_STATUS0;
+	wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] =
+						WCD934X_INTR_PIN1_CLEAR0;
+	wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] =
+						WCD934X_INTR_PIN1_MASK0;
+	wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] =
+						WCD934X_INTR_LEVEL0;
+	wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] =
+						WCD934X_INTR_CLR_COMMIT;
+
+	return rc;
+}
+
+codec_bringdown_fn wcd9xxx_bringdown_fn(int type)
+{
+	codec_bringdown_fn cdc_bdown_fn;
+
+	switch (type) {
+	case WCD934X:
+		cdc_bdown_fn = wcd934x_bring_down;
+		break;
+	case WCD9335:
+		cdc_bdown_fn = wcd9335_bring_down;
+		break;
+	default:
+		cdc_bdown_fn = NULL;
+		break;
+	}
+
+	return cdc_bdown_fn;
+}
+
+codec_bringup_fn wcd9xxx_bringup_fn(int type)
+{
+	codec_bringup_fn cdc_bup_fn;
+
+	switch (type) {
+	case WCD934X:
+		cdc_bup_fn = wcd934x_bring_up;
+		break;
+	case WCD9335:
+		cdc_bup_fn = wcd9335_bring_up;
+		break;
+	default:
+		cdc_bup_fn = NULL;
+		break;
+	}
+
+	return cdc_bup_fn;
+}
+
+codec_type_fn wcd9xxx_get_codec_info_fn(int type)
+{
+	codec_type_fn cdc_type_fn;
+
+	switch (type) {
+	case WCD934X:
+		cdc_type_fn = wcd934x_get_cdc_info;
+		break;
+	case WCD9335:
+		cdc_type_fn = wcd9335_get_cdc_info;
+		break;
+	default:
+		cdc_type_fn = NULL;
+		break;
+	}
+
+	return cdc_type_fn;
+}
+
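
wcd9xxx_bringup_fn(), wcd9xxx_bringdown_fn() and wcd9xxx_get_codec_info_fn()
resolve the codec type to the matching helper at runtime. A sketch of a
possible caller (the function name is hypothetical and error handling is
abbreviated):

	static int wcd9xxx_reset_codec(struct wcd9xxx *wcd9xxx, int type)
	{
		codec_bringdown_fn cdc_bdown_fn = wcd9xxx_bringdown_fn(type);
		codec_bringup_fn cdc_bup_fn = wcd9xxx_bringup_fn(type);

		if (!cdc_bdown_fn || !cdc_bup_fn)
			return -EINVAL;	/* unsupported codec type */

		cdc_bdown_fn(wcd9xxx);
		return cdc_bup_fn(wcd9xxx);
	}
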
diff --git a/drivers/mfd/wcd9xxx-slimslave.c b/drivers/mfd/wcd9xxx-slimslave.c
index 8bf1404..a99ad5a 100644
--- a/drivers/mfd/wcd9xxx-slimslave.c
+++ b/drivers/mfd/wcd9xxx-slimslave.c
@@ -47,7 +47,18 @@
 	return 0;
 }
 
-
+/**
+ * wcd9xxx_init_slimslave
+ *
+ * @wcd9xxx: pointer to wcd9xxx struct
+ * @wcd9xxx_pgd_la: pgd_la value
+ * @tx_num: tx number
+ * @rx_num: rx number
+ * @tx_slot: pointer to tx slot
+ * @rx_slot: pointer to rx slot
+ *
+ * Returns 0 on success, appropriate error code otherwise
+ */
 int wcd9xxx_init_slimslave(struct wcd9xxx *wcd9xxx, u8 wcd9xxx_pgd_la,
 			   unsigned int tx_num, unsigned int *tx_slot,
 			   unsigned int rx_num, unsigned int *rx_slot)
@@ -117,6 +128,7 @@
 err:
 	return ret;
 }
+EXPORT_SYMBOL(wcd9xxx_init_slimslave);
 
 int wcd9xxx_deinit_slimslave(struct wcd9xxx *wcd9xxx)
 {
diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c
index 5484301..3dc61ea 100644
--- a/drivers/misc/c2port/c2port-duramar2150.c
+++ b/drivers/misc/c2port/c2port-duramar2150.c
@@ -129,8 +129,8 @@
 
 	duramar2150_c2port_dev = c2port_device_register("uc",
 					&duramar2150_c2port_ops, NULL);
-	if (!duramar2150_c2port_dev) {
-		ret = -ENODEV;
+	if (IS_ERR(duramar2150_c2port_dev)) {
+		ret = PTR_ERR(duramar2150_c2port_dev);
 		goto free_region;
 	}
 
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index 88e4523..fed992e 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -292,7 +292,6 @@
 	if (ret) {
 		dev_err(vop_dev(vdev), "%s %d err %d\n",
 			__func__, __LINE__, ret);
-		kfree(vdev);
 		return ret;
 	}
 
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index bd51c6c..e8b9b48 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -2912,7 +2912,11 @@
 		}
 		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
 			pr_warn("RPMB key status is 0x%x\n", resp.result);
-			*(uint32_t *)req.resp_buf = resp.result;
+			if (put_user(resp.result,
+				(uint32_t __user *)req.resp_buf)) {
+				ret = -EINVAL;
+				goto exit;
+			}
 			ret = 0;
 		}
 		break;
@@ -6507,11 +6511,16 @@
 	void *cmd_buf = NULL;
 	size_t cmd_len;
 	struct sglist_info *table = data->sglistinfo_ptr;
+	void *req_ptr = NULL;
+	void *resp_ptr = NULL;
 
 	ret  = __qseecom_qteec_validate_msg(data, req);
 	if (ret)
 		return ret;
 
+	req_ptr = req->req_ptr;
+	resp_ptr = req->resp_ptr;
+
 	/* find app_id & img_name from list */
 	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
 	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
@@ -6529,6 +6538,11 @@
 		return -ENOENT;
 	}
 
+	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req->req_ptr);
+	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req->resp_ptr);
+
 	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
 			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
 		ret = __qseecom_update_qteec_req_buf(
@@ -6540,10 +6554,10 @@
 	if (qseecom.qsee_version < QSEE_VERSION_40) {
 		ireq.app_id = data->client.app_id;
 		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
-						(uintptr_t)req->req_ptr);
+						(uintptr_t)req_ptr);
 		ireq.req_len = req->req_len;
 		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
-						(uintptr_t)req->resp_ptr);
+						(uintptr_t)resp_ptr);
 		ireq.resp_len = req->resp_len;
 		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
 		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
@@ -6554,10 +6568,10 @@
 	} else {
 		ireq_64bit.app_id = data->client.app_id;
 		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
-						(uintptr_t)req->req_ptr);
+						(uintptr_t)req_ptr);
 		ireq_64bit.req_len = req->req_len;
 		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
-						(uintptr_t)req->resp_ptr);
+						(uintptr_t)resp_ptr);
 		ireq_64bit.resp_len = req->resp_len;
 		if ((data->client.app_arch == ELFCLASS32) &&
 			((ireq_64bit.req_ptr >=
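
The RPMB status hunk above stops writing through the raw user pointer and
uses put_user() instead, so the user address is validated and faults are
handled. The same pattern in isolation (the helper name is hypothetical;
the patch itself maps the failure to -EINVAL):

	#include <linux/uaccess.h>
	#include <linux/types.h>

	static int report_result(u32 result, void __user *resp_buf)
	{
		/* put_user() checks the address and handles the fault path,
		 * unlike a direct *(uint32_t *) store through the pointer.
		 */
		if (put_user(result, (u32 __user *)resp_buf))
			return -EFAULT;

		return 0;
	}
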
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index 871040e..8bf4c57 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -95,7 +95,7 @@
 
 static int uid_cputime_show(struct seq_file *m, void *v)
 {
-	struct uid_entry *uid_entry;
+	struct uid_entry *uid_entry = NULL;
 	struct task_struct *task, *temp;
 	struct user_namespace *user_ns = current_user_ns();
 	cputime_t utime;
@@ -113,7 +113,8 @@
 	read_lock(&tasklist_lock);
 	do_each_thread(temp, task) {
 		uid = from_kuid_munged(user_ns, task_uid(task));
-		uid_entry = find_or_register_uid(uid);
+		if (!uid_entry || uid_entry->uid != uid)
+			uid_entry = find_or_register_uid(uid);
 		if (!uid_entry) {
 			read_unlock(&tasklist_lock);
 			rt_mutex_unlock(&uid_lock);
@@ -252,7 +253,7 @@
 
 static void update_io_stats_all_locked(void)
 {
-	struct uid_entry *uid_entry;
+	struct uid_entry *uid_entry = NULL;
 	struct task_struct *task, *temp;
 	struct user_namespace *user_ns = current_user_ns();
 	unsigned long bkt;
@@ -265,7 +266,8 @@
 	rcu_read_lock();
 	do_each_thread(temp, task) {
 		uid = from_kuid_munged(user_ns, task_uid(task));
-		uid_entry = find_or_register_uid(uid);
+		if (!uid_entry || uid_entry->uid != uid)
+			uid_entry = find_or_register_uid(uid);
 		if (!uid_entry)
 			continue;
 		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f300435..c6f3496 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1718,6 +1718,8 @@
 
 	/* We couldn't get a response from the card.  Give up. */
 	if (err) {
+		if (card->err_in_sdr104)
+			return ERR_RETRY;
 		/* Check if the card is removed */
 		if (mmc_detect_card_removed(card->host))
 			return ERR_NOMEDIUM;
@@ -2208,7 +2210,8 @@
 	     brq->data.error == -ETIMEDOUT ||
 	     brq->cmd.error == -EILSEQ ||
 	     brq->cmd.error == -EIO ||
-	     brq->cmd.error == -ETIMEDOUT))
+	     brq->cmd.error == -ETIMEDOUT ||
+	     brq->sbc.error))
 		card->err_in_sdr104 = true;
 
 	/*
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index e3696c5..a531cb4 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -398,8 +398,6 @@
 			       mmc_hostname(card->host), __func__, ret);
 	}
 
-	device_enable_async_suspend(&card->dev);
-
 	ret = device_add(&card->dev);
 	if (ret)
 		return ret;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 787779c..093f28a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -456,6 +456,22 @@
 }
 EXPORT_SYMBOL(mmc_clk_update_freq);
 
+void mmc_recovery_fallback_lower_speed(struct mmc_host *host)
+{
+	if (!host->card)
+		return;
+
+	if (host->sdr104_wa && mmc_card_sd(host->card) &&
+	    (host->ios.timing == MMC_TIMING_UHS_SDR104) &&
+	    !host->card->sdr104_blocked) {
+		pr_err("%s: %s: SDR104 blocked, lowering the bus speed (SDR50 / DDR50)\n",
+			mmc_hostname(host), __func__);
+		mmc_host_clear_sdr104(host);
+		mmc_hw_reset(host);
+		host->card->sdr104_blocked = true;
+	}
+}
+
 static int mmc_devfreq_set_target(struct device *dev,
 				unsigned long *freq, u32 devfreq_flags)
 {
@@ -507,6 +523,9 @@
 	if (abort)
 		goto out;
 
+	if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
+		goto rel_host;
+
 	/*
 	 * In case we were able to claim host there is no need to
 	 * defer the frequency change. It will be done now
@@ -515,15 +534,18 @@
 
 	mmc_host_clk_hold(host);
 	err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
-	if (err && err != -EAGAIN)
+	if (err && err != -EAGAIN) {
 		pr_err("%s: clock scale to %lu failed with error %d\n",
 			mmc_hostname(host), *freq, err);
-	else
+		mmc_recovery_fallback_lower_speed(host);
+	} else {
 		pr_debug("%s: clock change to %lu finished successfully (%s)\n",
 			mmc_hostname(host), *freq, current->comm);
+	}
 
 
 	mmc_host_clk_release(host);
+rel_host:
 	mmc_release_host(host);
 out:
 	return err;
@@ -544,6 +566,9 @@
 	if (!host->clk_scaling.enable)
 		return;
 
+	if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
+		return;
+
 	spin_lock_bh(&host->clk_scaling.lock);
 
 	if (host->clk_scaling.clk_scaling_in_progress ||
@@ -564,13 +589,15 @@
 
 	err = mmc_clk_update_freq(host, target_freq,
 		host->clk_scaling.state);
-	if (err && err != -EAGAIN)
+	if (err && err != -EAGAIN) {
 		pr_err("%s: failed on deferred scale clocks (%d)\n",
 			mmc_hostname(host), err);
-	else
+		mmc_recovery_fallback_lower_speed(host);
+	} else {
 		pr_debug("%s: clocks were successfully scaled to %lu (%s)\n",
 			mmc_hostname(host),
 			target_freq, current->comm);
+	}
 	host->clk_scaling.clk_scaling_in_progress = false;
 	atomic_dec(&host->clk_scaling.devfreq_abort);
 }
@@ -1571,8 +1598,13 @@
 			}
 		}
 		if (!cmd->error || !cmd->retries ||
-		    mmc_card_removed(host->card))
+		    mmc_card_removed(host->card)) {
+			if (cmd->error && !cmd->retries &&
+			     cmd->opcode != MMC_SEND_STATUS &&
+			     cmd->opcode != MMC_SEND_TUNING_BLOCK)
+				mmc_recovery_fallback_lower_speed(host);
 			break;
+		}
 
 		mmc_retune_recheck(host);
 
@@ -2252,6 +2284,7 @@
 	int claimed_host = 0;
 	unsigned long flags;
 	int retry_cnt = delay_ms/10;
+	bool pm = false;
 
 	do {
 		spin_lock_irqsave(&host->lock, flags);
@@ -2260,11 +2293,17 @@
 			host->claimer = current;
 			host->claim_cnt += 1;
 			claimed_host = 1;
+			if (host->claim_cnt == 1)
+				pm = true;
 		}
 		spin_unlock_irqrestore(&host->lock, flags);
 		if (!claimed_host)
 			mmc_delay(10);
 	} while (!claimed_host && retry_cnt--);
+
+	if (pm)
+		pm_runtime_get_sync(mmc_dev(host));
+
 	if (host->ops->enable && claimed_host && host->claim_cnt == 1)
 		host->ops->enable(host);
 	return claimed_host;
@@ -4257,12 +4296,18 @@
 	}
 
 	if (ret) {
-		mmc_card_set_removed(host->card);
-		if (host->card->sdr104_blocked) {
-			mmc_host_set_sdr104(host);
-			host->card->sdr104_blocked = false;
+		if (host->ops->get_cd && host->ops->get_cd(host)) {
+			mmc_recovery_fallback_lower_speed(host);
+			ret = 0;
+		} else {
+			mmc_card_set_removed(host->card);
+			if (host->card->sdr104_blocked) {
+				mmc_host_set_sdr104(host);
+				host->card->sdr104_blocked = false;
+			}
+			pr_debug("%s: card remove detected\n",
+					mmc_hostname(host));
 		}
-		pr_debug("%s: card remove detected\n", mmc_hostname(host));
 	}
 
 	return ret;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index c92ea77..127ab0f 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -224,7 +224,7 @@
 	host->clk_requests--;
 	if (mmc_host_may_gate_card(host->card) &&
 	    !host->clk_requests)
-		schedule_delayed_work(&host->clk_gate_work,
+		queue_delayed_work(host->clk_gate_wq, &host->clk_gate_work,
 				      msecs_to_jiffies(host->clkgate_delay));
 	spin_unlock_irqrestore(&host->clk_lock, flags);
 }
@@ -283,6 +283,8 @@
 		mmc_host_clk_gate_delayed(host);
 	if (host->clk_gated)
 		mmc_host_clk_hold(host);
+	if (host->clk_gate_wq)
+		destroy_workqueue(host->clk_gate_wq);
 	/* There should be only one user now */
 	WARN_ON(host->clk_requests > 1);
 }
@@ -298,6 +300,42 @@
 		pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
 				mmc_hostname(host));
 }
+
+static inline bool mmc_host_clk_gate_wq_init(struct mmc_host *host)
+{
+	char *wq = NULL;
+	int wq_nl;
+	bool ret = true;
+
+	wq_nl = sizeof("mmc_clk_gate/") + sizeof(mmc_hostname(host)) + 1;
+
+	wq = kzalloc(wq_nl, GFP_KERNEL);
+	if (!wq) {
+		ret = false;
+		goto out;
+	}
+
+	snprintf(wq, wq_nl, "mmc_clk_gate/%s", mmc_hostname(host));
+
+	/*
+	 * Create a workqueue with the WQ_MEM_RECLAIM flag set for the
+	 * mmc clock gate work. Because the mmc thread is created with
+	 * the PF_MEMALLOC flag set, the kernel checks the workqueue
+	 * for the WQ_MEM_RECLAIM flag when the workqueue is flushed.
+	 * If the WQ_MEM_RECLAIM flag is not set, a kernel warning
+	 * will be triggered.
+	 */
+	host->clk_gate_wq = create_workqueue(wq);
+	if (!host->clk_gate_wq) {
+		ret = false;
+		dev_err(host->parent,
+				"failed to create clock gate work queue\n");
+	}
+
+	kfree(wq);
+out:
+	return ret;
+}
 #else
 
 static inline void mmc_host_clk_init(struct mmc_host *host)
@@ -316,6 +354,11 @@
 {
 	return false;
 }
+
+static inline bool mmc_host_clk_gate_wq_init(struct mmc_host *host)
+{
+	return true;
+}
 #endif
 
 void mmc_retune_enable(struct mmc_host *host)
@@ -644,6 +687,11 @@
 		return NULL;
 	}
 
+	if (!mmc_host_clk_gate_wq_init(host)) {
+		kfree(host);
+		return NULL;
+	}
+
 	mmc_host_clk_init(host);
 
 	spin_lock_init(&host->lock);
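
mmc_host_clk_gate_wq_init() above gives each host a dedicated clock-gate
workqueue so that flushing it from the PF_MEMALLOC mmc thread does not
trigger a WQ_MEM_RECLAIM warning. The same idea with the reclaim flag
spelled out explicitly, as a sketch (create_workqueue() sets the flag
implicitly):

	#include <linux/workqueue.h>

	static struct workqueue_struct *example_clk_gate_wq(const char *host_name)
	{
		/* WQ_MEM_RECLAIM marks the queue as safe to flush from a
		 * task running with PF_MEMALLOC set.
		 */
		return alloc_workqueue("mmc_clk_gate/%s", WQ_MEM_RECLAIM, 1,
				       host_name);
	}
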
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 1499d53..e32ed3d 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -333,7 +333,6 @@
 
 	sdio_set_of_node(func);
 	sdio_acpi_set_handle(func);
-	device_enable_async_suspend(&func->dev);
 	ret = device_add(&func->dev);
 	if (ret == 0)
 		sdio_func_set_present(func);
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index b5c81e4..91ad946 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -77,6 +77,15 @@
 int mmc_gpio_get_cd(struct mmc_host *host)
 {
 	struct mmc_gpio *ctx = host->slot.handler_priv;
+	int ret;
+
+	if (host->extcon) {
+		ret = extcon_get_state(host->extcon, EXTCON_MECHANICAL);
+		if (ret < 0)
+			dev_err(mmc_dev(host), "%s: Extcon failed to check card state, ret=%d\n",
+					__func__, ret);
+		return ret;
+	}
 
 	if (!ctx || !ctx->cd_gpio)
 		return -ENOSYS;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 1d9cf34..f3f181d 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1824,7 +1824,7 @@
 	}
 
 	pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
-	if (gpio_is_valid(pdata->status_gpio) & !(flags & OF_GPIO_ACTIVE_LOW))
+	if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
 		pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
 
 	of_property_read_u32(np, "qcom,bus-width", &bus_width);
@@ -2345,21 +2345,6 @@
 	return ret;
 }
 
-/*
- * Reset vreg by ensuring it is off during probe. A call
- * to enable vreg is needed to balance disable vreg
- */
-static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
-{
-	int ret;
-
-	ret = sdhci_msm_setup_vreg(pdata, 1, true);
-	if (ret)
-		return ret;
-	ret = sdhci_msm_setup_vreg(pdata, 0, true);
-	return ret;
-}
-
 /* This init function should be called only once for each SDHC slot */
 static int sdhci_msm_vreg_init(struct device *dev,
 				struct sdhci_msm_pltfm_data *pdata,
@@ -2394,7 +2379,7 @@
 		if (ret)
 			goto vdd_reg_deinit;
 	}
-	ret = sdhci_msm_vreg_reset(pdata);
+
 	if (ret)
 		dev_err(dev, "vreg reset failed (%d)\n", ret);
 	goto out;
@@ -2571,7 +2556,9 @@
 		io_level = REQ_IO_HIGH;
 	}
 	if (irq_status & CORE_PWRCTL_BUS_OFF) {
-		ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
+		if (msm_host->pltfm_init_done)
+			ret = sdhci_msm_setup_vreg(msm_host->pdata,
+					false, false);
 		if (!ret) {
 			ret = sdhci_msm_setup_pins(msm_host->pdata, false);
 			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
@@ -4512,6 +4499,8 @@
 		goto vreg_deinit;
 	}
 
+	msm_host->pltfm_init_done = true;
+
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 2b907e9..c536a7d 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -225,6 +225,7 @@
 	bool mci_removed;
 	const struct sdhci_msm_offset *offset;
 	bool core_3_0v_support;
+	bool pltfm_init_done;
 };
 
 extern char *saved_command_line;
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index d0fc165..21dde52 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -1255,6 +1255,13 @@
 		return -EINVAL;
 	}
 
+	ret = spi_nor_wait_till_ready(nor);
+	if (ret) {
+		dev_err(nor->dev,
+			"timeout while writing configuration register\n");
+		return ret;
+	}
+
 	/* read back and check it */
 	ret = read_cr(nor);
 	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 7ab24c5..05369dc 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -265,6 +265,8 @@
 			     sizeof(*dm),
 			     1000);
 
+	kfree(dm);
+
 	return rc;
 }
 
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 9d91f96..8cc7467 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -112,7 +112,6 @@
 source "drivers/net/ethernet/micrel/Kconfig"
 source "drivers/net/ethernet/microchip/Kconfig"
 source "drivers/net/ethernet/moxa/Kconfig"
-source "drivers/net/ethernet/msm/Kconfig"
 source "drivers/net/ethernet/myricom/Kconfig"
 
 config FEALNX
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index b31cbc2..a09423d 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -52,7 +52,6 @@
 obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
 obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
 obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
-obj-$(CONFIG_ARCH_QCOM) += msm/
 obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 8af2c88..45bb0fe 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1153,6 +1153,12 @@
 		if (skb == NULL)
 			break;
 		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->rx_info[i].mapping)) {
+			dev_kfree_skb(skb);
+			np->rx_info[i].skb = NULL;
+			break;
+		}
 		/* Grrr, we cannot offset to correctly align the IP header. */
 		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
 	}
@@ -1183,8 +1189,9 @@
 {
 	struct netdev_private *np = netdev_priv(dev);
 	unsigned int entry;
+	unsigned int prev_tx;
 	u32 status;
-	int i;
+	int i, j;
 
 	/*
 	 * be cautious here, wrapping the queue has weird semantics
@@ -1202,6 +1209,7 @@
 	}
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 
+	prev_tx = np->cur_tx;
 	entry = np->cur_tx % TX_RING_SIZE;
 	for (i = 0; i < skb_num_frags(skb); i++) {
 		int wrap_ring = 0;
@@ -1235,6 +1243,11 @@
 					       skb_frag_size(this_frag),
 					       PCI_DMA_TODEVICE);
 		}
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->tx_info[entry].mapping)) {
+			dev->stats.tx_dropped++;
+			goto err_out;
+		}
 
 		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
 		np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1269,8 +1282,30 @@
 		netif_stop_queue(dev);
 
 	return NETDEV_TX_OK;
-}
 
+err_out:
+	entry = prev_tx % TX_RING_SIZE;
+	np->tx_info[entry].skb = NULL;
+	if (i > 0) {
+		pci_unmap_single(np->pci_dev,
+				 np->tx_info[entry].mapping,
+				 skb_first_frag_len(skb),
+				 PCI_DMA_TODEVICE);
+		np->tx_info[entry].mapping = 0;
+		entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+		for (j = 1; j < i; j++) {
+			pci_unmap_single(np->pci_dev,
+					 np->tx_info[entry].mapping,
+					 skb_frag_size(
+						&skb_shinfo(skb)->frags[j-1]),
+					 PCI_DMA_TODEVICE);
+			entry++;
+		}
+	}
+	dev_kfree_skb_any(skb);
+	np->cur_tx = prev_tx;
+	return NETDEV_TX_OK;
+}
 
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
@@ -1570,6 +1605,12 @@
 				break;	/* Better luck next round. */
 			np->rx_info[entry].mapping =
 				pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(np->pci_dev,
+						np->rx_info[entry].mapping)) {
+				dev_kfree_skb(skb);
+				np->rx_info[entry].skb = NULL;
+				break;
+			}
 			np->rx_ring[entry].rxaddr =
 				cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
 		}
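
The starfire hunks above check every pci_map_single() result with
pci_dma_mapping_error() before handing the address to hardware. The
map-and-check step in isolation, as a sketch with hypothetical arguments:

	#include <linux/pci.h>
	#include <linux/skbuff.h>

	static int map_rx_buffer(struct pci_dev *pdev, struct sk_buff *skb,
				 unsigned int len, dma_addr_t *mapping)
	{
		*mapping = pci_map_single(pdev, skb->data, len,
					  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, *mapping)) {
			dev_kfree_skb(skb);	/* never post an unmapped buffer */
			return -ENOMEM;
		}

		return 0;
	}
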
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 48ee411..5cc0f8c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1499,6 +1499,7 @@
 			netdev_warn(bp->dev, "Link speed %d no longer supported\n",
 				    speed);
 		}
+		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
 		/* fall thru */
 	}
 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
@@ -5110,6 +5111,7 @@
 	struct hwrm_port_phy_qcfg_input req = {0};
 	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 	u8 link_up = link_info->link_up;
+	u16 diff;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
 
@@ -5197,6 +5199,18 @@
 		link_info->link_up = 0;
 	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	diff = link_info->support_auto_speeds ^ link_info->advertising;
+	if ((link_info->support_auto_speeds | diff) !=
+	    link_info->support_auto_speeds) {
+		/* An advertised speed is no longer supported, so we need to
+		 * update the advertisement settings.  Caller holds RTNL
+		 * so we can modify link settings.
+		 */
+		link_info->advertising = link_info->support_auto_speeds;
+		if (link_info->autoneg & BNXT_AUTONEG_SPEED)
+			bnxt_hwrm_set_link_setting(bp, true, false);
+	}
 	return 0;
 }
 
@@ -6080,29 +6094,37 @@
 	mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
 
-/* Only called from bnxt_sp_task() */
-static void bnxt_reset(struct bnxt *bp, bool silent)
+static void bnxt_rtnl_lock_sp(struct bnxt *bp)
 {
-	/* bnxt_reset_task() calls bnxt_close_nic() which waits
-	 * for BNXT_STATE_IN_SP_TASK to clear.
-	 * If there is a parallel dev_close(), bnxt_close() may be holding
+	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
+	 * set.  If the device is being closed, bnxt_close() may be holding
 	 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
 	 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
 	 */
 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 	rtnl_lock();
-	if (test_bit(BNXT_STATE_OPEN, &bp->state))
-		bnxt_reset_task(bp, silent);
+}
+
+static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+{
 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 	rtnl_unlock();
 }
 
+/* Only called from bnxt_sp_task() */
+static void bnxt_reset(struct bnxt *bp, bool silent)
+{
+	bnxt_rtnl_lock_sp(bp);
+	if (test_bit(BNXT_STATE_OPEN, &bp->state))
+		bnxt_reset_task(bp, silent);
+	bnxt_rtnl_unlock_sp(bp);
+}
+
 static void bnxt_cfg_ntp_filters(struct bnxt *);
 
 static void bnxt_sp_task(struct work_struct *work)
 {
 	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
-	int rc;
 
 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 	smp_mb__after_atomic();
@@ -6116,12 +6138,6 @@
 
 	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
 		bnxt_cfg_ntp_filters(bp);
-	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
-		rc = bnxt_update_link(bp, true);
-		if (rc)
-			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
-				   rc);
-	}
 	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
 		bnxt_hwrm_exec_fwd_req(bp);
 	if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
@@ -6142,18 +6158,39 @@
 		bnxt_hwrm_tunnel_dst_port_free(
 			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
 	}
+	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
+		bnxt_hwrm_port_qstats(bp);
+
+	/* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
+	 * must be the last functions to be called before exiting.
+	 */
+	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+		int rc = 0;
+
+		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
+				       &bp->sp_event))
+			bnxt_hwrm_phy_qcaps(bp);
+
+		bnxt_rtnl_lock_sp(bp);
+		if (test_bit(BNXT_STATE_OPEN, &bp->state))
+			rc = bnxt_update_link(bp, true);
+		bnxt_rtnl_unlock_sp(bp);
+		if (rc)
+			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+				   rc);
+	}
+	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
+		bnxt_rtnl_lock_sp(bp);
+		if (test_bit(BNXT_STATE_OPEN, &bp->state))
+			bnxt_get_port_module_status(bp);
+		bnxt_rtnl_unlock_sp(bp);
+	}
 	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
 		bnxt_reset(bp, false);
 
 	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
 		bnxt_reset(bp, true);
 
-	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
-		bnxt_get_port_module_status(bp);
-
-	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
-		bnxt_hwrm_port_qstats(bp);
-
 	smp_mb__before_atomic();
 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 51b164a..666bc06 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1089,6 +1089,7 @@
 #define BNXT_RESET_TASK_SILENT_SP_EVENT	11
 #define BNXT_GENEVE_ADD_PORT_SP_EVENT	12
 #define BNXT_GENEVE_DEL_PORT_SP_EVENT	13
+#define BNXT_LINK_SPEED_CHNG_SP_EVENT	14
 
 	struct bnxt_pf_info	pf;
 #ifdef CONFIG_BNXT_SRIOV
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 050e21f..679679a4 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -31,6 +31,7 @@
 	u8                      lmac_type;
 	u8                      lane_to_sds;
 	bool                    use_training;
+	bool                    autoneg;
 	bool			link_up;
 	int			lmacid; /* ID within BGX */
 	int			lmacid_bd; /* ID on board */
@@ -418,7 +419,17 @@
 	/* power down, reset autoneg, autoneg enable */
 	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
 	cfg &= ~PCS_MRX_CTL_PWR_DN;
-	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
+	cfg |= PCS_MRX_CTL_RST_AN;
+	if (lmac->phydev) {
+		cfg |= PCS_MRX_CTL_AN_EN;
+	} else {
+		/* In scenarios where PHY driver is not present or it's a
+		/* In scenarios where the PHY driver is not present or it is
+		 * a non-standard PHY, FW sets AN_EN to tell the Linux driver
+		 * whether to do auto-negotiation and link polling.
+		if (cfg & PCS_MRX_CTL_AN_EN)
+			lmac->autoneg = true;
+	}
 	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
 
 	if (lmac->lmac_type == BGX_MODE_QSGMII) {
@@ -429,7 +440,7 @@
 		return 0;
 	}
 
-	if (lmac->lmac_type == BGX_MODE_SGMII) {
+	if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
 		if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
 				 PCS_MRX_STATUS_AN_CPT, false)) {
 			dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
@@ -623,12 +634,71 @@
 	return -1;
 }
 
+static void bgx_poll_for_sgmii_link(struct lmac *lmac)
+{
+	u64 pcs_link, an_result;
+	u8 speed;
+
+	pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
+				BGX_GMP_PCS_MRX_STATUS);
+
+	/* Link state bit is sticky, read it again */
+	if (!(pcs_link & PCS_MRX_STATUS_LINK))
+		pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
+					BGX_GMP_PCS_MRX_STATUS);
+
+	if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
+			 PCS_MRX_STATUS_AN_CPT, false)) {
+		lmac->link_up = false;
+		lmac->last_speed = SPEED_UNKNOWN;
+		lmac->last_duplex = DUPLEX_UNKNOWN;
+		goto next_poll;
+	}
+
+	lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false;
+	an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
+				 BGX_GMP_PCS_ANX_AN_RESULTS);
+
+	speed = (an_result >> 3) & 0x3;
+	lmac->last_duplex = (an_result >> 1) & 0x1;
+	switch (speed) {
+	case 0:
+		lmac->last_speed = 10;
+		break;
+	case 1:
+		lmac->last_speed = 100;
+		break;
+	case 2:
+		lmac->last_speed = 1000;
+		break;
+	default:
+		lmac->link_up = false;
+		lmac->last_speed = SPEED_UNKNOWN;
+		lmac->last_duplex = DUPLEX_UNKNOWN;
+		break;
+	}
+
+next_poll:
+
+	if (lmac->last_link != lmac->link_up) {
+		if (lmac->link_up)
+			bgx_sgmii_change_link_state(lmac);
+		lmac->last_link = lmac->link_up;
+	}
+
+	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
+}
+
 static void bgx_poll_for_link(struct work_struct *work)
 {
 	struct lmac *lmac;
 	u64 spu_link, smu_link;
 
 	lmac = container_of(work, struct lmac, dwork.work);
+	if (lmac->is_sgmii) {
+		bgx_poll_for_sgmii_link(lmac);
+		return;
+	}
 
 	/* Receive link is latching low. Force it high and verify it */
 	bgx_reg_modify(lmac->bgx, lmac->lmacid,
@@ -720,9 +790,21 @@
 	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
 	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
 	    (lmac->lmac_type != BGX_MODE_10G_KR)) {
-		if (!lmac->phydev)
-			return -ENODEV;
-
+		if (!lmac->phydev) {
+			if (lmac->autoneg) {
+				bgx_reg_write(bgx, lmacid,
+					      BGX_GMP_PCS_LINKX_TIMER,
+					      PCS_LINKX_TIMER_COUNT);
+				goto poll;
+			} else {
+				/* Default to the link speed and duplex set below */
+				lmac->link_up = true;
+				lmac->last_speed = 1000;
+				lmac->last_duplex = 1;
+				bgx_sgmii_change_link_state(lmac);
+				return 0;
+			}
+		}
 		lmac->phydev->dev_flags = 0;
 
 		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
@@ -731,15 +813,17 @@
 			return -ENODEV;
 
 		phy_start_aneg(lmac->phydev);
-	} else {
-		lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
-						   WQ_MEM_RECLAIM, 1);
-		if (!lmac->check_link)
-			return -ENOMEM;
-		INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
-		queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+		return 0;
 	}
 
+poll:
+	lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
+					   WQ_MEM_RECLAIM, 1);
+	if (!lmac->check_link)
+		return -ENOMEM;
+	INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
+	queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 01cc7c8..1143e957 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -144,10 +144,15 @@
 #define	 PCS_MRX_CTL_LOOPBACK1			BIT_ULL(14)
 #define	 PCS_MRX_CTL_RESET			BIT_ULL(15)
 #define BGX_GMP_PCS_MRX_STATUS		0x30008
+#define	 PCS_MRX_STATUS_LINK			BIT_ULL(2)
 #define	 PCS_MRX_STATUS_AN_CPT			BIT_ULL(5)
+#define BGX_GMP_PCS_ANX_ADV		0x30010
 #define BGX_GMP_PCS_ANX_AN_RESULTS	0x30020
+#define BGX_GMP_PCS_LINKX_TIMER		0x30040
+#define PCS_LINKX_TIMER_COUNT			0x1E84
 #define BGX_GMP_PCS_SGM_AN_ADV		0x30068
 #define BGX_GMP_PCS_MISCX_CTL		0x30078
+#define  PCS_MISC_CTL_MODE			BIT_ULL(8)
 #define  PCS_MISC_CTL_DISP_EN			BIT_ULL(13)
 #define  PCS_MISC_CTL_GMX_ENO			BIT_ULL(11)
 #define  PCS_MISC_CTL_SAMP_PT_MASK	0x7Full
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index f9c2feb..0c2a32a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2201,9 +2201,10 @@
 {
 	int err;
 
+	mutex_lock(&uld_mutex);
 	err = setup_sge_queues(adap);
 	if (err)
-		goto out;
+		goto rel_lock;
 	err = setup_rss(adap);
 	if (err)
 		goto freeq;
@@ -2227,7 +2228,6 @@
 			goto irq_err;
 	}
 
-	mutex_lock(&uld_mutex);
 	enable_rx(adap);
 	t4_sge_start(adap);
 	t4_intr_enable(adap);
@@ -2240,13 +2240,15 @@
 #endif
 	/* Initialize hash mac addr list*/
 	INIT_LIST_HEAD(&adap->mac_hlist);
- out:
 	return err;
+
  irq_err:
 	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
  freeq:
 	t4_free_sge_resources(adap);
-	goto out;
+ rel_lock:
+	mutex_unlock(&uld_mutex);
+	return err;
 }
 
 static void cxgb_down(struct adapter *adapter)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 9061c2f..d391bee 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2007,8 +2007,8 @@
 		if (!rxb->page)
 			continue;
 
-		dma_unmap_single(rx_queue->dev, rxb->dma,
-				 PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_unmap_page(rx_queue->dev, rxb->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
 		__free_page(rxb->page);
 
 		rxb->page = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index dff7b60..c06845b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -304,8 +304,8 @@
 			struct hns_nic_ring_data *ring_data)
 {
 	struct hns_nic_priv *priv = netdev_priv(ndev);
-	struct device *dev = priv->dev;
 	struct hnae_ring *ring = ring_data->ring;
+	struct device *dev = ring_to_dev(ring);
 	struct netdev_queue *dev_queue;
 	struct skb_frag_struct *frag;
 	int buf_num;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0fbf686..9f2184b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -189,9 +189,10 @@
 	}
 	ltb->map_id = adapter->map_id;
 	adapter->map_id++;
+
+	init_completion(&adapter->fw_done);
 	send_request_map(adapter, ltb->addr,
 			 ltb->size, ltb->map_id);
-	init_completion(&adapter->fw_done);
 	wait_for_completion(&adapter->fw_done);
 	return 0;
 }
@@ -505,7 +506,7 @@
 	adapter->rx_pool = NULL;
 rx_pool_arr_alloc_failed:
 	for (i = 0; i < adapter->req_rx_queues; i++)
-		napi_enable(&adapter->napi[i]);
+		napi_disable(&adapter->napi[i]);
 alloc_napi_failed:
 	return -ENOMEM;
 }
@@ -1133,10 +1134,10 @@
 	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
 	crq.request_statistics.len =
 	    cpu_to_be32(sizeof(struct ibmvnic_statistics));
-	ibmvnic_send_crq(adapter, &crq);
 
 	/* Wait for data to be written */
 	init_completion(&adapter->stats_done);
+	ibmvnic_send_crq(adapter, &crq);
 	wait_for_completion(&adapter->stats_done);
 
 	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
@@ -2197,12 +2198,12 @@
 
 	if (!found) {
 		dev_err(dev, "Couldn't find error id %x\n",
-			crq->request_error_rsp.error_id);
+			be32_to_cpu(crq->request_error_rsp.error_id));
 		return;
 	}
 
 	dev_err(dev, "Detailed info for error id %x:",
-		crq->request_error_rsp.error_id);
+		be32_to_cpu(crq->request_error_rsp.error_id));
 
 	for (i = 0; i < error_buff->len; i++) {
 		pr_cont("%02x", (int)error_buff->buff[i]);
@@ -2281,8 +2282,8 @@
 	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
 		crq->error_indication.
 		    flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
-		crq->error_indication.error_id,
-		crq->error_indication.error_cause);
+		be32_to_cpu(crq->error_indication.error_id),
+		be16_to_cpu(crq->error_indication.error_cause));
 
 	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
 	if (!error_buff)
@@ -2400,10 +2401,10 @@
 	case PARTIALSUCCESS:
 		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
 			 *req_value,
-			 (long int)be32_to_cpu(crq->request_capability_rsp.
+			 (long int)be64_to_cpu(crq->request_capability_rsp.
 					       number), name);
 		release_sub_crqs_no_irqs(adapter);
-		*req_value = be32_to_cpu(crq->request_capability_rsp.number);
+		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
 		init_sub_crqs(adapter, 1);
 		return;
 	default:
@@ -2809,9 +2810,9 @@
 	crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
 	crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
 	crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
-	ibmvnic_send_crq(adapter, &crq);
 
 	init_completion(&adapter->fw_done);
+	ibmvnic_send_crq(adapter, &crq);
 	wait_for_completion(&adapter->fw_done);
 
 	if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
@@ -3591,9 +3592,9 @@
 	memset(&crq, 0, sizeof(crq));
 	crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
 	crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
-	ibmvnic_send_crq(adapter, &crq);
 
 	init_completion(&adapter->fw_done);
+	ibmvnic_send_crq(adapter, &crq);
 	wait_for_completion(&adapter->fw_done);
 
 	seq_write(seq, adapter->dump_data, adapter->dump_data_size);
@@ -3639,8 +3640,8 @@
 		}
 	}
 
-	send_version_xchg(adapter);
 	reinit_completion(&adapter->init_done);
+	send_version_xchg(adapter);
 	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
 		dev_err(dev, "Passive init timeout\n");
 		goto task_failed;
@@ -3650,9 +3651,9 @@
 		if (adapter->renegotiate) {
 			adapter->renegotiate = false;
 			release_sub_crqs_no_irqs(adapter);
-			send_cap_queries(adapter);
 
 			reinit_completion(&adapter->init_done);
+			send_cap_queries(adapter);
 			if (!wait_for_completion_timeout(&adapter->init_done,
 							 timeout)) {
 				dev_err(dev, "Passive init timeout\n");
@@ -3780,9 +3781,9 @@
 			adapter->debugfs_dump = ent;
 		}
 	}
-	ibmvnic_send_crq_init(adapter);
 
 	init_completion(&adapter->init_done);
+	ibmvnic_send_crq_init(adapter);
 	if (!wait_for_completion_timeout(&adapter->init_done, timeout))
 		return 0;
 
@@ -3790,9 +3791,9 @@
 		if (adapter->renegotiate) {
 			adapter->renegotiate = false;
 			release_sub_crqs_no_irqs(adapter);
-			send_cap_queries(adapter);
 
 			reinit_completion(&adapter->init_done);
+			send_cap_queries(adapter);
 			if (!wait_for_completion_timeout(&adapter->init_done,
 							 timeout))
 				return 0;
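
Several ibmvnic hunks above move init_completion()/reinit_completion()
ahead of the CRQ send so the completion is armed before the response can
arrive. The ordering pattern in isolation, as a sketch with a stubbed
request function:

	#include <linux/completion.h>

	struct my_adapter {
		struct completion fw_done;
	};

	static void my_send_request(struct my_adapter *adapter)
	{
		/* issue the request to firmware (stubbed for the sketch) */
	}

	static int my_send_and_wait(struct my_adapter *adapter)
	{
		/* Arm the completion before issuing the request; otherwise
		 * the response path may call complete() on a completion that
		 * is not yet (re)initialized and the waiter can hang or
		 * return early.
		 */
		init_completion(&adapter->fw_done);
		my_send_request(adapter);
		wait_for_completion(&adapter->fw_done);

		return 0;
	}
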
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 86a89cb..4832223 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2518,7 +2518,7 @@
 }
 
 const struct of_device_id of_mtk_match[] = {
-	{ .compatible = "mediatek,mt7623-eth" },
+	{ .compatible = "mediatek,mt2701-eth" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index c7e9399..53daa6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -158,7 +158,7 @@
 	return -ETIMEDOUT;
 }
 
-static int mlx4_comm_internal_err(u32 slave_read)
+int mlx4_comm_internal_err(u32 slave_read)
 {
 	return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
 		(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0e8b7c4..8258d08 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -222,6 +222,18 @@
 		return;
 
 	mlx4_stop_catas_poll(dev);
+	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
+	    mlx4_is_slave(dev)) {
+		/* In mlx4_remove_one on a VF */
+		u32 slave_read =
+			swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
+
+		if (mlx4_comm_internal_err(slave_read)) {
+			mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
+				 __func__);
+			mlx4_enter_error_state(dev->persist);
+		}
+	}
 	mutex_lock(&intf_mutex);
 
 	list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 88ee7d8..086920b6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1220,6 +1220,7 @@
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 
 void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);
 
 int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
 		    enum mlx4_port_type *type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 36fbc6b..8cd7227 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1081,7 +1081,7 @@
 					       MLX5_FLOW_NAMESPACE_KERNEL);
 
 	if (!priv->fs.ns)
-		return -EINVAL;
+		return -EOPNOTSUPP;
 
 	err = mlx5e_arfs_create_tables(priv);
 	if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index c7011ef..a8966e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -352,7 +352,7 @@
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get FDB flow namespace\n");
-		return -ENOMEM;
+		return -EOPNOTSUPP;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
@@ -961,7 +961,7 @@
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
-		return -EIO;
+		return -EOPNOTSUPP;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
@@ -1078,7 +1078,7 @@
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
-		return -EIO;
+		return -EOPNOTSUPP;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d239f5d..b08b9e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -414,6 +414,7 @@
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get FDB flow namespace\n");
+		err = -EOPNOTSUPP;
 		goto ns_err;
 	}
 
@@ -520,7 +521,7 @@
 	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
 	if (!ns) {
 		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
-		return -ENOMEM;
+		return -EOPNOTSUPP;
 	}
 
 	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0);
@@ -639,7 +640,7 @@
 		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
 		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
 		if (err1)
-			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
 	}
 	return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 7e20e4b..4de3c28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1678,7 +1678,7 @@
 	struct mlx5_flow_table *ft;
 
 	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
-	if (!ns)
+	if (WARN_ON(!ns))
 		return -EINVAL;
 	ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL);
 	if (IS_ERR(ft)) {
diff --git a/drivers/net/ethernet/msm/Kconfig b/drivers/net/ethernet/msm/Kconfig
deleted file mode 100644
index 586e03e..0000000
--- a/drivers/net/ethernet/msm/Kconfig
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# msm network device configuration
-#
-
-config ECM_IPA
-	tristate "STD ECM LAN Driver support"
-	depends on IPA || IPA3
-	help
-	  Enables LAN between applications processor and a tethered
-	  host using the STD ECM protocol.
-	  This Network interface is aimed to allow data path go through
-	  IPA core while using STD ECM protocol.
-
-config RNDIS_IPA
-	tristate "RNDIS_IPA Network Interface Driver support"
-	depends on IPA || IPA3
-	help
-	  Enables LAN between applications processor and a tethered
-	  host using the RNDIS protocol.
-	  This Network interface is aimed to allow data path go through
-	  IPA core while using RNDIS protocol.
-
diff --git a/drivers/net/ethernet/msm/Makefile b/drivers/net/ethernet/msm/Makefile
deleted file mode 100644
index ec2699a..0000000
--- a/drivers/net/ethernet/msm/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-#
-# Makefile for the msm networking support.
-#
-
-obj-$(CONFIG_ECM_IPA) += ecm_ipa.o
-obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 862f18e..510ff62 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -179,6 +179,49 @@
 	.get_mdio_data = ravb_get_mdio_data,
 };
 
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &priv->stats[q];
+	struct ravb_tx_desc *desc;
+	int free_num = 0;
+	int entry;
+	u32 size;
+
+	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+		bool txed;
+
+		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+					     NUM_TX_DESC);
+		desc = &priv->tx_ring[q][entry];
+		txed = desc->die_dt == DT_FEMPTY;
+		if (free_txed_only && !txed)
+			break;
+		/* Descriptor type must be checked before all other reads */
+		dma_rmb();
+		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+		/* Free the original skb. */
+		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+					 size, DMA_TO_DEVICE);
+			/* Last packet descriptor? */
+			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+				entry /= NUM_TX_DESC;
+				dev_kfree_skb_any(priv->tx_skb[q][entry]);
+				priv->tx_skb[q][entry] = NULL;
+				if (txed)
+					stats->tx_packets++;
+			}
+			free_num++;
+		}
+		if (txed)
+			stats->tx_bytes += size;
+		desc->die_dt = DT_EEMPTY;
+	}
+	return free_num;
+}
+
 /* Free skb's and DMA buffers for Ethernet AVB */
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
@@ -194,19 +237,21 @@
 	kfree(priv->rx_skb[q]);
 	priv->rx_skb[q] = NULL;
 
-	/* Free TX skb ringbuffer */
-	if (priv->tx_skb[q]) {
-		for (i = 0; i < priv->num_tx_ring[q]; i++)
-			dev_kfree_skb(priv->tx_skb[q][i]);
-	}
-	kfree(priv->tx_skb[q]);
-	priv->tx_skb[q] = NULL;
-
 	/* Free aligned TX buffers */
 	kfree(priv->tx_align[q]);
 	priv->tx_align[q] = NULL;
 
 	if (priv->rx_ring[q]) {
+		for (i = 0; i < priv->num_rx_ring[q]; i++) {
+			struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+			if (!dma_mapping_error(ndev->dev.parent,
+					       le32_to_cpu(desc->dptr)))
+				dma_unmap_single(ndev->dev.parent,
+						 le32_to_cpu(desc->dptr),
+						 PKT_BUF_SZ,
+						 DMA_FROM_DEVICE);
+		}
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
 			    (priv->num_rx_ring[q] + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@@ -215,12 +260,20 @@
 	}
 
 	if (priv->tx_ring[q]) {
+		ravb_tx_free(ndev, q, false);
+
 		ring_size = sizeof(struct ravb_tx_desc) *
 			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
 	}
+
+	/* Free TX skb ringbuffer.
+	 * SKBs are freed by ravb_tx_free() call above.
+	 */
+	kfree(priv->tx_skb[q]);
+	priv->tx_skb[q] = NULL;
 }
 
 /* Format skb and descriptor buffer for Ethernet AVB */
@@ -431,44 +484,6 @@
 	return 0;
 }
 
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-	struct net_device_stats *stats = &priv->stats[q];
-	struct ravb_tx_desc *desc;
-	int free_num = 0;
-	int entry;
-	u32 size;
-
-	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
-					     NUM_TX_DESC);
-		desc = &priv->tx_ring[q][entry];
-		if (desc->die_dt != DT_FEMPTY)
-			break;
-		/* Descriptor type must be checked before all other reads */
-		dma_rmb();
-		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
-		/* Free the original skb. */
-		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
-			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 size, DMA_TO_DEVICE);
-			/* Last packet descriptor? */
-			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
-				entry /= NUM_TX_DESC;
-				dev_kfree_skb_any(priv->tx_skb[q][entry]);
-				priv->tx_skb[q][entry] = NULL;
-				stats->tx_packets++;
-			}
-			free_num++;
-		}
-		stats->tx_bytes += size;
-		desc->die_dt = DT_EEMPTY;
-	}
-	return free_num;
-}
-
 static void ravb_get_tx_tstamp(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
@@ -902,7 +917,7 @@
 			spin_lock_irqsave(&priv->lock, flags);
 			/* Clear TX interrupt */
 			ravb_write(ndev, ~mask, TIS);
-			ravb_tx_free(ndev, q);
+			ravb_tx_free(ndev, q, true);
 			netif_wake_subqueue(ndev, q);
 			mmiowb();
 			spin_unlock_irqrestore(&priv->lock, flags);
@@ -1571,7 +1586,8 @@
 
 	priv->cur_tx[q] += NUM_TX_DESC;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >
-	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+	    !ravb_tx_free(ndev, q, true))
 		netif_stop_subqueue(ndev, q);
 
 exit:
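
The ravb change above moves ravb_tx_free() ahead of ravb_ring_free() and gives it a free_txed_only flag: the interrupt and transmit paths keep reclaiming only descriptors the hardware has completed, while ring teardown now unmaps and frees every outstanding descriptor (and the RX teardown gains matching dma_unmap_single() calls). A compressed sketch of the flagged reclaim loop, with illustrative types instead of the ravb ones:

	#include <linux/types.h>

	struct demo_desc { bool hw_done; bool mapped; };
	struct demo_ring { struct demo_desc d[64]; unsigned int dirty, cur; };

	static int demo_tx_reclaim(struct demo_ring *r, bool free_txed_only)
	{
		int freed = 0;

		for (; r->cur - r->dirty > 0; r->dirty++) {
			struct demo_desc *desc = &r->d[r->dirty % 64];
			bool txed = desc->hw_done;

			if (free_txed_only && !txed)
				break;		/* IRQ path stops at first pending */
			desc->mapped = false;	/* teardown path unmaps everything */
			freed++;
		}
		return freed;
	}
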
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 93dc10b..aa02a03 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -100,6 +100,14 @@
 /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
 #define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
 
+#ifdef __BIG_ENDIAN
+#define xemaclite_readl		ioread32be
+#define xemaclite_writel	iowrite32be
+#else
+#define xemaclite_readl		ioread32
+#define xemaclite_writel	iowrite32
+#endif
+
 /**
  * struct net_local - Our private per device data
  * @ndev:		instance of the network device
@@ -156,15 +164,15 @@
 	u32 reg_data;
 
 	/* Enable the Tx interrupts for the first Buffer */
-	reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
-	__raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
-		     drvdata->base_addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+	xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
+			 drvdata->base_addr + XEL_TSR_OFFSET);
 
 	/* Enable the Rx interrupts for the first buffer */
-	__raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
+	xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
 
 	/* Enable the Global Interrupt Enable */
-	__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+	xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
 }
 
 /**
@@ -179,17 +187,17 @@
 	u32 reg_data;
 
 	/* Disable the Global Interrupt Enable */
-	__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+	xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
 
 	/* Disable the Tx interrupts for the first buffer */
-	reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
-	__raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
-		     drvdata->base_addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+	xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
+			 drvdata->base_addr + XEL_TSR_OFFSET);
 
 	/* Disable the Rx interrupts for the first buffer */
-	reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
-	__raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
-		     drvdata->base_addr + XEL_RSR_OFFSET);
+	reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET);
+	xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
+			 drvdata->base_addr + XEL_RSR_OFFSET);
 }
 
 /**
@@ -321,7 +329,7 @@
 		byte_count = ETH_FRAME_LEN;
 
 	/* Check if the expected buffer is available */
-	reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
 	if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
 	     XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
 
@@ -334,7 +342,7 @@
 
 		addr = (void __iomem __force *)((u32 __force)addr ^
 						 XEL_BUFFER_OFFSET);
-		reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+		reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
 
 		if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
 		     XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
@@ -345,16 +353,16 @@
 	/* Write the frame to the buffer */
 	xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
 
-	__raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
-		     addr + XEL_TPLR_OFFSET);
+	xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
+			 addr + XEL_TPLR_OFFSET);
 
 	/* Update the Tx Status Register to indicate that there is a
 	 * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
 	 * is used by the interrupt handler to check whether a frame
 	 * has been transmitted */
-	reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
 	reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
-	__raw_writel(reg_data, addr + XEL_TSR_OFFSET);
+	xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
 
 	return 0;
 }
@@ -369,7 +377,7 @@
  *
  * Return:	Total number of bytes received
  */
-static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
+static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
 {
 	void __iomem *addr;
 	u16 length, proto_type;
@@ -379,7 +387,7 @@
 	addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
 
 	/* Verify which buffer has valid data */
-	reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
 
 	if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
 		if (drvdata->rx_ping_pong != 0)
@@ -396,27 +404,28 @@
 			return 0;	/* No data was available */
 
 		/* Verify that buffer has valid data */
-		reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+		reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
 		if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
 		     XEL_RSR_RECV_DONE_MASK)
 			return 0;	/* No data was available */
 	}
 
 	/* Get the protocol type of the ethernet frame that arrived */
-	proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
+	proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
 			XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
 			XEL_RPLR_LENGTH_MASK);
 
 	/* Check if received ethernet frame is a raw ethernet frame
 	 * or an IP packet or an ARP packet */
-	if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
+	if (proto_type > ETH_DATA_LEN) {
 
 		if (proto_type == ETH_P_IP) {
-			length = ((ntohl(__raw_readl(addr +
+			length = ((ntohl(xemaclite_readl(addr +
 					XEL_HEADER_IP_LENGTH_OFFSET +
 					XEL_RXBUFF_OFFSET)) >>
 					XEL_HEADER_SHIFT) &
 					XEL_RPLR_LENGTH_MASK);
+			length = min_t(u16, length, ETH_DATA_LEN);
 			length += ETH_HLEN + ETH_FCS_LEN;
 
 		} else if (proto_type == ETH_P_ARP)
@@ -429,14 +438,17 @@
 		/* Use the length in the frame, plus the header and trailer */
 		length = proto_type + ETH_HLEN + ETH_FCS_LEN;
 
+	if (WARN_ON(length > maxlen))
+		length = maxlen;
+
 	/* Read from the EmacLite device */
 	xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
 				data, length);
 
 	/* Acknowledge the frame */
-	reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
 	reg_data &= ~XEL_RSR_RECV_DONE_MASK;
-	__raw_writel(reg_data, addr + XEL_RSR_OFFSET);
+	xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET);
 
 	return length;
 }
@@ -463,14 +475,14 @@
 
 	xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
 
-	__raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
+	xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
 
 	/* Update the MAC address in the EmacLite */
-	reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
-	__raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
+	xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
 
 	/* Wait for EmacLite to finish with the MAC address update */
-	while ((__raw_readl(addr + XEL_TSR_OFFSET) &
+	while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
 		XEL_TSR_PROG_MAC_ADDR) != 0)
 		;
 }
@@ -603,7 +615,7 @@
 
 	skb_reserve(skb, 2);
 
-	len = xemaclite_recv_data(lp, (u8 *) skb->data);
+	len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
 
 	if (!len) {
 		dev->stats.rx_errors++;
@@ -640,32 +652,32 @@
 	u32 tx_status;
 
 	/* Check if there is Rx Data available */
-	if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
+	if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) &
 			 XEL_RSR_RECV_DONE_MASK) ||
-	    (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
+	    (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
 			 & XEL_RSR_RECV_DONE_MASK))
 
 		xemaclite_rx_handler(dev);
 
 	/* Check if the Transmission for the first buffer is completed */
-	tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
+	tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
 	if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
 		(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
 
 		tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
-		__raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);
+		xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
 
 		tx_complete = true;
 	}
 
 	/* Check if the Transmission for the second buffer is completed */
-	tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+	tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
 	if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
 		(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
 
 		tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
-		__raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
-			     XEL_TSR_OFFSET);
+		xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
+				 XEL_TSR_OFFSET);
 
 		tx_complete = true;
 	}
@@ -698,7 +710,7 @@
 	/* wait for the MDIO interface to not be busy or timeout
 	   after some time.
 	*/
-	while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
+	while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
 			XEL_MDIOCTRL_MDIOSTS_MASK) {
 		if (time_before_eq(end, jiffies)) {
 			WARN_ON(1);
@@ -734,17 +746,17 @@
 	 * MDIO Address register. Set the Status bit in the MDIO Control
 	 * register to start a MDIO read transaction.
 	 */
-	ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
-	__raw_writel(XEL_MDIOADDR_OP_MASK |
-		     ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
-		     lp->base_addr + XEL_MDIOADDR_OFFSET);
-	__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
-		     lp->base_addr + XEL_MDIOCTRL_OFFSET);
+	ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+	xemaclite_writel(XEL_MDIOADDR_OP_MASK |
+			 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+			 lp->base_addr + XEL_MDIOADDR_OFFSET);
+	xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+			 lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
 	if (xemaclite_mdio_wait(lp))
 		return -ETIMEDOUT;
 
-	rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET);
+	rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
 
 	dev_dbg(&lp->ndev->dev,
 		"xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
@@ -781,13 +793,13 @@
 	 * Data register. Finally, set the Status bit in the MDIO Control
 	 * register to start a MDIO write transaction.
 	 */
-	ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
-	__raw_writel(~XEL_MDIOADDR_OP_MASK &
-		     ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
-		     lp->base_addr + XEL_MDIOADDR_OFFSET);
-	__raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
-	__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
-		     lp->base_addr + XEL_MDIOCTRL_OFFSET);
+	ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+	xemaclite_writel(~XEL_MDIOADDR_OP_MASK &
+			 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+			 lp->base_addr + XEL_MDIOADDR_OFFSET);
+	xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
+	xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+			 lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
 	return 0;
 }
@@ -834,8 +846,8 @@
 	/* Enable the MDIO bus by asserting the enable bit in MDIO Control
 	 * register.
 	 */
-	__raw_writel(XEL_MDIOCTRL_MDIOEN_MASK,
-		     lp->base_addr + XEL_MDIOCTRL_OFFSET);
+	xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK,
+			 lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
 	bus = mdiobus_alloc();
 	if (!bus) {
@@ -1140,8 +1152,8 @@
 	}
 
 	/* Clear the Tx CSR's in case this is a restart */
-	__raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
-	__raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+	xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET);
+	xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
 
 	/* Set the MAC address in the EmacLite device */
 	xemaclite_update_address(lp, ndev->dev_addr);
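
The emaclite conversion swaps __raw_readl()/__raw_writel() for xemaclite_readl()/xemaclite_writel(), picked at build time from ioread32be or ioread32 so register access matches the bus endianness, and it bounds the length reported by the hardware (min_t() against ETH_DATA_LEN plus a maxlen clamp in xemaclite_recv_data()) before copying into the skb. A small sketch of both patterns, using a made-up register layout:

	#include <linux/io.h>
	#include <linux/bug.h>
	#include <linux/types.h>

	#ifdef __BIG_ENDIAN
	#define demo_readl	ioread32be
	#define demo_writel	iowrite32be
	#else
	#define demo_readl	ioread32
	#define demo_writel	iowrite32
	#endif

	/* Invented layout: length register at offset 0x0, RX buffer at 0x100. */
	static u16 demo_recv(void __iomem *regs, u8 *buf, int maxlen)
	{
		u16 len = demo_readl(regs) & 0xffff;

		if (WARN_ON(len > maxlen))
			len = maxlen;		/* never overrun the buffer */
		memcpy_fromio(buf, regs + 0x100, len);
		return len;
	}
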
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 97e0cbc..cebde07 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1372,3 +1372,4 @@
 MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
 MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
 MODULE_ALIAS_RTNL_LINK("gtp");
+MODULE_ALIAS_GENL_FAMILY("gtp");
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 1dfe230..e0a6b1a 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -648,8 +648,8 @@
 {
 	/* Finish setting up the DEVICE info. */
 	dev->mtu             = AX_MTU;
-	dev->hard_header_len = 0;
-	dev->addr_len        = 0;
+	dev->hard_header_len = AX25_MAX_HEADER_LEN;
+	dev->addr_len        = AX25_ADDR_LEN;
 	dev->type            = ARPHRD_AX25;
 	dev->tx_queue_len    = 10;
 	dev->header_ops      = &ax25_header_ops;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index d6a541b..2f70f80 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1114,8 +1114,6 @@
 		if (adv < 0)
 			return adv;
 
-		lpa &= adv;
-
 		if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
 			phydev->duplex = DUPLEX_FULL;
 		else
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
index c0b4e65..46fe1ae 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/phy/mdio-bcm-iproc.c
@@ -81,8 +81,6 @@
 	if (rc)
 		return rc;
 
-	iproc_mdio_config_clk(priv->base);
-
 	/* Prepare the read operation */
 	cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
 		(reg << MII_DATA_RA_SHIFT) |
@@ -112,8 +110,6 @@
 	if (rc)
 		return rc;
 
-	iproc_mdio_config_clk(priv->base);
-
 	/* Prepare the write operation */
 	cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
 		(reg << MII_DATA_RA_SHIFT) |
@@ -163,6 +159,8 @@
 	bus->read = iproc_mdio_read;
 	bus->write = iproc_mdio_write;
 
+	iproc_mdio_config_clk(priv->base);
+
 	rc = of_mdiobus_register(bus, pdev->dev.of_node);
 	if (rc) {
 		dev_err(&pdev->dev, "MDIO bus registration failed\n");
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ea92d52..fab56c9 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1014,6 +1014,20 @@
 	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
+}, {
+	.phy_id		= PHY_ID_KSZ8795,
+	.phy_id_mask	= MICREL_PHY_ID_MASK,
+	.name		= "Micrel KSZ8795",
+	.features	= (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+	.config_init	= kszphy_config_init,
+	.config_aneg	= ksz8873mll_config_aneg,
+	.read_status	= ksz8873mll_read_status,
+	.get_sset_count = kszphy_get_sset_count,
+	.get_strings	= kszphy_get_strings,
+	.get_stats	= kszphy_get_stats,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
 } };
 
 module_phy_driver(ksphy_driver);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c4ceb08..14d57d0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -860,6 +860,7 @@
 	struct module *ndev_owner = dev->dev.parent->driver->owner;
 	struct mii_bus *bus = phydev->mdio.bus;
 	struct device *d = &phydev->mdio.dev;
+	bool using_genphy = false;
 	int err;
 
 	/* For Ethernet device drivers that register their own MDIO bus, we
@@ -885,12 +886,22 @@
 			d->driver =
 				&genphy_driver[GENPHY_DRV_1G].mdiodrv.driver;
 
+		using_genphy = true;
+	}
+
+	if (!try_module_get(d->driver->owner)) {
+		dev_err(&dev->dev, "failed to get the device driver module\n");
+		err = -EIO;
+		goto error_put_device;
+	}
+
+	if (using_genphy) {
 		err = d->driver->probe(d);
 		if (err >= 0)
 			err = device_bind_driver(d);
 
 		if (err)
-			goto error;
+			goto error_module_put;
 	}
 
 	if (phydev->attached_dev) {
@@ -926,6 +937,13 @@
 	return err;
 
 error:
+	/* phy_detach() does all of the cleanup below */
+	phy_detach(phydev);
+	return err;
+
+error_module_put:
+	module_put(d->driver->owner);
+error_put_device:
 	put_device(d);
 	if (ndev_owner != bus->owner)
 		module_put(bus->owner);
@@ -987,6 +1005,8 @@
 	phydev->attached_dev = NULL;
 	phy_suspend(phydev);
 
+	module_put(phydev->mdio.dev.driver->owner);
+
 	/* If the device had no specific driver before (i.e. - it
 	 * was using the generic driver), we unbind the device
 	 * from the generic driver so that there's a chance a
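
The phy_device.c change takes a reference on the PHY driver's module with try_module_get() before binding in phy_attach_direct(), unwinds it through the new error_module_put/error_put_device labels, and drops it again in phy_detach(), so the driver module cannot be unloaded while a MAC still has the PHY attached. The pairing, reduced to a sketch with hypothetical attach/detach helpers:

	#include <linux/module.h>
	#include <linux/device.h>
	#include <linux/errno.h>

	/* Assumes @d already has a driver bound, as phy_attach_direct() ensures. */
	static int demo_attach(struct device *d)
	{
		if (!try_module_get(d->driver->owner))
			return -EIO;		/* driver is on its way out */
		/* ... bind and start using the device ... */
		return 0;
	}

	static void demo_detach(struct device *d)
	{
		/* ... stop using the device ... */
		module_put(d->driver->owner);	/* drop the attach-time reference */
	}
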
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4b7a363..35aa28b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1276,6 +1276,10 @@
 		return -EINVAL;
 	}
 
+	if (!(tun->flags & IFF_NO_PI))
+		if (pi.flags & htons(CHECKSUM_UNNECESSARY))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+
 	switch (tun->flags & TUN_TYPE_MASK) {
 	case IFF_TUN:
 		if (tun->flags & IFF_NO_PI) {
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 90b426c..afb953a 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION		"08"
 
 /* Information for net */
-#define NET_VERSION		"7"
+#define NET_VERSION		"8"
 
 #define DRIVER_VERSION		"v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -1936,6 +1936,9 @@
 		napi_complete(napi);
 		if (!list_empty(&tp->rx_done))
 			napi_schedule(napi);
+		else if (!skb_queue_empty(&tp->tx_queue) &&
+			 !list_empty(&tp->tx_free))
+			napi_schedule(napi);
 	}
 
 	return work_done;
@@ -3155,10 +3158,13 @@
 		if (!netif_carrier_ok(netdev)) {
 			tp->rtl_ops.enable(tp);
 			set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+			netif_stop_queue(netdev);
 			napi_disable(&tp->napi);
 			netif_carrier_on(netdev);
 			rtl_start_rx(tp);
 			napi_enable(&tp->napi);
+			netif_wake_queue(netdev);
+			netif_info(tp, link, netdev, "carrier on\n");
 		}
 	} else {
 		if (netif_carrier_ok(netdev)) {
@@ -3166,6 +3172,7 @@
 			napi_disable(&tp->napi);
 			tp->rtl_ops.disable(tp);
 			napi_enable(&tp->napi);
+			netif_info(tp, link, netdev, "carrier off\n");
 		}
 	}
 }
@@ -3515,12 +3522,12 @@
 	if (!netif_running(netdev))
 		return 0;
 
+	netif_stop_queue(netdev);
 	napi_disable(&tp->napi);
 	clear_bit(WORK_ENABLE, &tp->flags);
 	usb_kill_urb(tp->intr_urb);
 	cancel_delayed_work_sync(&tp->schedule);
 	if (netif_carrier_ok(netdev)) {
-		netif_stop_queue(netdev);
 		mutex_lock(&tp->control);
 		tp->rtl_ops.disable(tp);
 		mutex_unlock(&tp->control);
@@ -3545,12 +3552,17 @@
 	if (netif_carrier_ok(netdev)) {
 		mutex_lock(&tp->control);
 		tp->rtl_ops.enable(tp);
+		rtl_start_rx(tp);
 		rtl8152_set_rx_mode(netdev);
 		mutex_unlock(&tp->control);
-		netif_wake_queue(netdev);
 	}
 
 	napi_enable(&tp->napi);
+	netif_wake_queue(netdev);
+	usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+
+	if (!list_empty(&tp->rx_done))
+		napi_schedule(&tp->napi);
 
 	return 0;
 }
@@ -3583,10 +3595,15 @@
 	struct net_device *netdev = tp->netdev;
 	int ret = 0;
 
+	set_bit(SELECTIVE_SUSPEND, &tp->flags);
+	smp_mb__after_atomic();
+
 	if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
 		u32 rcr = 0;
 
 		if (delay_autosuspend(tp)) {
+			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+			smp_mb__after_atomic();
 			ret = -EBUSY;
 			goto out1;
 		}
@@ -3603,6 +3620,8 @@
 			if (!(ocp_data & RXFIFO_EMPTY)) {
 				rxdy_gated_en(tp, false);
 				ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+				clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+				smp_mb__after_atomic();
 				ret = -EBUSY;
 				goto out1;
 			}
@@ -3622,8 +3641,6 @@
 		}
 	}
 
-	set_bit(SELECTIVE_SUSPEND, &tp->flags);
-
 out1:
 	return ret;
 }
@@ -3679,12 +3696,15 @@
 	if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
 		if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
 			tp->rtl_ops.autosuspend_en(tp, false);
-			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
 			napi_disable(&tp->napi);
 			set_bit(WORK_ENABLE, &tp->flags);
 			if (netif_carrier_ok(tp->netdev))
 				rtl_start_rx(tp);
 			napi_enable(&tp->napi);
+			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+			smp_mb__after_atomic();
+			if (!list_empty(&tp->rx_done))
+				napi_schedule(&tp->napi);
 		} else {
 			tp->rtl_ops.up(tp);
 			netif_carrier_off(tp->netdev);
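
The r8152 runtime-suspend rework sets SELECTIVE_SUSPEND before quiescing the device, clears it again on every early-exit path, and on resume clears it only after NAPI is running again; the smp_mb__after_atomic() calls make the flag visible to the transmit and polling paths before they are restarted. A generic sketch of that publish-then-act pattern, with an illustrative flag word:

	#include <linux/bitops.h>
	#include <linux/atomic.h>
	#include <linux/errno.h>

	static unsigned long demo_flags;
	#define DEMO_SUSPENDING	0

	static int demo_runtime_suspend(bool still_busy)
	{
		set_bit(DEMO_SUSPENDING, &demo_flags);
		smp_mb__after_atomic();		/* publish before quiescing */

		if (still_busy) {		/* cannot suspend after all */
			clear_bit(DEMO_SUSPENDING, &demo_flags);
			smp_mb__after_atomic();
			return -EBUSY;
		}
		/* ... stop the queue, kill URBs, disable NAPI ... */
		return 0;
	}
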
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index a2515887..0b5a84c 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -73,8 +73,6 @@
 /* Private data structure */
 struct sierra_net_data {
 
-	u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
-
 	u16 link_up;		/* air link up or down */
 	u8 tx_hdr_template[4];	/* part of HIP hdr for tx'd packets */
 
@@ -122,6 +120,7 @@
 
 /* LSI Protocol types */
 #define SIERRA_NET_PROTOCOL_UMTS      0x01
+#define SIERRA_NET_PROTOCOL_UMTS_DS   0x04
 /* LSI Coverage */
 #define SIERRA_NET_COVERAGE_NONE      0x00
 #define SIERRA_NET_COVERAGE_NOPACKET  0x01
@@ -129,7 +128,8 @@
 /* LSI Session */
 #define SIERRA_NET_SESSION_IDLE       0x00
 /* LSI Link types */
-#define SIERRA_NET_AS_LINK_TYPE_IPv4  0x00
+#define SIERRA_NET_AS_LINK_TYPE_IPV4  0x00
+#define SIERRA_NET_AS_LINK_TYPE_IPV6  0x02
 
 struct lsi_umts {
 	u8 protocol;
@@ -137,9 +137,14 @@
 	__be16 length;
 	/* eventually use a union for the rest - assume umts for now */
 	u8 coverage;
-	u8 unused2[41];
+	u8 network_len; /* network name len */
+	u8 network[40]; /* network name (UCS2, bigendian) */
 	u8 session_state;
 	u8 unused3[33];
+} __packed;
+
+struct lsi_umts_single {
+	struct lsi_umts lsi;
 	u8 link_type;
 	u8 pdp_addr_len; /* NW-supplied PDP address len */
 	u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */
@@ -158,10 +163,31 @@
 	u8 reserved[8];
 } __packed;
 
+struct lsi_umts_dual {
+	struct lsi_umts lsi;
+	u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */
+	u8 pdp_addr4[4];  /* NW-supplied PDP IPv4 address (bigendian)) */
+	u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */
+	u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian)) */
+	u8 unused4[23];
+	u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */
+	u8 dns1_addr4[4];  /* NW-supplied 1st DNS v4 address */
+	u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */
+	u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/
+	u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */
+	u8 dns2_addr4[4];  /* NW-supplied 2nd DNS v4 address */
+	u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */
+	u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/
+	u8 unused5[68];
+} __packed;
+
 #define SIERRA_NET_LSI_COMMON_LEN      4
-#define SIERRA_NET_LSI_UMTS_LEN        (sizeof(struct lsi_umts))
+#define SIERRA_NET_LSI_UMTS_LEN        (sizeof(struct lsi_umts_single))
 #define SIERRA_NET_LSI_UMTS_STATUS_LEN \
 	(SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
+#define SIERRA_NET_LSI_UMTS_DS_LEN     (sizeof(struct lsi_umts_dual))
+#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \
+	(SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN)
 
 /* Forward definitions */
 static void sierra_sync_timer(unsigned long syncdata);
@@ -191,10 +217,11 @@
 	dev->data[0] = (unsigned long)priv;
 }
 
-/* is packet IPv4 */
+/* is packet IPv4/IPv6 */
 static inline int is_ip(struct sk_buff *skb)
 {
-	return skb->protocol == cpu_to_be16(ETH_P_IP);
+	return skb->protocol == cpu_to_be16(ETH_P_IP) ||
+	       skb->protocol == cpu_to_be16(ETH_P_IPV6);
 }
 
 /*
@@ -350,46 +377,51 @@
 static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
 {
 	struct lsi_umts *lsi = (struct lsi_umts *)data;
+	u32 expected_length;
 
-	if (datalen < sizeof(struct lsi_umts)) {
-		netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
-				__func__, datalen,
-				sizeof(struct lsi_umts));
+	if (datalen < sizeof(struct lsi_umts_single)) {
+		netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n",
+			   __func__, datalen, sizeof(struct lsi_umts_single));
 		return -1;
 	}
 
-	if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
-		netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
-				__func__, be16_to_cpu(lsi->length),
-				(u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
-		return -1;
-	}
-
-	/* Validate the protocol  - only support UMTS for now */
-	if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
-		netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
-			lsi->protocol);
-		return -1;
-	}
-
-	/* Validate the link type */
-	if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
-		netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
-			lsi->link_type);
-		return -1;
-	}
-
-	/* Validate the coverage */
-	if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
-	   || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
-		netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
-		return 0;
-	}
-
 	/* Validate the session state */
 	if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
 		netdev_err(dev->net, "Session idle, 0x%02x\n",
-			lsi->session_state);
+			   lsi->session_state);
+		return 0;
+	}
+
+	/* Validate the protocol  - only support UMTS for now */
+	if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) {
+		struct lsi_umts_single *single = (struct lsi_umts_single *)lsi;
+
+		/* Validate the link type */
+		if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 &&
+		    single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) {
+			netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
+				   single->link_type);
+			return -1;
+		}
+		expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN;
+	} else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) {
+		expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN;
+	} else {
+		netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
+			   lsi->protocol);
+		return -1;
+	}
+
+	if (be16_to_cpu(lsi->length) != expected_length) {
+		netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
+			   __func__, be16_to_cpu(lsi->length), expected_length);
+		return -1;
+	}
+
+	/* Validate the coverage */
+	if (lsi->coverage == SIERRA_NET_COVERAGE_NONE ||
+	    lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
+		netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
 		return 0;
 	}
 
@@ -662,7 +694,6 @@
 	u8	numendpoints;
 	u16	fwattr = 0;
 	int	status;
-	struct ethhdr *eth;
 	struct sierra_net_data *priv;
 	static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
 		0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
@@ -700,11 +731,6 @@
 	dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
 	dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
 
-	/* we will have to manufacture ethernet headers, prepare template */
-	eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
-	memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
-	eth->h_proto = cpu_to_be16(ETH_P_IP);
-
 	/* prepare shutdown message template */
 	memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
 	/* set context index initially to 0 - prepares tx hdr template */
@@ -833,9 +859,14 @@
 
 		skb_pull(skb, hh.hdrlen);
 
-		/* We are going to accept this packet, prepare it */
-		memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
-			ETH_HLEN);
+		/* We are going to accept this packet, prepare it.
+		 * In case protocol is IPv6, keep it, otherwise force IPv4.
+		 */
+		skb_reset_mac_header(skb);
+		if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6))
+			eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP);
+		eth_zero_addr(eth_hdr(skb)->h_source);
+		memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
 
 		/* Last packet in batch handled by usbnet */
 		if (hh.payload_len.word == skb->len)
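
The sierra_net rework splits the link-status indication into a common struct lsi_umts header shared by a single-PDP layout and a new dual-stack layout, then checks lsi->length against whichever size the protocol byte implies before trusting the rest of the message. A condensed sketch of that common-header-plus-variant parse, with invented structures and protocol values:

	#include <linux/types.h>
	#include <linux/compiler.h>
	#include <asm/byteorder.h>

	struct demo_hdr  { u8 proto; __be16 len; } __packed;
	struct demo_v4   { struct demo_hdr h; u8 addr4[4]; } __packed;
	struct demo_dual { struct demo_hdr h; u8 addr4[4]; u8 addr6[16]; } __packed;

	static int demo_parse(const u8 *data, int datalen)
	{
		const struct demo_hdr *h = (const struct demo_hdr *)data;
		u32 expected;

		if (datalen < sizeof(struct demo_v4))
			return -1;
		if (h->proto == 0x01)		/* single stack (invented value) */
			expected = sizeof(struct demo_v4) - sizeof(*h);
		else if (h->proto == 0x04)	/* dual stack (invented value) */
			expected = sizeof(struct demo_dual) - sizeof(*h);
		else
			return -1;
		return be16_to_cpu(h->len) == expected ? 0 : -1;
	}
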
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 030d849..d092d34 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -121,4 +121,6 @@
 	 Select Y to compile the driver in order to have WLAN functionality
 	 support.
 
+source "drivers/net/wireless/cnss_utils/Kconfig"
+
 endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 91594de..005523c 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -27,3 +27,5 @@
 obj-$(CONFIG_MAC80211_HWSIM)	+= mac80211_hwsim.o
 
 obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/
+
+obj-$(CONFIG_CNSS_UTILS) += cnss_utils/
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 0457e31..6063cf4 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1647,6 +1647,8 @@
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
 
+	napi_enable(&ar->napi);
+
 	ath10k_pci_irq_enable(ar);
 	ath10k_pci_rx_post(ar);
 
@@ -2531,7 +2533,6 @@
 		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
 		goto err_ce;
 	}
-	napi_enable(&ar->napi);
 
 	return 0;
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index c7c1e99..d231042 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -442,7 +442,7 @@
 	const char *nvram_name;
 	u16 domain_nr;
 	u16 bus_nr;
-	void (*done)(struct device *dev, const struct firmware *fw,
+	void (*done)(struct device *dev, int err, const struct firmware *fw,
 		     void *nvram_image, u32 nvram_len);
 };
 
@@ -477,52 +477,51 @@
 	if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
 		goto fail;
 
-	fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
+	fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length);
 	kfree(fwctx);
 	return;
 
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
 	release_firmware(fwctx->code);
-	device_release_driver(fwctx->dev);
+	fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0);
 	kfree(fwctx);
 }
 
 static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
 {
 	struct brcmf_fw *fwctx = ctx;
-	int ret;
+	int ret = 0;
 
 	brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
-	if (!fw)
+	if (!fw) {
+		ret = -ENOENT;
 		goto fail;
-
-	/* only requested code so done here */
-	if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
-		fwctx->done(fwctx->dev, fw, NULL, 0);
-		kfree(fwctx);
-		return;
 	}
+	/* only requested code so done here */
+	if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM))
+		goto done;
+
 	fwctx->code = fw;
 	ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
 				      fwctx->dev, GFP_KERNEL, fwctx,
 				      brcmf_fw_request_nvram_done);
 
-	if (!ret)
-		return;
-
-	brcmf_fw_request_nvram_done(NULL, fwctx);
+	/* pass NULL to nvram callback for bcm47xx fallback */
+	if (ret)
+		brcmf_fw_request_nvram_done(NULL, fwctx);
 	return;
 
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
-	device_release_driver(fwctx->dev);
+done:
+	fwctx->done(fwctx->dev, ret, fw, NULL, 0);
 	kfree(fwctx);
 }
 
 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
 				const char *code, const char *nvram,
-				void (*fw_cb)(struct device *dev,
+				void (*fw_cb)(struct device *dev, int err,
 					      const struct firmware *fw,
 					      void *nvram_image, u32 nvram_len),
 				u16 domain_nr, u16 bus_nr)
@@ -555,7 +554,7 @@
 
 int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
 			   const char *code, const char *nvram,
-			   void (*fw_cb)(struct device *dev,
+			   void (*fw_cb)(struct device *dev, int err,
 					 const struct firmware *fw,
 					 void *nvram_image, u32 nvram_len))
 {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index d3c9f0d..8fa4b7e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -73,13 +73,13 @@
  */
 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
 				const char *code, const char *nvram,
-				void (*fw_cb)(struct device *dev,
+				void (*fw_cb)(struct device *dev, int err,
 					      const struct firmware *fw,
 					      void *nvram_image, u32 nvram_len),
 				u16 domain_nr, u16 bus_nr);
 int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
 			   const char *code, const char *nvram,
-			   void (*fw_cb)(struct device *dev,
+			   void (*fw_cb)(struct device *dev, int err,
 					 const struct firmware *fw,
 					 void *nvram_image, u32 nvram_len));
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 3deba90..d3d7921 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1618,16 +1618,23 @@
 	.write32 = brcmf_pcie_buscore_write32,
 };
 
-static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
+static void brcmf_pcie_setup(struct device *dev, int ret,
+			     const struct firmware *fw,
 			     void *nvram, u32 nvram_len)
 {
-	struct brcmf_bus *bus = dev_get_drvdata(dev);
-	struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
-	struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
+	struct brcmf_bus *bus;
+	struct brcmf_pciedev *pcie_bus_dev;
+	struct brcmf_pciedev_info *devinfo;
 	struct brcmf_commonring **flowrings;
-	int ret;
 	u32 i;
 
+	/* check firmware loading result */
+	if (ret)
+		goto fail;
+
+	bus = dev_get_drvdata(dev);
+	pcie_bus_dev = bus->bus_priv.pcie;
+	devinfo = pcie_bus_dev->devinfo;
 	brcmf_pcie_attach(devinfo);
 
 	/* Some of the firmwares have the size of the memory of the device
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 2458e6e..8744b9b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -3975,21 +3975,26 @@
 	.get_memdump = brcmf_sdio_bus_get_memdump,
 };
 
-static void brcmf_sdio_firmware_callback(struct device *dev,
+static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 					 const struct firmware *code,
 					 void *nvram, u32 nvram_len)
 {
-	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
-	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
-	struct brcmf_sdio *bus = sdiodev->bus;
-	int err = 0;
+	struct brcmf_bus *bus_if;
+	struct brcmf_sdio_dev *sdiodev;
+	struct brcmf_sdio *bus;
 	u8 saveclk;
 
-	brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
+	brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
+	bus_if = dev_get_drvdata(dev);
+	sdiodev = bus_if->bus_priv.sdio;
+	if (err)
+		goto fail;
 
 	if (!bus_if->drvr)
 		return;
 
+	bus = sdiodev->bus;
+
 	/* try to download image and nvram to the dongle */
 	bus->alp_only = true;
 	err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
@@ -4076,6 +4081,7 @@
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
 	device_release_driver(dev);
+	device_release_driver(&sdiodev->func[2]->dev);
 }
 
 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 2f978a3..053f3b5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1158,17 +1158,18 @@
 	return ret;
 }
 
-static void brcmf_usb_probe_phase2(struct device *dev,
+static void brcmf_usb_probe_phase2(struct device *dev, int ret,
 				   const struct firmware *fw,
 				   void *nvram, u32 nvlen)
 {
 	struct brcmf_bus *bus = dev_get_drvdata(dev);
-	struct brcmf_usbdev_info *devinfo;
-	int ret;
+	struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo;
+
+	if (ret)
+		goto error;
 
 	brcmf_dbg(USB, "Start fw downloading\n");
 
-	devinfo = bus->bus_priv.usb->devinfo;
 	ret = check_file(fw->data);
 	if (ret < 0) {
 		brcmf_err("invalid firmware\n");
diff --git a/drivers/net/wireless/cnss_utils/Kconfig b/drivers/net/wireless/cnss_utils/Kconfig
new file mode 100644
index 0000000..5f43e48
--- /dev/null
+++ b/drivers/net/wireless/cnss_utils/Kconfig
@@ -0,0 +1,6 @@
+config CNSS_UTILS
+	bool "CNSS utilities support"
+	---help---
+	  Add CNSS utilities support for the WLAN driver module.
+	  This feature enables the WLAN driver to use CNSS utilities APIs to set
+	  and get WLAN related information.
\ No newline at end of file
diff --git a/drivers/net/wireless/cnss_utils/Makefile b/drivers/net/wireless/cnss_utils/Makefile
new file mode 100644
index 0000000..0d1ed7a
--- /dev/null
+++ b/drivers/net/wireless/cnss_utils/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CNSS_UTILS) += cnss_utils.o
diff --git a/drivers/net/wireless/cnss_utils/cnss_utils.c b/drivers/net/wireless/cnss_utils/cnss_utils.c
new file mode 100644
index 0000000..d73846e
--- /dev/null
+++ b/drivers/net/wireless/cnss_utils/cnss_utils.c
@@ -0,0 +1,310 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "cnss_utils: " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/etherdevice.h>
+#include <net/cnss_utils.h>
+
+#define CNSS_MAX_CH_NUM 45
+struct cnss_unsafe_channel_list {
+	u16 unsafe_ch_count;
+	u16 unsafe_ch_list[CNSS_MAX_CH_NUM];
+};
+
+struct cnss_dfs_nol_info {
+	void *dfs_nol_info;
+	u16 dfs_nol_info_len;
+};
+
+#define MAX_NO_OF_MAC_ADDR 4
+struct cnss_wlan_mac_addr {
+	u8 mac_addr[MAX_NO_OF_MAC_ADDR][ETH_ALEN];
+	u32 no_of_mac_addr_set;
+};
+
+static struct cnss_utils_priv {
+	struct cnss_unsafe_channel_list unsafe_channel_list;
+	struct cnss_dfs_nol_info dfs_nol_info;
+	/* generic mutex for unsafe channel */
+	struct mutex unsafe_channel_list_lock;
+	/* generic spin-lock for dfs_nol info */
+	spinlock_t dfs_nol_info_lock;
+	int driver_load_cnt;
+	bool is_wlan_mac_set;
+	struct cnss_wlan_mac_addr wlan_mac_addr;
+	enum cnss_utils_cc_src cc_source;
+} *cnss_utils_priv;
+
+int cnss_utils_set_wlan_unsafe_channel(struct device *dev,
+				       u16 *unsafe_ch_list, u16 ch_count)
+{
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+
+	if (!priv)
+		return -EINVAL;
+
+	mutex_lock(&priv->unsafe_channel_list_lock);
+	if ((!unsafe_ch_list) || (ch_count > CNSS_MAX_CH_NUM)) {
+		mutex_unlock(&priv->unsafe_channel_list_lock);
+		return -EINVAL;
+	}
+
+	priv->unsafe_channel_list.unsafe_ch_count = ch_count;
+
+	if (ch_count == 0)
+		goto end;
+
+	memcpy(priv->unsafe_channel_list.unsafe_ch_list,
+	       unsafe_ch_list, ch_count * sizeof(u16));
+
+end:
+	mutex_unlock(&priv->unsafe_channel_list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnss_utils_set_wlan_unsafe_channel);
+
+int cnss_utils_get_wlan_unsafe_channel(struct device *dev,
+				       u16 *unsafe_ch_list,
+				       u16 *ch_count, u16 buf_len)
+{
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+
+	if (!priv)
+		return -EINVAL;
+
+	mutex_lock(&priv->unsafe_channel_list_lock);
+	if (!unsafe_ch_list || !ch_count) {
+		mutex_unlock(&priv->unsafe_channel_list_lock);
+		return -EINVAL;
+	}
+
+	if (buf_len <
+	    (priv->unsafe_channel_list.unsafe_ch_count * sizeof(u16))) {
+		mutex_unlock(&priv->unsafe_channel_list_lock);
+		return -ENOMEM;
+	}
+
+	*ch_count = priv->unsafe_channel_list.unsafe_ch_count;
+	memcpy(unsafe_ch_list, priv->unsafe_channel_list.unsafe_ch_list,
+	       priv->unsafe_channel_list.unsafe_ch_count * sizeof(u16));
+	mutex_unlock(&priv->unsafe_channel_list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnss_utils_get_wlan_unsafe_channel);
+
+int cnss_utils_wlan_set_dfs_nol(struct device *dev,
+				const void *info, u16 info_len)
+{
+	void *temp;
+	void *old_nol_info;
+	struct cnss_dfs_nol_info *dfs_info;
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+
+	if (!priv)
+		return -EINVAL;
+
+	if (!info || !info_len)
+		return -EINVAL;
+
+	temp = kmalloc(info_len, GFP_ATOMIC);
+	if (!temp)
+		return -ENOMEM;
+
+	memcpy(temp, info, info_len);
+	spin_lock_bh(&priv->dfs_nol_info_lock);
+	dfs_info = &priv->dfs_nol_info;
+	old_nol_info = dfs_info->dfs_nol_info;
+	dfs_info->dfs_nol_info = temp;
+	dfs_info->dfs_nol_info_len = info_len;
+	spin_unlock_bh(&priv->dfs_nol_info_lock);
+	kfree(old_nol_info);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnss_utils_wlan_set_dfs_nol);
+
+int cnss_utils_wlan_get_dfs_nol(struct device *dev,
+				void *info, u16 info_len)
+{
+	int len;
+	struct cnss_dfs_nol_info *dfs_info;
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+
+	if (!priv)
+		return -EINVAL;
+
+	if (!info || !info_len)
+		return -EINVAL;
+
+	spin_lock_bh(&priv->dfs_nol_info_lock);
+
+	dfs_info = &priv->dfs_nol_info;
+	if (!dfs_info->dfs_nol_info ||
+	    dfs_info->dfs_nol_info_len == 0) {
+		spin_unlock_bh(&priv->dfs_nol_info_lock);
+		return -ENOENT;
+	}
+
+	len = min(info_len, dfs_info->dfs_nol_info_len);
+	memcpy(info, dfs_info->dfs_nol_info, len);
+	spin_unlock_bh(&priv->dfs_nol_info_lock);
+
+	return len;
+}
+EXPORT_SYMBOL(cnss_utils_wlan_get_dfs_nol);
+
+void cnss_utils_increment_driver_load_cnt(struct device *dev)
+{
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+
+	if (!priv)
+		return;
+
+	++(priv->driver_load_cnt);
+}
+EXPORT_SYMBOL(cnss_utils_increment_driver_load_cnt);
+
+int cnss_utils_get_driver_load_cnt(struct device *dev)
+{
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+
+	if (!priv)
+		return -EINVAL;
+
+	return priv->driver_load_cnt;
+}
+EXPORT_SYMBOL(cnss_utils_get_driver_load_cnt);
+
+int cnss_utils_set_wlan_mac_address(const u8 *in, const uint32_t len)
+{
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+	u32 no_of_mac_addr;
+	struct cnss_wlan_mac_addr *addr = NULL;
+	int iter;
+	u8 *temp = NULL;
+
+	if (!priv)
+		return -EINVAL;
+
+	if (priv->is_wlan_mac_set) {
+		pr_debug("WLAN MAC address is already set\n");
+		return 0;
+	}
+
+	if (len == 0 || (len % ETH_ALEN) != 0) {
+		pr_err("Invalid length %d\n", len);
+		return -EINVAL;
+	}
+
+	no_of_mac_addr = len / ETH_ALEN;
+	if (no_of_mac_addr > MAX_NO_OF_MAC_ADDR) {
+		pr_err("Exceed maximum supported MAC address %u %u\n",
+		       MAX_NO_OF_MAC_ADDR, no_of_mac_addr);
+		return -EINVAL;
+	}
+
+	priv->is_wlan_mac_set = true;
+	addr = &priv->wlan_mac_addr;
+	addr->no_of_mac_addr_set = no_of_mac_addr;
+	temp = &addr->mac_addr[0][0];
+
+	for (iter = 0; iter < no_of_mac_addr;
+	     ++iter, temp += ETH_ALEN, in += ETH_ALEN) {
+		ether_addr_copy(temp, in);
+		pr_debug("MAC_ADDR:%02x:%02x:%02x:%02x:%02x:%02x\n",
+			 temp[0], temp[1], temp[2],
+			 temp[3], temp[4], temp[5]);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(cnss_utils_set_wlan_mac_address);
+
+u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num)
+{
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+	struct cnss_wlan_mac_addr *addr = NULL;
+
+	if (!priv)
+		goto out;
+
+	if (!priv->is_wlan_mac_set) {
+		pr_debug("WLAN MAC address is not set\n");
+		goto out;
+	}
+
+	addr = &priv->wlan_mac_addr;
+	*num = addr->no_of_mac_addr_set;
+	return &addr->mac_addr[0][0];
+out:
+	*num = 0;
+	return NULL;
+}
+EXPORT_SYMBOL(cnss_utils_get_wlan_mac_address);
+
+void cnss_utils_set_cc_source(struct device *dev,
+			      enum cnss_utils_cc_src cc_source)
+{
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+
+	if (!priv)
+		return;
+
+	priv->cc_source = cc_source;
+}
+EXPORT_SYMBOL(cnss_utils_set_cc_source);
+
+enum cnss_utils_cc_src cnss_utils_get_cc_source(struct device *dev)
+{
+	struct cnss_utils_priv *priv = cnss_utils_priv;
+
+	if (!priv)
+		return -EINVAL;
+
+	return priv->cc_source;
+}
+EXPORT_SYMBOL(cnss_utils_get_cc_source);
+
+static int __init cnss_utils_init(void)
+{
+	struct cnss_utils_priv *priv = NULL;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->cc_source = CNSS_UTILS_SOURCE_CORE;
+
+	mutex_init(&priv->unsafe_channel_list_lock);
+	spin_lock_init(&priv->dfs_nol_info_lock);
+
+	cnss_utils_priv = priv;
+
+	return 0;
+}
+
+static void __exit cnss_utils_exit(void)
+{
+	kfree(cnss_utils_priv);
+	cnss_utils_priv = NULL;
+}
+
+module_init(cnss_utils_init);
+module_exit(cnss_utils_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DEVICE "CNSS Utilities Driver");
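
cnss_utils keeps a single static context: the unsafe-channel list behind a mutex, the DFS NOL blob behind a spinlock, plus the MAC address table and country-code source. Assuming the exported prototypes are declared in <net/cnss_utils.h> (the header this file includes), a hypothetical WLAN-driver caller of the unsafe-channel API could look like this; the channel numbers are placeholders:

	#include <linux/device.h>
	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <net/cnss_utils.h>

	static int demo_push_unsafe_channels(struct device *dev)
	{
		u16 set[] = { 52, 56, 60 };	/* placeholder 5 GHz channels */
		u16 got[16];
		u16 count = 0;
		int ret;

		ret = cnss_utils_set_wlan_unsafe_channel(dev, set, ARRAY_SIZE(set));
		if (ret)
			return ret;
		return cnss_utils_get_wlan_unsafe_channel(dev, got, &count,
							  sizeof(got));
	}
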
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index b7273be..c8d9075 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1422,21 +1422,6 @@
 	cancel_work_sync(&rt2x00dev->intf_work);
 	cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
 	cancel_work_sync(&rt2x00dev->sleep_work);
-#if IS_ENABLED(CONFIG_RT2X00_LIB_USB)
-	if (rt2x00_is_usb(rt2x00dev)) {
-		usb_kill_anchored_urbs(rt2x00dev->anchor);
-		hrtimer_cancel(&rt2x00dev->txstatus_timer);
-		cancel_work_sync(&rt2x00dev->rxdone_work);
-		cancel_work_sync(&rt2x00dev->txdone_work);
-	}
-#endif
-	if (rt2x00dev->workqueue)
-		destroy_workqueue(rt2x00dev->workqueue);
-
-	/*
-	 * Free the tx status fifo.
-	 */
-	kfifo_free(&rt2x00dev->txstatus_fifo);
 
 	/*
 	 * Kill the tx status tasklet.
@@ -1452,6 +1437,14 @@
 	 */
 	rt2x00lib_uninitialize(rt2x00dev);
 
+	if (rt2x00dev->workqueue)
+		destroy_workqueue(rt2x00dev->workqueue);
+
+	/*
+	 * Free the tx status fifo.
+	 */
+	kfifo_free(&rt2x00dev->txstatus_fifo);
+
 	/*
 	 * Free extra components
 	 */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 662705e..631df69 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -740,6 +740,11 @@
 {
 	struct data_queue *queue;
 
+	usb_kill_anchored_urbs(rt2x00dev->anchor);
+	hrtimer_cancel(&rt2x00dev->txstatus_timer);
+	cancel_work_sync(&rt2x00dev->rxdone_work);
+	cancel_work_sync(&rt2x00dev->txdone_work);
+
 	queue_for_each(rt2x00dev, queue)
 		rt2x00usb_free_entries(queue);
 }
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 005ef5d..ca8ddc3 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -438,6 +438,10 @@
 {
 	struct ib_recv_wr *bad_wr;
 
+	ib_dma_sync_single_for_device(ndev->device,
+		cmd->sge[0].addr, cmd->sge[0].length,
+		DMA_FROM_DEVICE);
+
 	if (ndev->srq)
 		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
 	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -538,6 +542,11 @@
 		first_wr = &rsp->send_wr;
 
 	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+	ib_dma_sync_single_for_device(rsp->queue->dev->device,
+		rsp->send_sge.addr, rsp->send_sge.length,
+		DMA_TO_DEVICE);
+
 	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
 		pr_err("sending cmd response failed\n");
 		nvmet_rdma_release_rsp(rsp);
@@ -698,6 +707,14 @@
 	cmd->n_rdma = 0;
 	cmd->req.port = queue->port;
 
+
+	ib_dma_sync_single_for_cpu(queue->dev->device,
+		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
+		DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(queue->dev->device,
+		cmd->send_sge.addr, cmd->send_sge.length,
+		DMA_TO_DEVICE);
+
 	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
 			&queue->nvme_sq, &nvmet_rdma_ops))
 		return;
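
The nvmet-rdma hunks bracket the command and response buffers with DMA sync calls: ib_dma_sync_single_for_cpu() before the CPU parses a received command, and ib_dma_sync_single_for_device() before a receive is reposted or a response is sent, which is required on platforms without cache-coherent DMA. The underlying streaming-DMA ownership hand-off, shown with the plain dma_sync_single_* helpers rather than the ib_ wrappers:

	#include <linux/dma-mapping.h>

	static void demo_rx_complete(struct device *dev, dma_addr_t addr,
				     size_t len, void *cpu_buf)
	{
		/* Device wrote the buffer: hand it to the CPU before parsing. */
		dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

		/* ... parse the command in cpu_buf ... */

		/* Hand it back to the device before reposting the receive. */
		dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
	}
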
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 66af185..c0914fb 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -741,9 +741,12 @@
 	const char *pathp;
 	int offset, rc = 0, depth = -1;
 
-        for (offset = fdt_next_node(blob, -1, &depth);
-             offset >= 0 && depth >= 0 && !rc;
-             offset = fdt_next_node(blob, offset, &depth)) {
+	if (!blob)
+		return 0;
+
+	for (offset = fdt_next_node(blob, -1, &depth);
+	     offset >= 0 && depth >= 0 && !rc;
+	     offset = fdt_next_node(blob, offset, &depth)) {
 
 		pathp = fdt_get_name(blob, offset, NULL);
 		if (*pathp == '/')
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index 6e3a60c..50f3bb0 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -293,7 +293,7 @@
 		p->irq = PARPORT_IRQ_NONE;
 	}
 	if (p->irq != PARPORT_IRQ_NONE) {
-		printk(", irq %d", p->irq);
+		pr_cont(", irq %d", p->irq);
 
 		if (p->dma == PARPORT_DMA_AUTO) {
 			p->dma = PARPORT_DMA_NONE;
@@ -303,8 +303,8 @@
                                            is mandatory (see above) */
 		p->dma = PARPORT_DMA_NONE;
 
-	printk(" [");
-#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}}
+	pr_cont(" [");
+#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
 	{
 		int f = 0;
 		printmode(PCSPP);
@@ -315,7 +315,7 @@
 //		printmode(DMA);
 	}
 #undef printmode
-	printk("]\n");
+	pr_cont("]\n");
 
 	if (p->irq != PARPORT_IRQ_NONE) {
 		if (request_irq (p->irq, parport_irq_handler,
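
The parport_gsc change converts the printk() calls that continue an already-started line to pr_cont(), since bare printk() fragments are no longer joined to the previous line. A tiny sketch of the same continuation style, with a made-up mode word:

	#include <linux/printk.h>

	static void demo_report_modes(unsigned int modes)
	{
		pr_info("demo: port modes [");	/* starts the line */
		if (modes & 1)
			pr_cont("SPP");
		if (modes & 2)
			pr_cont(",EPP");
		pr_cont("]\n");			/* terminates the line */
	}
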
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 361d7dd0..0491a86 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -4926,9 +4926,8 @@
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
 	struct iommu_domain_geometry geometry;
-	int ret, fastmap_en = 0, bypass_en = 0;
-	dma_addr_t iova;
-	phys_addr_t gicm_db_offset;
+	int fastmap_en = 0, bypass_en = 0;
+	dma_addr_t iova, addr;
 
 	msg->address_hi = 0;
 	msg->address_lo = dev->msi_gicm_addr;
@@ -4970,18 +4969,15 @@
 		iova = rounddown(pcie_base_addr, PAGE_SIZE);
 	}
 
-	ret = iommu_map(domain, iova, rounddown(dev->msi_gicm_addr, PAGE_SIZE),
-			PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
-	if (ret < 0) {
-		PCIE_ERR(dev,
-			"PCIe: RC%d: ret: %d: Could not do iommu map for QGIC address\n",
-			dev->rc_idx, ret);
-		return -ENOMEM;
+	addr = dma_map_resource(&pdev->dev, dev->msi_gicm_addr, PAGE_SIZE,
+				DMA_BIDIRECTIONAL, 0);
+	if (dma_mapping_error(&pdev->dev, addr)) {
+		PCIE_ERR(dev, "PCIe: RC%d: failed to map QGIC address",
+			dev->rc_idx);
+		return -EIO;
 	}
 
-	gicm_db_offset = dev->msi_gicm_addr -
-		rounddown(dev->msi_gicm_addr, PAGE_SIZE);
-	msg->address_lo = iova + gicm_db_offset;
+	msg->address_lo = iova + addr;
 
 	return 0;
 }
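
The pci-msm MSI change drops the hand-rolled iommu_map() of the QGIC doorbell page and uses dma_map_resource(), which maps MMIO (rather than RAM) through the device's DMA domain and signals failure through dma_mapping_error(). A sketch of that call pattern; the helper name and parameters are illustrative:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	static int demo_map_doorbell(struct device *dev, phys_addr_t db_phys,
				     size_t size, dma_addr_t *db_iova)
	{
		dma_addr_t addr;

		addr = dma_map_resource(dev, db_phys, size,
					DMA_BIDIRECTIONAL, 0);
		if (dma_mapping_error(dev, addr))
			return -EIO;	/* no usable IOVA for the doorbell */

		*db_iova = addr;	/* program this into the MSI address */
		return 0;
	}
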
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 579c494..e7d4048 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2142,7 +2142,8 @@
 
 	if (!pm_runtime_suspended(dev)
 	    || pci_target_state(pci_dev) != pci_dev->current_state
-	    || platform_pci_need_resume(pci_dev))
+	    || platform_pci_need_resume(pci_dev)
+	    || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
 		return false;
 
 	/*
diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c
index 0917204..c617ec4 100644
--- a/drivers/pinctrl/berlin/berlin-bg4ct.c
+++ b/drivers/pinctrl/berlin/berlin-bg4ct.c
@@ -217,7 +217,7 @@
 	BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
 			BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
 			BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
-			BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */
+			BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
 	BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
 			BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
 			BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 583ae3f..5419de8 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1250,10 +1250,12 @@
 			debounce = readl(db_reg);
 			debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
 
+			if (arg)
+				conf |= BYT_DEBOUNCE_EN;
+			else
+				conf &= ~BYT_DEBOUNCE_EN;
+
 			switch (arg) {
-			case 0:
-				conf &= BYT_DEBOUNCE_EN;
-				break;
 			case 375:
 				debounce |= BYT_DEBOUNCE_PULSE_375US;
 				break;
@@ -1276,7 +1278,9 @@
 				debounce |= BYT_DEBOUNCE_PULSE_24MS;
 				break;
 			default:
-				ret = -EINVAL;
+				if (arg)
+					ret = -EINVAL;
+				break;
 			}
 
 			if (!ret)
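
The hunks above fix the debounce configuration in two ways: the old "case 0"
used "&" where "&~" was intended, so a request to disable debouncing actually
kept the enable bit set, and the rewrite sets or clears BYT_DEBOUNCE_EN up
front so a zero period is no longer treated as an error. Assuming
BYT_DEBOUNCE_EN is a single-bit flag, the core of the bug is:

    conf &= BYT_DEBOUNCE_EN;   /* old: keeps only the enable bit, still set */
    conf &= ~BYT_DEBOUNCE_EN;  /* new: clears the enable bit as intended    */
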
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 40ee647..02b28bd 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -150,7 +150,7 @@
 
 config PINCTRL_WCD
 	tristate "Qualcomm Technologies, Inc WCD pin controller driver"
-	depends on WCD934X_CODEC
+	depends on WCD9XXX_CODEC_CORE
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
 	  WCD gpio controller block.
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 1946204..e5fe6ba 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -56,6 +56,24 @@
 	  for RmNet Data Driver and also exchange of QMI messages between
 	  A7 and Q6 IPA-driver.
 
+config ECM_IPA
+	tristate "STD ECM LAN Driver support"
+	depends on IPA || IPA3
+	help
+	  Enables a LAN between the applications processor and a tethered
+	  host using the STD ECM protocol.
+	  This network interface allows the data path to go through the
+	  IPA core while using the STD ECM protocol.
+
+config RNDIS_IPA
+	tristate "RNDIS_IPA Network Interface Driver support"
+	depends on IPA || IPA3
+	help
+	  Enables a LAN between the applications processor and a tethered
+	  host using the RNDIS protocol.
+	  This network interface allows the data path to go through the
+	  IPA core while using the RNDIS protocol.
+
 config IPA_UT
 	tristate "IPA Unit-Test Framework and Test Suites"
 	depends on IPA3 && DEBUG_FS
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 38264d9..23103da 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -172,6 +172,7 @@
 	__stringify(IPA_CLIENT_TEST3_CONS),
 	__stringify(IPA_CLIENT_TEST4_PROD),
 	__stringify(IPA_CLIENT_TEST4_CONS),
+	__stringify(IPA_CLIENT_DUMMY_CONS),
 };
 
 /**
diff --git a/drivers/platform/msm/ipa/ipa_clients/Makefile b/drivers/platform/msm/ipa/ipa_clients/Makefile
index 61cef2d..61625f5 100644
--- a/drivers/platform/msm/ipa/ipa_clients/Makefile
+++ b/drivers/platform/msm/ipa/ipa_clients/Makefile
@@ -1,2 +1,4 @@
 obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o
 obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o
+obj-$(CONFIG_ECM_IPA) += ecm_ipa.o
+obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o
diff --git a/drivers/net/ethernet/msm/ecm_ipa.c b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
similarity index 100%
rename from drivers/net/ethernet/msm/ecm_ipa.c
rename to drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
index 5aa39b6..9b3b53d 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -2046,6 +2046,8 @@
 	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
 		ipa_mhi_update_host_ch_state(true);
 
+	return 0;
+
 fail_stop_event_update_dl_channel:
 		ipa_mhi_resume_channels(true,
 				ipa_mhi_client_ctx->dl_channels);
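
The added "return 0" above fixes a classic fall-through bug: the success path
continued straight into the error-unwind labels that follow it, undoing the
resume that had just completed. A generic sketch of the shape involved (helper
and label names are hypothetical):

    static int example_resume(void)
    {
            int ret;

            ret = do_step_one();                /* hypothetical helper */
            if (ret)
                    return ret;
            ret = do_step_two();                /* hypothetical helper */
            if (ret)
                    goto fail_step_two;

            return 0;       /* without this, control falls into the unwind */

    fail_step_two:
            undo_step_one();                    /* hypothetical helper */
            return ret;
    }
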
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
index ae06d54..2dd82c1 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -58,7 +58,6 @@
 	IPA_UC_OFFLOAD_STATE_INVALID,
 	IPA_UC_OFFLOAD_STATE_INITIALIZED,
 	IPA_UC_OFFLOAD_STATE_UP,
-	IPA_UC_OFFLOAD_STATE_DOWN,
 };
 
 struct ipa_uc_offload_ctx {
@@ -413,8 +412,7 @@
 		return -EINVAL;
 	}
 
-	if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED &&
-		offload_ctx->state != IPA_UC_OFFLOAD_STATE_DOWN) {
+	if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
 		IPA_UC_OFFLOAD_ERR("Invalid state %d\n", offload_ctx->state);
 		return -EPERM;
 	}
@@ -471,7 +469,7 @@
 	int ipa_ep_idx_ul, ipa_ep_idx_dl;
 	int ret = 0;
 
-	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_DOWN;
+	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
 
 	ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
 	if (ret) {
@@ -597,7 +595,7 @@
 		return -EINVAL;
 	}
 
-	if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_DOWN) {
+	if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
 		IPA_UC_OFFLOAD_ERR("Invalid State %d\n", offload_ctx->state);
 		return -EINVAL;
 	}
diff --git a/drivers/net/ethernet/msm/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
similarity index 100%
rename from drivers/net/ethernet/msm/rndis_ipa.c
rename to drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
diff --git a/drivers/net/ethernet/msm/rndis_ipa_trace.h b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa_trace.h
similarity index 100%
rename from drivers/net/ethernet/msm/rndis_ipa_trace.h
rename to drivers/platform/msm/ipa/ipa_clients/rndis_ipa_trace.h
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index e8710a6..2b517a1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -83,6 +83,10 @@
 	__stringify(IPA_QUOTA_REACH),
 	__stringify(IPA_SSR_BEFORE_SHUTDOWN),
 	__stringify(IPA_SSR_AFTER_POWERUP),
+	__stringify(ADD_VLAN_IFACE),
+	__stringify(DEL_VLAN_IFACE),
+	__stringify(ADD_L2TP_VLAN_MAPPING),
+	__stringify(DEL_L2TP_VLAN_MAPPING)
 };
 
 const char *ipa_hdr_l2_type_name[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 29766fb..11eeb2f 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -2616,6 +2616,9 @@
 	if (!data->set_quota)
 		ipa_qmi_stop_data_qouta();
 
+	/* prevent string buffer overflows */
+	data->interface_name[IFNAMSIZ-1] = '\0';
+
 	index = find_vchannel_name_index(data->interface_name);
 	IPAWANERR("iface name %s, quota %lu\n",
 			  data->interface_name,
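
Context for the two-line fix above: the interface name arrives from user space
and is not guaranteed to be NUL-terminated, so it is terminated before being
handed to any string helpers. A minimal sketch (the user pointer is
illustrative):

    char name[IFNAMSIZ];

    if (copy_from_user(name, user_buf, IFNAMSIZ))
            return -EFAULT;
    name[IFNAMSIZ - 1] = '\0';  /* bound any later strlen()/strcmp() */
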
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 85bfe95..1e2b200 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -546,6 +546,90 @@
 	return 0;
 }
 
+static void ipa3_vlan_l2tp_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	if (!buff) {
+		IPAERR("Null buffer\n");
+		return;
+	}
+
+	if (type != ADD_VLAN_IFACE &&
+	    type != DEL_VLAN_IFACE &&
+	    type != ADD_L2TP_VLAN_MAPPING &&
+	    type != DEL_L2TP_VLAN_MAPPING) {
+		IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
+		return;
+	}
+
+	kfree(buff);
+}
+
+static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type)
+{
+	int retval;
+	struct ipa_ioc_vlan_iface_info *vlan_info;
+	struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info;
+	struct ipa_msg_meta msg_meta;
+
+	if (msg_type == ADD_VLAN_IFACE ||
+		msg_type == DEL_VLAN_IFACE) {
+		vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info),
+			GFP_KERNEL);
+		if (!vlan_info) {
+			IPAERR("no memory\n");
+			return -ENOMEM;
+		}
+
+		if (copy_from_user((u8 *)vlan_info, (void __user *)usr_param,
+			sizeof(struct ipa_ioc_vlan_iface_info))) {
+			kfree(vlan_info);
+			return -EFAULT;
+		}
+
+		memset(&msg_meta, 0, sizeof(msg_meta));
+		msg_meta.msg_type = msg_type;
+		msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info);
+		retval = ipa3_send_msg(&msg_meta, vlan_info,
+			ipa3_vlan_l2tp_msg_free_cb);
+		if (retval) {
+			IPAERR("ipa3_send_msg failed: %d\n", retval);
+			kfree(vlan_info);
+			return retval;
+		}
+	} else if (msg_type == ADD_L2TP_VLAN_MAPPING ||
+		msg_type == DEL_L2TP_VLAN_MAPPING) {
+		mapping_info = kzalloc(sizeof(struct
+			ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL);
+		if (!mapping_info) {
+			IPAERR("no memory\n");
+			return -ENOMEM;
+		}
+
+		if (copy_from_user((u8 *)mapping_info,
+			(void __user *)usr_param,
+			sizeof(struct ipa_ioc_l2tp_vlan_mapping_info))) {
+			kfree(mapping_info);
+			return -EFAULT;
+		}
+
+		memset(&msg_meta, 0, sizeof(msg_meta));
+		msg_meta.msg_type = msg_type;
+		msg_meta.msg_len = sizeof(struct
+			ipa_ioc_l2tp_vlan_mapping_info);
+		retval = ipa3_send_msg(&msg_meta, mapping_info,
+			ipa3_vlan_l2tp_msg_free_cb);
+		if (retval) {
+			IPAERR("ipa3_send_msg failed: %d\n", retval);
+			kfree(mapping_info);
+			return retval;
+		}
+	} else {
+		IPAERR("Unexpected event\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
 
 static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
@@ -1530,6 +1614,34 @@
 		}
 		break;
 
+	case IPA_IOC_ADD_VLAN_IFACE:
+		if (ipa3_send_vlan_l2tp_msg(arg, ADD_VLAN_IFACE)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_VLAN_IFACE:
+		if (ipa3_send_vlan_l2tp_msg(arg, DEL_VLAN_IFACE)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_L2TP_VLAN_MAPPING:
+		if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_L2TP_VLAN_MAPPING:
+		if (ipa3_send_vlan_l2tp_msg(arg, DEL_L2TP_VLAN_MAPPING)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
 	default:        /* redundant, as cmd was checked against MAXNR */
 		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 		return -ENOTTY;
@@ -3998,7 +4110,8 @@
 	 * IPAv3.5 and above requires to disable prefetch for USB in order
 	 * to allow MBIM to work, currently MBIM is not needed in MHI mode.
 	 */
-	if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) &&
+	if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5
+		&& ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) &&
 		(!ipa3_ctx->ipa_config_is_mhi))
 		ipa3_disable_prefetch(IPA_CLIENT_USB_CONS);
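
On the new ipa3_send_vlan_l2tp_msg() helper above: the payload is heap
allocated, copied from user space, and once ipa3_send_msg() accepts it,
ownership passes to the messaging core, which releases it through
ipa3_vlan_l2tp_msg_free_cb(); only the failure paths free it directly. A
condensed sketch of that hand-off:

    retval = ipa3_send_msg(&msg_meta, vlan_info, ipa3_vlan_l2tp_msg_free_cb);
    if (retval) {
            kfree(vlan_info);   /* free callback does not run on failure */
            return retval;
    }
    /* success: vlan_info is now owned (and later freed) by the callback */
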
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 2a7b977..5e789af 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -64,6 +64,10 @@
 	__stringify(IPA_QUOTA_REACH),
 	__stringify(IPA_SSR_BEFORE_SHUTDOWN),
 	__stringify(IPA_SSR_AFTER_POWERUP),
+	__stringify(ADD_VLAN_IFACE),
+	__stringify(DEL_VLAN_IFACE),
+	__stringify(ADD_L2TP_VLAN_MAPPING),
+	__stringify(DEL_L2TP_VLAN_MAPPING)
 };
 
 const char *ipa3_hdr_l2_type_name[] = {
@@ -78,6 +82,8 @@
 	__stringify(IPA_HDR_PROC_ETHII_TO_802_3),
 	__stringify(IPA_HDR_PROC_802_3_TO_ETHII),
 	__stringify(IPA_HDR_PROC_802_3_TO_802_3),
+	__stringify(IPA_HDR_PROC_L2TP_HEADER_ADD),
+	__stringify(IPA_HDR_PROC_L2TP_HEADER_REMOVE),
 };
 
 static struct dentry *dent;
@@ -524,7 +530,8 @@
 	}
 
 	if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) ||
-		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3)) {
+		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) ||
+		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP)) {
 		pr_err("dst_mac_addr:%pM ", attrib->dst_mac_addr);
 	}
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 915f2b8..4fb4da8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -1836,6 +1836,8 @@
 	struct gsi_xfer_elem gsi_xfer_elem_one;
 	u32 curr;
 
+	spin_lock_bh(&sys->spinlock);
+
 	rx_len_cached = sys->len;
 	curr = atomic_read(&sys->repl.head_idx);
 
@@ -1878,6 +1880,7 @@
 		mb();
 		atomic_set(&sys->repl.head_idx, curr);
 	}
+	spin_unlock_bh(&sys->spinlock);
 
 	queue_work(sys->repl_wq, &sys->repl_work);
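
The locking added above appears intended to serialize the replenish path
against softirq-context users of the same ring: spin_lock_bh() disables bottom
halves while the shared head index is read, advanced and written back. Sketch
of the protected region:

    spin_lock_bh(&sys->spinlock);
    curr = atomic_read(&sys->repl.head_idx);
    /* ... queue GSI transfer elements and advance 'curr' ... */
    atomic_set(&sys->repl.head_idx, curr);
    spin_unlock_bh(&sys->spinlock);
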
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 6e51472..14d776e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -13,7 +13,7 @@
 #include "ipa_i.h"
 #include "ipahal/ipahal.h"
 
-static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 60};
+static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 64};
 static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};
 
 #define HDR_TYPE_IS_VALID(type) \
@@ -78,7 +78,8 @@
 				entry->hdr->is_hdr_proc_ctx,
 				entry->hdr->phys_base,
 				hdr_base_addr,
-				entry->hdr->offset_entry);
+				entry->hdr->offset_entry,
+				entry->l2tp_params);
 		if (ret)
 			return ret;
 	}
@@ -353,6 +354,7 @@
 
 	entry->type = proc_ctx->type;
 	entry->hdr = hdr_entry;
+	entry->l2tp_params = proc_ctx->l2tp_params;
 	if (add_ref_hdr)
 		hdr_entry->ref_cnt++;
 	entry->cookie = IPA_COOKIE;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index c6d5c6e..6a5ec31 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -322,7 +322,8 @@
 /**
  * struct ipa3_hdr_proc_ctx_entry - IPA processing context header table entry
  * @link: entry's link in global header table entries list
- * @type:
+ * @type: header processing context type
+ * @l2tp_params: L2TP parameters
  * @offset_entry: entry's offset
  * @hdr: the header
  * @cookie: cookie used for validity check
@@ -333,6 +334,7 @@
 struct ipa3_hdr_proc_ctx_entry {
 	struct list_head link;
 	enum ipa_hdr_proc_type type;
+	union ipa_l2tp_hdr_proc_ctx_params l2tp_params;
 	struct ipa3_hdr_proc_ctx_offset_entry *offset_entry;
 	struct ipa3_hdr_entry *hdr;
 	u32 cookie;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 0abe5fe..edd5b54 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -43,6 +43,7 @@
 #define IPA_BCR_REG_VAL_v3_0 (0x00000001)
 #define IPA_BCR_REG_VAL_v3_5 (0x0000003B)
 #define IPA_BCR_REG_VAL_v4_0 (0x00000039)
+#define IPA_CLKON_CFG_v4_0 (0x30000000)
 #define IPA_AGGR_GRAN_MIN (1)
 #define IPA_AGGR_GRAN_MAX (32)
 #define IPA_EOT_COAL_GRAN_MIN (1)
@@ -630,7 +631,12 @@
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 29, 14, 8, 8, IPA_EE_AP } },
-
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_3_0][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
 
 	/* IPA_3_5 */
 	[IPA_3_5][IPA_CLIENT_WLAN1_PROD]          = {
@@ -777,6 +783,12 @@
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 19, 13, 8, 8, IPA_EE_AP } },
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_3_5][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
 
 	/* IPA_3_5_MHI */
 	[IPA_3_5_MHI][IPA_CLIENT_USB_PROD]            = {
@@ -927,6 +939,12 @@
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 19, 13, 8, 8, IPA_EE_AP } },
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_3_5_MHI][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
 
 	/* IPA_3_5_1 */
 	[IPA_3_5_1][IPA_CLIENT_WLAN1_PROD]          = {
@@ -1072,7 +1090,13 @@
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 11, 2, 4, 6, IPA_EE_AP } },
-
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_3_5_1][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v3_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
 
 	/* IPA_4_0 */
 	[IPA_4_0][IPA_CLIENT_WLAN1_PROD]          = {
@@ -1272,6 +1296,13 @@
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 21, 15, 9, 9, IPA_EE_AP } },
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_4_0][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
 
 	/* IPA_4_0_MHI */
 	[IPA_4_0_MHI][IPA_CLIENT_USB_PROD]            = {
@@ -1451,8 +1482,13 @@
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 21, 15, 9, 9, IPA_EE_AP } },
-
-
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_4_0_MHI][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
 };
 
 static struct msm_bus_vectors ipa_init_vectors_v3_0[]  = {
@@ -2055,6 +2091,16 @@
 
 	ipahal_write_reg(IPA_BCR, val);
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		struct ipahal_reg_tx_cfg cfg;
+
+		ipahal_write_reg(IPA_CLKON_CFG, IPA_CLKON_CFG_v4_0);
+		ipahal_read_reg_fields(IPA_TX_CFG, &cfg);
+		/* disable PA_MASK_EN to allow holb drop */
+		cfg.pa_mask_en = 0;
+		ipahal_write_reg_fields(IPA_TX_CFG, &cfg);
+	}
+
 	ipa3_cfg_qsb();
 
 	return 0;
@@ -2125,7 +2171,8 @@
 
 	ipa_ep_idx = ipa3_ep_mapping[ipa3_get_hw_type_index()][client].
 		ipa_gsi_ep_info.ipa_ep_num;
-	if (ipa_ep_idx < 0 || ipa_ep_idx >= IPA3_MAX_NUM_PIPES)
+	if (ipa_ep_idx < 0 || (ipa_ep_idx >= IPA3_MAX_NUM_PIPES
+		&& client != IPA_CLIENT_DUMMY_CONS))
 		return IPA_EP_NOT_ALLOCATED;
 
 	return ipa_ep_idx;
@@ -2892,7 +2939,8 @@
 		if (ep_mode->mode == IPA_DMA)
 			type = IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY;
 		else
-			type = IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP;
+			type =
+			   IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP;
 
 	IPADBG(" set sequencers to sequence 0x%x, ep = %d\n", type,
 				clnt_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 6f46ebf..ae1b989 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -1153,12 +1153,14 @@
  * @phys_base: memory location in DDR
  * @hdr_base_addr: base address in table
  * @offset_entry: offset from hdr_base_addr in table
+ * @l2tp_params: l2tp parameters
  */
 static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
 		void *const base, u32 offset,
 		u32 hdr_len, bool is_hdr_proc_ctx,
 		dma_addr_t phys_base, u32 hdr_base_addr,
-		struct ipa_hdr_offset_entry *offset_entry){
+		struct ipa_hdr_offset_entry *offset_entry,
+		union ipa_l2tp_hdr_proc_ctx_params l2tp_params){
 	if (type == IPA_HDR_PROC_NONE) {
 		struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx;
 
@@ -1174,6 +1176,58 @@
 		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
 		ctx->end.length = 0;
 		ctx->end.value = 0;
+	} else if (type == IPA_HDR_PROC_L2TP_HEADER_ADD) {
+		struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *ctx;
+
+		ctx = (struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *)
+			(base + offset);
+		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.value = hdr_len;
+		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+			hdr_base_addr + offset_entry->offset;
+		IPAHAL_DBG("header address 0x%x\n",
+			ctx->hdr_add.hdr_addr);
+		ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+		ctx->l2tp_params.tlv.length = 1;
+		ctx->l2tp_params.tlv.value =
+				IPA_HDR_UCP_L2TP_HEADER_ADD;
+		ctx->l2tp_params.l2tp_params.eth_hdr_retained =
+			l2tp_params.hdr_add_param.eth_hdr_retained;
+		ctx->l2tp_params.l2tp_params.input_ip_version =
+			l2tp_params.hdr_add_param.input_ip_version;
+		ctx->l2tp_params.l2tp_params.output_ip_version =
+			l2tp_params.hdr_add_param.output_ip_version;
+
+		IPAHAL_DBG("command id %d\n", ctx->l2tp_params.tlv.value);
+		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+		ctx->end.length = 0;
+		ctx->end.value = 0;
+	} else if (type == IPA_HDR_PROC_L2TP_HEADER_REMOVE) {
+		struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *ctx;
+
+		ctx = (struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *)
+			(base + offset);
+		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.value = hdr_len;
+		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+			hdr_base_addr + offset_entry->offset;
+		IPAHAL_DBG("header address 0x%x length %d\n",
+			ctx->hdr_add.hdr_addr, ctx->hdr_add.tlv.value);
+		ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+		ctx->l2tp_params.tlv.length = 1;
+		ctx->l2tp_params.tlv.value =
+				IPA_HDR_UCP_L2TP_HEADER_REMOVE;
+		ctx->l2tp_params.l2tp_params.hdr_len_remove =
+			l2tp_params.hdr_remove_param.hdr_len_remove;
+		ctx->l2tp_params.l2tp_params.eth_hdr_retained =
+			l2tp_params.hdr_remove_param.eth_hdr_retained;
+
+		IPAHAL_DBG("command id %d\n", ctx->l2tp_params.tlv.value);
+		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+		ctx->end.length = 0;
+		ctx->end.value = 0;
 	} else {
 		struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *ctx;
 
@@ -1242,7 +1296,8 @@
 			void *const base, u32 offset, u32 hdr_len,
 			bool is_hdr_proc_ctx, dma_addr_t phys_base,
 			u32 hdr_base_addr,
-			struct ipa_hdr_offset_entry *offset_entry);
+			struct ipa_hdr_offset_entry *offset_entry,
+			union ipa_l2tp_hdr_proc_ctx_params l2tp_params);
 
 	int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type);
 };
@@ -1307,11 +1362,13 @@
  * @phys_base: memory location in DDR
  * @hdr_base_addr: base address in table
  * @offset_entry: offset from hdr_base_addr in table
+ * @l2tp_params: l2tp parameters
  */
 int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
 		void *const base, u32 offset, u32 hdr_len,
 		bool is_hdr_proc_ctx, dma_addr_t phys_base,
-		u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry)
+		u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry,
+		union ipa_l2tp_hdr_proc_ctx_params l2tp_params)
 {
 	IPAHAL_DBG(
 		"type %d, base %p, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %d, offset_entry %p\n"
@@ -1332,7 +1389,7 @@
 
 	return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset,
 			hdr_len, is_hdr_proc_ctx, phys_base,
-			hdr_base_addr, offset_entry);
+			hdr_base_addr, offset_entry, l2tp_params);
 }
 
 /*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index f8bdc2c..724cdec 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -625,12 +625,14 @@
  * @phys_base: memory location in DDR
  * @hdr_base_addr: base address in table
  * @offset_entry: offset from hdr_base_addr in table
+ * @l2tp_params: l2tp parameters
  */
 int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
 		void *base, u32 offset, u32 hdr_len,
 		bool is_hdr_proc_ctx, dma_addr_t phys_base,
 		u32 hdr_base_addr,
-		struct ipa_hdr_offset_entry *offset_entry);
+		struct ipa_hdr_offset_entry *offset_entry,
+		union ipa_l2tp_hdr_proc_ctx_params l2tp_params);
 
 /*
  * ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index 4f20e0f..2253b4b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -797,6 +797,38 @@
 		ihl_ofst_meq32++;
 	}
 
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+			ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+		/* populate first ihl meq eq */
+		extra = ipa_write_8(8, extra);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[3], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[2], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[1], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[0], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[3], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[2], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[1], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[0], rest);
+		/* populate second ihl meq eq */
+		extra = ipa_write_8(12, extra);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[5], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[4], rest);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[5], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[4], rest);
+		ihl_ofst_meq32 += 2;
+	}
+
 	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
 		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
 		rest = ipa_write_32(attrib->meta_data_mask, rest);
@@ -1103,6 +1135,38 @@
 		ihl_ofst_meq32++;
 	}
 
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+			ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+		/* populate first ihl meq eq */
+		extra = ipa_write_8(8, extra);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[3], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[2], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[1], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[0], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[3], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[2], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[1], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[0], rest);
+		/* populate second ihl meq eq */
+		extra = ipa_write_8(12, extra);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[5], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[4], rest);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[5], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[4], rest);
+		ihl_ofst_meq32 += 2;
+	}
+
 	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
 		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
 		rest = ipa_write_32(attrib->meta_data_mask, rest);
@@ -1613,6 +1677,40 @@
 		ofst_meq128++;
 	}
 
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+			ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+		/* populate the first ihl meq 32 eq */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 8;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			(attrib->dst_mac_addr_mask[3] & 0xFF) |
+			((attrib->dst_mac_addr_mask[2] << 8) & 0xFF00) |
+			((attrib->dst_mac_addr_mask[1] << 16) & 0xFF0000) |
+			((attrib->dst_mac_addr_mask[0] << 24) & 0xFF000000);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			(attrib->dst_mac_addr[3] & 0xFF) |
+			((attrib->dst_mac_addr[2] << 8) & 0xFF00) |
+			((attrib->dst_mac_addr[1] << 16) & 0xFF0000) |
+			((attrib->dst_mac_addr[0] << 24) & 0xFF000000);
+		/* populate the second ihl meq 32 eq */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].offset = 12;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].mask =
+			((attrib->dst_mac_addr_mask[5] << 16) & 0xFF0000) |
+			((attrib->dst_mac_addr_mask[4] << 24) & 0xFF000000);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].value =
+			((attrib->dst_mac_addr[5] << 16) & 0xFF0000) |
+			((attrib->dst_mac_addr[4] << 24) & 0xFF000000);
+		ihl_ofst_meq32 += 2;
+	}
+
 	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
 		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
 			IPAHAL_ERR("ran out of meq32 eq\n");
@@ -1976,6 +2074,40 @@
 		ofst_meq128++;
 	}
 
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+			ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+		/* populate the first ihl meq 32 eq */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 8;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			(attrib->dst_mac_addr_mask[3] & 0xFF) |
+			((attrib->dst_mac_addr_mask[2] << 8) & 0xFF00) |
+			((attrib->dst_mac_addr_mask[1] << 16) & 0xFF0000) |
+			((attrib->dst_mac_addr_mask[0] << 24) & 0xFF000000);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			(attrib->dst_mac_addr[3] & 0xFF) |
+			((attrib->dst_mac_addr[2] << 8) & 0xFF00) |
+			((attrib->dst_mac_addr[1] << 16) & 0xFF0000) |
+			((attrib->dst_mac_addr[0] << 24) & 0xFF000000);
+		/* populate the second ihl meq 32 eq */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].offset = 12;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].mask =
+			((attrib->dst_mac_addr_mask[5] << 16) & 0xFF0000) |
+			((attrib->dst_mac_addr_mask[4] << 24) & 0xFF000000);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].value =
+			((attrib->dst_mac_addr[5] << 16) & 0xFF0000) |
+			((attrib->dst_mac_addr[4] << 24) & 0xFF000000);
+		ihl_ofst_meq32 += 2;
+	}
+
 	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
 		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
 			IPAHAL_ERR("ran out of meq32 eq\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index c023082..1c4b287 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -555,6 +555,8 @@
 #define IPA_HDR_UCP_802_3_TO_ETHII 7
 #define IPA_HDR_UCP_ETHII_TO_802_3 8
 #define IPA_HDR_UCP_ETHII_TO_ETHII 9
+#define IPA_HDR_UCP_L2TP_HEADER_ADD 10
+#define IPA_HDR_UCP_L2TP_HEADER_REMOVE 11
 
 /* Processing context TLV type */
 #define IPA_PROC_CTX_TLV_TYPE_END 0
@@ -596,6 +598,28 @@
 };
 
 /**
+ * struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr -
+ * HW structure of IPA processing context - add l2tp header tlv
+ * @tlv: IPA processing context TLV
+ * @l2tp_params: l2tp parameters
+ */
+struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr {
+	struct ipa_hw_hdr_proc_ctx_tlv tlv;
+	struct ipa_l2tp_header_add_procparams l2tp_params;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr -
+ * HW structure of IPA processing context - remove l2tp header tlv
+ * @tlv: IPA processing context TLV
+ * @l2tp_params: l2tp parameters
+ */
+struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr {
+	struct ipa_hw_hdr_proc_ctx_tlv tlv;
+	struct ipa_l2tp_header_remove_procparams l2tp_params;
+};
+
+/**
  * struct ipa_hw_hdr_proc_ctx_add_hdr_seq -
  * IPA processing context header - add header sequence
  * @hdr_add: add header command
@@ -619,6 +643,32 @@
 	struct ipa_hw_hdr_proc_ctx_tlv end;
 };
 
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq -
+ * IPA processing context header - process command sequence
+ * @hdr_add: add header command
+ * @l2tp_params: l2tp params for header addition
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq {
+	struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+	struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr l2tp_params;
+	struct ipa_hw_hdr_proc_ctx_tlv end;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq -
+ * IPA processing context header - process command sequence
+ * @hdr_add: add header command
+ * @l2tp_params: l2tp params for header removal
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq {
+	struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+	struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr l2tp_params;
+	struct ipa_hw_hdr_proc_ctx_tlv end;
+};
+
 /* IPA HW DPS/HPS image memory sizes */
 #define IPA_HW_DPS_IMG_MEM_SIZE_V3_0 128
 #define IPA_HW_HPS_IMG_MEM_SIZE_V3_0 320
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 3019e4d..af717cd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -85,6 +85,7 @@
 	__stringify(IPA_IDLE_INDICATION_CFG),
 	__stringify(IPA_DPS_SEQUENCER_FIRST),
 	__stringify(IPA_HPS_SEQUENCER_FIRST),
+	__stringify(IPA_CLKON_CFG),
 };
 
 static void ipareg_construct_dummy(enum ipahal_reg_name reg,
@@ -1490,6 +1491,9 @@
 	[IPA_HW_v4_0][IPA_ENDP_STATUS_n] = {
 		ipareg_construct_endp_status_n_v4_0, ipareg_parse_dummy,
 		0x00000840, 0x70},
+	[IPA_HW_v4_0][IPA_CLKON_CFG] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000044, 0},
 };
 
 /*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index c9293b8..79e2b9c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -88,6 +88,7 @@
 	IPA_IDLE_INDICATION_CFG,
 	IPA_DPS_SEQUENCER_FIRST,
 	IPA_HPS_SEQUENCER_FIRST,
+	IPA_CLKON_CFG,
 	IPA_REG_MAX,
 };
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index fcaabe3..16585a2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1141,7 +1141,8 @@
 		memset(&meta, 0, sizeof(meta));
 		meta.pkt_init_dst_ep_valid = true;
 		meta.pkt_init_dst_ep_remote = true;
-		meta.pkt_init_dst_ep = IPA_CLIENT_Q6_LAN_CONS;
+		meta.pkt_init_dst_ep =
+			ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_CONS);
 		ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_PROD, skb, &meta);
 	} else {
 		ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_PROD, skb, NULL);
@@ -2725,6 +2726,9 @@
 	if (!data->set_quota)
 		ipa3_qmi_stop_data_qouta();
 
+	/* prevent string buffer overflows */
+	data->interface_name[IFNAMSIZ-1] = '\0';
+
 	index = find_vchannel_name_index(data->interface_name);
 	IPAWANERR("iface name %s, quota %lu\n",
 		  data->interface_name,
diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
index 5a41d64..3a89c7d 100644
--- a/drivers/platform/msm/ipa/test/ipa_test_mhi.c
+++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -326,6 +326,7 @@
 	struct ipa_mem_buffer out_buffer;
 	u32 prod_hdl;
 	u32 cons_hdl;
+	u32 test_prod_hdl;
 };
 
 static struct ipa_test_mhi_context *test_mhi_ctx;
@@ -774,6 +775,7 @@
 static int ipa_test_mhi_suite_setup(void **ppriv)
 {
 	int rc = 0;
+	struct ipa_sys_connect_params sys_in;
 
 	IPA_UT_DBG("Start Setup\n");
 
@@ -815,9 +817,22 @@
 		goto fail_free_mmio_spc;
 	}
 
+	/* connect PROD pipe for remote wakeup */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_TEST_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MHI_CONS;
+	if (ipa_setup_sys_pipe(&sys_in, &test_mhi_ctx->test_prod_hdl)) {
+		IPA_UT_ERR("setup sys pipe failed.\n");
+		goto fail_destroy_data_structures;
+	}
+
 	*ppriv = test_mhi_ctx;
 	return 0;
 
+fail_destroy_data_structures:
+	ipa_mhi_test_destroy_data_structures();
 fail_free_mmio_spc:
 	ipa_test_mhi_free_mmio_space();
 fail_iounmap:
@@ -838,6 +853,7 @@
 	if (!test_mhi_ctx)
 		return  0;
 
+	ipa_teardown_sys_pipe(test_mhi_ctx->test_prod_hdl);
 	ipa_mhi_test_destroy_data_structures();
 	ipa_test_mhi_free_mmio_space();
 	iounmap(test_mhi_ctx->gsi_mmio);
@@ -1811,7 +1827,7 @@
 		memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1);
 	}
 
-	rc = ipa_tx_dp(IPA_CLIENT_MHI_CONS, skb, NULL);
+	rc = ipa_tx_dp(IPA_CLIENT_TEST_PROD, skb, NULL);
 	if (rc) {
 		IPA_UT_LOG("ipa_tx_dp failed %d\n", rc);
 		IPA_UT_TEST_FAIL_REPORT("ipa tx dp fail");
@@ -1982,7 +1998,7 @@
 		memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1);
 	}
 
-	rc = ipa_tx_dp(IPA_CLIENT_MHI_CONS, skb, NULL);
+	rc = ipa_tx_dp(IPA_CLIENT_TEST_PROD, skb, NULL);
 	if (rc) {
 		IPA_UT_LOG("ipa_tx_dp failed %d\n", rc);
 		IPA_UT_TEST_FAIL_REPORT("ipa tx dp fail");
diff --git a/drivers/platform/msm/seemp_core/seemp_logk.c b/drivers/platform/msm/seemp_core/seemp_logk.c
index ce073ed..a528e16 100644
--- a/drivers/platform/msm/seemp_core/seemp_logk.c
+++ b/drivers/platform/msm/seemp_core/seemp_logk.c
@@ -289,7 +289,7 @@
 	unsigned int bit_num = index%8;
 	unsigned char byte;
 
-	if (DIV_ROUND_UP(index, 8) > MASK_BUFFER_SIZE)
+	if (byte_num >= MASK_BUFFER_SIZE)
 		return false;
 
 	byte = pVec[byte_num];
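
The bounds check above was off by one: when index is an exact multiple of 8,
DIV_ROUND_UP(index, 8) equals byte_num rather than byte_num + 1, so the old
test allowed a read one byte past the mask buffer. Illustrative numbers,
assuming MASK_BUFFER_SIZE were 8:

    unsigned int index = 64, byte_num = index / 8;     /* byte_num == 8 */

    /* old: DIV_ROUND_UP(64, 8) == 8 is not > 8, so pVec[8] was read (OOB) */
    /* new: byte_num >= MASK_BUFFER_SIZE rejects the access before the read */
    if (byte_num >= 8 /* MASK_BUFFER_SIZE in this example */)
            return false;
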
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
index d5fff74..b2c0059 100644
--- a/drivers/power/supply/qcom/smb1355-charger.c
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -16,6 +16,7 @@
 #include <linux/regmap.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/iio/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/qpnp/qpnp-revid.h>
 #include <linux/of.h>
@@ -61,6 +62,15 @@
 #define CHGR_BATTOV_CFG_REG			(CHGR_BASE + 0x70)
 #define BATTOV_SETTING_MASK			GENMASK(7, 0)
 
+#define TEMP_COMP_STATUS_REG			(MISC_BASE + 0x07)
+#define SKIN_TEMP_RST_HOT_BIT			BIT(6)
+#define SKIN_TEMP_UB_HOT_BIT			BIT(5)
+#define SKIN_TEMP_LB_HOT_BIT			BIT(4)
+#define DIE_TEMP_TSD_HOT_BIT			BIT(3)
+#define DIE_TEMP_RST_HOT_BIT			BIT(2)
+#define DIE_TEMP_UB_HOT_BIT			BIT(1)
+#define DIE_TEMP_LB_HOT_BIT			BIT(0)
+
 #define BARK_BITE_WDOG_PET_REG			(MISC_BASE + 0x43)
 #define BARK_BITE_WDOG_PET_BIT			BIT(0)
 
@@ -115,12 +125,18 @@
 	int			irq;
 };
 
+struct smb_iio {
+	struct iio_channel	*temp_chan;
+	struct iio_channel	*temp_max_chan;
+};
+
 struct smb1355 {
 	struct device		*dev;
 	char			*name;
 	struct regmap		*regmap;
 
 	struct smb_params	param;
+	struct smb_iio		iio;
 
 	struct mutex		write_lock;
 
@@ -257,9 +273,13 @@
 	POWER_SUPPLY_PROP_CHARGING_ENABLED,
 	POWER_SUPPLY_PROP_PIN_ENABLED,
 	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_CHARGER_TEMP,
+	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
 	POWER_SUPPLY_PROP_MODEL_NAME,
+	POWER_SUPPLY_PROP_PARALLEL_MODE,
+	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
 };
 
 static int smb1355_get_prop_batt_charge_type(struct smb1355 *chip,
@@ -305,6 +325,65 @@
 	return 0;
 }
 
+static int smb1355_get_prop_connector_health(struct smb1355 *chip)
+{
+	u8 temp;
+	int rc;
+
+	rc = smb1355_read(chip, TEMP_COMP_STATUS_REG, &temp);
+	if (rc < 0) {
+		pr_err("Couldn't read comp stat reg rc = %d\n", rc);
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
+	if (temp & SKIN_TEMP_RST_HOT_BIT)
+		return POWER_SUPPLY_HEALTH_OVERHEAT;
+
+	if (temp & SKIN_TEMP_UB_HOT_BIT)
+		return POWER_SUPPLY_HEALTH_HOT;
+
+	if (temp & SKIN_TEMP_LB_HOT_BIT)
+		return POWER_SUPPLY_HEALTH_WARM;
+
+	return POWER_SUPPLY_HEALTH_COOL;
+}
+
+
+static int smb1355_get_prop_charger_temp(struct smb1355 *chip,
+				union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chip->iio.temp_chan ||
+		PTR_ERR(chip->iio.temp_chan) == -EPROBE_DEFER)
+		chip->iio.temp_chan = devm_iio_channel_get(chip->dev,
+						"charger_temp");
+
+	if (IS_ERR(chip->iio.temp_chan))
+		return PTR_ERR(chip->iio.temp_chan);
+
+	rc = iio_read_channel_processed(chip->iio.temp_chan, &val->intval);
+	val->intval /= 100;
+	return rc;
+}
+
+static int smb1355_get_prop_charger_temp_max(struct smb1355 *chip,
+				union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chip->iio.temp_max_chan ||
+		PTR_ERR(chip->iio.temp_max_chan) == -EPROBE_DEFER)
+		chip->iio.temp_max_chan = devm_iio_channel_get(chip->dev,
+							"charger_temp_max");
+	if (IS_ERR(chip->iio.temp_max_chan))
+		return PTR_ERR(chip->iio.temp_max_chan);
+
+	rc = iio_read_channel_processed(chip->iio.temp_max_chan, &val->intval);
+	val->intval /= 100;
+	return rc;
+}
+
 static int smb1355_parallel_get_prop(struct power_supply *psy,
 				     enum power_supply_property prop,
 				     union power_supply_propval *val)
@@ -327,6 +406,12 @@
 		if (rc >= 0)
 			val->intval = !(stat & DISABLE_CHARGING_BIT);
 		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP:
+		rc = smb1355_get_prop_charger_temp(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+		rc = smb1355_get_prop_charger_temp_max(chip, val);
+		break;
 	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
 		rc = smb1355_get_parallel_charging(chip, &val->intval);
 		break;
@@ -344,6 +429,9 @@
 	case POWER_SUPPLY_PROP_PARALLEL_MODE:
 		val->intval = POWER_SUPPLY_PL_USBMID_USBMID;
 		break;
+	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
+		val->intval = smb1355_get_prop_connector_health(chip);
+		break;
 	default:
 		pr_err_ratelimited("parallel psy get prop %d not supported\n",
 			prop);
diff --git a/drivers/regulator/rpmh-regulator.c b/drivers/regulator/rpmh-regulator.c
index 4f5f86c..1ba8926 100644
--- a/drivers/regulator/rpmh-regulator.c
+++ b/drivers/regulator/rpmh-regulator.c
@@ -1187,6 +1187,9 @@
 	aggr_vreg->vreg_count = 0;
 
 	for_each_available_child_of_node(aggr_vreg->dev->of_node, node) {
+		/* Skip child nodes handled by other drivers. */
+		if (of_find_property(node, "compatible", NULL))
+			continue;
 		aggr_vreg->vreg_count++;
 	}
 
@@ -1202,6 +1205,10 @@
 
 	i = 0;
 	for_each_available_child_of_node(aggr_vreg->dev->of_node, node) {
+		/* Skip child nodes handled by other drivers. */
+		if (of_find_property(node, "compatible", NULL))
+			continue;
+
 		aggr_vreg->vreg[i].of_node = node;
 		aggr_vreg->vreg[i].aggr_vreg = aggr_vreg;
 
@@ -1623,6 +1630,7 @@
 		mutex_unlock(&aggr_vreg->lock);
 	}
 
+	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
 	platform_set_drvdata(pdev, aggr_vreg);
 
 	aggr_vreg_debug(aggr_vreg, "successfully probed; addr=0x%05X, type=%s\n",
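
The rpmh-regulator changes above follow a common devicetree pattern: child
nodes that carry their own "compatible" string are instantiated as platform
devices by of_platform_populate() and bound by other drivers, so the regulator
enumeration loop must skip them. Sketch of the loop shape ('parent' stands in
for aggr_vreg->dev->of_node):

    for_each_available_child_of_node(parent, node) {
            if (of_find_property(node, "compatible", NULL))
                    continue;   /* owned by another driver */
            /* ... treat 'node' as one of this device's regulators ... */
    }
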
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index bc2d2d4..5a578f1 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -850,8 +850,8 @@
 static int ufsdbg_dump_device_desc_show(struct seq_file *file, void *data)
 {
 	int err = 0;
-	int buff_len = QUERY_DESC_DEVICE_MAX_SIZE;
-	u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+	int buff_len = QUERY_DESC_DEVICE_DEF_SIZE;
+	u8 desc_buf[QUERY_DESC_DEVICE_DEF_SIZE];
 	struct ufs_hba *hba = (struct ufs_hba *)file->private;
 
 	struct desc_field_offset device_desc_field_name[] = {
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 3245fe1..f85a67d 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -65,6 +65,7 @@
 #define UFS_MAX_LUNS		(SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
 #define UFS_UPIU_WLUN_ID	(1 << 7)
 #define UFS_UPIU_MAX_GENERAL_LUN	8
+#define QUERY_DESC_IDN_CONFIGURATION	QUERY_DESC_IDN_CONFIGURAION
 
 /* Well known logical unit id in LUN field of UPIU */
 enum {
@@ -144,19 +145,13 @@
 	QUERY_DESC_DESC_TYPE_OFFSET	= 0x01,
 };
 
-enum ufs_desc_max_size {
-	QUERY_DESC_DEVICE_MAX_SIZE		= 0x40,
-	QUERY_DESC_CONFIGURAION_MAX_SIZE	= 0x90,
-	QUERY_DESC_UNIT_MAX_SIZE		= 0x23,
-	QUERY_DESC_INTERCONNECT_MAX_SIZE	= 0x06,
-	/*
-	 * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes
-	 * of descriptor header.
-	 */
-	QUERY_DESC_STRING_MAX_SIZE		= 0xFE,
-	QUERY_DESC_GEOMETRY_MAZ_SIZE		= 0x44,
-	QUERY_DESC_POWER_MAX_SIZE		= 0x62,
-	QUERY_DESC_RFU_MAX_SIZE			= 0x00,
+enum ufs_desc_def_size {
+	QUERY_DESC_DEVICE_DEF_SIZE		= 0x40,
+	QUERY_DESC_CONFIGURATION_DEF_SIZE	= 0x90,
+	QUERY_DESC_UNIT_DEF_SIZE		= 0x23,
+	QUERY_DESC_INTERCONNECT_DEF_SIZE	= 0x06,
+	QUERY_DESC_GEOMETRY_DEF_SIZE		= 0x44,
+	QUERY_DESC_POWER_DEF_SIZE		= 0x62,
 };
 
 /* Unit descriptor parameters offsets in bytes*/
diff --git a/drivers/scsi/ufs/ufs_quirks.c b/drivers/scsi/ufs/ufs_quirks.c
index 3210d60..da2bfd5 100644
--- a/drivers/scsi/ufs/ufs_quirks.c
+++ b/drivers/scsi/ufs/ufs_quirks.c
@@ -51,7 +51,7 @@
 void ufs_advertise_fixup_device(struct ufs_hba *hba)
 {
 	int err;
-	u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1];
+	u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1];
 	char *model;
 	struct ufs_card_fix *f;
 
@@ -59,13 +59,13 @@
 	if (!model)
 		goto out;
 
-	memset(str_desc_buf, 0, QUERY_DESC_STRING_MAX_SIZE);
+	memset(str_desc_buf, 0, QUERY_DESC_MAX_SIZE);
 	err = ufshcd_read_string_desc(hba, hba->dev_info.i_product_name,
-			str_desc_buf, QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
+			str_desc_buf, QUERY_DESC_MAX_SIZE, ASCII_STD);
 	if (err)
 		goto out;
 
-	str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
+	str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
 	strlcpy(model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
 		min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
 		      MAX_MODEL_LEN));
diff --git a/drivers/scsi/ufs/ufs_test.c b/drivers/scsi/ufs/ufs_test.c
index d41871a..2e3997d 100644
--- a/drivers/scsi/ufs/ufs_test.c
+++ b/drivers/scsi/ufs/ufs_test.c
@@ -603,8 +603,8 @@
 	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
 	struct scsi_device *sdev;
 	struct ufs_hba *hba;
-	int buff_len = QUERY_DESC_UNIT_MAX_SIZE;
-	u8 desc_buf[QUERY_DESC_UNIT_MAX_SIZE];
+	int buff_len = QUERY_DESC_UNIT_DEF_SIZE;
+	u8 desc_buf[QUERY_DESC_UNIT_DEF_SIZE];
 	bool flag;
 	u32 att;
 	int ret = 0;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 59222ea..a2b5ea0 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -251,19 +251,6 @@
 		       16, 4, buf, len, false);
 }
 
-static u32 ufs_query_desc_max_size[] = {
-	QUERY_DESC_DEVICE_MAX_SIZE,
-	QUERY_DESC_CONFIGURAION_MAX_SIZE,
-	QUERY_DESC_UNIT_MAX_SIZE,
-	QUERY_DESC_RFU_MAX_SIZE,
-	QUERY_DESC_INTERCONNECT_MAX_SIZE,
-	QUERY_DESC_STRING_MAX_SIZE,
-	QUERY_DESC_RFU_MAX_SIZE,
-	QUERY_DESC_GEOMETRY_MAZ_SIZE,
-	QUERY_DESC_POWER_MAX_SIZE,
-	QUERY_DESC_RFU_MAX_SIZE,
-};
-
 enum {
 	UFSHCD_MAX_CHANNEL	= 0,
 	UFSHCD_MAX_ID		= 1,
@@ -3628,7 +3615,7 @@
 		goto out;
 	}
 
-	if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
 		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
 				__func__, *buf_len);
 		err = -EINVAL;
@@ -3708,6 +3695,92 @@
 EXPORT_SYMBOL(ufshcd_query_descriptor);
 
 /**
+ * ufshcd_read_desc_length - read the specified descriptor length from header
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @desc_length: pointer to variable to read the length of descriptor
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_length(struct ufs_hba *hba,
+	enum desc_idn desc_id,
+	int desc_index,
+	int *desc_length)
+{
+	int ret;
+	u8 header[QUERY_DESC_HDR_SIZE];
+	int header_len = QUERY_DESC_HDR_SIZE;
+
+	if (desc_id >= QUERY_DESC_IDN_MAX)
+		return -EINVAL;
+
+	ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
+					desc_id, desc_index, 0, header,
+					&header_len);
+
+	if (ret) {
+		dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
+			__func__, desc_id);
+		return ret;
+	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
+		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
+			__func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
+			desc_id);
+		ret = -EINVAL;
+	}
+
+	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];
+	return ret;
+
+}
+
+/**
+ * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_len: mapped desc length (out)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
+	enum desc_idn desc_id, int *desc_len)
+{
+	switch (desc_id) {
+	case QUERY_DESC_IDN_DEVICE:
+		*desc_len = hba->desc_size.dev_desc;
+		break;
+	case QUERY_DESC_IDN_POWER:
+		*desc_len = hba->desc_size.pwr_desc;
+		break;
+	case QUERY_DESC_IDN_GEOMETRY:
+		*desc_len = hba->desc_size.geom_desc;
+		break;
+	case QUERY_DESC_IDN_CONFIGURATION:
+		*desc_len = hba->desc_size.conf_desc;
+		break;
+	case QUERY_DESC_IDN_UNIT:
+		*desc_len = hba->desc_size.unit_desc;
+		break;
+	case QUERY_DESC_IDN_INTERCONNECT:
+		*desc_len = hba->desc_size.interc_desc;
+		break;
+	case QUERY_DESC_IDN_STRING:
+		*desc_len = QUERY_DESC_MAX_SIZE;
+		break;
+	case QUERY_DESC_IDN_RFU_0:
+	case QUERY_DESC_IDN_RFU_1:
+		*desc_len = 0;
+		break;
+	default:
+		*desc_len = 0;
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
+
+/**
  * ufshcd_read_desc_param - read the specified descriptor parameter
  * @hba: Pointer to adapter instance
  * @desc_id: descriptor idn value
@@ -3721,37 +3794,45 @@
 static int ufshcd_read_desc_param(struct ufs_hba *hba,
 				  enum desc_idn desc_id,
 				  int desc_index,
-				  u32 param_offset,
+				  u8 param_offset,
 				  u8 *param_read_buf,
-				  u32 param_size)
+				  u8 param_size)
 {
 	int ret;
 	u8 *desc_buf;
-	u32 buff_len;
+	int buff_len;
 	bool is_kmalloc = true;
 
-	/* safety checks */
-	if (desc_id >= QUERY_DESC_IDN_MAX)
+	/* Safety check */
+	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
 		return -EINVAL;
 
-	buff_len = ufs_query_desc_max_size[desc_id];
-	if ((param_offset + param_size) > buff_len)
-		return -EINVAL;
+	/* Get the maximum length of the descriptor from the structure
+	 * filled at probe time.
+	 */
+	ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
 
-	if (!param_offset && (param_size == buff_len)) {
-		/* memory space already available to hold full descriptor */
-		desc_buf = param_read_buf;
-		is_kmalloc = false;
-	} else {
-		/* allocate memory to hold full descriptor */
+	/* Sanity checks */
+	if (ret || !buff_len) {
+		dev_err(hba->dev, "%s: Failed to get full descriptor length",
+			__func__);
+		return ret;
+	}
+
+	/* Check whether we need temp memory */
+	if (param_offset != 0 || param_size < buff_len) {
 		desc_buf = kmalloc(buff_len, GFP_KERNEL);
 		if (!desc_buf)
 			return -ENOMEM;
+	} else {
+		desc_buf = param_read_buf;
+		is_kmalloc = false;
 	}
 
+	/* Request for full descriptor */
 	ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
-				      desc_id, desc_index, 0, desc_buf,
-				      &buff_len);
+					desc_id, desc_index, 0,
+					desc_buf, &buff_len);
 
 	if (ret) {
 		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
@@ -3768,25 +3849,9 @@
 		goto out;
 	}
 
-	/*
-	 * While reading variable size descriptors (like string descriptor),
-	 * some UFS devices may report the "LENGTH" (field in "Transaction
-	 * Specific fields" of Query Response UPIU) same as what was requested
-	 * in Query Request UPIU instead of reporting the actual size of the
-	 * variable size descriptor.
-	 * Although it's safe to ignore the "LENGTH" field for variable size
-	 * descriptors as we can always derive the length of the descriptor from
-	 * the descriptor header fields. Hence this change impose the length
-	 * match check only for fixed size descriptors (for which we always
-	 * request the correct size as part of Query Request UPIU).
-	 */
-	if ((desc_id != QUERY_DESC_IDN_STRING) &&
-	    (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
-		dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
-			__func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
-		ret = -EINVAL;
-		goto out;
-	}
+	/* Check whether we will not copy more data than available */
+	if (is_kmalloc && param_size > buff_len)
+		param_size = buff_len;
 
 	if (is_kmalloc)
 		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
@@ -7170,10 +7235,19 @@
 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
 {
 	int ret;
-	int buff_len = QUERY_DESC_POWER_MAX_SIZE;
-	u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
+	int buff_len = hba->desc_size.pwr_desc;
+	u8 *desc_buf = NULL;
 	u32 icc_level;
 
+	if (buff_len) {
+		desc_buf = kmalloc(buff_len, GFP_KERNEL);
+		if (!desc_buf) {
+			dev_err(hba->dev,
+				"%s: Failed to allocate desc_buf\n", __func__);
+			return;
+		}
+	}
+
 	ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
 	if (ret) {
 		dev_err(hba->dev,
@@ -7554,9 +7628,18 @@
 static int ufs_read_device_desc_data(struct ufs_hba *hba)
 {
 	int err;
-	u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+	u8 *desc_buf = NULL;
 
-	err = ufshcd_read_device_desc(hba, desc_buf, sizeof(desc_buf));
+	if (hba->desc_size.dev_desc) {
+		desc_buf = kmalloc(hba->desc_size.dev_desc, GFP_KERNEL);
+		if (!desc_buf) {
+			err = -ENOMEM;
+			dev_err(hba->dev,
+				"%s: Failed to allocate desc_buf\n", __func__);
+			return err;
+		}
+	}
+	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
 	if (err)
 		return err;
 
@@ -7574,6 +7657,51 @@
 	return 0;
 }
 
+static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
+{
+	int err;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
+		&hba->desc_size.dev_desc);
+	if (err)
+		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
+		&hba->desc_size.pwr_desc);
+	if (err)
+		hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
+		&hba->desc_size.interc_desc);
+	if (err)
+		hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
+		&hba->desc_size.conf_desc);
+	if (err)
+		hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
+		&hba->desc_size.unit_desc);
+	if (err)
+		hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+		&hba->desc_size.geom_desc);
+	if (err)
+		hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
+static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
+{
+	hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+	hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+	hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+	hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+	hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+	hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
 /**
  * ufshcd_probe_hba - probe hba to detect device and initialize
  * @hba: per-adapter instance
@@ -7614,6 +7742,8 @@
 	if (ret)
 		goto out;
 
+	/* Init check for device descriptor sizes */
+	ufshcd_init_desc_sizes(hba);
 	ufs_advertise_fixup_device(hba);
 	ufshcd_tune_unipro_params(hba);
 
@@ -10075,6 +10205,9 @@
 
 	ufshcd_init_lanes_per_dir(hba);
 
+	/* Set descriptor lengths to specification defaults */
+	ufshcd_def_desc_sizes(hba);
+
 	err = ufshcd_hba_init(hba);
 	if (err)
 		goto out_error;
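
The ufshcd changes above replace the fixed ufs_query_desc_max_size[] table
with per-device descriptor lengths discovered at probe: the header of each
descriptor is read, its reported length is stored in hba->desc_size, and the
spec default is used when the query fails. Condensed sketch of one entry:

    if (ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
                                &hba->desc_size.dev_desc))
            hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
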
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index a485885..343f327 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -249,6 +249,15 @@
 	struct ufs_query query;
 };
 
+struct ufs_desc_size {
+	int dev_desc;
+	int pwr_desc;
+	int geom_desc;
+	int interc_desc;
+	int unit_desc;
+	int conf_desc;
+};
+
 /**
  * struct ufs_clk_info - UFS clock related info
  * @list: list headed by hba->clk_list_head
@@ -738,6 +747,7 @@
  * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
  *  device is known or not.
  * @scsi_block_reqs_cnt: reference counting for scsi block requests
+ * @desc_size: descriptor sizes reported by device
  */
 struct ufs_hba {
 	void __iomem *mmio_base;
@@ -967,6 +977,7 @@
 
 	int latency_hist_enabled;
 	struct io_latency_state io_lat_s;
+	struct ufs_desc_size desc_size;
 };
 
 static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
@@ -1208,6 +1219,10 @@
 			     struct ufs_pa_layer_attr *pwr_mode);
 void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba,
 		int result);
+
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
+	int *desc_length);
+
 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
 
 void ufshcd_scsi_block_requests(struct ufs_hba *hba);
diff --git a/drivers/sensors/sensors_ssc.c b/drivers/sensors/sensors_ssc.c
index d738767..dfdbd8e 100644
--- a/drivers/sensors/sensors_ssc.c
+++ b/drivers/sensors/sensors_ssc.c
@@ -32,6 +32,7 @@
 
 #define IMAGE_LOAD_CMD 1
 #define IMAGE_UNLOAD_CMD 0
+#define SSR_RESET_CMD 1
 #define CLASS_NAME	"ssc"
 #define DRV_NAME	"sensors"
 #define DRV_VERSION	"2.00"
@@ -53,6 +54,10 @@
 	struct kobj_attribute *attr,
 	const char *buf, size_t count);
 
+static ssize_t slpi_ssr_store(struct kobject *kobj,
+	struct kobj_attribute *attr,
+	const char *buf, size_t count);
+
 struct slpi_loader_private {
 	void *pil_h;
 	struct kobject *boot_slpi_obj;
@@ -62,8 +67,12 @@
 static struct kobj_attribute slpi_boot_attribute =
 	__ATTR(boot, 0220, NULL, slpi_boot_store);
 
+static struct kobj_attribute slpi_ssr_attribute =
+	__ATTR(ssr, 0220, NULL, slpi_ssr_store);
+
 static struct attribute *attrs[] = {
 	&slpi_boot_attribute.attr,
+	&slpi_ssr_attribute.attr,
 	NULL,
 };
 
@@ -138,6 +147,44 @@
 	}
 }
 
+static ssize_t slpi_ssr_store(struct kobject *kobj,
+	struct kobj_attribute *attr,
+	const char *buf,
+	size_t count)
+{
+	int ssr_cmd = 0;
+	struct subsys_device *sns_dev = NULL;
+	struct platform_device *pdev = slpi_private;
+	struct slpi_loader_private *priv = NULL;
+
+	pr_debug("%s: going to call slpi_ssr\n", __func__);
+
+	if (kstrtoint(buf, 10, &ssr_cmd) < 0)
+		return -EINVAL;
+
+	if (ssr_cmd != SSR_RESET_CMD)
+		return -EINVAL;
+
+	priv = platform_get_drvdata(pdev);
+	if (!priv)
+		return -EINVAL;
+
+	sns_dev = (struct subsys_device *)priv->pil_h;
+	if (!sns_dev)
+		return -EINVAL;
+
+	dev_err(&pdev->dev, "Something went wrong with SLPI, restarting\n");
+
+	/* subsystem_restart_dev has worker queue to handle */
+	if (subsystem_restart_dev(sns_dev) != 0) {
+		dev_err(&pdev->dev, "subsystem_restart_dev failed\n");
+		return -EINVAL;
+	}
+
+	dev_dbg(&pdev->dev, "SLPI restarted\n");
+	return count;
+}
+
 static ssize_t slpi_boot_store(struct kobject *kobj,
 	struct kobj_attribute *attr,
 	const char *buf,
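(A hedged user-space sketch of exercising the new write-only "ssr" attribute added above; the sysfs path is an assumption based on the boot_slpi kobject and is not defined by this patch.)

/* Illustrative only: writing SSR_RESET_CMD ("1") requests an SLPI
 * subsystem restart; any other value is rejected with -EINVAL.
 */
#include <fcntl.h>
#include <unistd.h>

static int trigger_slpi_ssr(void)
{
	int fd = open("/sys/kernel/boot_slpi/ssr", O_WRONLY); /* path assumed */
	int ret = -1;

	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) == 1)
		ret = 0;
	close(fd);
	return ret;
}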
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index a72cb17..ca56462 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -9,11 +9,13 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+#include <asm/dma-iommu.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/io.h>
+#include <linux/iommu.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
@@ -23,6 +25,7 @@
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/of_slimbus.h>
 #include <linux/timer.h>
 #include <linux/msm-sps.h>
@@ -1665,6 +1668,43 @@
 
 static DEVICE_ATTR(debug_mask, 0644, show_mask, set_mask);
 
+static const struct of_device_id ngd_slim_dt_match[] = {
+	{
+		.compatible = "qcom,slim-ngd",
+	},
+	{
+		.compatible = "qcom,iommu-slim-ctrl-cb",
+	},
+	{}
+};
+
+static int ngd_slim_iommu_probe(struct device *dev)
+{
+	struct platform_device *pdev;
+	struct msm_slim_ctrl *ctrl_dev;
+
+	if (unlikely(!dev->parent)) {
+		dev_err(dev, "%s no parent for this device\n", __func__);
+		return -EINVAL;
+	}
+
+	pdev = to_platform_device(dev->parent);
+	if (!pdev) {
+		dev_err(dev, "%s Parent platform device not found\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_dev = platform_get_drvdata(pdev);
+	if (!ctrl_dev) {
+		dev_err(dev, "%s NULL controller device\n", __func__);
+		return -EINVAL;
+
+	}
+	ctrl_dev->iommu_desc.cb_dev = dev;
+	SLIM_INFO(ctrl_dev, "NGD IOMMU initialization complete\n");
+	return 0;
+}
+
 static int ngd_slim_probe(struct platform_device *pdev)
 {
 	struct msm_slim_ctrl *dev;
@@ -1676,6 +1716,10 @@
 	bool			slim_mdm = false;
 	const char		*ext_modem_id = NULL;
 
+	if (of_device_is_compatible(pdev->dev.of_node,
+				    "qcom,iommu-slim-ctrl-cb"))
+		return ngd_slim_iommu_probe(&pdev->dev);
+
 	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						"slimbus_physical");
 	if (!slim_mem) {
@@ -1774,6 +1818,17 @@
 					"qcom,slim-mdm", &ext_modem_id);
 		if (!ret)
 			slim_mdm = true;
+
+		dev->iommu_desc.s1_bypass = of_property_read_bool(
+							pdev->dev.of_node,
+							"qcom,iommu-s1-bypass");
+		ret = of_platform_populate(pdev->dev.of_node, ngd_slim_dt_match,
+					   NULL, &pdev->dev);
+		if (ret) {
+			dev_err(dev->dev, "%s: Failed to of_platform_populate %d\n",
+				__func__, ret);
+			goto err_ctrl_failed;
+		}
 	} else {
 		dev->ctrl.nr = pdev->id;
 	}
@@ -1920,6 +1975,10 @@
 	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
 
 	ngd_slim_enable(dev, false);
+	if (!IS_ERR_OR_NULL(dev->iommu_desc.iommu_map)) {
+		arm_iommu_detach_device(dev->iommu_desc.cb_dev);
+		arm_iommu_release_mapping(dev->iommu_desc.iommu_map);
+	}
 	if (dev->sysfs_created)
 		sysfs_remove_file(&dev->dev->kobj,
 				&dev_attr_debug_mask.attr);
@@ -2015,8 +2074,13 @@
 {
 	int ret = -EBUSY;
 	struct platform_device *pdev = to_platform_device(dev);
-	struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+	struct msm_slim_ctrl *cdev;
 
+	if (of_device_is_compatible(pdev->dev.of_node,
+				    "qcom,iommu-slim-ctrl-cb"))
+		return 0;
+
+	cdev = platform_get_drvdata(pdev);
 	if (!pm_runtime_enabled(dev) ||
 		(!pm_runtime_suspended(dev) &&
 			cdev->state == MSM_CTRL_IDLE)) {
@@ -2052,9 +2116,14 @@
 static int ngd_slim_resume(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
-	struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+	struct msm_slim_ctrl *cdev;
 	int ret = 0;
 
+	if (of_device_is_compatible(pdev->dev.of_node,
+				    "qcom,iommu-slim-ctrl-cb"))
+		return 0;
+
+	cdev = platform_get_drvdata(pdev);
 	/*
 	 * If deferred response was requested for power-off and it failed,
 	 * mark runtime-pm status as active to be consistent
@@ -2091,13 +2160,6 @@
 	)
 };
 
-static const struct of_device_id ngd_slim_dt_match[] = {
-	{
-		.compatible = "qcom,slim-ngd",
-	},
-	{}
-};
-
 static struct platform_driver ngd_slim_driver = {
 	.probe = ngd_slim_probe,
 	.remove = ngd_slim_remove,
diff --git a/drivers/slimbus/slim-msm.c b/drivers/slimbus/slim-msm.c
index ef10e64..d8c5ea8 100644
--- a/drivers/slimbus/slim-msm.c
+++ b/drivers/slimbus/slim-msm.c
@@ -9,17 +9,21 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-#include <linux/pm_runtime.h>
-#include <linux/dma-mapping.h>
+#include <asm/dma-iommu.h>
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/gcd.h>
+#include <linux/msm-sps.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/slimbus/slimbus.h>
-#include <linux/msm-sps.h>
-#include <linux/gcd.h>
 #include "slim-msm.h"
 
 /* Pipe Number Offset Mask */
 #define P_OFF_MASK 0x3FC
+#define MSM_SLIM_VA_START	(0x40000000)
+#define MSM_SLIM_VA_SIZE	(0xC0000000)
 
 int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
 {
@@ -164,17 +168,61 @@
 	ep->sps = NULL;
 }
 
+static int msm_slim_iommu_attach(struct msm_slim_ctrl *ctrl_dev)
+{
+	struct dma_iommu_mapping *iommu_map;
+	dma_addr_t va_start = MSM_SLIM_VA_START;
+	size_t va_size = MSM_SLIM_VA_SIZE;
+	int bypass = 1;
+	struct device *dev;
+
+	if (unlikely(!ctrl_dev))
+		return -EINVAL;
+
+	if (!ctrl_dev->iommu_desc.cb_dev)
+		return 0;
+
+	dev = ctrl_dev->iommu_desc.cb_dev;
+	iommu_map = arm_iommu_create_mapping(&platform_bus_type,
+						va_start, va_size);
+	if (IS_ERR(iommu_map)) {
+		dev_err(dev, "%s iommu_create_mapping failure\n", __func__);
+		return PTR_ERR(iommu_map);
+	}
+
+	if (ctrl_dev->iommu_desc.s1_bypass) {
+		if (iommu_domain_set_attr(iommu_map->domain,
+					DOMAIN_ATTR_S1_BYPASS, &bypass)) {
+			dev_err(dev, "%s Can't bypass s1 translation\n",
+				__func__);
+			arm_iommu_release_mapping(iommu_map);
+			return -EIO;
+		}
+	}
+
+	if (arm_iommu_attach_device(dev, iommu_map)) {
+		dev_err(dev, "%s can't arm_iommu_attach_device\n", __func__);
+		arm_iommu_release_mapping(iommu_map);
+		return -EIO;
+	}
+	ctrl_dev->iommu_desc.iommu_map = iommu_map;
+	SLIM_INFO(ctrl_dev, "NGD IOMMU Attach complete\n");
+	return 0;
+}
+
 int msm_slim_sps_mem_alloc(
 		struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
 {
 	dma_addr_t phys;
+	struct device *dma_dev = dev->iommu_desc.cb_dev ?
+					dev->iommu_desc.cb_dev : dev->dev;
 
 	mem->size = len;
 	mem->min_size = 0;
-	mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
+	mem->base = dma_alloc_coherent(dma_dev, mem->size, &phys, GFP_KERNEL);
 
 	if (!mem->base) {
-		dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
+		dev_err(dma_dev, "dma_alloc_coherent(%d) failed\n", len);
 		return -ENOMEM;
 	}
 
@@ -387,6 +435,10 @@
 	if (pn >= dev->port_nums)
 		return -ENODEV;
 
+	ret = msm_slim_iommu_attach(dev);
+	if (ret)
+		return ret;
+
 	endpoint = &dev->pipes[pn];
 	ret = msm_slim_init_endpoint(dev, endpoint);
 	dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
@@ -435,9 +487,37 @@
 	return SLIM_P_INPROGRESS;
 }
 
+static int msm_slim_iommu_map(struct msm_slim_ctrl *dev, phys_addr_t iobuf,
+			      u32 len)
+{
+	int ret;
+
+	if (!dev->iommu_desc.cb_dev)
+		return 0;
+
+	ret = iommu_map(dev->iommu_desc.iommu_map->domain,
+			rounddown(iobuf, PAGE_SIZE),
+			rounddown(iobuf, PAGE_SIZE),
+			roundup((len + (iobuf - rounddown(iobuf, PAGE_SIZE))),
+				PAGE_SIZE), IOMMU_READ | IOMMU_WRITE);
+	return ret;
+}
+
+static void msm_slim_iommu_unmap(struct msm_slim_ctrl *dev, phys_addr_t iobuf,
+				u32 len)
+{
+	if (!dev->iommu_desc.cb_dev)
+		return;
+
+	iommu_unmap(dev->iommu_desc.iommu_map->domain,
+		    rounddown(iobuf, PAGE_SIZE),
+		    roundup((len + (iobuf - rounddown(iobuf, PAGE_SIZE))),
+			    PAGE_SIZE));
+}
+
 static void msm_slim_port_cb(struct sps_event_notify *ev)
 {
-
+	struct msm_slim_ctrl *dev = ev->user;
 	struct completion *comp = ev->data.transfer.user;
 	struct sps_iovec *iovec = &ev->data.transfer.iovec;
 
@@ -450,6 +530,8 @@
 		pr_err("%s: ERR event %d\n",
 					__func__, ev->event_id);
 	}
+	if (dev)
+		msm_slim_iommu_unmap(dev, iovec->addr, iovec->size);
 	if (comp)
 		complete(comp);
 }
@@ -467,14 +549,19 @@
 	if (!dev->pipes[pn].connected)
 		return -ENOTCONN;
 
+	ret = msm_slim_iommu_map(dev, iobuf, len);
+	if (ret)
+		return ret;
+
 	sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
 	sreg.mode = SPS_TRIGGER_WAIT;
 	sreg.xfer_done = NULL;
 	sreg.callback = msm_slim_port_cb;
-	sreg.user = NULL;
+	sreg.user = dev;
 	ret = sps_register_event(dev->pipes[pn].sps, &sreg);
 	if (ret) {
 		dev_dbg(dev->dev, "sps register event error:%x\n", ret);
+		msm_slim_iommu_unmap(dev, iobuf, len);
 		return ret;
 	}
 	ret = sps_transfer_one(dev->pipes[pn].sps, iobuf, len, comp,
@@ -490,6 +577,8 @@
 				PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver));
 		/* Make sure that port registers are updated before returning */
 		mb();
+	} else {
+		msm_slim_iommu_unmap(dev, iobuf, len);
 	}
 
 	return ret;
@@ -1102,6 +1191,12 @@
 	}
 
 init_msgq:
+	ret = msm_slim_iommu_attach(dev);
+	if (ret) {
+		sps_deregister_bam_device(bam_handle);
+		return ret;
+	}
+
 	ret = msm_slim_init_rx_msgq(dev, pipe_reg);
 	if (ret)
 		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
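(A minimal sketch of the page-alignment arithmetic used by msm_slim_iommu_map()/msm_slim_iommu_unmap() above; the helper name is illustrative.)

/* Illustrative only: the 1:1 (VA == PA) mapping covers every full page
 * that intersects [iobuf, iobuf + len).
 */
static inline void example_iommu_window(phys_addr_t iobuf, u32 len,
					phys_addr_t *start, size_t *size)
{
	*start = rounddown(iobuf, PAGE_SIZE);
	*size  = roundup(len + (iobuf - *start), PAGE_SIZE);
}

For example, with 4 KB pages, iobuf = 0x1010 and len = 0x2000 give a window of 0x1000..0x4000, i.e. the three pages touched by the buffer.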
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
index ee0f625..5859c5f 100644
--- a/drivers/slimbus/slim-msm.h
+++ b/drivers/slimbus/slim-msm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -263,10 +263,17 @@
 	bool		in_progress;
 };
 
+struct msm_slim_iommu {
+	struct device			*cb_dev;
+	struct dma_iommu_mapping	*iommu_map;
+	bool				s1_bypass;
+};
+
 struct msm_slim_ctrl {
 	struct slim_controller  ctrl;
 	struct slim_framer	framer;
 	struct device		*dev;
+	struct msm_slim_iommu	iommu_desc;
 	void __iomem		*base;
 	struct resource		*slew_mem;
 	struct resource		*bam_mem;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 8317c09..e6c2aa3 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -506,6 +506,7 @@
 
 config ICNSS
         tristate "Platform driver for Q6 integrated connectivity"
+        select CNSS_UTILS
         ---help---
           This module adds support for Q6 integrated WLAN connectivity
           subsystem. This module is responsible for communicating WLAN on/off
@@ -545,6 +546,38 @@
 	  used by audio driver to configure QDSP6v2's
 	  ASM, ADM and AFE.
 
+config MSM_QDSP6_SSR
+	bool "Audio QDSP6 SSR support"
+	depends on MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK
+	help
+	  Enable Subsystem Restart. Reset audio
+	  clients when the ADSP subsystem is
+	  restarted. Subsystem Restart for audio
+	  is only used for processes on the ADSP
+	  and signals audio drivers through APR.
+
+
+config MSM_QDSP6_PDR
+	bool "Audio QDSP6 PDR support"
+	depends on MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK
+	help
+	  Enable Protection Domain Restart. Reset
+	  audio clients when a process on the ADSP
+	  is restarted. PDR for audio is only used
+	  for processes on the ADSP and signals
+	  audio drivers through APR.
+
+config MSM_QDSP6_NOTIFIER
+	bool "Audio QDSP6 notifier support"
+	depends on MSM_QDSP6_SSR || MSM_QDSP6_PDR
+	help
+	  Enable notifier which decides whether
+	  to use SSR or PDR and notifies all
+	  audio clients of the event. Both SSR
+	  and PDR are recovery methods when
+	  there is a crash on ADSP. Audio drivers
+	  are contacted by ADSP through APR.
+
 config MSM_ADSP_LOADER
 	tristate "ADSP loader support"
 	select SND_SOC_MSM_APRV2_INTF
@@ -701,3 +734,5 @@
 	  This forces a watchdog bite when the device restarts due to a
 	  kernel panic. On certain MSM SoCs, this provides us
 	  additional debugging information.
+
+source "drivers/soc/qcom/memshare/Kconfig"
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index dc303e2..64fb7a0 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -56,6 +56,7 @@
 obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o sysmon-qmi.o
 obj-$(CONFIG_ICNSS) += icnss.o wlan_firmware_service_v01.o icnss_utils.o
 
+obj-$(CONFIG_MEM_SHARE_QMI_SERVICE)		+= memshare/
 obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
 obj-$(CONFIG_MSM_PIL_MSS_QDSP6V5) += pil-q6v5.o pil-msa.o pil-q6v5-mss.o
 obj-$(CONFIG_MSM_PIL)   +=      peripheral-loader.o
diff --git a/drivers/soc/qcom/early_random.c b/drivers/soc/qcom/early_random.c
index 5156bc1..06601dd 100644
--- a/drivers/soc/qcom/early_random.c
+++ b/drivers/soc/qcom/early_random.c
@@ -56,9 +56,18 @@
 					&desc);
 
 	if (!ret) {
+		u64 bytes_received = desc.ret[0];
+
+		if (bytes_received != SZ_512)
+			pr_warn("Did not receive the expected number of bytes from PRNG: %llu\n",
+				bytes_received);
+
 		dmac_inv_range(random_buffer, random_buffer +
 						RANDOM_BUFFER_SIZE);
-		add_hwgenerator_randomness(random_buffer, SZ_512, SZ_512 << 3);
+		bytes_received = (bytes_received <= RANDOM_BUFFER_SIZE) ?
+					bytes_received : RANDOM_BUFFER_SIZE;
+		add_hwgenerator_randomness(random_buffer, bytes_received,
+					   bytes_received << 3);
 	}
 }
 
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index fd4c604..d31bf8d 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -1930,6 +1930,7 @@
 			kfree(flcid);
 		}
 
+		ctx->transport_ptr = xprt_ctx;
 		list_add_tail(&ctx->port_list_node, &xprt_ctx->channels);
 
 		GLINK_INFO_PERF_CH_XPRT(ctx, xprt_ctx,
@@ -2616,7 +2617,6 @@
 	ctx->local_xprt_req = best_id;
 	ctx->no_migrate = cfg->transport &&
 				!(cfg->options & GLINK_OPT_INITIAL_XPORT);
-	ctx->transport_ptr = transport_ptr;
 	ctx->local_open_state = GLINK_CHANNEL_OPENING;
 	GLINK_INFO_PERF_CH(ctx,
 		"%s: local:GLINK_CHANNEL_CLOSED->GLINK_CHANNEL_OPENING\n",
@@ -2862,7 +2862,7 @@
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
 	uint32_t riid;
 	int ret = 0;
-	struct glink_core_tx_pkt *tx_info;
+	struct glink_core_tx_pkt *tx_info = NULL;
 	size_t intent_size;
 	bool is_atomic =
 		tx_flags & (GLINK_TX_SINGLE_THREADED | GLINK_TX_ATOMIC);
@@ -2877,6 +2877,13 @@
 		return ret;
 
 	rwref_read_get_atomic(&ctx->ch_state_lhb2, is_atomic);
+	tx_info = kzalloc(sizeof(struct glink_core_tx_pkt),
+				is_atomic ? GFP_ATOMIC : GFP_KERNEL);
+	if (!tx_info) {
+		GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
+		ret = -ENOMEM;
+		goto glink_tx_common_err;
+	}
 	if (!(vbuf_provider || pbuf_provider)) {
 		ret = -EINVAL;
 		goto glink_tx_common_err;
@@ -2996,14 +3003,7 @@
 	GLINK_INFO_PERF_CH(ctx, "%s: R[%u]:%zu data[%p], size[%zu]. TID %u\n",
 			__func__, riid, intent_size,
 			data ? data : iovec, size, current->pid);
-	tx_info = kzalloc(sizeof(struct glink_core_tx_pkt),
-				is_atomic ? GFP_ATOMIC : GFP_KERNEL);
-	if (!tx_info) {
-		GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
-		ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
-		ret = -ENOMEM;
-		goto glink_tx_common_err;
-	}
+
 	rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);
 	INIT_LIST_HEAD(&tx_info->list_done);
 	INIT_LIST_HEAD(&tx_info->list_node);
@@ -3028,10 +3028,15 @@
 	else
 		xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
 
+	rwref_read_put(&ctx->ch_state_lhb2);
+	glink_put_ch_ctx(ctx, false);
+	return ret;
+
 glink_tx_common_err:
 	rwref_read_put(&ctx->ch_state_lhb2);
 glink_tx_common_err_2:
 	glink_put_ch_ctx(ctx, false);
+	kfree(tx_info);
 	return ret;
 }
 
@@ -4095,7 +4100,7 @@
 	xprt_ptr = kzalloc(sizeof(*xprt_ptr), GFP_KERNEL);
 	if (!xprt_ptr)
 		return ERR_PTR(-ENOMEM);
-	if_ptr = kmalloc(sizeof(*if_ptr), GFP_KERNEL);
+	if_ptr = kzalloc(sizeof(*if_ptr), GFP_KERNEL);
 	if (!if_ptr) {
 		kfree(xprt_ptr);
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 0ce1bda..94dffa5 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -2388,7 +2388,7 @@
 	einfo->tx_fifo = smem_alloc(SMEM_GLINK_NATIVE_XPRT_FIFO_0,
 							einfo->tx_fifo_size,
 							einfo->remote_proc_id,
-							SMEM_ITEM_CACHED_FLAG);
+							0);
 	if (!einfo->tx_fifo) {
 		pr_err("%s: smem alloc of tx fifo failed\n", __func__);
 		rc = -ENOMEM;
diff --git a/drivers/soc/qcom/glink_spi_xprt.c b/drivers/soc/qcom/glink_spi_xprt.c
index e02c07a..c44aa93 100644
--- a/drivers/soc/qcom/glink_spi_xprt.c
+++ b/drivers/soc/qcom/glink_spi_xprt.c
@@ -111,7 +111,7 @@
  * @xprt_cfg:			The transport configuration for the glink core
  *				assocaited with this edge.
  * @subsys_name:		Name of the remote subsystem in the edge.
- * @spi_dev:			Pointer to the connectingSPI Device.
+ * @spi_ops:			Function pointers for ops provided by spi.
  * @fifo_size:			Size of the FIFO at the remote end.
  * @tx_fifo_start:		Base Address of the TX FIFO.
  * @tx_fifo_end:		End Address of the TX FIFO.
@@ -147,7 +147,7 @@
 	struct glink_transport_if xprt_if;
 	struct glink_core_transport_cfg xprt_cfg;
 	char subsys_name[GLINK_NAME_SIZE];
-	struct spi_device *spi_dev;
+	struct wcd_spi_ops spi_ops;
 
 	uint32_t fifo_size;
 	uint32_t tx_fifo_start;
@@ -286,11 +286,14 @@
 {
 	struct wcd_spi_msg spi_msg;
 
+	if (unlikely(!einfo->spi_ops.read_dev))
+		return -EINVAL;
+
 	memset(&spi_msg, 0, sizeof(spi_msg));
 	spi_msg.data = dst;
 	spi_msg.remote_addr = (uint32_t)(size_t)src;
 	spi_msg.len = (size_t)size;
-	return wcd_spi_data_read(einfo->spi_dev, &spi_msg);
+	return einfo->spi_ops.read_dev(einfo->spi_ops.spi_dev, &spi_msg);
 }
 
 /**
@@ -310,11 +313,14 @@
 {
 	struct wcd_spi_msg spi_msg;
 
+	if (unlikely(!einfo->spi_ops.write_dev))
+		return -EINVAL;
+
 	memset(&spi_msg, 0, sizeof(spi_msg));
 	spi_msg.data = src;
 	spi_msg.remote_addr = (uint32_t)(size_t)dst;
 	spi_msg.len = (size_t)size;
-	return wcd_spi_data_write(einfo->spi_dev, &spi_msg);
+	return einfo->spi_ops.write_dev(einfo->spi_ops.spi_dev, &spi_msg);
 }
 
 /**
@@ -1796,27 +1802,20 @@
 {
 	struct edge_info *einfo = dev_get_drvdata(dev);
 	struct glink_cmpnt *cmpnt = &einfo->cmpnt;
-	struct device *sdev;
-	struct spi_device *spi_dev;
+	int rc = -EINVAL;
 
 	switch (event) {
 	case WDSP_EVENT_PRE_BOOTUP:
 		if (cmpnt && cmpnt->master_dev &&
 		    cmpnt->master_ops &&
-		    cmpnt->master_ops->get_dev_for_cmpnt)
-			sdev = cmpnt->master_ops->get_dev_for_cmpnt(
-				cmpnt->master_dev, WDSP_CMPNT_TRANSPORT);
-		else
-			sdev = NULL;
+		    cmpnt->master_ops->get_devops_for_cmpnt)
+			rc = cmpnt->master_ops->get_devops_for_cmpnt(
+				cmpnt->master_dev, WDSP_CMPNT_TRANSPORT,
+				&einfo->spi_ops);
 
-		if (!sdev) {
+		if (rc)
 			dev_err(dev, "%s: Failed to get transport device\n",
 				__func__);
-			break;
-		}
-
-		spi_dev = to_spi_device(sdev);
-		einfo->spi_dev = spi_dev;
 		break;
 	case WDSP_EVENT_POST_BOOTUP:
 		einfo->in_ssr = false;
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index b5bb719..28f89bf 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -168,6 +168,76 @@
 	ICNSS_DRIVER_EVENT_MAX,
 };
 
+enum icnss_msa_perm {
+	ICNSS_MSA_PERM_HLOS_ALL = 0,
+	ICNSS_MSA_PERM_WLAN_HW_RW = 1,
+	ICNSS_MSA_PERM_DUMP_COLLECT = 2,
+	ICNSS_MSA_PERM_MAX,
+};
+
+#define ICNSS_MAX_VMIDS     4
+
+struct icnss_mem_region_info {
+	uint64_t reg_addr;
+	uint32_t size;
+	uint8_t secure_flag;
+	enum icnss_msa_perm perm;
+};
+
+struct icnss_msa_perm_list_t {
+	int vmids[ICNSS_MAX_VMIDS];
+	int perms[ICNSS_MAX_VMIDS];
+	int nelems;
+};
+
+struct icnss_msa_perm_list_t msa_perm_secure_list[ICNSS_MSA_PERM_MAX] = {
+	[ICNSS_MSA_PERM_HLOS_ALL] = {
+		.vmids = {VMID_HLOS},
+		.perms = {PERM_READ | PERM_WRITE | PERM_EXEC},
+		.nelems = 1,
+	},
+
+	[ICNSS_MSA_PERM_WLAN_HW_RW] = {
+		.vmids = {VMID_MSS_MSA, VMID_WLAN},
+		.perms = {PERM_READ | PERM_WRITE,
+			PERM_READ | PERM_WRITE},
+		.nelems = 2,
+	},
+
+	[ICNSS_MSA_PERM_DUMP_COLLECT] = {
+		.vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_HLOS},
+		.perms = {PERM_READ | PERM_WRITE,
+			PERM_READ | PERM_WRITE,
+			PERM_READ},
+		.nelems = 3,
+	},
+};
+
+struct icnss_msa_perm_list_t msa_perm_list[ICNSS_MSA_PERM_MAX] = {
+	[ICNSS_MSA_PERM_HLOS_ALL] = {
+		.vmids = {VMID_HLOS},
+		.perms = {PERM_READ | PERM_WRITE | PERM_EXEC},
+		.nelems = 1,
+	},
+
+	[ICNSS_MSA_PERM_WLAN_HW_RW] = {
+		.vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_WLAN_CE},
+		.perms = {PERM_READ | PERM_WRITE,
+			PERM_READ | PERM_WRITE,
+			PERM_READ | PERM_WRITE},
+		.nelems = 3,
+	},
+
+	[ICNSS_MSA_PERM_DUMP_COLLECT] = {
+		.vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_WLAN_CE, VMID_HLOS},
+		.perms = {PERM_READ | PERM_WRITE,
+			PERM_READ | PERM_WRITE,
+			PERM_READ | PERM_WRITE,
+			PERM_READ},
+		.nelems = 4,
+	},
+};
+
 struct icnss_event_pd_service_down_data {
 	bool crashed;
 	bool fw_rejuvenate;
@@ -375,6 +445,84 @@
 static void icnss_ignore_qmi_timeout(bool ignore) { }
 #endif
 
+static int icnss_assign_msa_perm(struct icnss_mem_region_info
+				 *mem_region, enum icnss_msa_perm new_perm)
+{
+	int ret = 0;
+	phys_addr_t addr;
+	u32 size;
+	u32 i = 0;
+	u32 source_vmids[ICNSS_MAX_VMIDS];
+	u32 source_nelems;
+	u32 dest_vmids[ICNSS_MAX_VMIDS];
+	u32 dest_perms[ICNSS_MAX_VMIDS];
+	u32 dest_nelems;
+	enum icnss_msa_perm cur_perm = mem_region->perm;
+	struct icnss_msa_perm_list_t *new_perm_list, *old_perm_list;
+
+	addr = mem_region->reg_addr;
+	size = mem_region->size;
+
+	if (mem_region->secure_flag) {
+		new_perm_list = &msa_perm_secure_list[new_perm];
+		old_perm_list = &msa_perm_secure_list[cur_perm];
+	} else {
+		new_perm_list = &msa_perm_list[new_perm];
+		old_perm_list = &msa_perm_list[cur_perm];
+	}
+
+	source_nelems = old_perm_list->nelems;
+	dest_nelems = new_perm_list->nelems;
+
+	for (i = 0; i < source_nelems; ++i)
+		source_vmids[i] = old_perm_list->vmids[i];
+
+	for (i = 0; i < dest_nelems; ++i) {
+		dest_vmids[i] = new_perm_list->vmids[i];
+		dest_perms[i] = new_perm_list->perms[i];
+	}
+
+	ret = hyp_assign_phys(addr, size, source_vmids, source_nelems,
+			      dest_vmids, dest_perms, dest_nelems);
+	if (ret) {
+		icnss_pr_err("Hypervisor map failed for PA=%pa size=%u err=%d\n",
+			     &addr, size, ret);
+		goto out;
+	}
+
+	icnss_pr_dbg("Hypervisor map for source_nelems=%d, source[0]=%x, source[1]=%x, source[2]=%x, "
+		     "source[3]=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x, dest[3]=%x\n",
+		     source_nelems, source_vmids[0], source_vmids[1],
+		     source_vmids[2], source_vmids[3], dest_nelems,
+		     dest_vmids[0], dest_vmids[1], dest_vmids[2],
+		     dest_vmids[3]);
+out:
+	return ret;
+}
+
+static int icnss_assign_msa_perm_all(struct icnss_priv *priv,
+				     enum icnss_msa_perm new_perm)
+{
+	int ret;
+	int i;
+	enum icnss_msa_perm old_perm;
+
+	for (i = 0; i < priv->nr_mem_region; i++) {
+		old_perm = priv->mem_region[i].perm;
+		ret = icnss_assign_msa_perm(&priv->mem_region[i], new_perm);
+		if (ret)
+			goto err_unmap;
+		priv->mem_region[i].perm = new_perm;
+	}
+	return 0;
+
+err_unmap:
+	for (i--; i >= 0; i--)
+		icnss_assign_msa_perm(&priv->mem_region[i], old_perm);
+	return ret;
+}
+
 static void icnss_pm_stay_awake(struct icnss_priv *priv)
 {
 	if (atomic_inc_return(&priv->pm_count) != 1)
@@ -980,119 +1128,6 @@
 }
 EXPORT_SYMBOL(icnss_power_off);
 
-static int icnss_map_msa_permissions(struct icnss_mem_region_info *mem_region)
-{
-	int ret = 0;
-	phys_addr_t addr;
-	u32 size;
-	u32 source_vmlist[1] = {VMID_HLOS};
-	int dest_vmids[3] = {VMID_MSS_MSA, VMID_WLAN, 0};
-	int dest_perms[3] = {PERM_READ|PERM_WRITE,
-			     PERM_READ|PERM_WRITE,
-			     PERM_READ|PERM_WRITE};
-	int source_nelems = sizeof(source_vmlist)/sizeof(u32);
-	int dest_nelems = 0;
-
-	addr = mem_region->reg_addr;
-	size = mem_region->size;
-
-	if (!mem_region->secure_flag) {
-		dest_vmids[2] = VMID_WLAN_CE;
-		dest_nelems = 3;
-	} else {
-		dest_vmids[2] = 0;
-		dest_nelems = 2;
-	}
-	ret = hyp_assign_phys(addr, size, source_vmlist, source_nelems,
-			      dest_vmids, dest_perms, dest_nelems);
-	if (ret) {
-		icnss_pr_err("Hyperviser map failed for PA=%pa size=%u err=%d\n",
-			     &addr, size, ret);
-		goto out;
-	}
-
-	icnss_pr_dbg("Hypervisor map for source=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x\n",
-		     source_vmlist[0], dest_nelems, dest_vmids[0],
-		     dest_vmids[1], dest_vmids[2]);
-out:
-	return ret;
-
-}
-
-static int icnss_unmap_msa_permissions(struct icnss_mem_region_info *mem_region)
-{
-	int ret = 0;
-	phys_addr_t addr;
-	u32 size;
-	u32 dest_vmids[1] = {VMID_HLOS};
-	int source_vmlist[3] = {VMID_MSS_MSA, VMID_WLAN, 0};
-	int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC};
-	int source_nelems = 0;
-	int dest_nelems = sizeof(dest_vmids)/sizeof(u32);
-
-	addr = mem_region->reg_addr;
-	size = mem_region->size;
-
-	if (!mem_region->secure_flag) {
-		source_vmlist[2] = VMID_WLAN_CE;
-		source_nelems = 3;
-	} else {
-		source_vmlist[2] = 0;
-		source_nelems = 2;
-	}
-
-	ret = hyp_assign_phys(addr, size, source_vmlist, source_nelems,
-			      dest_vmids, dest_perms, dest_nelems);
-	if (ret) {
-		icnss_pr_err("Hyperviser unmap failed for PA=%pa size=%u err=%d\n",
-			     &addr, size, ret);
-		goto out;
-	}
-	icnss_pr_dbg("Hypervisor unmap for source_nelems=%d, source[0]=%x, source[1]=%x, source[2]=%x, dest=%x\n",
-		     source_nelems, source_vmlist[0], source_vmlist[1],
-		     source_vmlist[2], dest_vmids[0]);
-out:
-	return ret;
-}
-
-static int icnss_setup_msa_permissions(struct icnss_priv *priv)
-{
-	int ret;
-	int i;
-
-	if (test_bit(ICNSS_MSA0_ASSIGNED, &priv->state))
-		return 0;
-
-	for (i = 0; i < priv->nr_mem_region; i++) {
-
-		ret = icnss_map_msa_permissions(&priv->mem_region[i]);
-		if (ret)
-			goto err_unmap;
-	}
-
-	set_bit(ICNSS_MSA0_ASSIGNED, &priv->state);
-
-	return 0;
-
-err_unmap:
-	for (i--; i >= 0; i--)
-		icnss_unmap_msa_permissions(&priv->mem_region[i]);
-	return ret;
-}
-
-static void icnss_remove_msa_permissions(struct icnss_priv *priv)
-{
-	int i;
-
-	if (!test_bit(ICNSS_MSA0_ASSIGNED, &priv->state))
-		return;
-
-	for (i = 0; i < priv->nr_mem_region; i++)
-		icnss_unmap_msa_permissions(&priv->mem_region[i]);
-
-	clear_bit(ICNSS_MSA0_ASSIGNED, &priv->state);
-}
-
 static int wlfw_msa_mem_info_send_sync_msg(void)
 {
 	int ret;
@@ -1898,9 +1933,12 @@
 	if (ret < 0)
 		goto err_power_on;
 
-	ret = icnss_setup_msa_permissions(penv);
-	if (ret < 0)
-		goto err_power_on;
+	if (!test_bit(ICNSS_MSA0_ASSIGNED, &penv->state)) {
+		ret = icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_WLAN_HW_RW);
+		if (ret < 0)
+			goto err_power_on;
+		set_bit(ICNSS_MSA0_ASSIGNED, &penv->state);
+	}
 
 	ret = wlfw_msa_ready_send_sync_msg();
 	if (ret < 0)
@@ -1918,7 +1956,7 @@
 	return ret;
 
 err_setup_msa:
-	icnss_remove_msa_permissions(penv);
+	icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_HLOS_ALL);
 err_power_on:
 	icnss_hw_power_off(penv);
 fail:
@@ -2333,14 +2371,22 @@
 	struct icnss_priv *priv = container_of(nb, struct icnss_priv,
 					       modem_ssr_nb);
 	struct icnss_uevent_fw_down_data fw_down_data;
+	int ret = 0;
 
 	icnss_pr_vdbg("Modem-Notify: event %lu\n", code);
 
-	if (code == SUBSYS_AFTER_SHUTDOWN &&
-		notif->crashed == CRASH_STATUS_ERR_FATAL) {
-		icnss_remove_msa_permissions(priv);
-		icnss_pr_info("Collecting msa0 segment dump\n");
-		icnss_msa0_ramdump(priv);
+	if (code == SUBSYS_AFTER_SHUTDOWN) {
+		ret = icnss_assign_msa_perm_all(priv,
+						ICNSS_MSA_PERM_DUMP_COLLECT);
+		if (!ret) {
+			icnss_pr_info("Collecting msa0 segment dump\n");
+			icnss_msa0_ramdump(priv);
+			icnss_assign_msa_perm_all(priv,
+						  ICNSS_MSA_PERM_WLAN_HW_RW);
+		} else {
+			icnss_pr_err("Not able to collect msa0 segment dump, Apps permissions not assigned %d\n",
+				     ret);
+		}
 		return NOTIFY_OK;
 	}
 
@@ -4307,7 +4353,8 @@
 
 	icnss_hw_power_off(penv);
 
-	icnss_remove_msa_permissions(penv);
+	icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_HLOS_ALL);
+	clear_bit(ICNSS_MSA0_ASSIGNED, &penv->state);
 
 	dev_set_drvdata(&pdev->dev, NULL);
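(A worked expansion of the table-driven reassignment above: moving a non-secure region from ICNSS_MSA_PERM_HLOS_ALL to ICNSS_MSA_PERM_WLAN_HW_RW ends up invoking hyp_assign_phys() with the following arguments, taken from msa_perm_list; the snippet is illustrative, not additional driver code.)

/* Illustrative only: non-secure HLOS_ALL -> WLAN_HW_RW transition */
u32 src_vmids[] = { VMID_HLOS };
u32 dst_vmids[] = { VMID_MSS_MSA, VMID_WLAN, VMID_WLAN_CE };
u32 dst_perms[] = { PERM_READ | PERM_WRITE,
		    PERM_READ | PERM_WRITE,
		    PERM_READ | PERM_WRITE };

ret = hyp_assign_phys(mem_region->reg_addr, mem_region->size,
		      src_vmids, 1, dst_vmids, dst_perms, 3);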
 
diff --git a/drivers/soc/qcom/memshare/Kconfig b/drivers/soc/qcom/memshare/Kconfig
new file mode 100644
index 0000000..7eb1415
--- /dev/null
+++ b/drivers/soc/qcom/memshare/Kconfig
@@ -0,0 +1,9 @@
+config MEM_SHARE_QMI_SERVICE
+	depends on MSM_QMI_INTERFACE
+	bool "Shared Heap for external processors"
+	help
+	  The Memory Share kernel QMI (Qualcomm Messaging Interface)
+	  service receives heap alloc/free requests from the modem
+	  subsystem, services them from the application processor
+	  subsystem, and sends a response back to the client with the
+	  resulting handle/address.
diff --git a/drivers/soc/qcom/memshare/Makefile b/drivers/soc/qcom/memshare/Makefile
new file mode 100644
index 0000000..cf49fbc
--- /dev/null
+++ b/drivers/soc/qcom/memshare/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MEM_SHARE_QMI_SERVICE) += heap_mem_ext_v01.o msm_memshare.o
\ No newline at end of file
diff --git a/drivers/soc/qcom/memshare/heap_mem_ext_v01.c b/drivers/soc/qcom/memshare/heap_mem_ext_v01.c
new file mode 100644
index 0000000..afe9a87
--- /dev/null
+++ b/drivers/soc/qcom/memshare/heap_mem_ext_v01.c
@@ -0,0 +1,472 @@
+/* Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/qmi_encdec.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include "heap_mem_ext_v01.h"
+
+struct elem_info mem_alloc_req_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct mem_alloc_req_msg_v01,
+					num_bytes),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_alloc_req_msg_v01,
+					block_alignment_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_alloc_req_msg_v01,
+					block_alignment),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_alloc_resp_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_2_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct mem_alloc_resp_msg_v01,
+					resp),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_alloc_resp_msg_v01,
+					handle_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_alloc_resp_msg_v01,
+					handle),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct mem_alloc_resp_msg_v01,
+					num_bytes_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct mem_alloc_resp_msg_v01,
+					num_bytes),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_free_req_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct mem_free_req_msg_v01,
+					handle),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_free_resp_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_2_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct mem_free_resp_msg_v01,
+					resp),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info dhms_mem_alloc_addr_info_type_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+		.offset         = offsetof(struct
+					dhms_mem_alloc_addr_info_type_v01,
+					phy_addr),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+		.offset         = offsetof(struct
+					dhms_mem_alloc_addr_info_type_v01,
+					num_bytes),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_alloc_generic_req_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct mem_alloc_generic_req_msg_v01,
+					num_bytes),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct mem_alloc_generic_req_msg_v01,
+					client_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct mem_alloc_generic_req_msg_v01,
+					proc_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x04,
+		.offset         = offsetof(struct mem_alloc_generic_req_msg_v01,
+					sequence_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_alloc_generic_req_msg_v01,
+					alloc_contiguous_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_alloc_generic_req_msg_v01,
+					alloc_contiguous),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct mem_alloc_generic_req_msg_v01,
+					block_alignment_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct mem_alloc_generic_req_msg_v01,
+					block_alignment),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_alloc_generic_resp_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct	qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+						mem_alloc_generic_resp_msg_v01,
+					resp),
+		.ei_array		= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+						mem_alloc_generic_resp_msg_v01,
+					sequence_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+						mem_alloc_generic_resp_msg_v01,
+					sequence_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+						mem_alloc_generic_resp_msg_v01,
+					dhms_mem_alloc_addr_info_valid),
+	},
+	{
+		.data_type	    = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+						mem_alloc_generic_resp_msg_v01,
+					dhms_mem_alloc_addr_info_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = MAX_ARR_CNT_V01,
+		.elem_size      = sizeof(struct
+					dhms_mem_alloc_addr_info_type_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+						mem_alloc_generic_resp_msg_v01,
+					dhms_mem_alloc_addr_info),
+		.ei_array       = dhms_mem_alloc_addr_info_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_free_generic_req_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct mem_free_generic_req_msg_v01,
+					dhms_mem_alloc_addr_info_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = MAX_ARR_CNT_V01,
+		.elem_size      = sizeof(struct
+					dhms_mem_alloc_addr_info_type_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct mem_free_generic_req_msg_v01,
+					dhms_mem_alloc_addr_info),
+		.ei_array		= dhms_mem_alloc_addr_info_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_free_generic_req_msg_v01,
+					client_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_free_generic_req_msg_v01,
+					client_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct mem_free_generic_req_msg_v01,
+					proc_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct mem_free_generic_req_msg_v01,
+					proc_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_free_generic_resp_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct	qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+						mem_free_generic_resp_msg_v01,
+					resp),
+		.ei_array		= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_query_size_req_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct mem_query_size_req_msg_v01,
+					client_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_query_size_req_msg_v01,
+					proc_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_query_size_req_msg_v01,
+					proc_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info mem_query_size_resp_msg_data_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct	qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+						mem_query_size_rsp_msg_v01,
+					resp),
+		.ei_array		= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_query_size_rsp_msg_v01,
+					size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct mem_query_size_rsp_msg_v01,
+					size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
diff --git a/drivers/soc/qcom/memshare/heap_mem_ext_v01.h b/drivers/soc/qcom/memshare/heap_mem_ext_v01.h
new file mode 100644
index 0000000..cfe3e49
--- /dev/null
+++ b/drivers/soc/qcom/memshare/heap_mem_ext_v01.h
@@ -0,0 +1,356 @@
+/* Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef HEAP_MEM_EXT_SERVICE_01_H
+#define HEAP_MEM_EXT_SERVICE_01_H
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#define MEM_ALLOC_REQ_MAX_MSG_LEN_V01 255
+#define MEM_FREE_REQ_MAX_MSG_LEN_V01 255
+#define MAX_ARR_CNT_V01 64
+
+struct dhms_mem_alloc_addr_info_type_v01 {
+	uint64_t phy_addr;
+	uint32_t num_bytes;
+};
+
+enum dhms_mem_proc_id_v01 {
+	/* To force a 32 bit signed enum.  Do not change or use */
+	DHMS_MEM_PROC_ID_MIN_ENUM_VAL_V01 = -2147483647,
+	/* Request from MPSS processor */
+	DHMS_MEM_PROC_MPSS_V01 = 0,
+	/* Request from ADSP processor */
+	DHMS_MEM_PROC_ADSP_V01 = 1,
+	/* Request from WCNSS processor */
+	DHMS_MEM_PROC_WCNSS_V01 = 2,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	DHMS_MEM_PROC_ID_MAX_ENUM_VAL_V01 = 2147483647
+};
+
+enum dhms_mem_client_id_v01 {
+	/*To force a 32 bit signed enum.  Do not change or use*/
+	DHMS_MEM_CLIENT_ID_MIN_ENUM_VAL_V01 = -2147483647,
+	/*  Request from GPS Client    */
+	DHMS_MEM_CLIENT_GPS_V01 = 0,
+	/* Invalid Client */
+	DHMS_MEM_CLIENT_INVALID = 1000,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	DHMS_MEM_CLIENT_ID_MAX_ENUM_VAL_V01 = 2147483647
+};
+
+enum dhms_mem_block_align_enum_v01 {
+	/* To force a 32 bit signed enum.  Do not change or use
+	 */
+	DHMS_MEM_BLOCK_ALIGN_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* Align allocated memory by 2 bytes  */
+	DHMS_MEM_BLOCK_ALIGN_2_V01 = 0,
+	/* Align allocated memory by 4 bytes  */
+	DHMS_MEM_BLOCK_ALIGN_4_V01 = 1,
+	/**<  Align allocated memory by 8 bytes */
+	DHMS_MEM_BLOCK_ALIGN_8_V01 = 2,
+	/**<  Align allocated memory by 16 bytes */
+	DHMS_MEM_BLOCK_ALIGN_16_V01 = 3,
+	/**<  Align allocated memory by 32 bytes */
+	DHMS_MEM_BLOCK_ALIGN_32_V01 = 4,
+	/**<  Align allocated memory by 64 bytes */
+	DHMS_MEM_BLOCK_ALIGN_64_V01 = 5,
+	/**<  Align allocated memory by 128 bytes */
+	DHMS_MEM_BLOCK_ALIGN_128_V01 = 6,
+	/**<  Align allocated memory by 256 bytes */
+	DHMS_MEM_BLOCK_ALIGN_256_V01 = 7,
+	/**<  Align allocated memory by 512 bytes */
+	DHMS_MEM_BLOCK_ALIGN_512_V01 = 8,
+	/**<  Align allocated memory by 1024 bytes */
+	DHMS_MEM_BLOCK_ALIGN_1K_V01 = 9,
+	/**<  Align allocated memory by 2048 bytes */
+	DHMS_MEM_BLOCK_ALIGN_2K_V01 = 10,
+	/**<  Align allocated memory by 4096 bytes */
+	DHMS_MEM_BLOCK_ALIGN_4K_V01 = 11,
+	DHMS_MEM_BLOCK_ALIGN_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use
+	 */
+};
+
+/* Request Message; This command is used for getting
+ * the multiple physically contiguous
+ * memory blocks from the server memory subsystem
+ */
+struct mem_alloc_req_msg_v01 {
+
+	/* Mandatory */
+	/*requested size*/
+	uint32_t num_bytes;
+
+	/* Optional */
+	/* Must be set to true if block_alignment
+	 * is being passed
+	 */
+	uint8_t block_alignment_valid;
+	/* The block alignment for the memory block to be allocated
+	 */
+	enum dhms_mem_block_align_enum_v01 block_alignment;
+};  /* Message */
+
+/* Response Message; This command is used for getting
+ * the multiple physically contiguous memory blocks
+ * from the server memory subsystem
+ */
+struct mem_alloc_resp_msg_v01 {
+
+	/* Mandatory */
+	/*  Result Code */
+	/* The result of the requested memory operation
+	 */
+	enum qmi_result_type_v01 resp;
+	/* Optional */
+	/*  Memory Block Handle
+	 */
+	/* Must be set to true if handle is being passed
+	 */
+	uint8_t handle_valid;
+	/* The physical address of the memory allocated on the HLOS
+	 */
+	uint64_t handle;
+	/* Optional */
+	/* Memory block size */
+	/* Must be set to true if num_bytes is being passed
+	 */
+	uint8_t num_bytes_valid;
+	/* The number of bytes actually allocated for the request.
+	 * This value can be smaller than the size requested in
+	 * QMI_DHMS_MEM_ALLOC_REQ_MSG.
+	 */
+	uint32_t num_bytes;
+};  /* Message */
+
+/* Request Message; This command is used for releasing
+ * the multiple physically contiguous
+ * memory blocks to the server memory subsystem
+ */
+struct mem_free_req_msg_v01 {
+
+	/* Mandatory */
+	/* Physical address of memory to be freed
+	 */
+	uint32_t handle;
+};  /* Message */
+
+/* Response Message; This command is used for releasing
+ * the multiple physically contiguous
+ * memory blocks to the server memory subsystem
+ */
+struct mem_free_resp_msg_v01 {
+
+	/* Mandatory */
+	/* Result of the requested memory operation, todo,
+	 * need to check the async operation for free
+	 */
+	enum qmi_result_type_v01 resp;
+};  /* Message */
+
+/* Request Message; This command is used for getting
+ * the multiple physically contiguous
+ * memory blocks from the server memory subsystem
+ */
+struct mem_alloc_generic_req_msg_v01 {
+
+	/* Mandatory */
+	/*requested size*/
+	uint32_t num_bytes;
+
+	/* Mandatory */
+	/* client id */
+	enum dhms_mem_client_id_v01 client_id;
+
+	/* Mandatory */
+	/* Peripheral Id*/
+	enum dhms_mem_proc_id_v01 proc_id;
+
+	/* Mandatory */
+	/* Sequence id */
+	uint32_t sequence_id;
+
+	/* Optional */
+	/*  alloc_contiguous */
+	/* Must be set to true if alloc_contiguous is being passed */
+	uint8_t alloc_contiguous_valid;
+
+	/* Alloc_contiguous indicates whether the client is requesting
+	 * contiguous or non-contiguous memory; the default is contiguous.
+	 * 0 = non-contiguous, any other value = contiguous.
+	 */
+	uint8_t alloc_contiguous;
+
+	/* Optional */
+	/* Must be set to true if block_alignment
+	 * is being passed
+	 */
+	uint8_t block_alignment_valid;
+
+	/* The block alignment for the memory block to be allocated
+	 */
+	enum dhms_mem_block_align_enum_v01 block_alignment;
+
+};  /* Message */
+
+/* Response Message; This command is used for getting
+ * the multiple physically contiguous memory blocks
+ * from the server memory subsystem
+ */
+struct mem_alloc_generic_resp_msg_v01 {
+
+	/* Mandatory */
+	/*  Result Code */
+	/* The result of the requested memory operation
+	 */
+	struct qmi_response_type_v01 resp;
+
+	/* Optional */
+	/* Sequence ID */
+	/* Must be set to true if sequence_id is being passed */
+	uint8_t sequence_id_valid;
+
+	/* Optional */
+	/* Sequence id */
+	uint32_t sequence_id;
+
+	/* Optional */
+	/*  Memory Block Handle
+	 */
+	/* Must be set to true if dhms_mem_alloc_addr_info is being passed
+	 */
+	uint8_t dhms_mem_alloc_addr_info_valid;
+
+	/* Optional */
+	/* Handle Size */
+	uint32_t dhms_mem_alloc_addr_info_len;
+
+	/* Optional */
+	/* The physical address of the memory allocated on the HLOS
+	 */
+	struct dhms_mem_alloc_addr_info_type_v01
+		dhms_mem_alloc_addr_info[MAX_ARR_CNT_V01];
+
+};  /* Message */
+
+/* Request Message; This command is used for releasing
+ * the multiple physically contiguous
+ * memory blocks to the server memory subsystem
+ */
+struct mem_free_generic_req_msg_v01 {
+
+	/* Mandatory */
+	/* Must be set to the number of elements in the array */
+	uint32_t dhms_mem_alloc_addr_info_len;
+
+	/* Mandatory */
+	/* Physical address and size of the memory allocated
+	 * on the HLOS to be freed.
+	 */
+	struct dhms_mem_alloc_addr_info_type_v01
+			dhms_mem_alloc_addr_info[MAX_ARR_CNT_V01];
+
+	/* Optional */
+	/* Client ID */
+	/* Must be set to true if client_id is being passed */
+	uint8_t client_id_valid;
+
+	/* Optional */
+	/* Client Id */
+	enum dhms_mem_client_id_v01 client_id;
+
+	/* Optional */
+	/* Proc ID */
+	/* Must be set to true if proc_id is being passed */
+	uint8_t proc_id_valid;
+
+	/* Optional */
+	/* Peripheral */
+	enum dhms_mem_proc_id_v01 proc_id;
+
+};  /* Message */
+
+/* Response Message; This command is used for releasing
+ * the multiple physically contiguous
+ * memory blocks to the server memory subsystem
+ */
+struct mem_free_generic_resp_msg_v01 {
+
+	/*
+	 * Mandatory
+	 * Result of the requested memory operation, todo,
+	 * need to check the async operation for free
+	 */
+	struct qmi_response_type_v01 resp;
+
+};  /* Message */
+
+struct mem_query_size_req_msg_v01 {
+
+	/* Mandatory */
+	enum dhms_mem_client_id_v01 client_id;
+
+	/*
+	 * Optional
+	 * Proc ID
+	 * proc_id_valid must be set to true if proc_id is being passed
+	 */
+	uint8_t proc_id_valid;
+
+	enum dhms_mem_proc_id_v01 proc_id;
+};  /* Message */
+
+struct mem_query_size_rsp_msg_v01 {
+
+	/*
+	 * Mandatory
+	 * Result Code
+	 */
+	struct qmi_response_type_v01 resp;
+
+	/*
+	 * Optional
+	 * size_valid must be set to true if size is being passed
+	 */
+	uint8_t size_valid;
+
+	uint32_t size;
+};  /* Message */
+
+
+extern struct elem_info mem_alloc_req_msg_data_v01_ei[];
+extern struct elem_info mem_alloc_resp_msg_data_v01_ei[];
+extern struct elem_info mem_free_req_msg_data_v01_ei[];
+extern struct elem_info mem_free_resp_msg_data_v01_ei[];
+extern struct elem_info mem_alloc_generic_req_msg_data_v01_ei[];
+extern struct elem_info mem_alloc_generic_resp_msg_data_v01_ei[];
+extern struct elem_info mem_free_generic_req_msg_data_v01_ei[];
+extern struct elem_info mem_free_generic_resp_msg_data_v01_ei[];
+extern struct elem_info mem_query_size_req_msg_data_v01_ei[];
+extern struct elem_info mem_query_size_resp_msg_data_v01_ei[];
+
+/*Service Message Definition*/
+#define MEM_ALLOC_REQ_MSG_V01 0x0020
+#define MEM_ALLOC_RESP_MSG_V01 0x0020
+#define MEM_FREE_REQ_MSG_V01 0x0021
+#define MEM_FREE_RESP_MSG_V01 0x0021
+#define MEM_ALLOC_GENERIC_REQ_MSG_V01 0x0022
+#define MEM_ALLOC_GENERIC_RESP_MSG_V01 0x0022
+#define MEM_FREE_GENERIC_REQ_MSG_V01 0x0023
+#define MEM_FREE_GENERIC_RESP_MSG_V01 0x0023
+#define MEM_QUERY_SIZE_REQ_MSG_V01	0x0024
+#define MEM_QUERY_SIZE_RESP_MSG_V01	0x0024
+
+#endif
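(A minimal sketch of how a request defined in this header pairs with its encode/decode table; the struct and msg_desc values mirror the tables that msm_memshare.c below registers, while the client-side send path is omitted because it depends on the QMI client API and is outside this patch.)

/* Illustrative only: request 1 MB, 4 KB aligned, on behalf of the GPS
 * client running on the modem (MPSS) processor.
 */
struct mem_alloc_generic_req_msg_v01 req = {
	.num_bytes = SZ_1M,
	.client_id = DHMS_MEM_CLIENT_GPS_V01,
	.proc_id = DHMS_MEM_PROC_MPSS_V01,
	.sequence_id = 1,
	.block_alignment_valid = 1,
	.block_alignment = DHMS_MEM_BLOCK_ALIGN_4K_V01,
};

struct msg_desc req_desc = {
	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
	.msg_id = MEM_ALLOC_GENERIC_REQ_MSG_V01,
	.ei_array = mem_alloc_generic_req_msg_data_v01_ei,
};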
diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c
new file mode 100644
index 0000000..7298f30
--- /dev/null
+++ b/drivers/soc/qcom/memshare/msm_memshare.c
@@ -0,0 +1,1074 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/notifier.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <soc/qcom/scm.h>
+#include "msm_memshare.h"
+#include "heap_mem_ext_v01.h"
+
+#include <soc/qcom/secure_buffer.h>
+#include <soc/qcom/ramdump.h>
+
+/* Macros */
+#define MEMSHARE_DEV_NAME "memshare"
+#define MEMSHARE_CHILD_DEV_NAME "memshare_child"
+static unsigned long attrs;
+
+static struct qmi_handle *mem_share_svc_handle;
+static void mem_share_svc_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_recv_msg, mem_share_svc_recv_msg);
+static struct workqueue_struct *mem_share_svc_workqueue;
+static uint64_t bootup_request;
+static bool ramdump_event;
+static void *memshare_ramdump_dev[MAX_CLIENTS];
+static struct device *memshare_dev[MAX_CLIENTS];
+
+/* Memshare Driver Structure */
+struct memshare_driver {
+	struct device *dev;
+	struct mutex mem_share;
+	struct mutex mem_free;
+	struct work_struct memshare_init_work;
+};
+
+struct memshare_child {
+	struct device *dev;
+};
+
+static struct memshare_driver *memsh_drv;
+static struct memshare_child *memsh_child;
+static struct mem_blocks memblock[MAX_CLIENTS];
+static uint32_t num_clients;
+static struct msg_desc mem_share_svc_alloc_req_desc = {
+	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_ALLOC_REQ_MSG_V01,
+	.ei_array = mem_alloc_req_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_alloc_resp_desc = {
+	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_ALLOC_RESP_MSG_V01,
+	.ei_array = mem_alloc_resp_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_free_req_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_FREE_REQ_MSG_V01,
+	.ei_array = mem_free_req_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_free_resp_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_FREE_RESP_MSG_V01,
+	.ei_array = mem_free_resp_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_alloc_generic_req_desc = {
+	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_ALLOC_GENERIC_REQ_MSG_V01,
+	.ei_array = mem_alloc_generic_req_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_alloc_generic_resp_desc = {
+	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_ALLOC_GENERIC_RESP_MSG_V01,
+	.ei_array = mem_alloc_generic_resp_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_free_generic_req_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_FREE_GENERIC_REQ_MSG_V01,
+	.ei_array = mem_free_generic_req_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_free_generic_resp_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_FREE_GENERIC_RESP_MSG_V01,
+	.ei_array = mem_free_generic_resp_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_size_query_req_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_QUERY_SIZE_REQ_MSG_V01,
+	.ei_array = mem_query_size_req_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_size_query_resp_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_QUERY_SIZE_RESP_MSG_V01,
+	.ei_array = mem_query_size_resp_msg_data_v01_ei,
+};
+
+/*
+ *  This API creates ramdump dev handlers
+ *  for each of the memshare clients.
+ *  These dev handlers will be used for
+ *  extracting the ramdump for loaned memory
+ *  segments.
+ */
+
+static int mem_share_configure_ramdump(int client)
+{
+	char client_name[18];
+	const char *clnt = NULL;
+
+	switch (client) {
+	case 0:
+		clnt = "GPS";
+		break;
+	case 1:
+		clnt = "FTM";
+		break;
+	case 2:
+		clnt = "DIAG";
+		break;
+	default:
+		pr_err("memshare: no memshare clients registered\n");
+		return -EINVAL;
+	}
+
+	snprintf(client_name, sizeof(client_name),
+		"memshare_%s", clnt);
+	if (memshare_dev[client]) {
+		memshare_ramdump_dev[client] =
+			create_ramdump_device(client_name,
+				memshare_dev[client]);
+	} else {
+		pr_err("memshare: %s: invalid memshare device\n", __func__);
+		return -ENODEV;
+	}
+	if (IS_ERR_OR_NULL(memshare_ramdump_dev[client])) {
+		pr_err("memshare: %s: Unable to create memshare ramdump device\n",
+				__func__);
+		memshare_ramdump_dev[client] = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int check_client(int client_id, int proc, int request)
+{
+	int i = 0, rc;
+	int found = DHMS_MEM_CLIENT_INVALID;
+
+	for (i = 0; i < MAX_CLIENTS; i++) {
+		if (memblock[i].client_id == client_id &&
+				memblock[i].peripheral == proc) {
+			found = i;
+			break;
+		}
+	}
+	if ((found == DHMS_MEM_CLIENT_INVALID) && !request) {
+		pr_debug("memshare: No registered client, adding a new client\n");
+		/* Add a new client */
+		for (i = 0; i < MAX_CLIENTS; i++) {
+			if (memblock[i].client_id == DHMS_MEM_CLIENT_INVALID) {
+				memblock[i].client_id = client_id;
+				memblock[i].allotted = 0;
+				memblock[i].guarantee = 0;
+				memblock[i].peripheral = proc;
+				found = i;
+
+				if (!memblock[i].file_created) {
+					rc = mem_share_configure_ramdump(i);
+					if (rc)
+						pr_err("memshare: %s, Cannot create ramdump for client: %d\n",
+							__func__, client_id);
+					else
+						memblock[i].file_created = 1;
+				}
+
+				break;
+			}
+		}
+	}
+
+	return found;
+}
+
+static void free_client(int id)
+{
+	memblock[id].phy_addr = 0;
+	memblock[id].virtual_addr = 0;
+	memblock[id].allotted = 0;
+	memblock[id].guarantee = 0;
+	memblock[id].sequence_id = -1;
+	memblock[id].memory_type = MEMORY_CMA;
+
+}
+
+static void fill_alloc_response(struct mem_alloc_generic_resp_msg_v01 *resp,
+						int id, int *flag)
+{
+	resp->sequence_id_valid = 1;
+	resp->sequence_id = memblock[id].sequence_id;
+	resp->dhms_mem_alloc_addr_info_valid = 1;
+	resp->dhms_mem_alloc_addr_info_len = 1;
+	resp->dhms_mem_alloc_addr_info[0].phy_addr = memblock[id].phy_addr;
+	resp->dhms_mem_alloc_addr_info[0].num_bytes = memblock[id].size;
+	if (!*flag) {
+		resp->resp.result = QMI_RESULT_SUCCESS_V01;
+		resp->resp.error = QMI_ERR_NONE_V01;
+	} else {
+		resp->resp.result = QMI_RESULT_FAILURE_V01;
+		resp->resp.error = QMI_ERR_NO_MEMORY_V01;
+	}
+
+}
+
+static void initialize_client(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_CLIENTS; i++) {
+		memblock[i].allotted = 0;
+		memblock[i].size = 0;
+		memblock[i].guarantee = 0;
+		memblock[i].phy_addr = 0;
+		memblock[i].virtual_addr = 0;
+		memblock[i].client_id = DHMS_MEM_CLIENT_INVALID;
+		memblock[i].peripheral = -1;
+		memblock[i].sequence_id = -1;
+		memblock[i].memory_type = MEMORY_CMA;
+		memblock[i].free_memory = 0;
+		memblock[i].hyp_mapping = 0;
+		memblock[i].file_created = 0;
+	}
+	attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+}
+
+/*
+ *  mem_share_do_ramdump() initializes the ramdump
+ *  segments with the physical address and size of
+ *  each memshare client. Extraction is skipped for
+ *  clients that have no memory allotted. It then
+ *  calls the ramdump API to extract the dump in
+ *  ELF format.
+ */
+
+static int mem_share_do_ramdump(void)
+{
+	int i = 0, ret;
+	char *client_name = NULL;
+
+	for (i = 0; i < num_clients; i++) {
+
+		struct ramdump_segment *ramdump_segments_tmp = NULL;
+
+		switch (i) {
+		case 0:
+			client_name = "GPS";
+			break;
+		case 1:
+			client_name = "FTM";
+			break;
+		case 2:
+			client_name = "DIAG";
+			break;
+		default:
+			pr_err("memshare: no memshare clients registered\n");
+			return -EINVAL;
+		}
+
+		if (!memblock[i].allotted) {
+			pr_err("memshare: %s memblock is not allotted\n",
+			client_name);
+			continue;
+		}
+
+		ramdump_segments_tmp = kcalloc(1,
+			sizeof(struct ramdump_segment),
+			GFP_KERNEL);
+		if (!ramdump_segments_tmp)
+			return -ENOMEM;
+
+		ramdump_segments_tmp[0].size = memblock[i].size;
+		ramdump_segments_tmp[0].address = memblock[i].phy_addr;
+
+		pr_debug("memshare: %s:%s client:id: %d:size = %d\n",
+		__func__, client_name, i, memblock[i].size);
+
+		ret = do_elf_ramdump(memshare_ramdump_dev[i],
+					ramdump_segments_tmp, 1);
+		kfree(ramdump_segments_tmp);
+		if (ret < 0) {
+			pr_err("memshare: Unable to dump: %d\n", ret);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
+					void *_cmd)
+{
+	int i;
+	int ret;
+	u32 source_vmlist[2] = {VMID_HLOS, VMID_MSS_MSA};
+	int dest_vmids[1] = {VMID_HLOS};
+	int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC};
+	struct notif_data *notifdata = NULL;
+
+	mutex_lock(&memsh_drv->mem_share);
+
+	switch (code) {
+
+	case SUBSYS_BEFORE_SHUTDOWN:
+		bootup_request++;
+		break;
+
+	case SUBSYS_RAMDUMP_NOTIFICATION:
+		ramdump_event = 1;
+		break;
+
+	case SUBSYS_BEFORE_POWERUP:
+		if (_cmd) {
+			notifdata = (struct notif_data *) _cmd;
+		} else {
+			ramdump_event = 0;
+			break;
+		}
+
+		if (notifdata->enable_ramdump && ramdump_event) {
+			pr_debug("memshare: %s, Ramdump collection is enabled\n",
+					__func__);
+			ret = mem_share_do_ramdump();
+			if (ret)
+				pr_err("memshare: Ramdump collection failed\n");
+			ramdump_event = 0;
+		}
+		break;
+
+	case SUBSYS_AFTER_POWERUP:
+		pr_debug("memshare: Modem has booted up\n");
+		for (i = 0; i < MAX_CLIENTS; i++) {
+			if (memblock[i].free_memory > 0 &&
+					bootup_request >= 2) {
+				memblock[i].free_memory -= 1;
+				pr_debug("memshare: free_memory count: %d for client id: %d\n",
+					memblock[i].free_memory,
+					memblock[i].client_id);
+			}
+
+			if (memblock[i].free_memory == 0) {
+				if (memblock[i].peripheral ==
+					DHMS_MEM_PROC_MPSS_V01 &&
+					!memblock[i].guarantee &&
+					memblock[i].allotted) {
+					pr_debug("memshare: hypervisor unmapping for client id: %d\n",
+						memblock[i].client_id);
+					ret = hyp_assign_phys(
+							memblock[i].phy_addr,
+							memblock[i].size,
+							source_vmlist,
+							2, dest_vmids,
+							dest_perms, 1);
+					if (ret &&
+						memblock[i].hyp_mapping == 1) {
+						/*
+						 * This is an error case as hyp
+						 * mapping was successful
+						 * earlier but during unmap
+						 * it led to failure.
+						 */
+						pr_err("memshare: %s, failed to unmap the region\n",
+							__func__);
+						memblock[i].hyp_mapping = 1;
+					} else {
+						memblock[i].hyp_mapping = 0;
+					}
+					dma_free_attrs(memsh_drv->dev,
+						memblock[i].size,
+						memblock[i].virtual_addr,
+						memblock[i].phy_addr,
+						attrs);
+					free_client(i);
+				}
+			}
+		}
+		bootup_request++;
+		break;
+
+	default:
+		break;
+	}
+
+	mutex_unlock(&memsh_drv->mem_share);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block nb = {
+	.notifier_call = modem_notifier_cb,
+};
+
+static void shared_hyp_mapping(int client_id)
+{
+	int ret;
+	u32 source_vmlist[1] = {VMID_HLOS};
+	int dest_vmids[2] = {VMID_HLOS, VMID_MSS_MSA};
+	int dest_perms[2] = {PERM_READ|PERM_WRITE,
+				PERM_READ|PERM_WRITE};
+
+	if (client_id == DHMS_MEM_CLIENT_INVALID) {
+		pr_err("memshare: %s, Invalid Client\n", __func__);
+		return;
+	}
+
+	ret = hyp_assign_phys(memblock[client_id].phy_addr,
+			memblock[client_id].size,
+			source_vmlist, 1, dest_vmids,
+			dest_perms, 2);
+
+	if (ret != 0) {
+		pr_err("memshare: hyp_assign_phys failed size=%u err=%d\n",
+				memblock[client_id].size, ret);
+		return;
+	}
+	memblock[client_id].hyp_mapping = 1;
+}
+
+static int handle_alloc_req(void *req_h, void *req, void *conn_h)
+{
+	struct mem_alloc_req_msg_v01 *alloc_req;
+	struct mem_alloc_resp_msg_v01 alloc_resp;
+	int rc = 0;
+
+	mutex_lock(&memsh_drv->mem_share);
+	alloc_req = (struct mem_alloc_req_msg_v01 *)req;
+	pr_debug("memshare: %s: Received Alloc Request: alloc_req->num_bytes = %d\n",
+		__func__, alloc_req->num_bytes);
+	if (!memblock[GPS].size) {
+		memset(&alloc_resp, 0, sizeof(alloc_resp));
+		alloc_resp.resp = QMI_RESULT_FAILURE_V01;
+		rc = memshare_alloc(memsh_drv->dev, alloc_req->num_bytes,
+					&memblock[GPS]);
+	}
+	alloc_resp.num_bytes_valid = 1;
+	alloc_resp.num_bytes =  alloc_req->num_bytes;
+	alloc_resp.handle_valid = 1;
+	alloc_resp.handle = memblock[GPS].phy_addr;
+	if (rc) {
+		alloc_resp.resp = QMI_RESULT_FAILURE_V01;
+		memblock[GPS].size = 0;
+	} else {
+		alloc_resp.resp = QMI_RESULT_SUCCESS_V01;
+	}
+
+	mutex_unlock(&memsh_drv->mem_share);
+
+	pr_debug("memshare: %s, alloc_resp.num_bytes :%d, alloc_resp.resp :%lx\n",
+			  __func__, alloc_resp.num_bytes,
+			  (unsigned long int)alloc_resp.resp);
+	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
+			&mem_share_svc_alloc_resp_desc, &alloc_resp,
+			sizeof(alloc_resp));
+	if (rc < 0)
+		pr_err("memshare: %s, Error sending the alloc request: %d\n",
+					__func__, rc);
+
+	return rc;
+}
+
+static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h)
+{
+	struct mem_alloc_generic_req_msg_v01 *alloc_req;
+	struct mem_alloc_generic_resp_msg_v01 *alloc_resp;
+	int rc, resp = 0;
+	int client_id;
+
+	mutex_lock(&memsh_drv->mem_share);
+	alloc_req = (struct mem_alloc_generic_req_msg_v01 *)req;
+	pr_debug("memshare: alloc request client id: %d proc_id: %d\n",
+			alloc_req->client_id, alloc_req->proc_id);
+	alloc_resp = kzalloc(sizeof(*alloc_resp),
+					GFP_KERNEL);
+	if (!alloc_resp) {
+		mutex_unlock(&memsh_drv->mem_share);
+		return -ENOMEM;
+	}
+	alloc_resp->resp.result = QMI_RESULT_FAILURE_V01;
+	alloc_resp->resp.error = QMI_ERR_NO_MEMORY_V01;
+	client_id = check_client(alloc_req->client_id, alloc_req->proc_id,
+								CHECK);
+
+	if (client_id >= MAX_CLIENTS) {
+		pr_err("memshare: %s client not found, requested client: %d, proc_id: %d\n",
+				__func__, alloc_req->client_id,
+				alloc_req->proc_id);
+		kfree(alloc_resp);
+		alloc_resp = NULL;
+		mutex_unlock(&memsh_drv->mem_share);
+		return -EINVAL;
+	}
+
+	memblock[client_id].free_memory += 1;
+	pr_debug("memshare: %s, free memory count for client id: %d = %d",
+		__func__, memblock[client_id].client_id,
+			memblock[client_id].free_memory);
+	if (!memblock[client_id].allotted) {
+		rc = memshare_alloc(memsh_drv->dev, alloc_req->num_bytes,
+					&memblock[client_id]);
+		if (rc) {
+			pr_err("memshare: %s, Unable to allocate memory for requested client\n",
+							__func__);
+			resp = 1;
+		}
+		if (!resp) {
+			memblock[client_id].allotted = 1;
+			memblock[client_id].size = alloc_req->num_bytes;
+			memblock[client_id].peripheral = alloc_req->proc_id;
+		}
+	}
+	memblock[client_id].sequence_id = alloc_req->sequence_id;
+
+	fill_alloc_response(alloc_resp, client_id, &resp);
+	/*
+	 * Perform the Hypervisor mapping in order to avoid XPU violation
+	 * to the allocated region for Modem Clients
+	 */
+	if (!memblock[client_id].hyp_mapping &&
+		memblock[client_id].allotted)
+		shared_hyp_mapping(client_id);
+	mutex_unlock(&memsh_drv->mem_share);
+	pr_debug("memshare: alloc_resp.num_bytes :%d, alloc_resp.resp.result :%lx\n",
+			  alloc_resp->dhms_mem_alloc_addr_info[0].num_bytes,
+			  (unsigned long int)alloc_resp->resp.result);
+	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
+			&mem_share_svc_alloc_generic_resp_desc, alloc_resp,
+			sizeof(*alloc_resp));
+
+	if (rc < 0)
+		pr_err("memshare: %s, Error sending the alloc request: %d\n",
+							__func__, rc);
+
+	kfree(alloc_resp);
+	alloc_resp = NULL;
+	return rc;
+}
+
+static int handle_free_req(void *req_h, void *req, void *conn_h)
+{
+	struct mem_free_req_msg_v01 *free_req;
+	struct mem_free_resp_msg_v01 free_resp;
+	int rc;
+
+	mutex_lock(&memsh_drv->mem_free);
+	if (!memblock[GPS].guarantee) {
+		free_req = (struct mem_free_req_msg_v01 *)req;
+		pr_debug("memshare: %s: Received Free Request\n", __func__);
+		memset(&free_resp, 0, sizeof(free_resp));
+		dma_free_coherent(memsh_drv->dev, memblock[GPS].size,
+			memblock[GPS].virtual_addr,
+				free_req->handle);
+	}
+	free_resp.resp = QMI_RESULT_SUCCESS_V01;
+	mutex_unlock(&memsh_drv->mem_free);
+	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
+			&mem_share_svc_free_resp_desc, &free_resp,
+			sizeof(free_resp));
+	if (rc < 0)
+		pr_err("memshare: %s, Error sending the free request: %d\n",
+					__func__, rc);
+
+	return rc;
+}
+
+static int handle_free_generic_req(void *req_h, void *req, void *conn_h)
+{
+	struct mem_free_generic_req_msg_v01 *free_req;
+	struct mem_free_generic_resp_msg_v01 free_resp;
+	int rc;
+	int flag = 0;
+	uint32_t client_id;
+
+	mutex_lock(&memsh_drv->mem_free);
+	free_req = (struct mem_free_generic_req_msg_v01 *)req;
+	pr_debug("memshare: %s: Received Free Request\n", __func__);
+	memset(&free_resp, 0, sizeof(free_resp));
+	free_resp.resp.error = QMI_ERR_INTERNAL_V01;
+	free_resp.resp.result = QMI_RESULT_FAILURE_V01;
+	pr_debug("memshare: Client id: %d proc id: %d\n", free_req->client_id,
+				free_req->proc_id);
+	client_id = check_client(free_req->client_id, free_req->proc_id, FREE);
+	if (client_id == DHMS_MEM_CLIENT_INVALID) {
+		pr_err("memshare: %s, Invalid client request to free memory\n",
+					__func__);
+		flag = 1;
+	} else if (!memblock[client_id].guarantee &&
+					memblock[client_id].allotted) {
+		pr_debug("memshare: %s: size: %d",
+				__func__, memblock[client_id].size);
+		dma_free_attrs(memsh_drv->dev, memblock[client_id].size,
+			memblock[client_id].virtual_addr,
+			memblock[client_id].phy_addr,
+			attrs);
+		free_client(client_id);
+	} else {
+		pr_err("memshare: %s, cannot free memory for a guaranteed client\n",
+						__func__);
+	}
+
+	if (flag) {
+		free_resp.resp.result = QMI_RESULT_FAILURE_V01;
+		free_resp.resp.error = QMI_ERR_INVALID_ID_V01;
+	} else {
+		free_resp.resp.result = QMI_RESULT_SUCCESS_V01;
+		free_resp.resp.error = QMI_ERR_NONE_V01;
+	}
+
+	mutex_unlock(&memsh_drv->mem_free);
+	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
+		&mem_share_svc_free_generic_resp_desc, &free_resp,
+		sizeof(free_resp));
+
+	if (rc < 0)
+		pr_err("memshare: %s, Error sending the free request: %d\n",
+					__func__, rc);
+
+	return rc;
+}
+
+static int handle_query_size_req(void *req_h, void *req, void *conn_h)
+{
+	int rc, client_id;
+	struct mem_query_size_req_msg_v01 *query_req;
+	struct mem_query_size_rsp_msg_v01 *query_resp;
+
+	mutex_lock(&memsh_drv->mem_share);
+	query_req = (struct mem_query_size_req_msg_v01 *)req;
+	query_resp = kzalloc(sizeof(*query_resp),
+					GFP_KERNEL);
+	if (!query_resp) {
+		mutex_unlock(&memsh_drv->mem_share);
+		return -ENOMEM;
+	}
+	pr_debug("memshare: query request client id: %d proc_id: %d\n",
+		query_req->client_id, query_req->proc_id);
+	client_id = check_client(query_req->client_id, query_req->proc_id,
+								CHECK);
+
+	if (client_id >= MAX_CLIENTS) {
+		pr_err("memshare: %s client not found, requested client: %d, proc_id: %d\n",
+				__func__, query_req->client_id,
+				query_req->proc_id);
+		kfree(query_resp);
+		query_resp = NULL;
+		mutex_unlock(&memsh_drv->mem_share);
+		return -EINVAL;
+	}
+
+	if (memblock[client_id].size) {
+		query_resp->size_valid = 1;
+		query_resp->size = memblock[client_id].size;
+	} else {
+		query_resp->size_valid = 1;
+		query_resp->size = 0;
+	}
+	query_resp->resp.result = QMI_RESULT_SUCCESS_V01;
+	query_resp->resp.error = QMI_ERR_NONE_V01;
+	mutex_unlock(&memsh_drv->mem_share);
+
+	pr_debug("memshare: query_resp.size :%d, query_resp.resp.result :%lx\n",
+			  query_resp->size,
+			  (unsigned long int)query_resp->resp.result);
+	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
+			&mem_share_svc_size_query_resp_desc, query_resp,
+			sizeof(*query_resp));
+
+	if (rc < 0)
+		pr_err("memshare: %s, Error sending the query request: %d\n",
+							__func__, rc);
+
+	kfree(query_resp);
+	query_resp = NULL;
+	return rc;
+}
+
+static int mem_share_svc_connect_cb(struct qmi_handle *handle,
+			       void *conn_h)
+{
+	if (mem_share_svc_handle != handle || !conn_h)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int mem_share_svc_disconnect_cb(struct qmi_handle *handle,
+				  void *conn_h)
+{
+	if (mem_share_svc_handle != handle || !conn_h)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int mem_share_svc_req_desc_cb(unsigned int msg_id,
+				struct msg_desc **req_desc)
+{
+	int rc;
+
+	pr_debug("memshare: %s\n", __func__);
+	switch (msg_id) {
+	case MEM_ALLOC_REQ_MSG_V01:
+		*req_desc = &mem_share_svc_alloc_req_desc;
+		rc = sizeof(struct mem_alloc_req_msg_v01);
+		break;
+
+	case MEM_FREE_REQ_MSG_V01:
+		*req_desc = &mem_share_svc_free_req_desc;
+		rc = sizeof(struct mem_free_req_msg_v01);
+		break;
+
+	case MEM_ALLOC_GENERIC_REQ_MSG_V01:
+		*req_desc = &mem_share_svc_alloc_generic_req_desc;
+		rc = sizeof(struct mem_alloc_generic_req_msg_v01);
+		break;
+
+	case MEM_FREE_GENERIC_REQ_MSG_V01:
+		*req_desc = &mem_share_svc_free_generic_req_desc;
+		rc = sizeof(struct mem_free_generic_req_msg_v01);
+		break;
+
+	case MEM_QUERY_SIZE_REQ_MSG_V01:
+		*req_desc = &mem_share_svc_size_query_req_desc;
+		rc = sizeof(struct mem_query_size_req_msg_v01);
+		break;
+
+	default:
+		rc = -ENOTSUPP;
+		break;
+	}
+	return rc;
+}
+
+static int mem_share_svc_req_cb(struct qmi_handle *handle, void *conn_h,
+			void *req_h, unsigned int msg_id, void *req)
+{
+	int rc;
+
+	pr_debug("memshare: %s\n", __func__);
+	if (mem_share_svc_handle != handle || !conn_h)
+		return -EINVAL;
+
+	switch (msg_id) {
+	case MEM_ALLOC_REQ_MSG_V01:
+		rc = handle_alloc_req(req_h, req, conn_h);
+		break;
+
+	case MEM_FREE_REQ_MSG_V01:
+		rc = handle_free_req(req_h, req, conn_h);
+		break;
+
+	case MEM_ALLOC_GENERIC_REQ_MSG_V01:
+		rc = handle_alloc_generic_req(req_h, req, conn_h);
+		break;
+
+	case MEM_FREE_GENERIC_REQ_MSG_V01:
+		rc = handle_free_generic_req(req_h, req, conn_h);
+		break;
+
+	case MEM_QUERY_SIZE_REQ_MSG_V01:
+		rc = handle_query_size_req(req_h, req, conn_h);
+		break;
+
+	default:
+		rc = -ENOTSUPP;
+		break;
+	}
+	return rc;
+}
+
+static void mem_share_svc_recv_msg(struct work_struct *work)
+{
+	int rc;
+
+	pr_debug("memshare: %s\n", __func__);
+	do {
+		rc = qmi_recv_msg(mem_share_svc_handle);
+		pr_debug("memshare: %s: Notified about a Receive Event",
+			__func__);
+	} while (!rc);
+
+	if (rc != -ENOMSG)
+		pr_err("memshare: %s: Error = %d while receiving message\n",
+			__func__, rc);
+}
+
+static void qmi_mem_share_svc_ntfy(struct qmi_handle *handle,
+		enum qmi_event_type event, void *priv)
+{
+	pr_debug("memshare: %s\n", __func__);
+
+	if (event == QMI_RECV_MSG)
+		queue_delayed_work(mem_share_svc_workqueue,
+				   &work_recv_msg, 0);
+}
+
+static struct qmi_svc_ops_options mem_share_svc_ops_options = {
+	.version = 1,
+	.service_id = MEM_SHARE_SERVICE_SVC_ID,
+	.service_vers = MEM_SHARE_SERVICE_VERS,
+	.service_ins = MEM_SHARE_SERVICE_INS_ID,
+	.connect_cb = mem_share_svc_connect_cb,
+	.disconnect_cb = mem_share_svc_disconnect_cb,
+	.req_desc_cb = mem_share_svc_req_desc_cb,
+	.req_cb = mem_share_svc_req_cb,
+};
+
+int memshare_alloc(struct device *dev,
+					unsigned int block_size,
+					struct mem_blocks *pblk)
+{
+	pr_debug("memshare: %s", __func__);
+
+	if (!pblk) {
+		pr_err("memshare: %s: Failed memory block allocation\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	pblk->virtual_addr = dma_alloc_attrs(dev, block_size,
+						&pblk->phy_addr, GFP_KERNEL,
+						attrs);
+	if (pblk->virtual_addr == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void memshare_init_worker(struct work_struct *work)
+{
+	int rc;
+
+	mem_share_svc_workqueue =
+		create_singlethread_workqueue("mem_share_svc");
+	if (!mem_share_svc_workqueue)
+		return;
+
+	mem_share_svc_handle = qmi_handle_create(qmi_mem_share_svc_ntfy, NULL);
+	if (!mem_share_svc_handle) {
+		pr_err("memshare: %s: Creating mem_share_svc qmi handle failed\n",
+			__func__);
+		destroy_workqueue(mem_share_svc_workqueue);
+		return;
+	}
+	rc = qmi_svc_register(mem_share_svc_handle, &mem_share_svc_ops_options);
+	if (rc < 0) {
+		pr_err("memshare: %s: Registering mem share svc failed %d\n",
+			__func__, rc);
+		qmi_handle_destroy(mem_share_svc_handle);
+		destroy_workqueue(mem_share_svc_workqueue);
+		return;
+	}
+	pr_debug("memshare: memshare_init successful\n");
+}
+
+static int memshare_child_probe(struct platform_device *pdev)
+{
+	int rc;
+	uint32_t size, client_id;
+	const char *name;
+	struct memshare_child *drv;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(struct memshare_child),
+							GFP_KERNEL);
+
+	if (!drv)
+		return -ENOMEM;
+
+	drv->dev = &pdev->dev;
+	memsh_child = drv;
+	platform_set_drvdata(pdev, memsh_child);
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,peripheral-size",
+						&size);
+	if (rc) {
+		pr_err("memshare: %s, Error reading size of clients, rc: %d\n",
+				__func__, rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,client-id",
+						&client_id);
+	if (rc) {
+		pr_err("memshare: %s, Error reading client id, rc: %d\n",
+				__func__, rc);
+		return rc;
+	}
+
+	memblock[num_clients].guarantee = of_property_read_bool(
+							pdev->dev.of_node,
+							"qcom,allocate-boot-time");
+
+	rc = of_property_read_string(pdev->dev.of_node, "label",
+						&name);
+	if (rc) {
+		pr_err("memshare: %s, Error reading peripheral info for client, rc: %d\n",
+					__func__, rc);
+		return rc;
+	}
+
+	if (strcmp(name, "modem") == 0)
+		memblock[num_clients].peripheral = DHMS_MEM_PROC_MPSS_V01;
+	else if (strcmp(name, "adsp") == 0)
+		memblock[num_clients].peripheral = DHMS_MEM_PROC_ADSP_V01;
+	else if (strcmp(name, "wcnss") == 0)
+		memblock[num_clients].peripheral = DHMS_MEM_PROC_WCNSS_V01;
+
+	memblock[num_clients].size = size;
+	memblock[num_clients].client_id = client_id;
+
+	/*
+	 * Memshare allocation for guaranteed clients
+	 */
+	if (memblock[num_clients].guarantee) {
+		rc = memshare_alloc(memsh_child->dev,
+				memblock[num_clients].size,
+				&memblock[num_clients]);
+		if (rc) {
+			pr_err("memshare: %s, Unable to allocate memory for guaranteed clients, rc: %d\n",
+							__func__, rc);
+			return rc;
+		}
+		memblock[num_clients].allotted = 1;
+	}
+
+	/*
+	 *  call for creating ramdump dev handlers for
+	 *  memshare clients
+	 */
+
+	memshare_dev[num_clients] = &pdev->dev;
+
+	if (!memblock[num_clients].file_created) {
+		rc = mem_share_configure_ramdump(num_clients);
+		if (rc)
+			pr_err("memshare: %s, cannot collect dumps for client id: %d\n",
+					__func__,
+					memblock[num_clients].client_id);
+		else
+			memblock[num_clients].file_created = 1;
+	}
+
+	num_clients++;
+
+	return 0;
+}
+
+static int memshare_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct memshare_driver *drv;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(struct memshare_driver),
+							GFP_KERNEL);
+
+	if (!drv)
+		return -ENOMEM;
+
+	/* Memory allocation has been done successfully */
+	mutex_init(&drv->mem_free);
+	mutex_init(&drv->mem_share);
+
+	INIT_WORK(&drv->memshare_init_work, memshare_init_worker);
+	schedule_work(&drv->memshare_init_work);
+
+	drv->dev = &pdev->dev;
+	memsh_drv = drv;
+	platform_set_drvdata(pdev, memsh_drv);
+	initialize_client();
+	num_clients = 0;
+
+	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL,
+				&pdev->dev);
+
+	if (rc) {
+		pr_err("memshare: %s, error populating the devices\n",
+			__func__);
+		return rc;
+	}
+
+	subsys_notif_register_notifier("modem", &nb);
+	pr_debug("memshare: %s, Memshare inited\n", __func__);
+
+	return 0;
+}
+
+static int memshare_remove(struct platform_device *pdev)
+{
+	if (!memsh_drv)
+		return 0;
+
+	qmi_svc_unregister(mem_share_svc_handle);
+	flush_workqueue(mem_share_svc_workqueue);
+	qmi_handle_destroy(mem_share_svc_handle);
+	destroy_workqueue(mem_share_svc_workqueue);
+
+	return 0;
+}
+
+static int memshare_child_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id memshare_match_table[] = {
+	{
+		.compatible = "qcom,memshare",
+	},
+	{}
+};
+
+static const struct of_device_id memshare_match_table1[] = {
+	{
+		.compatible = "qcom,memshare-peripheral",
+	},
+	{}
+};
+
+
+static struct platform_driver memshare_pdriver = {
+	.probe          = memshare_probe,
+	.remove         = memshare_remove,
+	.driver = {
+		.name   = MEMSHARE_DEV_NAME,
+		.owner  = THIS_MODULE,
+		.of_match_table = memshare_match_table,
+	},
+};
+
+static struct platform_driver memshare_pchild = {
+	.probe          = memshare_child_probe,
+	.remove         = memshare_child_remove,
+	.driver = {
+		.name   = MEMSHARE_CHILD_DEV_NAME,
+		.owner  = THIS_MODULE,
+		.of_match_table = memshare_match_table1,
+	},
+};
+
+module_platform_driver(memshare_pdriver);
+module_platform_driver(memshare_pchild);
+
+MODULE_DESCRIPTION("Mem Share QMI Service Driver");
+MODULE_LICENSE("GPL v2");
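
The QMI handlers above consume only a handful of request fields. As an illustrative sketch (not part of this driver), a remote client's generic allocation request would be populated roughly as follows, with the numeric values assumed:

	/* Illustrative only: field names mirror those read by handle_alloc_generic_req() */
	struct mem_alloc_generic_req_msg_v01 req = {
		.client_id   = 0,			/* assumed DT qcom,client-id value */
		.proc_id     = DHMS_MEM_PROC_MPSS_V01,	/* requesting peripheral */
		.num_bytes   = SZ_4M,			/* assumed request size */
		.sequence_id = 1,			/* echoed back in the response */
	};
	/* The response reports the loaned block in
	 * dhms_mem_alloc_addr_info[0].phy_addr and .num_bytes. */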
diff --git a/drivers/soc/qcom/memshare/msm_memshare.h b/drivers/soc/qcom/memshare/msm_memshare.h
new file mode 100644
index 0000000..f3b594a
--- /dev/null
+++ b/drivers/soc/qcom/memshare/msm_memshare.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_MEM_SHARE_H
+#define _LINUX_MEM_SHARE_H
+
+#define MEM_SHARE_SERVICE_SVC_ID 0x00000034
+#define MEM_SHARE_SERVICE_INS_ID 1
+#define MEM_SHARE_SERVICE_VERS 1
+
+#define MEMORY_CMA	1
+#define MEMORY_NON_CMA	0
+#define MAX_CLIENTS 10
+#define GPS	0
+#define CHECK	0
+#define FREE	1
+
+struct mem_blocks {
+	/* Client Id information */
+	uint32_t client_id;
+	/* Peripheral associated with client */
+	uint32_t peripheral;
+	/* Sequence Id */
+	uint32_t sequence_id;
+	/* CMA or Non-CMA region */
+	uint32_t memory_type;
+	/* Guaranteed Memory */
+	uint32_t guarantee;
+	/* Memory alloted or not */
+	uint32_t allotted;
+	/* Size required for client */
+	uint32_t size;
+	/*
+	 * start address of the memory block reserved by server memory
+	 * subsystem to client
+	 */
+	phys_addr_t phy_addr;
+	/* Virtual address for the physical address allocated */
+	void *virtual_addr;
+	/* Release memory only when XPU is released */
+	uint8_t free_memory;
+	/* Need hypervisor mapping */
+	uint8_t hyp_mapping;
+	/* Status flag which checks if the ramdump file is created */
+	int file_created;
+
+};
+
+int memshare_alloc(struct device *dev,
+					unsigned int block_size,
+					struct mem_blocks *pblk);
+void memshare_free(unsigned int block_size,
+					struct mem_blocks *pblk);
+#endif /* _LINUX_MEM_SHARE_H */
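
As a usage note for the interface above: memshare_alloc() fills the phy_addr and virtual_addr members of a caller-supplied mem_blocks structure. A minimal sketch, assuming a valid device pointer and a hypothetical 1 MB request:

	struct mem_blocks blk = {0};
	int rc;

	rc = memshare_alloc(dev, SZ_1M, &blk);	/* dev: device backed by DMA/CMA memory */
	if (rc)
		return rc;			/* -ENOMEM on failure */

	/* blk.phy_addr is the physical address loaned to the client,
	 * blk.virtual_addr the kernel-side mapping used when freeing. */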
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index cfa4ca9..91c9441 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -934,13 +934,13 @@
 						priv->region_start),
 					VMID_HLOS);
 			}
+			if (desc->clear_fw_region && priv->region_start)
+				pil_clear_segment(desc);
 			dma_free_attrs(desc->dev, priv->region_size,
 					priv->region, priv->region_start,
 					desc->attrs);
 			priv->region = NULL;
 		}
-		if (desc->clear_fw_region && priv->region_start)
-			pil_clear_segment(desc);
 		pil_release_mmap(desc);
 	}
 	return ret;
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index 4a586ac..20b9769 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -677,7 +677,15 @@
 
 	/* Load the MBA image into memory */
 	count = fw->size;
-	memcpy(mba_dp_virt, data, count);
+	if (count <= SZ_1M) {
+		/* Only copy the fw image when it fits within the 1MB limit */
+		memcpy(mba_dp_virt, data, count);
+	} else {
+		dev_err(pil->dev, "%s: fw image loading into memory failed due to fw size overflow\n",
+			__func__);
+		ret = -EINVAL;
+		goto err_mba_data;
+	}
 	/* Ensure memcpy of the MBA memory is done before loading the DP */
 	wmb();
 
diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c
index d5b051e..dd77062 100644
--- a/drivers/soc/qcom/ramdump.c
+++ b/drivers/soc/qcom/ramdump.c
@@ -16,7 +16,6 @@
 #include <linux/jiffies.h>
 #include <linux/sched.h>
 #include <linux/module.h>
-#include <linux/miscdevice.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
@@ -24,10 +23,20 @@
 #include <linux/uaccess.h>
 #include <linux/elf.h>
 #include <linux/wait.h>
+#include <linux/cdev.h>
 #include <soc/qcom/ramdump.h>
 #include <linux/dma-mapping.h>
 #include <linux/of.h>
 
+
+#define RAMDUMP_NUM_DEVICES	256
+#define RAMDUMP_NAME		"ramdump"
+
+static struct class *ramdump_class;
+static dev_t ramdump_dev;
+static DEFINE_MUTEX(rd_minor_mutex);
+static DEFINE_IDA(rd_minor_id);
+static bool ramdump_devnode_inited;
 #define RAMDUMP_WAIT_MSECS	120000
 
 struct ramdump_device {
@@ -38,7 +47,8 @@
 	int ramdump_status;
 
 	struct completion ramdump_complete;
-	struct miscdevice device;
+	struct cdev cdev;
+	struct device *dev;
 
 	wait_queue_head_t dump_wait_q;
 	int nsegments;
@@ -51,17 +61,19 @@
 
 static int ramdump_open(struct inode *inode, struct file *filep)
 {
-	struct ramdump_device *rd_dev = container_of(filep->private_data,
-				struct ramdump_device, device);
+	struct ramdump_device *rd_dev = container_of(inode->i_cdev,
+					struct ramdump_device, cdev);
 	rd_dev->consumer_present = 1;
 	rd_dev->ramdump_status = 0;
+	filep->private_data = rd_dev;
 	return 0;
 }
 
 static int ramdump_release(struct inode *inode, struct file *filep)
 {
-	struct ramdump_device *rd_dev = container_of(filep->private_data,
-				struct ramdump_device, device);
+
+	struct ramdump_device *rd_dev = container_of(inode->i_cdev,
+					struct ramdump_device, cdev);
 	rd_dev->consumer_present = 0;
 	rd_dev->data_ready = 0;
 	complete(&rd_dev->ramdump_complete);
@@ -105,8 +117,7 @@
 static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
 			loff_t *pos)
 {
-	struct ramdump_device *rd_dev = container_of(filep->private_data,
-				struct ramdump_device, device);
+	struct ramdump_device *rd_dev = filep->private_data;
 	void *device_mem = NULL, *origdevice_mem = NULL, *vaddr = NULL;
 	unsigned long data_left = 0, bytes_before, bytes_after;
 	unsigned long addr = 0;
@@ -154,7 +165,7 @@
 
 	rd_dev->attrs = 0;
 	rd_dev->attrs |= DMA_ATTR_SKIP_ZEROING;
-	device_mem = vaddr ?: dma_remap(rd_dev->device.parent, NULL, addr,
+	device_mem = vaddr ?: dma_remap(rd_dev->dev->parent, NULL, addr,
 						copy_size, rd_dev->attrs);
 	origdevice_mem = device_mem;
 
@@ -206,7 +217,7 @@
 
 	kfree(finalbuf);
 	if (!vaddr && origdevice_mem)
-		dma_unremap(rd_dev->device.parent, origdevice_mem, copy_size);
+		dma_unremap(rd_dev->dev->parent, origdevice_mem, copy_size);
 
 	*pos += copy_size;
 
@@ -217,7 +228,7 @@
 
 ramdump_done:
 	if (!vaddr && origdevice_mem)
-		dma_unremap(rd_dev->device.parent, origdevice_mem, copy_size);
+		dma_unremap(rd_dev->dev->parent, origdevice_mem, copy_size);
 
 	kfree(finalbuf);
 	rd_dev->data_ready = 0;
@@ -229,8 +240,7 @@
 static unsigned int ramdump_poll(struct file *filep,
 					struct poll_table_struct *wait)
 {
-	struct ramdump_device *rd_dev = container_of(filep->private_data,
-				struct ramdump_device, device);
+	struct ramdump_device *rd_dev = filep->private_data;
 	unsigned int mask = 0;
 
 	if (rd_dev->data_ready)
@@ -247,9 +257,26 @@
 	.poll = ramdump_poll
 };
 
-void *create_ramdump_device(const char *dev_name, struct device *parent)
+static int ramdump_devnode_init(void)
 {
 	int ret;
+
+	ramdump_class = class_create(THIS_MODULE, RAMDUMP_NAME);
+	ret = alloc_chrdev_region(&ramdump_dev, 0, RAMDUMP_NUM_DEVICES,
+				  RAMDUMP_NAME);
+	if (ret < 0) {
+		pr_warn("%s: unable to allocate major\n", __func__);
+		return ret;
+	}
+
+	ramdump_devnode_inited = true;
+
+	return 0;
+}
+
+void *create_ramdump_device(const char *dev_name, struct device *parent)
+{
+	int ret, minor;
 	struct ramdump_device *rd_dev;
 
 	if (!dev_name) {
@@ -257,6 +284,14 @@
 		return NULL;
 	}
 
+	mutex_lock(&rd_minor_mutex);
+	if (!ramdump_devnode_inited) {
+		ret = ramdump_devnode_init();
+		if (ret) {
+			mutex_unlock(&rd_minor_mutex);
+			return ERR_PTR(ret);
+		}
+	}
+	mutex_unlock(&rd_minor_mutex);
+
 	rd_dev = kzalloc(sizeof(struct ramdump_device), GFP_KERNEL);
 
 	if (!rd_dev) {
@@ -265,15 +300,20 @@
 		return NULL;
 	}
 
+	/* get a minor number */
+	minor = ida_simple_get(&rd_minor_id, 0, RAMDUMP_NUM_DEVICES,
+			GFP_KERNEL);
+	if (minor < 0) {
+		pr_err("%s: No more minor numbers left! rc:%d\n", __func__,
+			minor);
+		ret = -ENODEV;
+		goto fail_out_of_minors;
+	}
+
 	snprintf(rd_dev->name, ARRAY_SIZE(rd_dev->name), "ramdump_%s",
 		 dev_name);
 
 	init_completion(&rd_dev->ramdump_complete);
-
-	rd_dev->device.minor = MISC_DYNAMIC_MINOR;
-	rd_dev->device.name = rd_dev->name;
-	rd_dev->device.fops = &ramdump_file_ops;
-	rd_dev->device.parent = parent;
 	if (parent) {
 		rd_dev->complete_ramdump = of_property_read_bool(
 				parent->of_node, "qcom,complete-ramdump");
@@ -284,27 +324,48 @@
 
 	init_waitqueue_head(&rd_dev->dump_wait_q);
 
-	ret = misc_register(&rd_dev->device);
-
-	if (ret) {
-		pr_err("%s: misc_register failed for %s (%d)", __func__,
+	rd_dev->dev = device_create(ramdump_class, parent,
+				    MKDEV(MAJOR(ramdump_dev), minor),
+				   rd_dev, rd_dev->name);
+	if (IS_ERR(rd_dev->dev)) {
+		ret = PTR_ERR(rd_dev->dev);
+		pr_err("%s: device_create failed for %s (%d)", __func__,
 				dev_name, ret);
-		kfree(rd_dev);
-		return NULL;
+		goto fail_return_minor;
+	}
+
+	cdev_init(&rd_dev->cdev, &ramdump_file_ops);
+
+	ret = cdev_add(&rd_dev->cdev, MKDEV(MAJOR(ramdump_dev), minor), 1);
+	if (ret < 0) {
+		pr_err("%s: cdev_add failed for %s (%d)", __func__,
+				dev_name, ret);
+		goto fail_cdev_add;
 	}
 
 	return (void *)rd_dev;
+
+fail_cdev_add:
+	device_unregister(rd_dev->dev);
+fail_return_minor:
+	ida_simple_remove(&rd_minor_id, minor);
+fail_out_of_minors:
+	kfree(rd_dev);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(create_ramdump_device);
 
 void destroy_ramdump_device(void *dev)
 {
 	struct ramdump_device *rd_dev = dev;
+	int minor = MINOR(rd_dev->cdev.dev);
 
 	if (IS_ERR_OR_NULL(rd_dev))
 		return;
 
-	misc_deregister(&rd_dev->device);
+	cdev_del(&rd_dev->cdev);
+	device_unregister(rd_dev->dev);
+	ida_simple_remove(&rd_minor_id, minor);
 	kfree(rd_dev);
 }
 EXPORT_SYMBOL(destroy_ramdump_device);
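
The ramdump interface, as consumed by the memshare driver earlier in this series, pairs create_ramdump_device() with do_elf_ramdump() and destroy_ramdump_device(). A condensed sketch of that usage, with the segment address, size, and parent device assumed for illustration:

	struct ramdump_segment seg = {
		.address = 0x90000000,	/* assumed physical address */
		.size    = SZ_1M,	/* assumed segment size */
	};
	void *rd_handle;
	int ret;

	rd_handle = create_ramdump_device("memshare_GPS", parent_dev);
	if (IS_ERR_OR_NULL(rd_handle))
		return -ENOMEM;

	ret = do_elf_ramdump(rd_handle, &seg, 1);	/* one segment, ELF format */

	destroy_ramdump_device(rd_handle);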
diff --git a/drivers/soc/qcom/smp2p_sleepstate.c b/drivers/soc/qcom/smp2p_sleepstate.c
index 44192ff..5b0129e 100644
--- a/drivers/soc/qcom/smp2p_sleepstate.c
+++ b/drivers/soc/qcom/smp2p_sleepstate.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,7 @@
 
 static struct notifier_block sleepstate_pm_nb = {
 	.notifier_call = sleepstate_pm_notifier,
+	.priority = INT_MAX,
 };
 
 static int smp2p_sleepstate_probe(struct platform_device *pdev)
diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c
index c8bb13d..870b9f7 100644
--- a/drivers/soc/qcom/wcd-dsp-glink.c
+++ b/drivers/soc/qcom/wcd-dsp-glink.c
@@ -58,6 +58,7 @@
 
 struct wdsp_glink_tx_buf {
 	struct work_struct tx_work;
+	struct work_struct free_tx_work;
 
 	/* Glink channel information */
 	struct wdsp_glink_ch *ch;
@@ -125,6 +126,46 @@
 static int wdsp_glink_open_ch(struct wdsp_glink_ch *ch);
 
 /*
+ * wdsp_glink_free_tx_buf_work - Work function to free tx pkt
+ * work:      Work structure
+ */
+static void wdsp_glink_free_tx_buf_work(struct work_struct *work)
+{
+	struct wdsp_glink_tx_buf *tx_buf;
+
+	tx_buf = container_of(work, struct wdsp_glink_tx_buf,
+			      free_tx_work);
+	vfree(tx_buf);
+}
+
+/*
+ * wdsp_glink_free_tx_buf - Function to free tx buffer
+ * priv:        Pointer to the channel
+ * pkt_priv:    Pointer to the tx buffer
+ */
+static void wdsp_glink_free_tx_buf(const void *priv, const void *pkt_priv)
+{
+	struct wdsp_glink_tx_buf *tx_buf = (struct wdsp_glink_tx_buf *)pkt_priv;
+	struct wdsp_glink_priv *wpriv;
+	struct wdsp_glink_ch *ch;
+
+	if (!priv) {
+		pr_err("%s: Invalid priv\n", __func__);
+		return;
+	}
+	if (!tx_buf) {
+		pr_err("%s: Invalid tx_buf\n", __func__);
+		return;
+	}
+
+	ch = (struct wdsp_glink_ch *)priv;
+	wpriv = ch->wpriv;
+	/* Work queue to free tx pkt */
+	INIT_WORK(&tx_buf->free_tx_work, wdsp_glink_free_tx_buf_work);
+	queue_work(wpriv->work_queue, &tx_buf->free_tx_work);
+}
+
+/*
  * wdsp_glink_notify_rx - Glink notify rx callback for responses
  * handle:      Opaque Channel handle returned by GLink
  * priv:        Private pointer to the channel
@@ -183,14 +224,8 @@
 static void wdsp_glink_notify_tx_done(void *handle, const void *priv,
 				      const void *pkt_priv, const void *ptr)
 {
-	if (!pkt_priv) {
-		pr_err("%s: Invalid parameter\n", __func__);
-		return;
-	}
-	/* Free tx pkt */
-	vfree(pkt_priv);
+	wdsp_glink_free_tx_buf(priv, pkt_priv);
 }
-
 /*
  * wdsp_glink_notify_tx_abort - Glink notify tx abort callback to
  * free tx buffer
@@ -201,12 +236,7 @@
 static void wdsp_glink_notify_tx_abort(void *handle, const void *priv,
 				       const void *pkt_priv)
 {
-	if (!pkt_priv) {
-		pr_err("%s: Invalid parameter\n", __func__);
-		return;
-	}
-	/* Free tx pkt */
-	vfree(pkt_priv);
+	wdsp_glink_free_tx_buf(priv, pkt_priv);
 }
 
 /*
@@ -555,7 +585,7 @@
 		goto done;
 	}
 	ch = kcalloc(no_of_channels, sizeof(struct wdsp_glink_ch *),
-		     GFP_KERNEL);
+		     GFP_ATOMIC);
 	if (!ch) {
 		ret = -ENOMEM;
 		goto done;
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index a4c2f0c..e1ca532 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -9,7 +9,7 @@
 
 if SOUNDWIRE
 config SOUNDWIRE_WCD_CTRL
-	depends on WCD9335_CODEC || WCD934X_CODEC
+	depends on WCD9XXX_CODEC_CORE
 	tristate "QTI WCD CODEC Soundwire controller"
 	default n
 	help
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 838783c..24d4492 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1004,7 +1004,7 @@
 				ret = 0;
 				ms = 8LL * 1000LL * xfer->len;
 				do_div(ms, xfer->speed_hz);
-				ms += ms + 100; /* some tolerance */
+				ms += ms + 200; /* some tolerance */
 
 				if (ms > UINT_MAX)
 					ms = UINT_MAX;
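
The change above keeps the shape of the timeout computation (twice the theoretical transfer time plus a fixed slack) but raises the slack from 100 ms to 200 ms. A rough worked example, with the transfer length and clock assumed:

	/* e.g. a 4096-byte transfer at 1 MHz:
	 *   ms  = 8 * 1000 * 4096 / 1000000  ~= 32 ms theoretical transfer time
	 *   ms += ms + 200                    -> 32 + 32 + 200 = 264 ms timeout
	 */
	unsigned long long ms = 8LL * 1000LL * 4096;
	do_div(ms, 1000000);
	ms += ms + 200;		/* some tolerance */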
diff --git a/drivers/spmi/spmi-pmic-arb-debug.c b/drivers/spmi/spmi-pmic-arb-debug.c
index c5a31a9..2c90bef 100644
--- a/drivers/spmi/spmi-pmic-arb-debug.c
+++ b/drivers/spmi/spmi-pmic-arb-debug.c
@@ -11,6 +11,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/io.h>
@@ -69,6 +70,7 @@
 struct spmi_pmic_arb_debug {
 	void __iomem		*addr;
 	raw_spinlock_t		lock;
+	struct clk		*clock;
 };
 
 static inline void pmic_arb_debug_write(struct spmi_pmic_arb_debug *pa,
@@ -181,6 +183,12 @@
 	else
 		return -EINVAL;
 
+	rc = clk_prepare_enable(pa->clock);
+	if (rc) {
+		pr_err("%s: failed to enable core clock, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
 	raw_spin_lock_irqsave(&pa->lock, flags);
 
 	rc = pmic_arb_debug_issue_command(ctrl, opc, sid, addr, len);
@@ -192,6 +200,7 @@
 		buf[i] = pmic_arb_debug_read(pa, PMIC_ARB_DEBUG_RDATA(i));
 done:
 	raw_spin_unlock_irqrestore(&pa->lock, flags);
+	clk_disable_unprepare(pa->clock);
 
 	return rc;
 }
@@ -221,6 +230,12 @@
 	else
 		return -EINVAL;
 
+	rc = clk_prepare_enable(pa->clock);
+	if (rc) {
+		pr_err("%s: failed to enable core clock, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
 	raw_spin_lock_irqsave(&pa->lock, flags);
 
 	/* Write data to FIFO */
@@ -230,6 +245,7 @@
 	rc = pmic_arb_debug_issue_command(ctrl, opc, sid, addr, len);
 
 	raw_spin_unlock_irqrestore(&pa->lock, flags);
+	clk_disable_unprepare(pa->clock);
 
 	return rc;
 }
@@ -293,6 +309,17 @@
 		goto err_put_ctrl;
 	}
 
+	if (of_find_property(pdev->dev.of_node, "clock-names", NULL)) {
+		pa->clock = devm_clk_get(&pdev->dev, "core_clk");
+		if (IS_ERR(pa->clock)) {
+			rc = PTR_ERR(pa->clock);
+			if (rc != -EPROBE_DEFER)
+				dev_err(&pdev->dev, "unable to request core clock, rc=%d\n",
+					rc);
+			goto err_put_ctrl;
+		}
+	}
+
 	platform_set_drvdata(pdev, ctrl);
 	raw_spin_lock_init(&pa->lock);
 
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 9cc85ee..bfd4b7a 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -576,10 +576,16 @@
 	int last = pa->max_apid >> 5;
 	u32 status, enable;
 	int i, id, apid;
+	/* status based dispatch */
+	bool acc_valid = false;
+	u32 irq_status = 0;
 
 	for (i = first; i <= last; ++i) {
 		status = readl_relaxed(pa->acc_status +
 				      pa->ver_ops->owner_acc_status(pa->ee, i));
+		if (status)
+			acc_valid = true;
+
 		while (status) {
 			id = ffs(status) - 1;
 			status &= ~BIT(id);
@@ -595,6 +601,28 @@
 				periph_interrupt(pa, apid, show);
 		}
 	}
+
+	/* ACC_STATUS is empty but IRQ fired check IRQ_STATUS */
+	if (!acc_valid) {
+		for (i = pa->min_apid; i <= pa->max_apid; i++) {
+			/* skip if APPS is not irq owner */
+			if (pa->apid_data[i].irq_owner != pa->ee)
+				continue;
+
+			irq_status = readl_relaxed(pa->intr +
+						pa->ver_ops->irq_status(i));
+			if (irq_status) {
+				enable = readl_relaxed(pa->intr +
+						pa->ver_ops->acc_enable(i));
+				if (enable & SPMI_PIC_ACC_ENABLE_BIT) {
+					dev_dbg(&pa->spmic->dev,
+						"Dispatching IRQ for apid=%d status=%x\n",
+						i, irq_status);
+					periph_interrupt(pa, i, show);
+				}
+			}
+		}
+	}
 }
 
 static void pmic_arb_chained_irq(struct irq_desc *desc)
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index ad6028f..c9028bb 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -2,7 +2,7 @@
  * drivers/staging/android/ion/ion_system_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -123,9 +123,11 @@
 
 		if (order)
 			gfp_mask = high_order_gfp_flags;
+
 		page = alloc_pages(gfp_mask, order);
-		ion_pages_sync_for_device(dev, page, PAGE_SIZE << order,
-					  DMA_BIDIRECTIONAL);
+		if (page)
+			ion_pages_sync_for_device(dev, page, PAGE_SIZE << order,
+						  DMA_BIDIRECTIONAL);
 	}
 	if (!page)
 		return 0;
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
index db4fc63..cc77674 100644
--- a/drivers/staging/android/uapi/msm_ion.h
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -84,7 +84,6 @@
 #define ION_FLAG_CP_NON_PIXEL		ION_BIT(20)
 #define ION_FLAG_CP_CAMERA		ION_BIT(21)
 #define ION_FLAG_CP_HLOS		ION_BIT(22)
-#define ION_FLAG_CP_HLOS_FREE		ION_BIT(23)
 #define ION_FLAG_CP_SEC_DISPLAY		ION_BIT(25)
 #define ION_FLAG_CP_APP			ION_BIT(26)
 #define ION_FLAG_CP_CAMERA_PREVIEW	ION_BIT(27)
@@ -96,13 +95,6 @@
  */
 #define ION_FLAG_SECURE			ION_BIT(ION_HEAP_ID_RESERVED)
 
-/**
- * Flag for clients to force contiguous memort allocation
- *
- * Use of this flag is carefully monitored!
- */
-#define ION_FLAG_FORCE_CONTIGUOUS	ION_BIT(30)
-
 /*
  * Used in conjunction with heap which pool memory to force an allocation
  * to come from the page allocator directly instead of from the pool allocation
@@ -113,7 +105,6 @@
  * Deprecated! Please use the corresponding ION_FLAG_*
  */
 #define ION_SECURE ION_FLAG_SECURE
-#define ION_FORCE_CONTIGUOUS ION_FLAG_FORCE_CONTIGUOUS
 
 /**
  * Macro should be used with ion_heap_ids defined above.
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index ea15bc1..197201a 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -854,7 +854,7 @@
 		tmp = data[i] - statP->mean;
 		sample_sum += tmp * tmp;
 	}
-	statP->stddev = int_sqrt((long)sample_sum) / length;
+	statP->stddev = int_sqrt((long)sample_sum / length);
 }
 
 /**
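
The fix above moves the division inside the square root: the standard deviation is the square root of the mean squared deviation, not the square root of the sum divided by the sample count. A small worked example with assumed samples:

	/* stddev = sqrt(sum((x[i] - mean)^2) / N)
	 * Assumed samples {2, 4, 4, 4, 5, 5, 7, 9}: mean = 5,
	 * sum of squared deviations = 32, 32 / 8 = 4, sqrt(4) = 2.
	 * The old expression computed int_sqrt(32) / 8 = 5 / 8 = 0. */
	unsigned long stddev = int_sqrt(32 / 8);	/* = 2 */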
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
index 611e07b..057c9b5 100644
--- a/drivers/staging/media/cec/cec-adap.c
+++ b/drivers/staging/media/cec/cec-adap.c
@@ -1017,7 +1017,7 @@
 		las->log_addr[i] = CEC_LOG_ADDR_INVALID;
 		if (last_la == CEC_LOG_ADDR_INVALID ||
 		    last_la == CEC_LOG_ADDR_UNREGISTERED ||
-		    !(last_la & type2mask[type]))
+		    !((1 << last_la) & type2mask[type]))
 			last_la = la_list[0];
 
 		err = cec_config_log_addr(adap, i, last_la);
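
The corrected condition treats type2mask[] as a bitmask indexed by logical-address number, so the previously claimed address must be converted to a single bit before masking. An illustration with assumed values:

	u16 mask = (1 << 4) | (1 << 8) | (1 << 11);	/* assumed allowed addresses */
	u8 last_la = 4;

	/* old check:  last_la & mask        ->    4 & 0x910 = 0    (wrongly rejected)
	 * new check: (1 << last_la) & mask  -> 0x10 & 0x910 = 0x10 (address reused) */
	bool reuse = (1 << last_la) & mask;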
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 553e8d5..6513ace 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -890,7 +890,7 @@
 		return _FAIL;
 
 
-	if (len > MAX_IE_SZ)
+	if (len < 0 || len > MAX_IE_SZ)
 		return _FAIL;
 
 	pbss_network->IELength = len;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 01ea228..155fe0e 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1287,6 +1287,18 @@
 	 */
 	if (dump_payload)
 		goto after_immediate_data;
+	/*
+	 * Check for underflow case where both EDTL and immediate data payload
+	 * exceeds what is presented by CDB's TRANSFER LENGTH, and what has
+	 * already been set in target_cmd_size_check() as se_cmd->data_length.
+	 *
+	 * For this special case, fail the command and dump the immediate data
+	 * payload.
+	 */
+	if (cmd->first_burst_len > cmd->se_cmd.data_length) {
+		cmd->sense_reason = TCM_INVALID_CDB_FIELD;
+		goto after_immediate_data;
+	}
 
 	immed_ret = iscsit_handle_immediate_data(cmd, hdr,
 					cmd->first_burst_len);
@@ -4431,8 +4443,11 @@
 	 * always sleep waiting for RX/TX thread shutdown to complete
 	 * within iscsit_close_connection().
 	 */
-	if (!conn->conn_transport->rdma_shutdown)
+	if (!conn->conn_transport->rdma_shutdown) {
 		sleep = cmpxchg(&conn->tx_thread_active, true, false);
+		if (!sleep)
+			return;
+	}
 
 	atomic_set(&conn->conn_logout_remove, 0);
 	complete(&conn->conn_logout_comp);
@@ -4448,8 +4463,11 @@
 {
 	int sleep = 1;
 
-	if (!conn->conn_transport->rdma_shutdown)
+	if (!conn->conn_transport->rdma_shutdown) {
 		sleep = cmpxchg(&conn->tx_thread_active, true, false);
+		if (!sleep)
+			return;
+	}
 
 	atomic_set(&conn->conn_logout_remove, 0);
 	complete(&conn->conn_logout_comp);
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index e2c970a..be52838 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -131,7 +131,7 @@
 void	release_se_kmem_caches(void);
 u32	scsi_get_new_index(scsi_index_t);
 void	transport_subsystem_check_init(void);
-void	transport_cmd_finish_abort(struct se_cmd *, int);
+int	transport_cmd_finish_abort(struct se_cmd *, int);
 unsigned char *transport_dump_cmd_direction(struct se_cmd *);
 void	transport_dump_dev_state(struct se_device *, char *, int *);
 void	transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 4f229e7..27dd1e1 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -75,7 +75,7 @@
 	kfree(tmr);
 }
 
-static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
 {
 	unsigned long flags;
 	bool remove = true, send_tas;
@@ -91,7 +91,7 @@
 		transport_send_task_abort(cmd);
 	}
 
-	transport_cmd_finish_abort(cmd, remove);
+	return transport_cmd_finish_abort(cmd, remove);
 }
 
 static int target_check_cdb_and_preempt(struct list_head *list,
@@ -185,8 +185,8 @@
 		cancel_work_sync(&se_cmd->work);
 		transport_wait_for_tasks(se_cmd);
 
-		transport_cmd_finish_abort(se_cmd, true);
-		target_put_sess_cmd(se_cmd);
+		if (!transport_cmd_finish_abort(se_cmd, true))
+			target_put_sess_cmd(se_cmd);
 
 		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
 				" ref_tag: %llu\n", ref_tag);
@@ -286,8 +286,8 @@
 		cancel_work_sync(&cmd->work);
 		transport_wait_for_tasks(cmd);
 
-		transport_cmd_finish_abort(cmd, 1);
-		target_put_sess_cmd(cmd);
+		if (!transport_cmd_finish_abort(cmd, 1))
+			target_put_sess_cmd(cmd);
 	}
 }
 
@@ -385,8 +385,8 @@
 		cancel_work_sync(&cmd->work);
 		transport_wait_for_tasks(cmd);
 
-		core_tmr_handle_tas_abort(cmd, tas);
-		target_put_sess_cmd(cmd);
+		if (!core_tmr_handle_tas_abort(cmd, tas))
+			target_put_sess_cmd(cmd);
 	}
 }
 
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 077344c..1f9bfa4 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -673,9 +673,10 @@
 		percpu_ref_put(&lun->lun_ref);
 }
 
-void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
 	bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
+	int ret = 0;
 
 	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
 		transport_lun_remove_cmd(cmd);
@@ -687,9 +688,11 @@
 		cmd->se_tfo->aborted_task(cmd);
 
 	if (transport_cmd_check_stop_to_fabric(cmd))
-		return;
+		return 1;
 	if (remove && ack_kref)
-		transport_put_cmd(cmd);
+		ret = transport_put_cmd(cmd);
+
+	return ret;
 }
 
 static void target_complete_failure_work(struct work_struct *work)
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 355d013..cd5bde3 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -118,6 +118,10 @@
 static DEFINE_MUTEX(cooling_cpufreq_lock);
 
 static unsigned int cpufreq_dev_count;
+static int8_t cpuhp_registered;
+static struct work_struct cpuhp_register_work;
+static struct cpumask cpus_pending_online;
+static DEFINE_MUTEX(core_isolate_lock);
 
 static DEFINE_MUTEX(cooling_list_lock);
 static LIST_HEAD(cpufreq_dev_list);
@@ -212,6 +216,49 @@
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
 
+static int cpufreq_hp_offline(unsigned int offline_cpu)
+{
+	struct cpufreq_cooling_device *cpufreq_dev;
+
+	mutex_lock(&cooling_list_lock);
+	list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+		if (!cpumask_test_cpu(offline_cpu, &cpufreq_dev->allowed_cpus))
+			continue;
+
+		mutex_lock(&core_isolate_lock);
+		if (cpufreq_dev->cpufreq_state == cpufreq_dev->max_level)
+			sched_unisolate_cpu_unlocked(offline_cpu);
+		mutex_unlock(&core_isolate_lock);
+		break;
+	}
+	mutex_unlock(&cooling_list_lock);
+
+	return 0;
+}
+
+static int cpufreq_hp_online(unsigned int online_cpu)
+{
+	struct cpufreq_cooling_device *cpufreq_dev;
+	int ret = 0;
+
+	mutex_lock(&cooling_list_lock);
+	list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+		if (!cpumask_test_cpu(online_cpu, &cpufreq_dev->allowed_cpus))
+			continue;
+
+		mutex_lock(&core_isolate_lock);
+		if (cpufreq_dev->cpufreq_state == cpufreq_dev->max_level) {
+			cpumask_set_cpu(online_cpu, &cpus_pending_online);
+			ret = NOTIFY_BAD;
+		}
+		mutex_unlock(&core_isolate_lock);
+		break;
+	}
+	mutex_unlock(&cooling_list_lock);
+
+	return ret;
+}
+
 /**
  * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
  * @nb:	struct notifier_block * with callback info.
@@ -611,6 +658,9 @@
 	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
 	unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
 	unsigned int clip_freq;
+	unsigned long prev_state;
+	struct device *cpu_dev;
+	int ret = 0;
 
 	/* Request state should be less than max_level */
 	if (WARN_ON(state > cpufreq_device->max_level))
@@ -620,13 +670,34 @@
 	if (cpufreq_device->cpufreq_state == state)
 		return 0;
 
+	mutex_lock(&core_isolate_lock);
+	prev_state = cpufreq_device->cpufreq_state;
 	cpufreq_device->cpufreq_state = state;
 	/* If state is the last, isolate the CPU */
-	if (state == cpufreq_device->max_level)
-		return sched_isolate_cpu(cpu);
-	else if (state < cpufreq_device->max_level)
-		sched_unisolate_cpu(cpu);
-
+	if (state == cpufreq_device->max_level) {
+		if (cpu_online(cpu))
+			sched_isolate_cpu(cpu);
+		mutex_unlock(&core_isolate_lock);
+		return ret;
+	} else if ((prev_state == cpufreq_device->max_level)
+			&& (state < cpufreq_device->max_level)) {
+		if (cpumask_test_and_clear_cpu(cpu, &cpus_pending_online)) {
+			cpu_dev = get_cpu_device(cpu);
+			mutex_unlock(&core_isolate_lock);
+			/*
+			 * Unlock before calling the device_online.
+			 * Else, this will lead to deadlock, since the hp
+			 * online callback will be blocked on this mutex.
+			 */
+			ret = device_online(cpu_dev);
+			if (ret)
+				pr_err("CPU:%d online error:%d\n", cpu, ret);
+			goto update_frequency;
+		} else
+			sched_unisolate_cpu(cpu);
+	}
+	mutex_unlock(&core_isolate_lock);
+update_frequency:
 	clip_freq = cpufreq_device->freq_table[state];
 	cpufreq_device->clipped_freq = clip_freq;
 
@@ -878,6 +949,16 @@
 	return max;
 }
 
+static void register_cdev(struct work_struct *work)
+{
+	int ret = 0;
+
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+				"cpu_cooling/no-sched",	cpufreq_hp_online,
+				cpufreq_hp_offline);
+	if (ret < 0)
+		pr_err("Error registering for hotplug callback:%d\n", ret);
+}
 /**
  * __cpufreq_cooling_register - helper function to create cpufreq cooling device
  * @np: a valid struct device_node to the cooling device device tree node
@@ -1025,6 +1106,12 @@
 	if (!cpufreq_dev_count++)
 		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
 					  CPUFREQ_POLICY_NOTIFIER);
+	if (!cpuhp_registered) {
+		cpuhp_registered = 1;
+		cpumask_clear(&cpus_pending_online);
+		INIT_WORK(&cpuhp_register_work, register_cdev);
+		queue_work(system_wq, &cpuhp_register_work);
+	}
 	mutex_unlock(&cooling_cpufreq_lock);
 
 	goto put_policy;
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index f6e1b86..be33725 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -40,3 +40,23 @@
 	  The virtual sensor information includes the underlying thermal
 	  sensors to query for temperature and the aggregation logic to
 	  determine the virtual sensor temperature.
+
+config QTI_REG_COOLING_DEVICE
+	bool "QTI Regulator cooling device"
+	depends on THERMAL_OF && MSM_QMP
+	help
+	  This enables the Regulator cooling device. This cooling device
+	  will be used by QTI chipset to place a floor voltage restriction at
+	  low temperatures. The regulator cooling device will message the AOP
+	  using a mailbox to establish the floor voltage.
+
+config QTI_QMI_COOLING_DEVICE
+	bool "QTI QMI cooling devices"
+	depends on MSM_QMI_INTERFACE && THERMAL_OF
+	help
+	  This enables the QTI remote subsystem cooling devices. The QTI
+	  chipset uses these cooling devices to apply remote subsystem
+	  mitigations such as remote processor passive mitigation and
+	  remote subsystem voltage restriction at low temperatures. The
+	  QMI cooling devices interface with the remote subsystems over
+	  the QTI QMI interface.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 8859380..000c6e7 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -1,5 +1,7 @@
 obj-$(CONFIG_QCOM_TSENS)	+= qcom_tsens.o
 qcom_tsens-y			+= tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-8996.o
 obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o
-obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o
+obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o lmh_dbg.o
 obj-$(CONFIG_QTI_VIRTUAL_SENSOR) += qti_virtual_sensor.o
+obj-$(CONFIG_QTI_REG_COOLING_DEVICE) += regulator_cooling.o
+obj-$(CONFIG_QTI_QMI_COOLING_DEVICE) += thermal_mitigation_device_service_v01.o qmi_cooling.o
diff --git a/drivers/thermal/qcom/lmh_dbg.c b/drivers/thermal/qcom/lmh_dbg.c
new file mode 100644
index 0000000..74ffeda
--- /dev/null
+++ b/drivers/thermal/qcom/lmh_dbg.c
@@ -0,0 +1,567 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <soc/qcom/scm.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+
+#include "lmh_dbg.h"
+
+#define LMH_MON_NAME			"lmh_monitor"
+#define LMH_DBGFS_READ			"data"
+#define LMH_DBGFS_CONFIG_READ		"config"
+#define LMH_DBGFS_READ_TYPES		"data_types"
+#define LMH_DBGFS_CONFIG_TYPES		"config_types"
+#define LMH_SCM_PAYLOAD_SIZE		10
+#define LMH_READ_LINE_LENGTH            10
+#define LMH_DEBUG_READ_TYPE		0x0
+#define LMH_DEBUG_CONFIG_TYPE		0x1
+#define LMH_DEBUG_SET			0x08
+#define LMH_DEBUG_READ_BUF_SIZE		0x09
+#define LMH_DEBUG_READ			0x0A
+#define LMH_DEBUG_GET_TYPE		0x0B
+
+struct lmh_driver_data {
+	struct device			*dev;
+	uint32_t			*read_type;
+	uint32_t			*config_type;
+	uint32_t			read_type_count;
+	uint32_t			config_type_count;
+	struct dentry			*debugfs_parent;
+	struct dentry			*debug_read;
+	struct dentry			*debug_config;
+	struct dentry			*debug_read_type;
+	struct dentry			*debug_config_type;
+};
+
+enum lmh_read_type {
+	LMH_READ_TYPE = 0,
+	LMH_CONFIG_TYPE,
+};
+
+static struct lmh_driver_data		*lmh_data;
+
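+/*
+ * lmh_debug_read() - fetch the LMH debug payload from TZ.
+ *
+ * Queries TZ over SCM for the number of debug entries, (re)allocates a
+ * buffer sized for LMH_READ_LINE_LENGTH words per entry and reads the
+ * payload into it. The buffer is cached across calls and only
+ * reallocated when the reported size changes.
+ *
+ * Returns the payload size in bytes on success or a negative errno.
+ */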
+static int lmh_debug_read(uint32_t **buf)
+{
+	int ret = 0, size = 0, tz_ret = 0;
+	static uint32_t curr_size;
+	struct scm_desc desc_arg;
+	static uint32_t *payload;
+
+	desc_arg.arginfo = SCM_ARGS(0);
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,
+			LMH_DEBUG_READ_BUF_SIZE), &desc_arg);
+	size = desc_arg.ret[0];
+	if (ret) {
+		pr_err("Error in SCM v%d get debug buffer size call. err:%d\n",
+				(is_scm_armv8()) ? 8 : 7, ret);
+		goto get_dbg_exit;
+	}
+	if (!size) {
+		pr_err("No Debug data to read.\n");
+		ret = -ENODEV;
+		goto get_dbg_exit;
+	}
+	size = SCM_BUFFER_SIZE(uint32_t) * size * LMH_READ_LINE_LENGTH;
+	if (curr_size != size) {
+		if (payload)
+			devm_kfree(lmh_data->dev, payload);
+		payload = devm_kzalloc(lmh_data->dev, PAGE_ALIGN(size),
+				       GFP_KERNEL);
+		if (!payload) {
+			ret = -ENOMEM;
+			goto get_dbg_exit;
+		}
+		curr_size = size;
+	}
+
+	/* &payload may be a physical address > 4 GB */
+	desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+	desc_arg.args[1] = curr_size;
+	desc_arg.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+	dmac_flush_range(payload, (void *)payload + curr_size);
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, LMH_DEBUG_READ),
+			&desc_arg);
+	dmac_inv_range(payload, (void *)payload + curr_size);
+	tz_ret = desc_arg.ret[0];
+	/* Have memory barrier before we access the TZ data */
+	mb();
+	if (ret) {
+		pr_err("Error in get debug read. err:%d\n", ret);
+		goto get_dbg_exit;
+	}
+	if (tz_ret) {
+		pr_err("TZ API returned error. err:%d\n", tz_ret);
+		ret = tz_ret;
+		goto get_dbg_exit;
+	}
+
+get_dbg_exit:
+	if (ret && payload) {
+		devm_kfree(lmh_data->dev, payload);
+		payload = NULL;
+		curr_size = 0;
+	}
+	*buf = payload;
+
+	return (ret < 0) ? ret : curr_size;
+}
+
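+/*
+ * lmh_debug_config_write() - send a debug configuration request to TZ.
+ * @cmd_id: SCM command to issue (currently LMH_DEBUG_SET).
+ * @buf: parsed user input; the first three words are passed as discrete
+ *       SCM arguments, the remaining words as a flushed payload buffer.
+ * @size: number of words in @buf, including the three leading words.
+ */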
+static int lmh_debug_config_write(uint32_t cmd_id, uint32_t *buf, int size)
+{
+	int ret = 0, size_bytes = 0;
+	struct scm_desc desc_arg;
+	uint32_t *payload = NULL;
+
+	size_bytes = (size - 3) * sizeof(uint32_t);
+	payload = devm_kzalloc(lmh_data->dev, PAGE_ALIGN(size_bytes),
+			       GFP_KERNEL);
+	if (!payload) {
+		ret = -ENOMEM;
+		goto set_cfg_exit;
+	}
+	memcpy(payload, &buf[3], size_bytes);
+
+	/* &payload may be a physical address > 4 GB */
+	desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+	desc_arg.args[1] = size_bytes;
+	desc_arg.args[2] = buf[0];
+	desc_arg.args[3] = buf[1];
+	desc_arg.args[4] = buf[2];
+	desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL, SCM_VAL,
+					SCM_VAL);
+	dmac_flush_range(payload, (void *)payload + size_bytes);
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, cmd_id), &desc_arg);
+	/* Have memory barrier before we access the TZ data */
+	mb();
+	if (ret) {
+		pr_err("Error in config debug read. err:%d\n", ret);
+		goto set_cfg_exit;
+	}
+
+set_cfg_exit:
+	return ret;
+}
+
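+/*
+ * lmh_parse_and_extract() - parse debugfs input into TZ requests.
+ *
+ * Splits the user buffer into ';' separated lines of space separated
+ * integers, converts each line into a u32 array and forwards it to TZ
+ * through lmh_debug_config_write(). Each line must contain at least
+ * three values.
+ */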
+static int lmh_parse_and_extract(const char __user *user_buf, size_t count,
+				enum lmh_read_type type)
+{
+	char *local_buf = NULL, *token = NULL, *curr_ptr = NULL, *token1 = NULL;
+	char *next_line = NULL;
+	int ret = 0, data_ct = 0, i = 0, size = 0;
+	uint32_t *config_buf = NULL;
+
+	/* Allocate two extra bytes to append a ';' and NUL terminate */
+	local_buf = kzalloc(count + 2, GFP_KERNEL);
+	if (!local_buf) {
+		ret = -ENOMEM;
+		goto dfs_cfg_write_exit;
+	}
+	if (copy_from_user(local_buf, user_buf, count)) {
+		pr_err("user buf error\n");
+		ret = -EFAULT;
+		goto dfs_cfg_write_exit;
+	}
+	size = count + (strnchr(local_buf, count, '\n') ? 1 : 2);
+	local_buf[size - 2] = ';';
+	local_buf[size - 1] = '\0';
+	curr_ptr = next_line = local_buf;
+	while ((token1 = strnchr(next_line, local_buf + size - next_line, ';'))
+		!= NULL) {
+		data_ct = 0;
+		*token1 = '\0';
+		curr_ptr = next_line;
+		next_line = token1 + 1;
+		for (token = (char *)curr_ptr; token &&
+			((token = strnchr(token, next_line - token, ' '))
+			 != NULL); token++)
+			data_ct++;
+		if (data_ct < 2) {
+			pr_err("Invalid format string:[%s]\n", curr_ptr);
+			ret = -EINVAL;
+			goto dfs_cfg_write_exit;
+		}
+		config_buf = kzalloc((++data_ct) * sizeof(uint32_t),
+				GFP_KERNEL);
+		if (!config_buf) {
+			ret = -ENOMEM;
+			goto dfs_cfg_write_exit;
+		}
+		pr_debug("Input:%s data_ct:%d\n", curr_ptr, data_ct);
+		for (i = 0, token = (char *)curr_ptr; token && (i < data_ct);
+			i++) {
+			token = strnchr(token, next_line - token, ' ');
+			if (token)
+				*token = '\0';
+			ret = kstrtouint(curr_ptr, 0, &config_buf[i]);
+			if (ret < 0) {
+				pr_err("Data[%s] scan error. err:%d\n",
+					curr_ptr, ret);
+				kfree(config_buf);
+				goto dfs_cfg_write_exit;
+			}
+			if (token)
+				curr_ptr = ++token;
+		}
+		switch (type) {
+		case LMH_READ_TYPE:
+		case LMH_CONFIG_TYPE:
+			ret = lmh_debug_config_write(LMH_DEBUG_SET,
+					config_buf, data_ct);
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+		kfree(config_buf);
+		if (ret) {
+			pr_err("Config error. type:%d err:%d\n", type, ret);
+			goto dfs_cfg_write_exit;
+		}
+	}
+
+dfs_cfg_write_exit:
+	kfree(local_buf);
+	return ret;
+}
+
+static ssize_t lmh_dbgfs_config_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	lmh_parse_and_extract(user_buf, count, LMH_CONFIG_TYPE);
+	return count;
+}
+
+static int lmh_dbgfs_data_read(struct seq_file *seq_fp, void *data)
+{
+	static uint32_t *read_buf;
+	static int read_buf_size;
+	int idx = 0, ret = 0;
+
+	if (!read_buf_size) {
+		ret = lmh_debug_read(&read_buf);
+		if (ret <= 0)
+			goto dfs_read_exit;
+		if (!read_buf || ret < sizeof(uint32_t)) {
+			ret = -EINVAL;
+			goto dfs_read_exit;
+		}
+		read_buf_size = ret;
+		ret = 0;
+	}
+
+	do {
+		seq_printf(seq_fp, "0x%x ", read_buf[idx]);
+		if (seq_has_overflowed(seq_fp)) {
+			pr_err("Seq overflow. idx:%d\n", idx);
+			goto dfs_read_exit;
+		}
+		idx++;
+		if ((idx % LMH_READ_LINE_LENGTH) == 0) {
+			seq_puts(seq_fp, "\n");
+			if (seq_has_overflowed(seq_fp)) {
+				pr_err("Seq overflow. idx:%d\n", idx);
+				goto dfs_read_exit;
+			}
+		}
+	} while (idx < (read_buf_size / sizeof(uint32_t)));
+	read_buf_size = 0;
+	read_buf = NULL;
+
+dfs_read_exit:
+	return ret;
+}
+
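+/*
+ * lmh_get_recurssive_data() - read a TZ list in LMH_SCM_PAYLOAD_SIZE chunks.
+ *
+ * Repeats the LMH_DEBUG_GET_TYPE SCM call, advancing the offset passed in
+ * desc_arg->args[cmd_idx], until all *size entries have been copied from
+ * the payload buffer into *dest_buf. *dest_buf is allocated on the first
+ * iteration, once the total count is known.
+ */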
+static int lmh_get_recurssive_data(struct scm_desc *desc_arg, uint32_t cmd_idx,
+		uint32_t *payload, uint32_t *size, uint32_t **dest_buf)
+{
+	int idx = 0, ret = 0;
+	uint32_t next = 0;
+
+	do {
+		desc_arg->args[cmd_idx] = next;
+		dmac_flush_range(payload, (void *)payload +
+				sizeof(*payload) * LMH_SCM_PAYLOAD_SIZE);
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, LMH_DEBUG_GET_TYPE),
+				desc_arg);
+		dmac_inv_range(payload, (void *)payload +
+				sizeof(*payload) * LMH_SCM_PAYLOAD_SIZE);
+		*size = desc_arg->ret[0];
+		/* Have barrier before reading from TZ data */
+		mb();
+		if (ret) {
+			pr_err("Error in SCM get type. cmd:%x err:%d\n",
+				LMH_DEBUG_GET_TYPE, ret);
+			return ret;
+		}
+		if (!*size) {
+			pr_err("No LMH device supported.\n");
+			return -ENODEV;
+		}
+		if (!*dest_buf) {
+			*dest_buf = devm_kcalloc(lmh_data->dev, *size,
+					sizeof(**dest_buf), GFP_KERNEL);
+			if (!*dest_buf)
+				return -ENOMEM;
+		}
+
+		for (idx = next;
+			idx < min((next + LMH_SCM_PAYLOAD_SIZE), *size);
+			idx++)
+			(*dest_buf)[idx] = payload[idx - next];
+		next += LMH_SCM_PAYLOAD_SIZE;
+	} while (next < *size);
+
+	return ret;
+}
+
+static ssize_t lmh_dbgfs_data_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	lmh_parse_and_extract(user_buf, count, LMH_READ_TYPE);
+	return count;
+}
+
+static int lmh_dbgfs_data_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, lmh_dbgfs_data_read, inode->i_private);
+}
+
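+/*
+ * lmh_debug_get_types() - enumerate the supported read or config types.
+ * @is_read: true for read types, false for config types.
+ * @buf: set to the cached type list on success.
+ *
+ * The list is fetched from TZ once and cached in lmh_data; later calls
+ * return the cached copy. Returns the number of types or a negative
+ * errno.
+ */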
+static int lmh_debug_get_types(bool is_read, uint32_t **buf)
+{
+	int ret = 0;
+	uint32_t size = 0;
+	struct scm_desc desc_arg;
+	uint32_t *payload = NULL, *dest_buf = NULL;
+
+	if (is_read && lmh_data->read_type) {
+		*buf = lmh_data->read_type;
+		return lmh_data->read_type_count;
+	} else if (!is_read && lmh_data->config_type) {
+		*buf = lmh_data->config_type;
+		return lmh_data->config_type_count;
+	}
+	payload = devm_kzalloc(lmh_data->dev,
+				PAGE_ALIGN(LMH_SCM_PAYLOAD_SIZE *
+				sizeof(*payload)), GFP_KERNEL);
+	if (!payload)
+		return -ENOMEM;
+	/* &payload may be a physical address > 4 GB */
+	desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+	desc_arg.args[1] =
+		SCM_BUFFER_SIZE(uint32_t) * LMH_SCM_PAYLOAD_SIZE;
+	desc_arg.args[2] = (is_read) ?
+			LMH_DEBUG_READ_TYPE : LMH_DEBUG_CONFIG_TYPE;
+	desc_arg.arginfo = SCM_ARGS(4, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL);
+	ret = lmh_get_recurssive_data(&desc_arg, 3, payload, &size, &dest_buf);
+	if (ret)
+		goto get_type_exit;
+	pr_debug("Total %s types:%d\n", (is_read) ? "read" : "config", size);
+	if (is_read) {
+		lmh_data->read_type = *buf = dest_buf;
+		lmh_data->read_type_count = size;
+	} else {
+		lmh_data->config_type = *buf = dest_buf;
+		lmh_data->config_type_count = size;
+	}
+
+get_type_exit:
+	if (ret) {
+		if (lmh_data->read_type_count) {
+			devm_kfree(lmh_data->dev, lmh_data->read_type);
+			lmh_data->read_type_count = 0;
+		}
+		if (lmh_data->config_type_count) {
+			devm_kfree(lmh_data->dev, lmh_data->config_type);
+			lmh_data->config_type_count = 0;
+		}
+	}
+	if (payload)
+		devm_kfree(lmh_data->dev, payload);
+
+	return (ret) ? ret : size;
+}
+
+static int lmh_get_types(struct seq_file *seq_fp, enum lmh_read_type type)
+{
+	int ret = 0, idx = 0, size = 0;
+	uint32_t *type_list = NULL;
+
+	switch (type) {
+	case LMH_READ_TYPE:
+		ret = lmh_debug_get_types(true, &type_list);
+		break;
+	case LMH_CONFIG_TYPE:
+		ret = lmh_debug_get_types(false, &type_list);
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (ret <= 0 || !type_list) {
+		pr_err("No device information. err:%d\n", ret);
+		return -ENODEV;
+	}
+	size = ret;
+	for (idx = 0; idx < size; idx++)
+		seq_printf(seq_fp, "0x%x ", type_list[idx]);
+	seq_puts(seq_fp, "\n");
+
+	return 0;
+}
+
+static int lmh_dbgfs_read_type(struct seq_file *seq_fp, void *data)
+{
+	return lmh_get_types(seq_fp, LMH_READ_TYPE);
+}
+
+static int lmh_dbgfs_read_type_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, lmh_dbgfs_read_type, inode->i_private);
+}
+
+static int lmh_dbgfs_config_type(struct seq_file *seq_fp, void *data)
+{
+	return lmh_get_types(seq_fp, LMH_CONFIG_TYPE);
+}
+
+static int lmh_dbgfs_config_type_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, lmh_dbgfs_config_type, inode->i_private);
+}
+
+static const struct file_operations lmh_dbgfs_config_fops = {
+	.write = lmh_dbgfs_config_write,
+};
+static const struct file_operations lmh_dbgfs_read_fops = {
+	.open = lmh_dbgfs_data_open,
+	.read = seq_read,
+	.write = lmh_dbgfs_data_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+static const struct file_operations lmh_dbgfs_read_type_fops = {
+	.open = lmh_dbgfs_read_type_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+static const struct file_operations lmh_dbgfs_config_type_fops = {
+	.open = lmh_dbgfs_config_type_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int lmh_check_tz_debug_cmds(void)
+{
+	if (!scm_is_call_available(SCM_SVC_LMH, LMH_DEBUG_SET)
+		|| !scm_is_call_available(SCM_SVC_LMH, LMH_DEBUG_READ_BUF_SIZE)
+		|| !scm_is_call_available(SCM_SVC_LMH, LMH_DEBUG_READ)
+		|| !scm_is_call_available(SCM_SVC_LMH, LMH_DEBUG_GET_TYPE)) {
+		pr_debug("LMH debug scm not available\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int lmh_debug_init(void)
+{
+	int ret = 0;
+
+	if (lmh_check_tz_debug_cmds()) {
+		pr_debug("Debug commands not available.\n");
+		return -ENODEV;
+	}
+
+	lmh_data->debugfs_parent = debugfs_create_dir(LMH_MON_NAME, NULL);
+	if (IS_ERR(lmh_data->debugfs_parent)) {
+		ret = PTR_ERR(lmh_data->debugfs_parent);
+		pr_debug("Error creating debugfs dir:%s. err:%d\n",
+					LMH_MON_NAME, ret);
+		return ret;
+	}
+	lmh_data->debug_read = debugfs_create_file(LMH_DBGFS_READ, 0600,
+					lmh_data->debugfs_parent, NULL,
+					&lmh_dbgfs_read_fops);
+	if (IS_ERR(lmh_data->debug_read)) {
+		pr_err("Error creating " LMH_DBGFS_READ " entry\n");
+		ret = PTR_ERR(lmh_data->debug_read);
+		goto dbg_reg_exit;
+	}
+	lmh_data->debug_config = debugfs_create_file(LMH_DBGFS_CONFIG_READ,
+					0200, lmh_data->debugfs_parent, NULL,
+					&lmh_dbgfs_config_fops);
+	if (IS_ERR(lmh_data->debug_config)) {
+		pr_err("Error creating " LMH_DBGFS_CONFIG_READ " entry\n");
+		ret = PTR_ERR(lmh_data->debug_config);
+		goto dbg_reg_exit;
+	}
+	lmh_data->debug_read_type = debugfs_create_file(LMH_DBGFS_READ_TYPES,
+					0400, lmh_data->debugfs_parent, NULL,
+					&lmh_dbgfs_read_type_fops);
+	if (IS_ERR(lmh_data->debug_read_type)) {
+		pr_err("Error creating " LMH_DBGFS_READ_TYPES " entry\n");
+		ret = PTR_ERR(lmh_data->debug_read_type);
+		goto dbg_reg_exit;
+	}
+	lmh_data->debug_config_type = debugfs_create_file(
+					LMH_DBGFS_CONFIG_TYPES,
+					0400, lmh_data->debugfs_parent, NULL,
+					&lmh_dbgfs_config_type_fops);
+	if (IS_ERR(lmh_data->debug_config_type)) {
+		pr_err("Error creating " LMH_DBGFS_CONFIG_TYPES " entry\n");
+		ret = PTR_ERR(lmh_data->debug_config_type);
+		goto dbg_reg_exit;
+	}
+
+dbg_reg_exit:
+	if (ret)
+		/*Clean up all the dbg nodes*/
+		debugfs_remove_recursive(lmh_data->debugfs_parent);
+
+	return ret;
+}
+
+int lmh_debug_register(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	if (lmh_data) {
+		pr_debug("LMH debug driver already initialized\n");
+		return -EEXIST;
+	}
+	lmh_data = devm_kzalloc(&pdev->dev, sizeof(*lmh_data), GFP_KERNEL);
+	if (!lmh_data)
+		return -ENOMEM;
+	lmh_data->dev = &pdev->dev;
+
+	ret = lmh_debug_init();
+	if (ret) {
+		pr_debug("LMH debug init failed. err:%d\n", ret);
+		goto probe_exit;
+	}
+
+	return ret;
+
+probe_exit:
+	lmh_data = NULL;
+	return ret;
+}
+EXPORT_SYMBOL(lmh_debug_register);
diff --git a/drivers/thermal/qcom/lmh_dbg.h b/drivers/thermal/qcom/lmh_dbg.h
new file mode 100644
index 0000000..6ceb832
--- /dev/null
+++ b/drivers/thermal/qcom/lmh_dbg.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QTI_LMH_H__
+#define __QTI_LMH_H__
+
+#include <linux/platform_device.h>
+
+int lmh_debug_register(struct platform_device *pdev);
+
+#endif /* __QTI_LMH_H__ */
diff --git a/drivers/thermal/qcom/msm_lmh_dcvs.c b/drivers/thermal/qcom/msm_lmh_dcvs.c
index 65dc2df..4284b6c 100644
--- a/drivers/thermal/qcom/msm_lmh_dcvs.c
+++ b/drivers/thermal/qcom/msm_lmh_dcvs.c
@@ -33,6 +33,7 @@
 #include <soc/qcom/scm.h>
 
 #include "../thermal_core.h"
+#include "lmh_dbg.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/lmh.h>
@@ -590,6 +591,7 @@
 	INIT_LIST_HEAD(&hw->list);
 	list_add(&hw->list, &lmh_dcvs_hw_list);
 	mutex_unlock(&lmh_dcvs_list_access);
+	lmh_debug_register(pdev);
 
 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lmh-dcvs/cdev:online",
 				limits_cpu_online, NULL);
diff --git a/drivers/thermal/qcom/qmi_cooling.c b/drivers/thermal/qcom/qmi_cooling.c
new file mode 100644
index 0000000..af82030
--- /dev/null
+++ b/drivers/thermal/qcom/qmi_cooling.c
@@ -0,0 +1,681 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "thermal_mitigation_device_service_v01.h"
+
+#define QMI_CDEV_DRIVER		"qmi-cooling-device"
+#define QMI_TMD_RESP_TOUT_MSEC	50
+#define QMI_CLIENT_NAME_LENGTH	40
+
+enum qmi_device_type {
+	QMI_CDEV_MAX_LIMIT_TYPE,
+	QMI_CDEV_MIN_LIMIT_TYPE,
+	QMI_CDEV_TYPE_NR,
+};
+
+struct qmi_cooling_device {
+	struct device_node		*np;
+	char				cdev_name[THERMAL_NAME_LENGTH];
+	char				qmi_name[QMI_CLIENT_NAME_LENGTH];
+	bool                            connection_active;
+	enum qmi_device_type		type;
+	struct list_head		qmi_node;
+	struct thermal_cooling_device	*cdev;
+	unsigned int			mtgn_state;
+	unsigned int			max_level;
+	struct qmi_tmd_instance		*tmd;
+};
+
+struct qmi_tmd_instance {
+	struct device			*dev;
+	struct qmi_handle		*handle;
+	struct mutex			mutex;
+	struct work_struct		work_svc_arrive;
+	struct work_struct		work_svc_exit;
+	struct work_struct		work_rcv_msg;
+	struct notifier_block		nb;
+	uint32_t			inst_id;
+	struct list_head		tmd_cdev_list;
+};
+
+struct qmi_dev_info {
+	char				*dev_name;
+	enum qmi_device_type		type;
+};
+
+static struct workqueue_struct *qmi_tmd_wq;
+static struct qmi_tmd_instance *tmd_instances;
+static int tmd_inst_cnt;
+
+static struct qmi_dev_info device_clients[] = {
+	{
+		.dev_name = "pa",
+		.type = QMI_CDEV_MAX_LIMIT_TYPE,
+	},
+	{
+		.dev_name = "cx_vdd_limit",
+		.type = QMI_CDEV_MAX_LIMIT_TYPE,
+	},
+	{
+		.dev_name = "modem",
+		.type = QMI_CDEV_MAX_LIMIT_TYPE,
+	},
+	{
+		.dev_name = "modem_current",
+		.type = QMI_CDEV_MAX_LIMIT_TYPE,
+	},
+	{
+		.dev_name = "modem_bw",
+		.type = QMI_CDEV_MAX_LIMIT_TYPE,
+	},
+	{
+		.dev_name = "cpuv_restriction_cold",
+		.type = QMI_CDEV_MIN_LIMIT_TYPE,
+	},
+	{
+		.dev_name = "cpr_cold",
+		.type = QMI_CDEV_MIN_LIMIT_TYPE,
+	}
+};
+
+static int qmi_get_max_state(struct thermal_cooling_device *cdev,
+				 unsigned long *state)
+{
+	struct qmi_cooling_device *qmi_cdev = cdev->devdata;
+
+	if (!qmi_cdev)
+		return -EINVAL;
+
+	*state = qmi_cdev->max_level;
+
+	return 0;
+}
+
+static int qmi_get_cur_state(struct thermal_cooling_device *cdev,
+				 unsigned long *state)
+{
+	struct qmi_cooling_device *qmi_cdev = cdev->devdata;
+
+	if (!qmi_cdev)
+		return -EINVAL;
+
+	if (qmi_cdev->type == QMI_CDEV_MIN_LIMIT_TYPE) {
+		*state = 0;
+		return 0;
+	}
+	*state = qmi_cdev->mtgn_state;
+
+	return 0;
+}
+
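+/*
+ * qmi_tmd_send_state_request() - push a mitigation level to the remote TMD.
+ *
+ * Builds a QMI_TMD_SET_MITIGATION_LEVEL request for the cooling device's
+ * qmi_name and sends it synchronously, waiting up to
+ * QMI_TMD_RESP_TOUT_MSEC for the response.
+ */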
+static int qmi_tmd_send_state_request(struct qmi_cooling_device *qmi_cdev,
+				uint8_t state)
+{
+	int ret = 0;
+	struct tmd_set_mitigation_level_req_msg_v01 req;
+	struct tmd_set_mitigation_level_resp_msg_v01 tmd_resp;
+	struct msg_desc req_desc, resp_desc;
+	struct qmi_tmd_instance *tmd = qmi_cdev->tmd;
+
+	memset(&req, 0, sizeof(req));
+	memset(&tmd_resp, 0, sizeof(tmd_resp));
+
+	strlcpy(req.mitigation_dev_id.mitigation_dev_id, qmi_cdev->qmi_name,
+		QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01);
+	req.mitigation_level = state;
+
+	req_desc.max_msg_len = TMD_SET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_TMD_SET_MITIGATION_LEVEL_REQ_V01;
+	req_desc.ei_array = tmd_set_mitigation_level_req_msg_v01_ei;
+
+	resp_desc.max_msg_len =
+		TMD_SET_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_TMD_SET_MITIGATION_LEVEL_RESP_V01;
+	resp_desc.ei_array = tmd_set_mitigation_level_resp_msg_v01_ei;
+
+	mutex_lock(&tmd->mutex);
+	ret = qmi_send_req_wait(tmd->handle,
+				&req_desc, &req, sizeof(req),
+				&resp_desc, &tmd_resp, sizeof(tmd_resp),
+				QMI_TMD_RESP_TOUT_MSEC);
+	if (ret < 0) {
+		pr_err("qmi set state:%d failed for %s ret:%d\n",
+			state, qmi_cdev->cdev_name, ret);
+		goto qmi_send_exit;
+	}
+
+	if (tmd_resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		ret = tmd_resp.resp.result;
+		pr_err("qmi set state:%d NOT success for %s ret:%d\n",
+			state, qmi_cdev->cdev_name, ret);
+		goto qmi_send_exit;
+	}
+	pr_debug("Requested qmi state:%d for %s\n", state, qmi_cdev->cdev_name);
+
+qmi_send_exit:
+	mutex_unlock(&tmd->mutex);
+	return ret;
+}
+
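+/*
+ * qmi_set_cur_or_min_state() - common handler for max and min limit
+ * cooling devices. If the QMI server is not connected yet, the request
+ * is only cached and is replayed from verify_devices_and_register()
+ * once the connection is established.
+ */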
+static int qmi_set_cur_or_min_state(struct qmi_cooling_device *qmi_cdev,
+				 unsigned long state)
+{
+	int ret = 0;
+	struct qmi_tmd_instance *tmd = qmi_cdev->tmd;
+
+	if (!tmd)
+		return -EINVAL;
+
+	if (qmi_cdev->mtgn_state == state)
+		return ret;
+
+	/* Save the state and return if the server has exited */
+	if (!qmi_cdev->connection_active) {
+		qmi_cdev->mtgn_state = state;
+		pr_debug("Pending request:%ld for %s\n", state,
+				qmi_cdev->cdev_name);
+		return ret;
+	}
+
+	/* Best effort: save the state even if the QMI request fails */
+	ret = qmi_tmd_send_state_request(qmi_cdev, (uint8_t)state);
+
+	qmi_cdev->mtgn_state = state;
+
+	return ret;
+}
+
+static int qmi_set_cur_state(struct thermal_cooling_device *cdev,
+				 unsigned long state)
+{
+	struct qmi_cooling_device *qmi_cdev = cdev->devdata;
+
+	if (!qmi_cdev)
+		return -EINVAL;
+
+	if (qmi_cdev->type == QMI_CDEV_MIN_LIMIT_TYPE)
+		return 0;
+
+	if (state > qmi_cdev->max_level)
+		state = qmi_cdev->max_level;
+
+	return qmi_set_cur_or_min_state(qmi_cdev, state);
+}
+
+static int qmi_set_min_state(struct thermal_cooling_device *cdev,
+				 unsigned long state)
+{
+	struct qmi_cooling_device *qmi_cdev = cdev->devdata;
+
+	if (!qmi_cdev)
+		return -EINVAL;
+
+	if (qmi_cdev->type == QMI_CDEV_MAX_LIMIT_TYPE)
+		return 0;
+
+	if (state > qmi_cdev->max_level)
+		state = qmi_cdev->max_level;
+
+	/* Convert state into QMI client expects for min state */
+	state = qmi_cdev->max_level - state;
+
+	return qmi_set_cur_or_min_state(qmi_cdev, state);
+}
+
+static int qmi_get_min_state(struct thermal_cooling_device *cdev,
+				 unsigned long *state)
+{
+	struct qmi_cooling_device *qmi_cdev = cdev->devdata;
+
+	if (!qmi_cdev)
+		return -EINVAL;
+
+	if (qmi_cdev->type == QMI_CDEV_MAX_LIMIT_TYPE) {
+		*state = 0;
+		return 0;
+	}
+	*state = qmi_cdev->max_level - qmi_cdev->mtgn_state;
+
+	return 0;
+}
+
+static struct thermal_cooling_device_ops qmi_device_ops = {
+	.get_max_state = qmi_get_max_state,
+	.get_cur_state = qmi_get_cur_state,
+	.set_cur_state = qmi_set_cur_state,
+	.set_min_state = qmi_set_min_state,
+	.get_min_state = qmi_get_min_state,
+};
+
+static int qmi_register_cooling_device(struct qmi_cooling_device *qmi_cdev)
+{
+	qmi_cdev->cdev = thermal_of_cooling_device_register(
+					qmi_cdev->np,
+					qmi_cdev->cdev_name,
+					qmi_cdev,
+					&qmi_device_ops);
+	if (IS_ERR(qmi_cdev->cdev)) {
+		pr_err("Cooling register failed for %s, ret:%ld\n",
+			qmi_cdev->cdev_name, PTR_ERR(qmi_cdev->cdev));
+		return PTR_ERR(qmi_cdev->cdev);
+	}
+	pr_debug("Cooling register success for %s\n", qmi_cdev->cdev_name);
+
+	return 0;
+}
+
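+/*
+ * verify_devices_and_register() - sync with the remote TMD service.
+ *
+ * Requests the mitigation device list from the remote subsystem, marks
+ * matching cooling devices as connected, re-sends any cached mitigation
+ * state and registers the thermal cooling device on first connect.
+ */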
+static int verify_devices_and_register(struct qmi_tmd_instance *tmd)
+{
+	struct tmd_get_mitigation_device_list_req_msg_v01 req;
+	struct tmd_get_mitigation_device_list_resp_msg_v01 *tmd_resp;
+	struct msg_desc req_desc, resp_desc;
+	int ret = 0, i;
+
+	memset(&req, 0, sizeof(req));
+	/* tmd_resp is large, so allocate it on the heap rather than the stack */
+	tmd_resp = kzalloc(sizeof(*tmd_resp), GFP_KERNEL);
+	if (!tmd_resp)
+		return -ENOMEM;
+
+	req_desc.max_msg_len =
+		TMD_GET_MITIGATION_DEVICE_LIST_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_TMD_GET_MITIGATION_DEVICE_LIST_REQ_V01;
+	req_desc.ei_array = tmd_get_mitigation_device_list_req_msg_v01_ei;
+
+	resp_desc.max_msg_len =
+		TMD_GET_MITIGATION_DEVICE_LIST_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_TMD_GET_MITIGATION_DEVICE_LIST_RESP_V01;
+	resp_desc.ei_array = tmd_get_mitigation_device_list_resp_msg_v01_ei;
+
+	mutex_lock(&tmd->mutex);
+	ret = qmi_send_req_wait(tmd->handle,
+			&req_desc, &req, sizeof(req),
+			&resp_desc, tmd_resp, sizeof(*tmd_resp),
+			0);
+	if (ret < 0) {
+		pr_err("qmi get device list failed for inst_id:0x%x ret:%d\n",
+			tmd->inst_id, ret);
+		goto reg_exit;
+	}
+
+	if (tmd_resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		ret = tmd_resp->resp.result;
+		pr_err("Get device list NOT success for inst_id:0x%x ret:%d\n",
+			tmd->inst_id, ret);
+		goto reg_exit;
+	}
+	mutex_unlock(&tmd->mutex);
+
+	for (i = 0; i < tmd_resp->mitigation_device_list_len; i++) {
+		struct qmi_cooling_device *qmi_cdev = NULL;
+
+		list_for_each_entry(qmi_cdev, &tmd->tmd_cdev_list,
+					qmi_node) {
+			struct tmd_mitigation_dev_list_type_v01 *device =
+				&tmd_resp->mitigation_device_list[i];
+
+			if ((strncasecmp(qmi_cdev->qmi_name,
+				device->mitigation_dev_id.mitigation_dev_id,
+				QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01)))
+				continue;
+
+			qmi_cdev->connection_active = true;
+			qmi_cdev->max_level = device->max_mitigation_level;
+			/*
+			 * Re-send the cached mitigation state on the
+			 * initial connect or after a server restart.
+			 */
+			qmi_tmd_send_state_request(qmi_cdev,
+						qmi_cdev->mtgn_state);
+			if (!qmi_cdev->cdev)
+				ret = qmi_register_cooling_device(qmi_cdev);
+			break;
+		}
+	}
+
+	kfree(tmd_resp);
+	return ret;
+
+reg_exit:
+	mutex_unlock(&tmd->mutex);
+	kfree(tmd_resp);
+
+	return ret;
+}
+
+static void qmi_tmd_rcv_msg(struct work_struct *work)
+{
+	int rc;
+	struct qmi_tmd_instance *tmd = container_of(work,
+						struct qmi_tmd_instance,
+						work_rcv_msg);
+
+	do {
+		pr_debug("Notified about a Receive Event\n");
+	} while ((rc = qmi_recv_msg(tmd->handle)) == 0);
+
+	if (rc != -ENOMSG)
+		pr_err("Error receiving message for SVC:0x%x, ret:%d\n",
+			tmd->inst_id, rc);
+}
+
+static void qmi_tmd_clnt_notify(struct qmi_handle *handle,
+		enum qmi_event_type event, void *priv_data)
+{
+	struct qmi_tmd_instance *tmd =
+		(struct qmi_tmd_instance *)priv_data;
+
+	if (!tmd) {
+		pr_debug("tmd is NULL\n");
+		return;
+	}
+
+	switch (event) {
+	case QMI_RECV_MSG:
+		queue_work(qmi_tmd_wq, &tmd->work_rcv_msg);
+		break;
+	default:
+		break;
+	}
+}
+
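+/*
+ * qmi_tmd_svc_arrive() - QMI server arrival handler.
+ *
+ * Creates the client handle, connects it to the TMD service instance of
+ * this subsystem and then verifies and registers the cooling devices
+ * exposed by that service.
+ */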
+static void qmi_tmd_svc_arrive(struct work_struct *work)
+{
+	int ret = 0;
+	struct qmi_tmd_instance *tmd = container_of(work,
+						struct qmi_tmd_instance,
+						work_svc_arrive);
+
+	mutex_lock(&tmd->mutex);
+	tmd->handle = qmi_handle_create(qmi_tmd_clnt_notify, tmd);
+	if (!tmd->handle) {
+		pr_err("QMI TMD client handle alloc failed for 0x%x\n",
+			tmd->inst_id);
+		goto arrive_exit;
+	}
+
+	ret = qmi_connect_to_service(tmd->handle, TMD_SERVICE_ID_V01,
+				   TMD_SERVICE_VERS_V01,
+				   tmd->inst_id);
+	if (ret < 0) {
+		pr_err("Could not connect handle to service for 0x%x, ret:%d\n",
+			tmd->inst_id, ret);
+		qmi_handle_destroy(tmd->handle);
+		tmd->handle = NULL;
+		goto arrive_exit;
+	}
+	mutex_unlock(&tmd->mutex);
+
+	verify_devices_and_register(tmd);
+
+	return;
+
+arrive_exit:
+	mutex_unlock(&tmd->mutex);
+}
+
+static void qmi_tmd_svc_exit(struct work_struct *work)
+{
+	struct qmi_tmd_instance *tmd = container_of(work,
+						struct qmi_tmd_instance,
+						work_svc_exit);
+	struct qmi_cooling_device *qmi_cdev;
+
+	mutex_lock(&tmd->mutex);
+	qmi_handle_destroy(tmd->handle);
+	tmd->handle = NULL;
+
+	list_for_each_entry(qmi_cdev, &tmd->tmd_cdev_list, qmi_node)
+		qmi_cdev->connection_active = false;
+
+	mutex_unlock(&tmd->mutex);
+}
+
+static int qmi_tmd_svc_event_notify(struct notifier_block *this,
+				    unsigned long event,
+				    void *data)
+{
+	struct qmi_tmd_instance *tmd = container_of(this,
+						struct qmi_tmd_instance,
+						nb);
+
+	if (!tmd) {
+		pr_debug("tmd is NULL\n");
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case QMI_SERVER_ARRIVE:
+		schedule_work(&tmd->work_svc_arrive);
+		break;
+	case QMI_SERVER_EXIT:
+		schedule_work(&tmd->work_svc_exit);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static void qmi_tmd_cleanup(void)
+{
+	int idx = 0;
+	struct qmi_tmd_instance *tmd = tmd_instances;
+	struct qmi_cooling_device *qmi_cdev, *c_next;
+
+	for (; idx < tmd_inst_cnt; idx++) {
+		mutex_lock(&tmd[idx].mutex);
+		list_for_each_entry_safe(qmi_cdev, c_next,
+				&tmd[idx].tmd_cdev_list, qmi_node) {
+			if (qmi_cdev->cdev)
+				thermal_cooling_device_unregister(
+					qmi_cdev->cdev);
+
+			list_del(&qmi_cdev->qmi_node);
+		}
+		if (tmd[idx].handle)
+			qmi_handle_destroy(tmd[idx].handle);
+
+		if (tmd[idx].nb.notifier_call)
+			qmi_svc_event_notifier_unregister(TMD_SERVICE_ID_V01,
+						TMD_SERVICE_VERS_V01,
+						tmd[idx].inst_id,
+						&tmd[idx].nb);
+		mutex_unlock(&tmd[idx].mutex);
+	}
+
+	if (qmi_tmd_wq) {
+		destroy_workqueue(qmi_tmd_wq);
+		qmi_tmd_wq = NULL;
+	}
+}
+
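+/*
+ * of_get_qmi_tmd_platform_data() - parse the devicetree description.
+ *
+ * Each child node describes one remote subsystem (identified by
+ * "qcom,instance-id") and its grandchildren describe the cooling
+ * devices, whose "qcom,qmi-dev-name" must match an entry in
+ * device_clients[]. Illustrative layout (node names and values are
+ * examples only):
+ *
+ *	qmi-tmd-devices {
+ *		compatible = "qcom,qmi_cooling_devices";
+ *		modem {
+ *			qcom,instance-id = <0x0>;
+ *			modem_pa: modem_pa {
+ *				qcom,qmi-dev-name = "pa";
+ *				#cooling-cells = <2>;
+ *			};
+ *		};
+ *	};
+ */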
+static int of_get_qmi_tmd_platform_data(struct device *dev)
+{
+	int ret = 0, idx = 0, i = 0, subsys_cnt = 0;
+	struct device_node *np = dev->of_node;
+	struct device_node *subsys_np, *cdev_np;
+	struct qmi_tmd_instance *tmd;
+	struct qmi_cooling_device *qmi_cdev;
+
+	subsys_cnt = of_get_available_child_count(np);
+	if (!subsys_cnt) {
+		dev_err(dev, "No child node to process\n");
+		return -EFAULT;
+	}
+
+	tmd = devm_kcalloc(dev, subsys_cnt, sizeof(*tmd), GFP_KERNEL);
+	if (!tmd)
+		return -ENOMEM;
+
+	for_each_available_child_of_node(np, subsys_np) {
+		if (idx >= subsys_cnt)
+			break;
+
+		ret = of_property_read_u32(subsys_np, "qcom,instance-id",
+				&tmd[idx].inst_id);
+		if (ret) {
+			dev_err(dev, "error reading qcom,instance-id. ret:%d\n",
+				ret);
+			return ret;
+		}
+
+		tmd[idx].dev = dev;
+		mutex_init(&tmd[idx].mutex);
+		INIT_LIST_HEAD(&tmd[idx].tmd_cdev_list);
+
+		for_each_available_child_of_node(subsys_np, cdev_np) {
+			const char *qmi_name;
+
+			qmi_cdev = devm_kzalloc(dev, sizeof(*qmi_cdev),
+					GFP_KERNEL);
+			if (!qmi_cdev) {
+				ret = -ENOMEM;
+				return ret;
+			}
+
+			strlcpy(qmi_cdev->cdev_name, cdev_np->name,
+				THERMAL_NAME_LENGTH);
+
+			if (!of_property_read_string(cdev_np,
+					"qcom,qmi-dev-name",
+					&qmi_name)) {
+				strlcpy(qmi_cdev->qmi_name, qmi_name,
+						QMI_CLIENT_NAME_LENGTH);
+			} else {
+				dev_err(dev, "Fail to parse dev name for %s\n",
+					cdev_np->name);
+				break;
+			}
+			/* Check for supported qmi dev*/
+			for (i = 0; i < ARRAY_SIZE(device_clients); i++) {
+				if (strcmp(device_clients[i].dev_name,
+					qmi_cdev->qmi_name) == 0)
+					break;
+			}
+
+			if (i >= ARRAY_SIZE(device_clients)) {
+				dev_err(dev, "Not supported dev name for %s\n",
+					cdev_np->name);
+				break;
+			}
+			qmi_cdev->type = device_clients[i].type;
+			qmi_cdev->tmd = &tmd[idx];
+			qmi_cdev->np = cdev_np;
+			qmi_cdev->mtgn_state = 0;
+			list_add(&qmi_cdev->qmi_node, &tmd[idx].tmd_cdev_list);
+		}
+		idx++;
+	}
+	tmd_instances = tmd;
+	tmd_inst_cnt = subsys_cnt;
+
+	return 0;
+}
+
+static int qmi_device_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int ret = 0, idx = 0;
+
+	ret = of_get_qmi_tmd_platform_data(dev);
+	if (ret)
+		goto probe_err;
+
+	if (!tmd_instances || !tmd_inst_cnt) {
+		dev_err(dev, "Empty tmd instances\n");
+		return -EINVAL;
+	}
+
+	qmi_tmd_wq = create_singlethread_workqueue("qmi_tmd_wq");
+	if (!qmi_tmd_wq) {
+		dev_err(dev, "Failed to create single thread workqueue\n");
+		ret = -EFAULT;
+		goto probe_err;
+	}
+
+	for (; idx < tmd_inst_cnt; idx++) {
+		struct qmi_tmd_instance *tmd = &tmd_instances[idx];
+
+		if (list_empty(&tmd->tmd_cdev_list))
+			continue;
+
+		tmd->nb.notifier_call = qmi_tmd_svc_event_notify;
+		INIT_WORK(&tmd->work_svc_arrive, qmi_tmd_svc_arrive);
+		INIT_WORK(&tmd->work_svc_exit, qmi_tmd_svc_exit);
+		INIT_WORK(&tmd->work_rcv_msg, qmi_tmd_rcv_msg);
+
+		ret = qmi_svc_event_notifier_register(TMD_SERVICE_ID_V01,
+						TMD_SERVICE_VERS_V01,
+						tmd->inst_id,
+						&tmd->nb);
+		if (ret < 0) {
+			dev_err(dev, "QMI register failed for 0x%x, ret:%d\n",
+				tmd->inst_id, ret);
+			goto probe_err;
+		}
+	}
+
+	return 0;
+
+probe_err:
+	qmi_tmd_cleanup();
+	return ret;
+}
+
+static int qmi_device_remove(struct platform_device *pdev)
+{
+	qmi_tmd_cleanup();
+
+	return 0;
+}
+
+static const struct of_device_id qmi_device_match[] = {
+	{.compatible = "qcom,qmi_cooling_devices"},
+	{}
+};
+
+static struct platform_driver qmi_device_driver = {
+	.probe          = qmi_device_probe,
+	.remove         = qmi_device_remove,
+	.driver         = {
+		.name   = QMI_CDEV_DRIVER,
+		.owner  = THIS_MODULE,
+		.of_match_table = qmi_device_match,
+	},
+};
+
+static int __init qmi_device_init(void)
+{
+	return platform_driver_register(&qmi_device_driver);
+}
+module_init(qmi_device_init);
+
+static void __exit qmi_device_exit(void)
+{
+	platform_driver_unregister(&qmi_device_driver);
+}
+module_exit(qmi_device_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI QMI cooling device driver");
diff --git a/drivers/thermal/qcom/qti_virtual_sensor.c b/drivers/thermal/qcom/qti_virtual_sensor.c
index 3064c74..923680a 100644
--- a/drivers/thermal/qcom/qti_virtual_sensor.c
+++ b/drivers/thermal/qcom/qti_virtual_sensor.c
@@ -29,7 +29,7 @@
 		.logic = VIRT_MAXIMUM,
 	},
 	{
-		.virt_zone_name = "silver-virt-max-usr",
+		.virt_zone_name = "silv-virt-max-step",
 		.num_sensors = 4,
 		.sensor_names = {"cpu0-silver-usr",
 				"cpu1-silver-usr",
@@ -38,7 +38,7 @@
 		.logic = VIRT_MAXIMUM,
 	},
 	{
-		.virt_zone_name = "gold-virt-max-usr",
+		.virt_zone_name = "gold-virt-max-step",
 		.num_sensors = 4,
 		.sensor_names = {"cpu0-gold-usr",
 				"cpu1-gold-usr",
diff --git a/drivers/thermal/qcom/regulator_cooling.c b/drivers/thermal/qcom/regulator_cooling.c
new file mode 100644
index 0000000..3cbf198
--- /dev/null
+++ b/drivers/thermal/qcom/regulator_cooling.c
@@ -0,0 +1,224 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mailbox_client.h>
+
+#define REG_CDEV_DRIVER "reg-cooling-device"
+#define REG_MSG_FORMAT "{class:volt_flr, event:zero_temp, res:%s, value:%s}"
+#define REG_CDEV_MAX_STATE 1
+#define MBOX_TOUT_MS 1000
+#define REG_MSG_MAX_LEN 100
+
+struct reg_cooling_device {
+	struct thermal_cooling_device	*cdev;
+	unsigned int			min_state;
+	const char			*resource_name;
+	struct mbox_chan		*qmp_chan;
+	struct mbox_client		*client;
+};
+
+struct aop_msg {
+	uint32_t len;
+	void *msg;
+};
+
+enum regulator_rail_type {
+	REG_COOLING_CX,
+	REG_COOLING_MX,
+	REG_COOLING_EBI,
+	REG_COOLING_NR,
+};
+
+static char *regulator_rail[REG_COOLING_NR] = {
+	"cx",
+	"mx",
+	"ebi",
+};
+
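+/*
+ * aop_send_msg() - request or release the voltage floor via the AOP mailbox.
+ *
+ * Formats the QMP message for the regulator resource and sends it over
+ * the mailbox channel; a min_state of REG_CDEV_MAX_STATE releases the
+ * floor ("off") and any lower state requests it ("on").
+ */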
+static int aop_send_msg(struct reg_cooling_device *reg_dev, int min_state)
+{
+	char msg_buf[REG_MSG_MAX_LEN] = {0};
+	int ret = 0;
+	struct aop_msg msg;
+
+	if (!reg_dev->qmp_chan) {
+		pr_err("mbox not initialized for resource:%s\n",
+				reg_dev->resource_name);
+		return -EINVAL;
+	}
+
+	ret = snprintf(msg_buf, REG_MSG_MAX_LEN, REG_MSG_FORMAT,
+			reg_dev->resource_name,
+			(min_state == REG_CDEV_MAX_STATE) ? "off" : "on");
+	if (ret >= REG_MSG_MAX_LEN) {
+		pr_err("Message too long for resource:%s\n",
+				reg_dev->resource_name);
+		return -E2BIG;
+	}
+	msg.len = REG_MSG_MAX_LEN;
+	msg.msg = msg_buf;
+	ret = mbox_send_message(reg_dev->qmp_chan, &msg);
+
+	return (ret < 0) ? ret : 0;
+}
+
+static int reg_get_max_state(struct thermal_cooling_device *cdev,
+				unsigned long *state)
+{
+	*state = REG_CDEV_MAX_STATE;
+	return 0;
+}
+
+static int reg_get_min_state(struct thermal_cooling_device *cdev,
+				unsigned long *state)
+{
+	struct reg_cooling_device *reg_dev = cdev->devdata;
+
+	*state = reg_dev->min_state;
+	return 0;
+}
+
+static int reg_send_min_state(struct thermal_cooling_device *cdev,
+				unsigned long state)
+{
+	struct reg_cooling_device *reg_dev = cdev->devdata;
+	int ret = 0;
+
+	if (state > REG_CDEV_MAX_STATE)
+		state = REG_CDEV_MAX_STATE;
+
+	if (reg_dev->min_state == state)
+		return ret;
+
+	ret = aop_send_msg(reg_dev, state);
+	if (ret) {
+		pr_err("regulator:%s switching to floor %lu error. err:%d\n",
+			reg_dev->resource_name, state, ret);
+	} else {
+		pr_debug("regulator:%s switched to %lu from %d\n",
+			reg_dev->resource_name, state, reg_dev->min_state);
+		reg_dev->min_state = state;
+	}
+
+	return ret;
+}
+
+static int reg_get_cur_state(struct thermal_cooling_device *cdev,
+				unsigned long *state)
+{
+	*state = 0;
+	return 0;
+}
+
+static int reg_send_cur_state(struct thermal_cooling_device *cdev,
+				unsigned long state)
+{
+	return 0;
+}
+
+static struct thermal_cooling_device_ops reg_dev_ops = {
+	.get_max_state = reg_get_max_state,
+	.get_cur_state = reg_get_cur_state,
+	.set_cur_state = reg_send_cur_state,
+	.set_min_state = reg_send_min_state,
+	.get_min_state = reg_get_min_state,
+};
+
+static int reg_init_mbox(struct platform_device *pdev,
+			struct reg_cooling_device *reg_dev)
+{
+	reg_dev->client = devm_kzalloc(&pdev->dev, sizeof(*reg_dev->client),
+					GFP_KERNEL);
+	if (!reg_dev->client)
+		return -ENOMEM;
+
+	reg_dev->client->dev = &pdev->dev;
+	reg_dev->client->tx_block = true;
+	reg_dev->client->tx_tout = MBOX_TOUT_MS;
+	reg_dev->client->knows_txdone = false;
+
+	reg_dev->qmp_chan = mbox_request_channel(reg_dev->client, 0);
+	if (IS_ERR(reg_dev->qmp_chan)) {
+		dev_err(&pdev->dev, "Mbox request failed. err:%ld\n",
+				PTR_ERR(reg_dev->qmp_chan));
+		return PTR_ERR(reg_dev->qmp_chan);
+	}
+
+	return 0;
+}
+
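+/*
+ * reg_dev_probe() - register one regulator cooling device.
+ *
+ * The node must name a supported rail ("cx", "mx" or "ebi") and provide
+ * a QMP mailbox channel. Illustrative node (labels and phandles are
+ * examples only):
+ *
+ *	cx_cdev: regulator-cdev {
+ *		compatible = "qcom,rpmh-reg-cdev";
+ *		qcom,reg-resource-name = "cx";
+ *		mboxes = <&qmp_aop 0>;
+ *		#cooling-cells = <2>;
+ *	};
+ */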
+static int reg_dev_probe(struct platform_device *pdev)
+{
+	int ret = 0, idx = 0;
+	struct reg_cooling_device *reg_dev = NULL;
+
+	reg_dev = devm_kzalloc(&pdev->dev, sizeof(*reg_dev), GFP_KERNEL);
+	if (!reg_dev)
+		return -ENOMEM;
+
+	ret = reg_init_mbox(pdev, reg_dev);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_string(pdev->dev.of_node,
+			"qcom,reg-resource-name",
+			&reg_dev->resource_name);
+	if (ret) {
+		dev_err(&pdev->dev, "Error reading resource name. err:%d\n",
+			ret);
+		goto mbox_free;
+	}
+
+	for (idx = 0; idx < REG_COOLING_NR; idx++) {
+		if (!strcmp(reg_dev->resource_name, regulator_rail[idx]))
+			break;
+	}
+	if (idx == REG_COOLING_NR) {
+		dev_err(&pdev->dev, "Invalid regulator resource name:%s\n",
+				reg_dev->resource_name);
+		ret = -EINVAL;
+		goto mbox_free;
+	}
+	reg_dev->min_state = REG_CDEV_MAX_STATE;
+	reg_dev->cdev = thermal_of_cooling_device_register(
+				pdev->dev.of_node,
+				(char *)reg_dev->resource_name,
+				reg_dev, &reg_dev_ops);
+	if (IS_ERR(reg_dev->cdev)) {
+		ret = PTR_ERR(reg_dev->cdev);
+		goto mbox_free;
+	}
+
+	return ret;
+
+mbox_free:
+	mbox_free_channel(reg_dev->qmp_chan);
+
+	return ret;
+}
+
+static const struct of_device_id reg_dev_of_match[] = {
+	{.compatible = "qcom,rpmh-reg-cdev", },
+	{}
+};
+
+static struct platform_driver reg_dev_driver = {
+	.driver = {
+		.name = REG_CDEV_DRIVER,
+		.of_match_table = reg_dev_of_match,
+	},
+	.probe = reg_dev_probe,
+};
+builtin_platform_driver(reg_dev_driver);
diff --git a/drivers/thermal/qcom/thermal_mitigation_device_service_v01.c b/drivers/thermal/qcom/thermal_mitigation_device_service_v01.c
new file mode 100644
index 0000000..af020eb
--- /dev/null
+++ b/drivers/thermal/qcom/thermal_mitigation_device_service_v01.c
@@ -0,0 +1,359 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "thermal_mitigation_device_service_v01.h"
+
+static struct elem_info tmd_mitigation_dev_id_type_v01_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(
+					struct tmd_mitigation_dev_id_type_v01,
+					mitigation_dev_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info tmd_mitigation_dev_list_type_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct tmd_mitigation_dev_id_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(
+					struct tmd_mitigation_dev_list_type_v01,
+					mitigation_dev_id),
+		.ei_array      = tmd_mitigation_dev_id_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(
+					struct tmd_mitigation_dev_list_type_v01,
+					max_mitigation_level),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info tmd_get_mitigation_device_list_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info tmd_get_mitigation_device_list_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+			struct tmd_get_mitigation_device_list_resp_msg_v01,
+			resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct tmd_get_mitigation_device_list_resp_msg_v01,
+				mitigation_device_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct tmd_get_mitigation_device_list_resp_msg_v01,
+				mitigation_device_list_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_TMD_MITIGATION_DEV_LIST_MAX_V01,
+		.elem_size      = sizeof(
+				struct tmd_mitigation_dev_list_type_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct tmd_get_mitigation_device_list_resp_msg_v01,
+				mitigation_device_list),
+		.ei_array      = tmd_mitigation_dev_list_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info tmd_set_mitigation_level_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct tmd_mitigation_dev_id_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(
+				struct tmd_set_mitigation_level_req_msg_v01,
+					mitigation_dev_id),
+		.ei_array      = tmd_mitigation_dev_id_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				struct tmd_set_mitigation_level_req_msg_v01,
+					mitigation_level),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info tmd_set_mitigation_level_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+			struct tmd_set_mitigation_level_resp_msg_v01,
+				resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info tmd_get_mitigation_level_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct tmd_mitigation_dev_id_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(
+				struct tmd_get_mitigation_level_req_msg_v01,
+					mitigation_device),
+		.ei_array      = tmd_mitigation_dev_id_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info tmd_get_mitigation_level_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				struct tmd_get_mitigation_level_resp_msg_v01,
+					resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				struct tmd_get_mitigation_level_resp_msg_v01,
+					current_mitigation_level_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				struct tmd_get_mitigation_level_resp_msg_v01,
+					current_mitigation_level),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+				struct tmd_get_mitigation_level_resp_msg_v01,
+					requested_mitigation_level_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+				struct tmd_get_mitigation_level_resp_msg_v01,
+					requested_mitigation_level),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info tmd_register_notification_mitigation_level_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct tmd_mitigation_dev_id_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(
+		struct tmd_register_notification_mitigation_level_req_msg_v01,
+				mitigation_device),
+		.ei_array      = tmd_mitigation_dev_id_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info tmd_register_notification_mitigation_level_resp_msg_v01_ei[]
+									= {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+		struct tmd_register_notification_mitigation_level_resp_msg_v01,
+				resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info tmd_deregister_notification_mitigation_level_req_msg_v01_ei[]
+									= {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct tmd_mitigation_dev_id_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+		tmd_deregister_notification_mitigation_level_req_msg_v01,
+				mitigation_device),
+		.ei_array      = tmd_mitigation_dev_id_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info tmd_deregister_notification_mitigation_level_resp_msg_v01_ei[]
+									= {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+		tmd_deregister_notification_mitigation_level_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info tmd_mitigation_level_report_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct tmd_mitigation_dev_id_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(
+				struct tmd_mitigation_level_report_ind_msg_v01,
+					mitigation_device),
+		.ei_array      = tmd_mitigation_dev_id_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				struct tmd_mitigation_level_report_ind_msg_v01,
+					   current_mitigation_level),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
diff --git a/drivers/thermal/qcom/thermal_mitigation_device_service_v01.h b/drivers/thermal/qcom/thermal_mitigation_device_service_v01.h
new file mode 100644
index 0000000..c2d1201
--- /dev/null
+++ b/drivers/thermal/qcom/thermal_mitigation_device_service_v01.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef THERMAL_MITIGATION_DEVICE_SERVICE_V01_H
+#define THERMAL_MITIGATION_DEVICE_SERVICE_V01_H
+
+#define TMD_SERVICE_ID_V01 0x18
+#define TMD_SERVICE_VERS_V01 0x01
+
+#define QMI_TMD_GET_MITIGATION_DEVICE_LIST_RESP_V01 0x0020
+#define QMI_TMD_GET_MITIGATION_LEVEL_REQ_V01 0x0022
+#define QMI_TMD_GET_SUPPORTED_MSGS_REQ_V01 0x001E
+#define QMI_TMD_SET_MITIGATION_LEVEL_REQ_V01 0x0021
+#define QMI_TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_V01 0x0023
+#define QMI_TMD_GET_SUPPORTED_MSGS_RESP_V01 0x001E
+#define QMI_TMD_SET_MITIGATION_LEVEL_RESP_V01 0x0021
+#define QMI_TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_V01 0x0024
+#define QMI_TMD_MITIGATION_LEVEL_REPORT_IND_V01 0x0025
+#define QMI_TMD_GET_MITIGATION_LEVEL_RESP_V01 0x0022
+#define QMI_TMD_GET_SUPPORTED_FIELDS_REQ_V01 0x001F
+#define QMI_TMD_GET_MITIGATION_DEVICE_LIST_REQ_V01 0x0020
+#define QMI_TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_V01 0x0023
+#define QMI_TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_V01 0x0024
+#define QMI_TMD_GET_SUPPORTED_FIELDS_RESP_V01 0x001F
+
+#define QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 32
+#define QMI_TMD_MITIGATION_DEV_LIST_MAX_V01 32
+
+struct tmd_mitigation_dev_id_type_v01 {
+	char mitigation_dev_id[QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 + 1];
+};
+
+struct tmd_mitigation_dev_list_type_v01 {
+	struct tmd_mitigation_dev_id_type_v01 mitigation_dev_id;
+	uint8_t max_mitigation_level;
+};
+
+struct tmd_get_mitigation_device_list_req_msg_v01 {
+	char placeholder;
+};
+#define TMD_GET_MITIGATION_DEVICE_LIST_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info tmd_get_mitigation_device_list_req_msg_v01_ei[];
+
+struct tmd_get_mitigation_device_list_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t mitigation_device_list_valid;
+	uint32_t mitigation_device_list_len;
+	struct tmd_mitigation_dev_list_type_v01
+		mitigation_device_list[QMI_TMD_MITIGATION_DEV_LIST_MAX_V01];
+};
+#define TMD_GET_MITIGATION_DEVICE_LIST_RESP_MSG_V01_MAX_MSG_LEN 1099
+extern struct elem_info tmd_get_mitigation_device_list_resp_msg_v01_ei[];
+
+struct tmd_set_mitigation_level_req_msg_v01 {
+	struct tmd_mitigation_dev_id_type_v01 mitigation_dev_id;
+	uint8_t mitigation_level;
+};
+#define TMD_SET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 40
+extern struct elem_info tmd_set_mitigation_level_req_msg_v01_ei[];
+
+struct tmd_set_mitigation_level_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define TMD_SET_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info tmd_set_mitigation_level_resp_msg_v01_ei[];
+
+struct tmd_get_mitigation_level_req_msg_v01 {
+	struct tmd_mitigation_dev_id_type_v01 mitigation_device;
+};
+#define TMD_GET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 36
+extern struct elem_info tmd_get_mitigation_level_req_msg_v01_ei[];
+
+struct tmd_get_mitigation_level_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t current_mitigation_level_valid;
+	uint8_t current_mitigation_level;
+	uint8_t requested_mitigation_level_valid;
+	uint8_t requested_mitigation_level;
+};
+#define TMD_GET_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 15
+extern struct elem_info tmd_get_mitigation_level_resp_msg_v01_ei[];
+
+struct tmd_register_notification_mitigation_level_req_msg_v01 {
+	struct tmd_mitigation_dev_id_type_v01 mitigation_device;
+};
+#define TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 36
+extern struct elem_info
+		tmd_register_notification_mitigation_level_req_msg_v01_ei[];
+
+struct tmd_register_notification_mitigation_level_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info
+	tmd_register_notification_mitigation_level_resp_msg_v01_ei[];
+
+struct tmd_deregister_notification_mitigation_level_req_msg_v01 {
+	struct tmd_mitigation_dev_id_type_v01 mitigation_device;
+};
+#define TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 36
+extern struct elem_info
+	tmd_deregister_notification_mitigation_level_req_msg_v01_ei[];
+
+struct tmd_deregister_notification_mitigation_level_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info
+	tmd_deregister_notification_mitigation_level_resp_msg_v01_ei[];
+
+struct tmd_mitigation_level_report_ind_msg_v01 {
+	struct tmd_mitigation_dev_id_type_v01 mitigation_device;
+	uint8_t current_mitigation_level;
+};
+#define TMD_MITIGATION_LEVEL_REPORT_IND_MSG_V01_MAX_MSG_LEN 40
+extern struct elem_info tmd_mitigation_level_report_ind_msg_v01_ei[];
+
+#endif
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 4c1ccee..68d9feb 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -418,8 +418,9 @@
 		thermal_zone_device_set_polling(thermal_passive_wq,
 						tz, tz->passive_delay);
 	else if (tz->polling_delay)
-		thermal_zone_device_set_polling(system_freezable_wq,
-						tz, tz->polling_delay);
+		thermal_zone_device_set_polling(
+				system_freezable_power_efficient_wq,
+				tz, tz->polling_delay);
 	else
 		thermal_zone_device_set_polling(NULL, tz, 0);
 
@@ -2134,7 +2135,7 @@
 	/* Bind cooling devices for this zone */
 	bind_tz(tz);
 
-	INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
+	INIT_DEFERRABLE_WORK(&(tz->poll_queue), thermal_zone_device_check);
 
 	thermal_zone_device_reset(tz);
 	/* Update the new thermal zone and mark it as already updated. */
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index 195acc8..5d47691 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -27,6 +27,7 @@
 #define UARTn_FRAME		0x04
 #define UARTn_FRAME_DATABITS__MASK	0x000f
 #define UARTn_FRAME_DATABITS(n)		((n) - 3)
+#define UARTn_FRAME_PARITY__MASK	0x0300
 #define UARTn_FRAME_PARITY_NONE		0x0000
 #define UARTn_FRAME_PARITY_EVEN		0x0200
 #define UARTn_FRAME_PARITY_ODD		0x0300
@@ -572,12 +573,16 @@
 			16 * (4 + (clkdiv >> 6)));
 
 	frame = efm32_uart_read32(efm_port, UARTn_FRAME);
-	if (frame & UARTn_FRAME_PARITY_ODD)
+	switch (frame & UARTn_FRAME_PARITY__MASK) {
+	case UARTn_FRAME_PARITY_ODD:
 		*parity = 'o';
-	else if (frame & UARTn_FRAME_PARITY_EVEN)
+		break;
+	case UARTn_FRAME_PARITY_EVEN:
 		*parity = 'e';
-	else
+		break;
+	default:
 		*parity = 'n';
+	}
 
 	*bits = (frame & UARTn_FRAME_DATABITS__MASK) -
 			UARTn_FRAME_DATABITS(4) + 4;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ee84f89..7e97a1c 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2366,6 +2366,10 @@
 		 */
 		udelay(DIV_ROUND_UP(10 * 1000000, baud));
 	}
+	if (port->flags & UPF_HARD_FLOW) {
+		/* Refresh (Auto) RTS */
+		sci_set_mctrl(port, port->mctrl);
+	}
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	/*
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 32f99da..e07fa76 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2578,6 +2578,7 @@
 		hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
 				GFP_KERNEL);
 		if (!hcd->bandwidth_mutex) {
+			kfree(hcd->address0_mutex);
 			kfree(hcd);
 			dev_dbg(dev, "hcd bandwidth mutex alloc failed\n");
 			return NULL;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index fcbaa61..50679bc 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1345,7 +1345,13 @@
 	if (ret < 0) {
 		message = "can't read hub descriptor";
 		goto fail;
-	} else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) {
+	}
+
+	maxchild = USB_MAXCHILDREN;
+	if (hub_is_superspeed(hdev))
+		maxchild = min_t(unsigned, maxchild, USB_SS_MAXPORTS);
+
+	if (hub->descriptor->bNbrPorts > maxchild) {
 		message = "hub has too many ports!";
 		ret = -ENODEV;
 		goto fail;
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 2f1fb7e..9eba51b 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -148,7 +148,8 @@
 		exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
 		if (IS_ERR(exynos->axius_clk)) {
 			dev_err(dev, "no AXI UpScaler clk specified\n");
-			return -ENODEV;
+			ret = -ENODEV;
+			goto axius_clk_err;
 		}
 		clk_prepare_enable(exynos->axius_clk);
 	} else {
@@ -206,6 +207,7 @@
 	regulator_disable(exynos->vdd33);
 err2:
 	clk_disable_unprepare(exynos->axius_clk);
+axius_clk_err:
 	clk_disable_unprepare(exynos->susp_clk);
 	clk_disable_unprepare(exynos->clk);
 	return ret;
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index ec166f2..8a6ae0b 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2291,7 +2291,7 @@
 	cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
 	if (!cdev->os_desc_req->buf) {
 		ret = -ENOMEM;
-		kfree(cdev->os_desc_req);
+		usb_ep_free_request(ep0, cdev->os_desc_req);
 		goto end;
 	}
 	cdev->os_desc_req->context = cdev;
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 6b2c137..5396557 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -22,6 +22,7 @@
 static struct class *android_class;
 static struct device *android_device;
 static int index;
+static int gadget_index;
 
 struct device *create_function_device(char *name)
 {
@@ -1425,21 +1426,21 @@
 	spin_unlock_irqrestore(&cdev->lock, flags);
 
 	if (status[0]) {
-		kobject_uevent_env(&android_device->kobj,
+		kobject_uevent_env(&gi->dev->kobj,
 					KOBJ_CHANGE, connected);
 		pr_info("%s: sent uevent %s\n", __func__, connected[0]);
 		uevent_sent = true;
 	}
 
 	if (status[1]) {
-		kobject_uevent_env(&android_device->kobj,
+		kobject_uevent_env(&gi->dev->kobj,
 					KOBJ_CHANGE, configured);
 		pr_info("%s: sent uevent %s\n", __func__, configured[0]);
 		uevent_sent = true;
 	}
 
 	if (status[2]) {
-		kobject_uevent_env(&android_device->kobj,
+		kobject_uevent_env(&gi->dev->kobj,
 					KOBJ_CHANGE, disconnected);
 		pr_info("%s: sent uevent %s\n", __func__, disconnected[0]);
 		uevent_sent = true;
@@ -1600,23 +1601,28 @@
 {
 	struct device_attribute **attrs;
 	struct device_attribute *attr;
+	char str[10];
 
 	INIT_WORK(&gi->work, android_work);
-	android_device = device_create(android_class, NULL,
-				MKDEV(0, 0), NULL, "android0");
-	if (IS_ERR(android_device))
-		return PTR_ERR(android_device);
+	snprintf(str, sizeof(str), "android%d", gadget_index - 1);
+	pr_debug("Creating android device %s\n", str);
+	gi->dev = device_create(android_class, NULL,
+				MKDEV(0, 0), NULL, str);
+	if (IS_ERR(gi->dev))
+		return PTR_ERR(gi->dev);
 
-	dev_set_drvdata(android_device, gi);
+	dev_set_drvdata(gi->dev, gi);
+	if (gadget_index == 1)
+		android_device = gi->dev;
 
 	attrs = android_usb_attributes;
 	while ((attr = *attrs++)) {
 		int err;
 
-		err = device_create_file(android_device, attr);
+		err = device_create_file(gi->dev, attr);
 		if (err) {
-			device_destroy(android_device->class,
-				       android_device->devt);
+			device_destroy(gi->dev->class,
+				       gi->dev->devt);
 			return err;
 		}
 	}
@@ -1624,15 +1630,15 @@
 	return 0;
 }
 
-static void android_device_destroy(void)
+static void android_device_destroy(struct device *dev)
 {
 	struct device_attribute **attrs;
 	struct device_attribute *attr;
 
 	attrs = android_usb_attributes;
 	while ((attr = *attrs++))
-		device_remove_file(android_device, attr);
-	device_destroy(android_device->class, android_device->devt);
+		device_remove_file(dev, attr);
+	device_destroy(dev->class, dev->devt);
 }
 #else
 static inline int android_device_create(struct gadget_info *gi)
@@ -1640,7 +1646,7 @@
 	return 0;
 }
 
-static inline void android_device_destroy(void)
+static inline void android_device_destroy(struct device *dev)
 {
 }
 #endif
@@ -1696,6 +1702,8 @@
 	if (!gi->composite.gadget_driver.function)
 		goto err;
 
+	gadget_index++;
+	pr_debug("Creating gadget index %d\n", gadget_index);
 	if (android_device_create(gi) < 0)
 		goto err;
 
@@ -1708,8 +1716,14 @@
 
 static void gadgets_drop(struct config_group *group, struct config_item *item)
 {
+	struct gadget_info *gi;
+
+	gi = container_of(to_config_group(item), struct gadget_info, group);
 	config_item_put(item);
-	android_device_destroy();
+	if (gi->dev) {
+		android_device_destroy(gi->dev);
+		gi->dev = NULL;
+	}
 }
 
 static struct configfs_group_operations gadgets_ops = {
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 1468d8f..f959c42 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1183,8 +1183,10 @@
 
 	/* closing ep0 === shutdown all */
 
-	if (dev->gadget_registered)
+	if (dev->gadget_registered) {
 		usb_gadget_unregister_driver (&gadgetfs_driver);
+		dev->gadget_registered = false;
+	}
 
 	/* at this point "good" hardware has disconnected the
 	 * device from USB; the host won't see it any more.
@@ -1677,9 +1679,10 @@
 gadgetfs_suspend (struct usb_gadget *gadget)
 {
 	struct dev_data		*dev = get_gadget_data (gadget);
+	unsigned long		flags;
 
 	INFO (dev, "suspended from state %d\n", dev->state);
-	spin_lock (&dev->lock);
+	spin_lock_irqsave(&dev->lock, flags);
 	switch (dev->state) {
 	case STATE_DEV_SETUP:		// VERY odd... host died??
 	case STATE_DEV_CONNECTED:
@@ -1690,7 +1693,7 @@
 	default:
 		break;
 	}
-	spin_unlock (&dev->lock);
+	spin_unlock_irqrestore(&dev->lock, flags);
 }
 
 static struct usb_gadget_driver gadgetfs_driver = {
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 4fa5de2..94c8a9f 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -442,23 +442,16 @@
 		/* Report reset and disconnect events to the driver */
 		if (dum->driver && (disconnect || reset)) {
 			stop_activity(dum);
-			spin_unlock(&dum->lock);
 			if (reset)
 				usb_gadget_udc_reset(&dum->gadget, dum->driver);
 			else
 				dum->driver->disconnect(&dum->gadget);
-			spin_lock(&dum->lock);
 		}
 	} else if (dum_hcd->active != dum_hcd->old_active) {
-		if (dum_hcd->old_active && dum->driver->suspend) {
-			spin_unlock(&dum->lock);
+		if (dum_hcd->old_active && dum->driver->suspend)
 			dum->driver->suspend(&dum->gadget);
-			spin_lock(&dum->lock);
-		} else if (!dum_hcd->old_active &&  dum->driver->resume) {
-			spin_unlock(&dum->lock);
+		else if (!dum_hcd->old_active &&  dum->driver->resume)
 			dum->driver->resume(&dum->gadget);
-			spin_lock(&dum->lock);
-		}
 	}
 
 	dum_hcd->old_status = dum_hcd->port_status;
@@ -983,7 +976,9 @@
 	struct dummy_hcd	*dum_hcd = gadget_to_dummy_hcd(g);
 	struct dummy		*dum = dum_hcd->dum;
 
+	spin_lock_irq(&dum->lock);
 	dum->driver = NULL;
+	spin_unlock_irq(&dum->lock);
 
 	return 0;
 }
@@ -2009,7 +2004,7 @@
 			HUB_CHAR_COMMON_OCPM);
 	desc->bNbrPorts = 1;
 	desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/
-	desc->u.ss.DeviceRemovable = 0xffff;
+	desc->u.ss.DeviceRemovable = 0;
 }
 
 static inline void hub_descriptor(struct usb_hub_descriptor *desc)
@@ -2021,8 +2016,8 @@
 			HUB_CHAR_INDV_PORT_LPSM |
 			HUB_CHAR_COMMON_OCPM);
 	desc->bNbrPorts = 1;
-	desc->u.hs.DeviceRemovable[0] = 0xff;
-	desc->u.hs.DeviceRemovable[1] = 0xff;
+	desc->u.hs.DeviceRemovable[0] = 0;
+	desc->u.hs.DeviceRemovable[1] = 0xff;	/* PortPwrCtrlMask */
 }
 
 static int dummy_hub_control(
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 61c938c..33f3987 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -2469,11 +2469,8 @@
 		nuke(&dev->ep[i]);
 
 	/* report disconnect; the driver is already quiesced */
-	if (driver) {
-		spin_unlock(&dev->lock);
+	if (driver)
 		driver->disconnect(&dev->gadget);
-		spin_lock(&dev->lock);
-	}
 
 	usb_reinit(dev);
 }
@@ -3347,8 +3344,6 @@
 		BIT(PCI_RETRY_ABORT_INTERRUPT))
 
 static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
-__releases(dev->lock)
-__acquires(dev->lock)
 {
 	struct net2280_ep	*ep;
 	u32			tmp, num, mask, scratch;
@@ -3389,14 +3384,12 @@
 			if (disconnect || reset) {
 				stop_activity(dev, dev->driver);
 				ep0_start(dev);
-				spin_unlock(&dev->lock);
 				if (reset)
 					usb_gadget_udc_reset
 						(&dev->gadget, dev->driver);
 				else
 					(dev->driver->disconnect)
 						(&dev->gadget);
-				spin_lock(&dev->lock);
 				return;
 			}
 		}
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index fb8fc34..ba78e3f 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -1401,7 +1401,13 @@
 				    struct renesas_usb3_request *usb3_req,
 				    int status)
 {
-	usb3_pn_stop(usb3);
+	unsigned long flags;
+
+	spin_lock_irqsave(&usb3->lock, flags);
+	if (usb3_pn_change(usb3, usb3_ep->num))
+		usb3_pn_stop(usb3);
+	spin_unlock_irqrestore(&usb3->lock, flags);
+
 	usb3_disable_pipe_irq(usb3, usb3_ep->num);
 	usb3_request_done(usb3_ep, usb3_req, status);
 
@@ -1430,30 +1436,46 @@
 {
 	struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num);
 	struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep);
+	bool done = false;
 
 	if (!usb3_req)
 		return;
 
+	spin_lock(&usb3->lock);
+	if (usb3_pn_change(usb3, num))
+		goto out;
+
 	if (usb3_ep->dir_in) {
 		/* Do not stop the IN pipe here to detect LSTTR interrupt */
 		if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE))
 			usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA);
 	} else {
 		if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ))
-			usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
+			done = true;
 	}
+
+out:
+	/* need to unlock because usb3_request_done_pipen() locks it */
+	spin_unlock(&usb3->lock);
+
+	if (done)
+		usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
 }
 
 static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num)
 {
 	u32 pn_int_sta;
 
-	if (usb3_pn_change(usb3, num) < 0)
+	spin_lock(&usb3->lock);
+	if (usb3_pn_change(usb3, num) < 0) {
+		spin_unlock(&usb3->lock);
 		return;
+	}
 
 	pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA);
 	pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA);
 	usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA);
+	spin_unlock(&usb3->lock);
 	if (pn_int_sta & PN_INT_LSTTR)
 		usb3_irq_epc_pipen_lsttr(usb3, num);
 	if (pn_int_sta & PN_INT_BFRDY)
@@ -1707,6 +1729,9 @@
 	/* hook up the driver */
 	usb3->driver = driver;
 
+	pm_runtime_enable(usb3_to_dev(usb3));
+	pm_runtime_get_sync(usb3_to_dev(usb3));
+
 	renesas_usb3_init_controller(usb3);
 
 	return 0;
@@ -1715,14 +1740,14 @@
 static int renesas_usb3_stop(struct usb_gadget *gadget)
 {
 	struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget);
-	unsigned long flags;
 
-	spin_lock_irqsave(&usb3->lock, flags);
 	usb3->softconnect = false;
 	usb3->gadget.speed = USB_SPEED_UNKNOWN;
 	usb3->driver = NULL;
 	renesas_usb3_stop_controller(usb3);
-	spin_unlock_irqrestore(&usb3->lock, flags);
+
+	pm_runtime_put(usb3_to_dev(usb3));
+	pm_runtime_disable(usb3_to_dev(usb3));
 
 	return 0;
 }
@@ -1761,9 +1786,6 @@
 {
 	struct renesas_usb3 *usb3 = platform_get_drvdata(pdev);
 
-	pm_runtime_put(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
-
 	usb_del_gadget_udc(&usb3->gadget);
 
 	__renesas_usb3_ep_free_request(usb3->ep0_req);
@@ -1948,9 +1970,6 @@
 
 	usb3->workaround_for_vbus = priv->workaround_for_vbus;
 
-	pm_runtime_enable(&pdev->dev);
-	pm_runtime_get_sync(&pdev->dev);
-
 	dev_info(&pdev->dev, "probed\n");
 
 	return 0;
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index bfa7fa3..7bf78be 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1269,7 +1269,7 @@
 			time = 30;
 			break;
 		default:
-			time = 300;
+			time = 50;
 			break;
 		}
 
@@ -1785,6 +1785,7 @@
 		pipe = td->pipe;
 		pipe_stop(r8a66597, pipe);
 
+		/* Select a different address or endpoint */
 		new_td = td;
 		do {
 			list_move_tail(&new_td->queue,
@@ -1794,7 +1795,8 @@
 				new_td = td;
 				break;
 			}
-		} while (td != new_td && td->address == new_td->address);
+		} while (td != new_td && td->address == new_td->address &&
+			td->pipe->info.epnum == new_td->pipe->info.epnum);
 
 		start_transfer(r8a66597, new_td);
 
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d680eb3..c99121a6 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2230,11 +2230,12 @@
 {
 	u32 temp, port_offset, port_count;
 	int i;
-	u8 major_revision;
+	u8 major_revision, minor_revision;
 	struct xhci_hub *rhub;
 
 	temp = readl(addr);
 	major_revision = XHCI_EXT_PORT_MAJOR(temp);
+	minor_revision = XHCI_EXT_PORT_MINOR(temp);
 
 	if (major_revision == 0x03) {
 		rhub = &xhci->usb3_rhub;
@@ -2248,7 +2249,9 @@
 		return;
 	}
 	rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
-	rhub->min_rev = XHCI_EXT_PORT_MINOR(temp);
+
+	if (rhub->min_rev < minor_revision)
+		rhub->min_rev = minor_revision;
 
 	/* Port offset and count in the third dword, see section 7.2 */
 	temp = readl(addr + 2);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index e7d6752..69864ba 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -201,6 +201,9 @@
 	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
 			pdev->device == 0x1042)
 		xhci->quirks |= XHCI_BROKEN_STREAMS;
+	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+			pdev->device == 0x1142)
+		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 
 	if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
 		xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 358feca..261ed2c 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1909,6 +1909,14 @@
 static void musb_irq_work(struct work_struct *data)
 {
 	struct musb *musb = container_of(data, struct musb, irq_work.work);
+	int error;
+
+	error = pm_runtime_get_sync(musb->controller);
+	if (error < 0) {
+		dev_err(musb->controller, "Could not enable: %i\n", error);
+
+		return;
+	}
 
 	musb_pm_runtime_check_session(musb);
 
@@ -1916,6 +1924,9 @@
 		musb->xceiv_old_state = musb->xceiv->otg->state;
 		sysfs_notify(&musb->controller->kobj, NULL, "mode");
 	}
+
+	pm_runtime_mark_last_busy(musb->controller);
+	pm_runtime_put_autosuspend(musb->controller);
 }
 
 static void musb_recover_from_babble(struct musb *musb)
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 9f125e1..39666fb 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -213,6 +213,12 @@
 				msecs_to_jiffies(wrp->poll_timeout));
 		break;
 	case OTG_STATE_A_WAIT_BCON:
+		/* keep VBUS on for host-only mode */
+		if (musb->port_mode == MUSB_PORT_MODE_HOST) {
+			mod_timer(&glue->timer, jiffies +
+					msecs_to_jiffies(wrp->poll_timeout));
+			break;
+		}
 		musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
 		skip_session = 1;
 		/* fall */
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 5643613..2682d29 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -763,10 +763,7 @@
 	case PE_SRC_STARTUP:
 		if (pd->current_dr == DR_NONE) {
 			pd->current_dr = DR_DFP;
-			/*
-			 * Defer starting USB host mode until PE_SRC_READY or
-			 * when PE_SRC_SEND_CAPABILITIES fails
-			 */
+			start_usb_host(pd, true);
 		}
 
 		dual_role_instance_changed(pd->dual_role);
@@ -1302,14 +1299,6 @@
 				if (svid == 0xFF01)
 					has_dp = true;
 			}
-
-			/*
-			 * Finally start USB host now that we have determined
-			 * if DisplayPort mode is present or not and limit USB
-			 * to HS-only mode if so.
-			 */
-			start_usb_host(pd, !has_dp);
-
 			break;
 
 		default:
@@ -1326,7 +1315,6 @@
 		switch (cmd) {
 		case USBPD_SVDM_DISCOVER_IDENTITY:
 		case USBPD_SVDM_DISCOVER_SVIDS:
-			start_usb_host(pd, true);
 			break;
 		default:
 			break;
@@ -1723,11 +1711,7 @@
 				ARRAY_SIZE(default_src_caps), SOP_MSG);
 		if (ret) {
 			pd->caps_count++;
-
-			if (pd->caps_count == 10 && pd->current_dr == DR_DFP) {
-				/* Likely not PD-capable, start host now */
-				start_usb_host(pd, true);
-			} else if (pd->caps_count >= PD_CAPS_COUNT) {
+			if (pd->caps_count >= PD_CAPS_COUNT) {
 				usbpd_dbg(&pd->dev, "Src CapsCounter exceeded, disabling PD\n");
 				usbpd_set_state(pd, PE_SRC_DISABLED);
 
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 03eccf2..d6dc165 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -235,14 +235,19 @@
 
 static inline void hub_descriptor(struct usb_hub_descriptor *desc)
 {
+	int width;
+
 	memset(desc, 0, sizeof(*desc));
 	desc->bDescriptorType = USB_DT_HUB;
-	desc->bDescLength = 9;
 	desc->wHubCharacteristics = cpu_to_le16(
 		HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM);
+
 	desc->bNbrPorts = VHCI_HC_PORTS;
-	desc->u.hs.DeviceRemovable[0] = 0xff;
-	desc->u.hs.DeviceRemovable[1] = 0xff;
+	BUILD_BUG_ON(VHCI_HC_PORTS > USB_MAXCHILDREN);
+	width = desc->bNbrPorts / 8 + 1;
+	desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * width;
+	memset(&desc->u.hs.DeviceRemovable[0], 0, width);
+	memset(&desc->u.hs.DeviceRemovable[width], 0xff, width);
 }
 
 static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index c882357..79ddcb0 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1246,6 +1246,8 @@
 static long tce_iommu_take_ownership_ddw(struct tce_container *container,
 		struct iommu_table_group *table_group)
 {
+	long i, ret = 0;
+
 	if (!table_group->ops->create_table || !table_group->ops->set_window ||
 			!table_group->ops->release_ownership) {
 		WARN_ON_ONCE(1);
@@ -1254,7 +1256,27 @@
 
 	table_group->ops->take_ownership(table_group);
 
+	/* Set all windows to the new group */
+	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+		struct iommu_table *tbl = container->tables[i];
+
+		if (!tbl)
+			continue;
+
+		ret = table_group->ops->set_window(table_group, i, tbl);
+		if (ret)
+			goto release_exit;
+	}
+
 	return 0;
+
+release_exit:
+	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
+		table_group->ops->unset_window(table_group, i);
+
+	table_group->ops->release_ownership(table_group);
+
+	return ret;
 }
 
 static int tce_iommu_attach_group(void *iommu_data,
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index a504e2e0..e3fad30 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -368,6 +368,7 @@
 
 static int vhost_vsock_start(struct vhost_vsock *vsock)
 {
+	struct vhost_virtqueue *vq;
 	size_t i;
 	int ret;
 
@@ -378,19 +379,20 @@
 		goto err;
 
 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-		struct vhost_virtqueue *vq = &vsock->vqs[i];
+		vq = &vsock->vqs[i];
 
 		mutex_lock(&vq->mutex);
 
 		if (!vhost_vq_access_ok(vq)) {
 			ret = -EFAULT;
-			mutex_unlock(&vq->mutex);
 			goto err_vq;
 		}
 
 		if (!vq->private_data) {
 			vq->private_data = vsock;
-			vhost_vq_init_access(vq);
+			ret = vhost_vq_init_access(vq);
+			if (ret)
+				goto err_vq;
 		}
 
 		mutex_unlock(&vq->mutex);
@@ -400,8 +402,11 @@
 	return 0;
 
 err_vq:
+	vq->private_data = NULL;
+	mutex_unlock(&vq->mutex);
+
 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-		struct vhost_virtqueue *vq = &vsock->vqs[i];
+		vq = &vsock->vqs[i];
 
 		mutex_lock(&vq->mutex);
 		vq->private_data = NULL;
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index fc09eb7..ffc69dd 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -345,7 +345,7 @@
 	int status;
 
 	token = (autofs_wqt_t) param->fail.token;
-	status = param->fail.status ? param->fail.status : -ENOENT;
+	status = param->fail.status < 0 ? param->fail.status : -ENOENT;
 	return autofs4_wait_release(sbi, token, status);
 }
 
diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c
index a97fdc1..baacc18 100644
--- a/fs/btrfs/hash.c
+++ b/fs/btrfs/hash.c
@@ -38,6 +38,7 @@
 {
 	SHASH_DESC_ON_STACK(shash, tfm);
 	u32 *ctx = (u32 *)shash_desc_ctx(shash);
+	u32 retval;
 	int err;
 
 	shash->tfm = tfm;
@@ -47,5 +48,7 @@
 	err = crypto_shash_update(shash, address, length);
 	BUG_ON(err);
 
-	return *ctx;
+	retval = *ctx;
+	barrier_data(ctx);
+	return retval;
 }
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 87b87e0..efd72e1 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -849,8 +849,13 @@
 		     struct cifs_fid *fid, __u16 search_flags,
 		     struct cifs_search_info *srch_inf)
 {
-	return CIFSFindFirst(xid, tcon, path, cifs_sb,
-			     &fid->netfid, search_flags, srch_inf, true);
+	int rc;
+
+	rc = CIFSFindFirst(xid, tcon, path, cifs_sb,
+			   &fid->netfid, search_flags, srch_inf, true);
+	if (rc)
+		cifs_dbg(FYI, "find first failed=%d\n", rc);
+	return rc;
 }
 
 static int
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 36334fe..b696824 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -964,7 +964,7 @@
 	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
 	kfree(utf16_path);
 	if (rc) {
-		cifs_dbg(VFS, "open dir failed\n");
+		cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
 		return rc;
 	}
 
@@ -974,7 +974,7 @@
 	rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
 				  fid->volatile_fid, 0, srch_inf);
 	if (rc) {
-		cifs_dbg(VFS, "query directory failed\n");
+		cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
 		SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
 	}
 	return rc;
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index db6d692..314b4ed 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -83,14 +83,13 @@
 	ret = -ENOMEM;
 	sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL);
 	if (sl) {
-		sl->sl_target = config_item_get(item);
 		spin_lock(&configfs_dirent_lock);
 		if (target_sd->s_type & CONFIGFS_USET_DROPPING) {
 			spin_unlock(&configfs_dirent_lock);
-			config_item_put(item);
 			kfree(sl);
 			return -ENOENT;
 		}
+		sl->sl_target = config_item_get(item);
 		list_add(&sl->sl_list, &target_sd->s_links);
 		spin_unlock(&configfs_dirent_lock);
 		ret = configfs_create_link(sl, parent_item->ci_dentry,
diff --git a/fs/exec.c b/fs/exec.c
index c8ca064..26ab263 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -215,8 +215,26 @@
 
 	if (write) {
 		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+		unsigned long ptr_size;
 		struct rlimit *rlim;
 
+		/*
+		 * Since the stack will hold pointers to the strings, we
+		 * must account for them as well.
+		 *
+		 * The size calculation is the entire vma while each arg page is
+		 * built, so each time we get here it's calculating how far it
+		 * is currently (rather than each call being just the newly
+		 * added size from the arg page).  As a result, we need to
+		 * always add the entire size of the pointers, so that on the
+		 * last call to get_arg_page() we'll actually have the entire
+		 * correct size.
+		 */
+		ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+		if (ptr_size > ULONG_MAX - size)
+			goto fail;
+		size += ptr_size;
+
 		acct_arg_size(bprm, size / PAGE_SIZE);
 
 		/*
@@ -234,13 +252,15 @@
 		 *    to work from.
 		 */
 		rlim = current->signal->rlim;
-		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
-			put_page(page);
-			return NULL;
-		}
+		if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+			goto fail;
 	}
 
 	return page;
+
+fail:
+	put_page(page);
+	return NULL;
 }
 
 static void put_arg_page(struct page *page)
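
The comment added to get_arg_page() above reasons that the argv/envp pointers themselves consume stack space, so their combined size must be folded into the limit check on every call. A minimal stand-alone C sketch of that arithmetic, assuming 64-bit pointers and a hypothetical helper name (not kernel code), looks like this:

/* Illustrative sketch only; mirrors the ptr_size accounting in the
 * get_arg_page() hunk above. */
#include <limits.h>
#include <stddef.h>

static int arg_pages_within_limit(unsigned long size, int argc, int envc,
                                  unsigned long stack_rlim_cur)
{
        unsigned long ptr_size = (unsigned long)(argc + envc) * sizeof(void *);

        if (ptr_size > ULONG_MAX - size)        /* overflow guard, as in the patch */
                return 0;
        size += ptr_size;

        /* at most a quarter of RLIMIT_STACK may be used for arguments */
        return size <= stack_rlim_cur / 4;
}

For example, with argc = 1000, envc = 50 and 8-byte pointers, ptr_size adds 8400 bytes on top of the string pages already counted in size.
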
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index c12f695..88e111a 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -948,6 +948,7 @@
 {
 	SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
 	u32 *ctx = (u32 *)shash_desc_ctx(shash);
+	u32 retval;
 	int err;
 
 	shash->tfm = sbi->s_chksum_driver;
@@ -957,7 +958,9 @@
 	err = crypto_shash_update(shash, address, length);
 	BUG_ON(err);
 
-	return *ctx;
+	retval = *ctx;
+	barrier_data(ctx);
+	return retval;
 }
 
 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 43040721..40d6107 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -542,6 +542,7 @@
 		hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
 			if (invalidate)
 				set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
+			clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
 			fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
 		}
 	} else {
@@ -560,6 +561,10 @@
 		wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
 				 TASK_UNINTERRUPTIBLE);
 
+	/* Make sure any pending writes are cancelled. */
+	if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
+		fscache_invalidate_writes(cookie);
+
 	/* Reset the cookie state if it wasn't relinquished */
 	if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
 		atomic_inc(&cookie->n_active);
diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c
index 9b28649..a8aa00b 100644
--- a/fs/fscache/netfs.c
+++ b/fs/fscache/netfs.c
@@ -48,6 +48,7 @@
 	cookie->flags		= 1 << FSCACHE_COOKIE_ENABLED;
 
 	spin_lock_init(&cookie->lock);
+	spin_lock_init(&cookie->stores_lock);
 	INIT_HLIST_HEAD(&cookie->backing_objects);
 
 	/* check the netfs type is not already present */
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 9e792e3..7a182c8 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -30,6 +30,7 @@
 static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
 static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
 static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
 
 #define __STATE_NAME(n) fscache_osm_##n
 #define STATE(n) (&__STATE_NAME(n))
@@ -91,7 +92,7 @@
 static WORK_STATE(KILL_OBJECT,		"KILL", fscache_kill_object);
 static WORK_STATE(KILL_DEPENDENTS,	"KDEP", fscache_kill_dependents);
 static WORK_STATE(DROP_OBJECT,		"DROP", fscache_drop_object);
-static WORK_STATE(OBJECT_DEAD,		"DEAD", (void*)2UL);
+static WORK_STATE(OBJECT_DEAD,		"DEAD", fscache_object_dead);
 
 static WAIT_STATE(WAIT_FOR_INIT,	"?INI",
 		  TRANSIT_TO(INIT_OBJECT,	1 << FSCACHE_OBJECT_EV_NEW_CHILD));
@@ -229,6 +230,10 @@
 	event = -1;
 	if (new_state == NO_TRANSIT) {
 		_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
+		if (unlikely(state == STATE(OBJECT_DEAD))) {
+			_leave(" [dead]");
+			return;
+		}
 		fscache_enqueue_object(object);
 		event_mask = object->oob_event_mask;
 		goto unmask_events;
@@ -239,7 +244,7 @@
 	object->state = state = new_state;
 
 	if (state->work) {
-		if (unlikely(state->work == ((void *)2UL))) {
+		if (unlikely(state == STATE(OBJECT_DEAD))) {
 			_leave(" [dead]");
 			return;
 		}
@@ -645,6 +650,12 @@
 	fscache_mark_object_dead(object);
 	object->oob_event_mask = 0;
 
+	if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
+		/* Reject any new read/write ops and abort any that are pending. */
+		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
+		fscache_cancel_all_ops(object);
+	}
+
 	if (list_empty(&object->dependents) &&
 	    object->n_ops == 0 &&
 	    object->n_children == 0)
@@ -1077,3 +1088,20 @@
 	}
 }
 EXPORT_SYMBOL(fscache_object_mark_killed);
+
+/*
+ * The object is dead.  We can get here if an object gets queued by an event
+ * that would lead to its death (such as EV_KILL) when the dispatcher is
+ * already running (and so can be requeued) but hasn't yet cleared the event
+ * mask.
+ */
+static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
+						       int event)
+{
+	if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
+			      &object->flags))
+		return NO_TRANSIT;
+
+	WARN(true, "FS-Cache object redispatched after death");
+	return NO_TRANSIT;
+}
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 133f322..6528724 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1425,26 +1425,32 @@
  * @sdp: the filesystem
  * @bucket: the bucket
  *
+ * Note that the function can be called multiple times on the same
+ * object.  So the user must ensure that the function can cope with
+ * that.
  */
 
 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 {
 	struct gfs2_glock *gl;
-	struct rhash_head *pos;
-	const struct bucket_table *tbl;
-	int i;
+	struct rhashtable_iter iter;
 
-	rcu_read_lock();
-	tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
-	for (i = 0; i < tbl->size; i++) {
-		rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) {
+	rhashtable_walk_enter(&gl_hash_table, &iter);
+
+	do {
+		gl = ERR_PTR(rhashtable_walk_start(&iter));
+		if (gl)
+			continue;
+
+		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
 			if ((gl->gl_name.ln_sbd == sdp) &&
 			    lockref_get_not_dead(&gl->gl_lockref))
 				examiner(gl);
-		}
-	}
-	rcu_read_unlock();
-	cond_resched();
+
+		rhashtable_walk_stop(&iter);
+	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));
+
+	rhashtable_walk_exit(&iter);
 }
 
 /**
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 4fb7b10..704fa0b 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -191,7 +191,7 @@
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0959c96..9267191 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1079,6 +1079,7 @@
 		case -NFS4ERR_BADXDR:
 		case -NFS4ERR_RESOURCE:
 		case -NFS4ERR_NOFILEHANDLE:
+		case -NFS4ERR_MOVED:
 			/* Non-seqid mutating errors */
 			return;
 	};
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c573113..45f75c4 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3386,6 +3386,8 @@
 	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
 		char name[PROC_NUMBUF];
 		int len;
+
+		cond_resched();
 		if (!has_pid_permissions(ns, iter.task, 2))
 			continue;
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f998332..9182f84 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -349,11 +349,7 @@
 
 	/* We don't show the stack guard page in /proc/maps */
 	start = vma->vm_start;
-	if (stack_guard_page_start(vma, start))
-		start += PAGE_SIZE;
 	end = vma->vm_end;
-	if (stack_guard_page_end(vma, end))
-		end -= PAGE_SIZE;
 
 	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
diff --git a/fs/read_write.c b/fs/read_write.c
index 190e0d36..e479e24 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1232,7 +1232,7 @@
 	if (!(file->f_mode & FMODE_CAN_WRITE))
 		goto out;
 
-	ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, 0);
+	ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, flags);
 
 out:
 	if (ret > 0)
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index d0f8a38..0186fe6 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -74,6 +74,7 @@
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/uaccess.h>
+#include <linux/major.h>
 #include "internal.h"
 
 static struct kmem_cache *romfs_inode_cachep;
@@ -416,7 +417,22 @@
 static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct super_block *sb = dentry->d_sb;
-	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+	u64 id = 0;
+
+	/* When calling huge_encode_dev(),
+	 * use sb->s_bdev->bd_dev when,
+	 *   - CONFIG_ROMFS_ON_BLOCK defined
+	 * use sb->s_dev when,
+	 *   - CONFIG_ROMFS_ON_BLOCK undefined and
+	 *   - CONFIG_ROMFS_ON_MTD defined
+	 * leave id as 0 when,
+	 *   - CONFIG_ROMFS_ON_BLOCK undefined and
+	 *   - CONFIG_ROMFS_ON_MTD undefined
+	 */
+	if (sb->s_bdev)
+		id = huge_encode_dev(sb->s_bdev->bd_dev);
+	else if (sb->s_dev)
+		id = huge_encode_dev(sb->s_dev);
 
 	buf->f_type = ROMFS_MAGIC;
 	buf->f_namelen = ROMFS_MAXFN;
@@ -489,6 +505,11 @@
 	sb->s_flags |= MS_RDONLY | MS_NOATIME;
 	sb->s_op = &romfs_super_ops;
 
+#ifdef CONFIG_ROMFS_ON_MTD
+	/* Use same dev ID from the underlying mtdblock device */
+	if (sb->s_mtd)
+		sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
+#endif
 	/* read the image superblock and check it */
 	rsb = kmalloc(512, GFP_KERNEL);
 	if (!rsb)
diff --git a/fs/sdcardfs/packagelist.c b/fs/sdcardfs/packagelist.c
index 00a0f65..6da0c21 100644
--- a/fs/sdcardfs/packagelist.c
+++ b/fs/sdcardfs/packagelist.c
@@ -174,19 +174,6 @@
 	return 1;
 }
 
-/* This function is used when file opening. The open flags must be
- * checked before calling check_caller_access_to_name()
- */
-int open_flags_to_access_mode(int open_flags)
-{
-	if ((open_flags & O_ACCMODE) == O_RDONLY)
-		return 0; /* R_OK */
-	if ((open_flags & O_ACCMODE) == O_WRONLY)
-		return 1; /* W_OK */
-	/* Probably O_RDRW, but treat as default to be safe */
-		return 1; /* R_OK | W_OK */
-}
-
 static struct hashtable_entry *alloc_hashtable_entry(const struct qstr *key,
 		appid_t value)
 {
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index 3687b22..4e0ce49 100644
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -499,7 +499,6 @@
 extern appid_t get_ext_gid(const char *app_name);
 extern appid_t is_excluded(const char *app_name, userid_t userid);
 extern int check_caller_access_to_name(struct inode *parent_node, const struct qstr *name);
-extern int open_flags_to_access_mode(int open_flags);
 extern int packagelist_init(void);
 extern void packagelist_exit(void);
 
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 69c867c0..2cde073 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -63,6 +63,7 @@
 	struct uffd_msg msg;
 	wait_queue_t wq;
 	struct userfaultfd_ctx *ctx;
+	bool waken;
 };
 
 struct userfaultfd_wake_range {
@@ -86,6 +87,12 @@
 	if (len && (start > uwq->msg.arg.pagefault.address ||
 		    start + len <= uwq->msg.arg.pagefault.address))
 		goto out;
+	WRITE_ONCE(uwq->waken, true);
+	/*
+	 * The implicit smp_mb__before_spinlock in try_to_wake_up()
+	 * renders uwq->waken visible to other CPUs before the task is
+	 * waken.
+	 */
 	ret = wake_up_state(wq->private, mode);
 	if (ret)
 		/*
@@ -264,6 +271,7 @@
 	struct userfaultfd_wait_queue uwq;
 	int ret;
 	bool must_wait, return_to_userland;
+	long blocking_state;
 
 	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
@@ -333,10 +341,13 @@
 	uwq.wq.private = current;
 	uwq.msg = userfault_msg(fe->address, fe->flags, reason);
 	uwq.ctx = ctx;
+	uwq.waken = false;
 
 	return_to_userland =
 		(fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
 		(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
+	blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
+			 TASK_KILLABLE;
 
 	spin_lock(&ctx->fault_pending_wqh.lock);
 	/*
@@ -349,8 +360,7 @@
 	 * following the spin_unlock to happen before the list_add in
 	 * __add_wait_queue.
 	 */
-	set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
-			  TASK_KILLABLE);
+	set_current_state(blocking_state);
 	spin_unlock(&ctx->fault_pending_wqh.lock);
 
 	must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason);
@@ -362,6 +372,29 @@
 		wake_up_poll(&ctx->fd_wqh, POLLIN);
 		schedule();
 		ret |= VM_FAULT_MAJOR;
+
+		/*
+		 * False wakeups can originate even from rwsem before
+		 * up_read() however userfaults will wait either for a
+		 * targeted wakeup on the specific uwq waitqueue from
+		 * wake_userfault() or for signals or for uffd
+		 * release.
+		 */
+		while (!READ_ONCE(uwq.waken)) {
+			/*
+			 * This needs the full smp_store_mb()
+			 * guarantee as the state write must be
+			 * visible to other CPUs before reading
+			 * uwq.waken from other CPUs.
+			 */
+			set_current_state(blocking_state);
+			if (READ_ONCE(uwq.waken) ||
+			    READ_ONCE(ctx->released) ||
+			    (return_to_userland ? signal_pending(current) :
+			     fatal_signal_pending(current)))
+				break;
+			schedule();
+		}
 	}
 
 	__set_current_state(TASK_RUNNING);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e9fb2e8..0c4f9c67 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -776,6 +776,7 @@
 	struct drm_minor *control;		/**< Control node */
 	struct drm_minor *primary;		/**< Primary node */
 	struct drm_minor *render;		/**< Render node */
+	bool registered;
 
 	/* currently active master for this device. Protected by master_mutex */
 	struct drm_master *master;
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 1c12875..b28c4a3 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -345,6 +345,8 @@
 	 * core drm connector interfaces. Everything added from this callback
 	 * should be unregistered in the early_unregister callback.
 	 *
+	 * This is called while holding drm_connector->mutex.
+	 *
 	 * Returns:
 	 *
 	 * 0 on success, or a negative error code on failure.
@@ -359,6 +361,8 @@
 	 * late_register(). It is called from drm_connector_unregister(),
 	 * early in the driver unload sequence to disable userspace access
 	 * before data structures are torndown.
+	 *
+	 * This is called while holding drm_connector->mutex.
 	 */
 	void (*early_unregister)(struct drm_connector *connector);
 
@@ -511,7 +515,6 @@
  * @interlace_allowed: can this connector handle interlaced modes?
  * @doublescan_allowed: can this connector handle doublescan?
  * @stereo_allowed: can this connector handle stereo modes?
- * @registered: is this connector exposed (registered) with userspace?
  * @modes: modes available on this connector (from fill_modes() + user)
  * @status: one of the drm_connector_status enums (connected, not, or unknown)
  * @probed_modes: list of modes derived directly from the display
@@ -574,6 +577,13 @@
 	char *name;
 
 	/**
+	 * @mutex: Lock for general connector state, but currently only protects
+	 * @registered. Most of the connector state is still protected by the
+	 * mutex in &drm_mode_config.
+	 */
+	struct mutex mutex;
+
+	/**
 	 * @index: Compacted connector index, which matches the position inside
 	 * the mode_config.list for drivers not supporting hot-add/removing. Can
 	 * be used as an array index. It is invariant over the lifetime of the
@@ -586,6 +596,10 @@
 	bool interlace_allowed;
 	bool doublescan_allowed;
 	bool stereo_allowed;
+	/**
+	 * @registered: Is this connector exposed (registered) with userspace?
+	 * Protected by @mutex.
+	 */
 	bool registered;
 	struct list_head modes; /* list of modes on this connector */
 
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 8ee110a..a52b65a 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -494,6 +494,7 @@
 #define CLK_DIVIDER_ROUND_CLOSEST	BIT(4)
 #define CLK_DIVIDER_READ_ONLY		BIT(5)
 #define CLK_DIVIDER_MAX_AT_ZERO		BIT(6)
+#define CLK_DIVIDER_ROUND_KHZ	BIT(7)
 
 extern const struct clk_ops clk_divider_ops;
 extern const struct clk_ops clk_divider_ro_ops;
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index b8eb25b..4fbc62e 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -103,6 +103,13 @@
 #define num_present_cpus()	cpumask_weight(cpu_present_mask)
 #define num_active_cpus()	cpumask_weight(cpu_active_mask)
 #define num_isolated_cpus()	cpumask_weight(cpu_isolated_mask)
+#define num_online_uniso_cpus()						\
+({									\
+	cpumask_t mask;							\
+									\
+	cpumask_andnot(&mask, cpu_online_mask, cpu_isolated_mask);	\
+	cpumask_weight(&mask);						\
+})
 #define cpu_online(cpu)		cpumask_test_cpu((cpu), cpu_online_mask)
 #define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
 #define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
@@ -114,6 +121,7 @@
 #define num_present_cpus()	1U
 #define num_active_cpus()	1U
 #define num_isolated_cpus()	0U
+#define num_online_uniso_cpus()	1U
 #define cpu_online(cpu)		((cpu) == 0)
 #define cpu_possible(cpu)	((cpu) == 0)
 #define cpu_present(cpu)	((cpu) == 0)
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 13ba552..4c467ef 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -360,6 +360,7 @@
 #define FSCACHE_OBJECT_IS_AVAILABLE	5	/* T if object has become active */
 #define FSCACHE_OBJECT_RETIRED		6	/* T if object was retired on relinquishment */
 #define FSCACHE_OBJECT_KILLED_BY_CACHE	7	/* T if object was killed by the cache */
+#define FSCACHE_OBJECT_RUN_AFTER_DEAD	8	/* T if object has been dispatched after death */
 
 	struct list_head	cache_link;	/* link in cache->object_list */
 	struct hlist_node	cookie_link;	/* link in cookie->backing_objects */
diff --git a/include/linux/log2.h b/include/linux/log2.h
index f38fae2..c373295f 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -194,6 +194,17 @@
  *  ... and so on.
  */
 
-#define order_base_2(n) ilog2(roundup_pow_of_two(n))
+static inline __attribute_const__
+int __order_base_2(unsigned long n)
+{
+	return n > 1 ? ilog2(n - 1) + 1 : 0;
+}
 
+#define order_base_2(n)				\
+(						\
+	__builtin_constant_p(n) ? (		\
+		((n) == 0 || (n) == 1) ? 0 :	\
+		ilog2((n) - 1) + 1) :		\
+	__order_base_2(n)			\
+)
 #endif /* _LINUX_LOG2_H */
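
The reworked order_base_2() above still yields the rounded-up base-2 order, but the constant-expression branch now handles 0 and 1 explicitly instead of going through roundup_pow_of_two(). A small C sketch with the same semantics, using a made-up helper name for illustration only:

/* Illustrative only: same results as __order_base_2()/order_base_2().
 * order2(0) == order2(1) == 0, order2(16) == 4, order2(17) == 5. */
static inline int order2(unsigned long n)
{
        int bits = 0;

        if (n <= 1)
                return 0;
        n--;                    /* so exact powers of two round to themselves */
        while (n) {
                n >>= 1;
                bits++;
        }
        return bits;
}
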
diff --git a/include/linux/mfd/msm-cdc-pinctrl.h b/include/linux/mfd/msm-cdc-pinctrl.h
index 14b18fe..7eabefb 100644
--- a/include/linux/mfd/msm-cdc-pinctrl.h
+++ b/include/linux/mfd/msm-cdc-pinctrl.h
@@ -16,11 +16,13 @@
 #include <linux/types.h>
 #include <linux/of.h>
 
-#ifdef CONFIG_MSM_CDC_PINCTRL
+#if IS_ENABLED(CONFIG_MSM_CDC_PINCTRL)
 extern int msm_cdc_pinctrl_select_sleep_state(struct device_node *np);
 extern int msm_cdc_pinctrl_select_active_state(struct device_node *np);
 extern bool msm_cdc_pinctrl_get_state(struct device_node *np);
 extern int msm_cdc_get_gpio_state(struct device_node *np);
+int msm_cdc_pinctrl_drv_init(void);
+void msm_cdc_pinctrl_drv_exit(void);
 
 #else
 int msm_cdc_pinctrl_select_sleep_state(struct device_node *np)
@@ -35,7 +37,13 @@
 {
 	return 0;
 }
-#
+int msm_cdc_pinctrl_drv_init(void)
+{
+	return 0;
+}
+void msm_cdc_pinctrl_drv_exit(void)
+{
+}
 #endif
 
 #endif
diff --git a/include/linux/mfd/wcd9335/irq.h b/include/linux/mfd/wcd9335/irq.h
new file mode 100644
index 0000000..c666d31
--- /dev/null
+++ b/include/linux/mfd/wcd9335/irq.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __WCD9335_IRQ_H_
+#define __WCD9335_IRQ_H_
+
+enum {
+	/* INTR_REG 0 */
+	WCD9335_IRQ_FLL_LOCK_LOSS = 1,
+	WCD9335_IRQ_HPH_PA_OCPL_FAULT,
+	WCD9335_IRQ_HPH_PA_OCPR_FAULT,
+	WCD9335_IRQ_EAR_PA_OCP_FAULT,
+	WCD9335_IRQ_HPH_PA_CNPL_COMPLETE,
+	WCD9335_IRQ_HPH_PA_CNPR_COMPLETE,
+	WCD9335_IRQ_EAR_PA_CNP_COMPLETE,
+	/* INTR_REG 1 */
+	WCD9335_IRQ_MBHC_SW_DET,
+	WCD9335_IRQ_MBHC_ELECT_INS_REM_DET,
+	WCD9335_IRQ_MBHC_BUTTON_PRESS_DET,
+	WCD9335_IRQ_MBHC_BUTTON_RELEASE_DET,
+	WCD9335_IRQ_MBHC_ELECT_INS_REM_LEG_DET,
+	WCD9335_IRQ_RESERVED_0,
+	WCD9335_IRQ_RESERVED_1,
+	WCD9335_IRQ_RESERVED_2,
+	/* INTR_REG 2 */
+	WCD9335_IRQ_LINE_PA1_CNP_COMPLETE,
+	WCD9335_IRQ_LINE_PA2_CNP_COMPLETE,
+	WCD9335_IRQ_LINE_PA3_CNP_COMPLETE,
+	WCD9335_IRQ_LINE_PA4_CNP_COMPLETE,
+	WCD9335_IRQ_SOUNDWIRE,
+	WCD9335_IRQ_VDD_DIG_RAMP_COMPLETE,
+	WCD9335_IRQ_RCO_ERROR,
+	WCD9335_IRQ_SVA_ERROR,
+	/* INTR_REG 3 */
+	WCD9335_IRQ_MAD_AUDIO,
+	WCD9335_IRQ_MAD_BEACON,
+	WCD9335_IRQ_MAD_ULTRASOUND,
+	WCD9335_IRQ_VBAT_ATTACK,
+	WCD9335_IRQ_VBAT_RESTORE,
+	WCD9335_IRQ_SVA_OUTBOX1,
+	WCD9335_IRQ_SVA_OUTBOX2,
+	WCD9335_NUM_IRQS,
+};
+
+#endif
diff --git a/include/linux/mfd/wcd934x/irq.h b/include/linux/mfd/wcd934x/irq.h
new file mode 100644
index 0000000..1a18be3
--- /dev/null
+++ b/include/linux/mfd/wcd934x/irq.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __WCD934X_IRQ_H_
+#define __WCD934X_IRQ_H_
+
+enum {
+	/* INTR_REG 0 */
+	WCD934X_IRQ_MISC = 1,
+	WCD934X_IRQ_HPH_PA_OCPL_FAULT,
+	WCD934X_IRQ_HPH_PA_OCPR_FAULT,
+	WCD934X_IRQ_EAR_PA_OCP_FAULT,
+	WCD934X_IRQ_HPH_PA_CNPL_COMPLETE,
+	WCD934X_IRQ_HPH_PA_CNPR_COMPLETE,
+	WCD934X_IRQ_EAR_PA_CNP_COMPLETE,
+	/* INTR_REG 1 */
+	WCD934X_IRQ_MBHC_SW_DET,
+	WCD934X_IRQ_MBHC_ELECT_INS_REM_DET,
+	WCD934X_IRQ_MBHC_BUTTON_PRESS_DET,
+	WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET,
+	WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET,
+	WCD934X_IRQ_RESERVED_0,
+	WCD934X_IRQ_RESERVED_1,
+	WCD934X_IRQ_RESERVED_2,
+	/* INTR_REG 2 */
+	WCD934X_IRQ_LINE_PA1_CNP_COMPLETE,
+	WCD934X_IRQ_LINE_PA2_CNP_COMPLETE,
+	WCD934X_IRQ_SLNQ_ANALOG_ERROR,
+	WCD934X_IRQ_RESERVED_3,
+	WCD934X_IRQ_SOUNDWIRE,
+	WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE,
+	WCD934X_IRQ_RCO_ERROR,
+	WCD934X_IRQ_CPE_ERROR,
+	/* INTR_REG 3 */
+	WCD934X_IRQ_MAD_AUDIO,
+	WCD934X_IRQ_MAD_BEACON,
+	WCD934X_IRQ_MAD_ULTRASOUND,
+	WCD934X_IRQ_VBAT_ATTACK,
+	WCD934X_IRQ_VBAT_RESTORE,
+	WCD934X_IRQ_CPE1_INTR,
+	WCD934X_IRQ_RESERVED_4,
+	WCD934X_IRQ_SLNQ_DIGITAL,
+	WCD934X_NUM_IRQS,
+};
+
+#endif
diff --git a/include/linux/mfd/wcd9xxx/core.h b/include/linux/mfd/wcd9xxx/core.h
index c6c8d24..b4c1be4 100644
--- a/include/linux/mfd/wcd9xxx/core.h
+++ b/include/linux/mfd/wcd9xxx/core.h
@@ -434,4 +434,7 @@
 {
 	return 0;
 }
+
+int wcd9xxx_init(void);
+void wcd9xxx_exit(void);
 #endif
diff --git a/include/linux/mfd/wcd9xxx/wcd9330_registers.h b/include/linux/mfd/wcd9xxx/wcd9330_registers.h
deleted file mode 100644
index c37d25f..0000000
--- a/include/linux/mfd/wcd9xxx/wcd9330_registers.h
+++ /dev/null
@@ -1,1626 +0,0 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef WCD9330_REGISTERS_H
-#define WCD9330_REGISTERS_H
-
-#include <linux/types.h>
-
-#define TOMTOM_A_CHIP_CTL			(0x000)
-#define TOMTOM_A_CHIP_CTL__POR				(0x38)
-#define TOMTOM_A_CHIP_STATUS			(0x001)
-#define TOMTOM_A_CHIP_STATUS__POR				(0x00)
-#define TOMTOM_A_CHIP_ID_BYTE_0			(0x004)
-#define TOMTOM_A_CHIP_ID_BYTE_0__POR				(0x00)
-#define TOMTOM_A_CHIP_ID_BYTE_1			(0x005)
-#define TOMTOM_A_CHIP_ID_BYTE_1__POR				(0x00)
-#define TOMTOM_A_CHIP_ID_BYTE_2			(0x006)
-#define TOMTOM_A_CHIP_ID_BYTE_2__POR				(0x05)
-#define TOMTOM_A_CHIP_ID_BYTE_3			(0x007)
-#define TOMTOM_A_CHIP_ID_BYTE_3__POR				(0x01)
-#define TOMTOM_A_CHIP_I2C_SLAVE_ID			(0x008)
-#define TOMTOM_A_CHIP_I2C_SLAVE_ID__POR				(0x01)
-#define TOMTOM_A_SLAVE_ID_1			(0x00C)
-#define TOMTOM_A_SLAVE_ID_1__POR				(0x77)
-#define TOMTOM_A_SLAVE_ID_2			(0x00D)
-#define TOMTOM_A_SLAVE_ID_2__POR				(0x66)
-#define TOMTOM_A_SLAVE_ID_3			(0x00E)
-#define TOMTOM_A_SLAVE_ID_3__POR				(0x55)
-#define TOMTOM_A_PIN_CTL_OE0			(0x010)
-#define TOMTOM_A_PIN_CTL_OE0__POR				(0x00)
-#define TOMTOM_A_PIN_CTL_OE1			(0x011)
-#define TOMTOM_A_PIN_CTL_OE1__POR				(0x00)
-#define TOMTOM_A_PIN_CTL_OE2			(0x012)
-#define TOMTOM_A_PIN_CTL_OE2__POR				(0x00)
-#define TOMTOM_A_PIN_CTL_DATA0			(0x013)
-#define TOMTOM_A_PIN_CTL_DATA0__POR				(0x00)
-#define TOMTOM_A_PIN_CTL_DATA1			(0x014)
-#define TOMTOM_A_PIN_CTL_DATA1__POR				(0x00)
-#define TOMTOM_A_PIN_CTL_DATA2			(0x015)
-#define TOMTOM_A_PIN_CTL_DATA2__POR				(0x00)
-#define TOMTOM_A_HDRIVE_GENERIC			(0x018)
-#define TOMTOM_A_HDRIVE_GENERIC__POR				(0x00)
-#define TOMTOM_A_HDRIVE_OVERRIDE			(0x019)
-#define TOMTOM_A_HDRIVE_OVERRIDE__POR				(0x08)
-#define TOMTOM_A_ANA_CSR_WAIT_STATE			(0x01C)
-#define TOMTOM_A_ANA_CSR_WAIT_STATE__POR				(0x44)
-#define TOMTOM_A_PROCESS_MONITOR_CTL0			(0x020)
-#define TOMTOM_A_PROCESS_MONITOR_CTL0__POR				(0x80)
-#define TOMTOM_A_PROCESS_MONITOR_CTL1			(0x021)
-#define TOMTOM_A_PROCESS_MONITOR_CTL1__POR				(0x00)
-#define TOMTOM_A_PROCESS_MONITOR_CTL2			(0x022)
-#define TOMTOM_A_PROCESS_MONITOR_CTL2__POR				(0x00)
-#define TOMTOM_A_PROCESS_MONITOR_CTL3			(0x023)
-#define TOMTOM_A_PROCESS_MONITOR_CTL3__POR				(0x01)
-#define TOMTOM_A_QFUSE_CTL			(0x028)
-#define TOMTOM_A_QFUSE_CTL__POR				(0x00)
-#define TOMTOM_A_QFUSE_STATUS			(0x029)
-#define TOMTOM_A_QFUSE_STATUS__POR				(0x00)
-#define TOMTOM_A_QFUSE_DATA_OUT0			(0x02A)
-#define TOMTOM_A_QFUSE_DATA_OUT0__POR				(0x00)
-#define TOMTOM_A_QFUSE_DATA_OUT1			(0x02B)
-#define TOMTOM_A_QFUSE_DATA_OUT1__POR				(0x00)
-#define TOMTOM_A_QFUSE_DATA_OUT2			(0x02C)
-#define TOMTOM_A_QFUSE_DATA_OUT2__POR				(0x00)
-#define TOMTOM_A_QFUSE_DATA_OUT3			(0x02D)
-#define TOMTOM_A_QFUSE_DATA_OUT3__POR				(0x00)
-#define TOMTOM_A_QFUSE_DATA_OUT4			(0x02E)
-#define TOMTOM_A_QFUSE_DATA_OUT4__POR				(0x00)
-#define TOMTOM_A_QFUSE_DATA_OUT5			(0x02F)
-#define TOMTOM_A_QFUSE_DATA_OUT5__POR				(0x00)
-#define TOMTOM_A_QFUSE_DATA_OUT6			(0x030)
-#define TOMTOM_A_QFUSE_DATA_OUT6__POR				(0x00)
-#define TOMTOM_A_QFUSE_DATA_OUT7			(0x031)
-#define TOMTOM_A_QFUSE_DATA_OUT7__POR				(0x00)
-#define TOMTOM_A_CDC_CTL			(0x034)
-#define TOMTOM_A_CDC_CTL__POR				(0x00)
-#define TOMTOM_A_LEAKAGE_CTL			(0x03C)
-#define TOMTOM_A_LEAKAGE_CTL__POR				(0x04)
-#define TOMTOM_A_SVASS_MEM_PTR0			(0x044)
-#define TOMTOM_A_SVASS_MEM_PTR0__POR				(0x00)
-#define TOMTOM_A_SVASS_MEM_PTR1			(0x045)
-#define TOMTOM_A_SVASS_MEM_PTR1__POR				(0x00)
-#define TOMTOM_A_SVASS_MEM_PTR2			(0x046)
-#define TOMTOM_A_SVASS_MEM_PTR2__POR				(0x00)
-#define TOMTOM_A_SVASS_MEM_CTL			(0x048)
-#define TOMTOM_A_SVASS_MEM_CTL__POR				(0x04)
-#define TOMTOM_A_SVASS_MEM_BANK			(0x049)
-#define TOMTOM_A_SVASS_MEM_BANK__POR				(0x00)
-#define TOMTOM_A_DMIC_B1_CTL			(0x04A)
-#define TOMTOM_A_DMIC_B1_CTL__POR				(0x00)
-#define TOMTOM_A_DMIC_B2_CTL			(0x04B)
-#define TOMTOM_A_DMIC_B2_CTL__POR				(0x00)
-#define TOMTOM_A_SVASS_CLKRST_CTL			(0x04C)
-#define TOMTOM_A_SVASS_CLKRST_CTL__POR				(0x00)
-#define TOMTOM_A_SVASS_CPAR_CFG			(0x04D)
-#define TOMTOM_A_SVASS_CPAR_CFG__POR				(0x00)
-#define TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD			(0x04E)
-#define TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD__POR				(0x14)
-#define TOMTOM_A_SVASS_CPAR_WDOG_CFG			(0x04F)
-#define TOMTOM_A_SVASS_CPAR_WDOG_CFG__POR				(0x00)
-#define TOMTOM_A_SVASS_CFG			(0x050)
-#define TOMTOM_A_SVASS_CFG__POR				(0x01)
-#define TOMTOM_A_SVASS_SPE_CFG			(0x051)
-#define TOMTOM_A_SVASS_SPE_CFG__POR				(0x04)
-#define TOMTOM_A_SVASS_STATUS			(0x052)
-#define TOMTOM_A_SVASS_STATUS__POR				(0x00)
-#define TOMTOM_A_SVASS_INT_MASK			(0x053)
-#define TOMTOM_A_SVASS_INT_MASK__POR				(0x3F)
-#define TOMTOM_A_SVASS_INT_STATUS			(0x054)
-#define TOMTOM_A_SVASS_INT_STATUS__POR				(0x00)
-#define TOMTOM_A_SVASS_INT_CLR			(0x055)
-#define TOMTOM_A_SVASS_INT_CLR__POR				(0x00)
-#define TOMTOM_A_SVASS_DEBUG			(0x056)
-#define TOMTOM_A_SVASS_DEBUG__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_BKUP_INT			(0x057)
-#define TOMTOM_A_SVASS_SPE_BKUP_INT__POR				(0x00)
-#define TOMTOM_A_SVASS_MEM_ACC			(0x058)
-#define TOMTOM_A_SVASS_MEM_ACC__POR				(0x00)
-#define TOMTOM_A_MEM_LEAKAGE_CTL			(0x059)
-#define TOMTOM_A_MEM_LEAKAGE_CTL__POR				(0x04)
-#define TOMTOM_A_SVASS_SPE_INBOX_TRG			(0x05A)
-#define TOMTOM_A_SVASS_SPE_INBOX_TRG__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_0			(0x060)
-#define TOMTOM_A_SVASS_SPE_INBOX_0__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_1			(0x061)
-#define TOMTOM_A_SVASS_SPE_INBOX_1__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_2			(0x062)
-#define TOMTOM_A_SVASS_SPE_INBOX_2__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_3			(0x063)
-#define TOMTOM_A_SVASS_SPE_INBOX_3__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_4			(0x064)
-#define TOMTOM_A_SVASS_SPE_INBOX_4__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_5			(0x065)
-#define TOMTOM_A_SVASS_SPE_INBOX_5__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_6			(0x066)
-#define TOMTOM_A_SVASS_SPE_INBOX_6__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_7			(0x067)
-#define TOMTOM_A_SVASS_SPE_INBOX_7__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_8			(0x068)
-#define TOMTOM_A_SVASS_SPE_INBOX_8__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_9			(0x069)
-#define TOMTOM_A_SVASS_SPE_INBOX_9__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_10			(0x06A)
-#define TOMTOM_A_SVASS_SPE_INBOX_10__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_11			(0x06B)
-#define TOMTOM_A_SVASS_SPE_INBOX_11__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_0			(0x070)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_0__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_1			(0x071)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_1__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_2			(0x072)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_2__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_3			(0x073)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_3__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_4			(0x074)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_4__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_5			(0x075)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_5__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_6			(0x076)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_6__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_7			(0x077)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_7__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_8			(0x078)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_8__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_9			(0x079)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_9__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_10			(0x07A)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_10__POR				(0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_11			(0x07B)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_11__POR				(0x00)
-#define TOMTOM_A_INTR_MODE			(0x090)
-#define TOMTOM_A_INTR_MODE__POR				(0x00)
-#define TOMTOM_A_INTR1_MASK0			(0x094)
-#define TOMTOM_A_INTR1_MASK0__POR				(0xFF)
-#define TOMTOM_A_INTR1_MASK1			(0x095)
-#define TOMTOM_A_INTR1_MASK1__POR				(0xFF)
-#define TOMTOM_A_INTR1_MASK2			(0x096)
-#define TOMTOM_A_INTR1_MASK2__POR				(0xFF)
-#define TOMTOM_A_INTR1_MASK3			(0x097)
-#define TOMTOM_A_INTR1_MASK3__POR				(0xFF)
-#define TOMTOM_A_INTR1_STATUS0			(0x098)
-#define TOMTOM_A_INTR1_STATUS0__POR				(0x00)
-#define TOMTOM_A_INTR1_STATUS1			(0x099)
-#define TOMTOM_A_INTR1_STATUS1__POR				(0x00)
-#define TOMTOM_A_INTR1_STATUS2			(0x09A)
-#define TOMTOM_A_INTR1_STATUS2__POR				(0x00)
-#define TOMTOM_A_INTR1_STATUS3			(0x09B)
-#define TOMTOM_A_INTR1_STATUS3__POR				(0x00)
-#define TOMTOM_A_INTR1_CLEAR0			(0x09C)
-#define TOMTOM_A_INTR1_CLEAR0__POR				(0x00)
-#define TOMTOM_A_INTR1_CLEAR1			(0x09D)
-#define TOMTOM_A_INTR1_CLEAR1__POR				(0x00)
-#define TOMTOM_A_INTR1_CLEAR2			(0x09E)
-#define TOMTOM_A_INTR1_CLEAR2__POR				(0x00)
-#define TOMTOM_A_INTR1_CLEAR3			(0x09F)
-#define TOMTOM_A_INTR1_CLEAR3__POR				(0x00)
-#define TOMTOM_A_INTR1_LEVEL0			(0x0A0)
-#define TOMTOM_A_INTR1_LEVEL0__POR				(0x01)
-#define TOMTOM_A_INTR1_LEVEL1			(0x0A1)
-#define TOMTOM_A_INTR1_LEVEL1__POR				(0x00)
-#define TOMTOM_A_INTR1_LEVEL2			(0x0A2)
-#define TOMTOM_A_INTR1_LEVEL2__POR				(0x40)
-#define TOMTOM_A_INTR1_LEVEL3			(0x0A3)
-#define TOMTOM_A_INTR1_LEVEL3__POR				(0x00)
-#define TOMTOM_A_INTR1_TEST0			(0x0A4)
-#define TOMTOM_A_INTR1_TEST0__POR				(0x00)
-#define TOMTOM_A_INTR1_TEST1			(0x0A5)
-#define TOMTOM_A_INTR1_TEST1__POR				(0x00)
-#define TOMTOM_A_INTR1_TEST2			(0x0A6)
-#define TOMTOM_A_INTR1_TEST2__POR				(0x00)
-#define TOMTOM_A_INTR1_TEST3			(0x0A7)
-#define TOMTOM_A_INTR1_TEST3__POR				(0x00)
-#define TOMTOM_A_INTR1_SET0			(0x0A8)
-#define TOMTOM_A_INTR1_SET0__POR				(0x00)
-#define TOMTOM_A_INTR1_SET1			(0x0A9)
-#define TOMTOM_A_INTR1_SET1__POR				(0x00)
-#define TOMTOM_A_INTR1_SET2			(0x0AA)
-#define TOMTOM_A_INTR1_SET2__POR				(0x00)
-#define TOMTOM_A_INTR1_SET3			(0x0AB)
-#define TOMTOM_A_INTR1_SET3__POR				(0x00)
-#define TOMTOM_A_INTR2_MASK0			(0x0B0)
-#define TOMTOM_A_INTR2_MASK0__POR				(0xFF)
-#define TOMTOM_A_INTR2_STATUS0			(0x0B2)
-#define TOMTOM_A_INTR2_STATUS0__POR				(0x00)
-#define TOMTOM_A_INTR2_CLEAR0			(0x0B4)
-#define TOMTOM_A_INTR2_CLEAR0__POR				(0x00)
-#define TOMTOM_A_INTR2_LEVEL0			(0x0B6)
-#define TOMTOM_A_INTR2_LEVEL0__POR				(0x00)
-#define TOMTOM_A_INTR2_TEST0			(0x0B8)
-#define TOMTOM_A_INTR2_TEST0__POR				(0x00)
-#define TOMTOM_A_INTR2_SET0			(0x0BA)
-#define TOMTOM_A_INTR2_SET0__POR				(0x00)
-#define TOMTOM_A_CDC_TX_I2S_SCK_MODE			(0x0C0)
-#define TOMTOM_A_CDC_TX_I2S_SCK_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_TX_I2S_WS_MODE			(0x0C1)
-#define TOMTOM_A_CDC_TX_I2S_WS_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_DMIC_DATA0_MODE			(0x0C4)
-#define TOMTOM_A_CDC_DMIC_DATA0_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_DMIC_CLK0_MODE			(0x0C5)
-#define TOMTOM_A_CDC_DMIC_CLK0_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_DMIC_DATA1_MODE			(0x0C6)
-#define TOMTOM_A_CDC_DMIC_DATA1_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_DMIC_CLK1_MODE			(0x0C7)
-#define TOMTOM_A_CDC_DMIC_CLK1_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_RX_I2S_SCK_MODE			(0x0C8)
-#define TOMTOM_A_CDC_RX_I2S_SCK_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_RX_I2S_WS_MODE			(0x0C9)
-#define TOMTOM_A_CDC_RX_I2S_WS_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_DMIC_DATA2_MODE			(0x0CA)
-#define TOMTOM_A_CDC_DMIC_DATA2_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_DMIC_CLK2_MODE			(0x0CB)
-#define TOMTOM_A_CDC_DMIC_CLK2_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_INTR1_MODE			(0x0CC)
-#define TOMTOM_A_CDC_INTR1_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_SB_NRZ_SEL_MODE			(0x0CD)
-#define TOMTOM_A_CDC_SB_NRZ_SEL_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_INTR2_MODE			(0x0CE)
-#define TOMTOM_A_CDC_INTR2_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_RF_PA_ON_MODE			(0x0CF)
-#define TOMTOM_A_CDC_RF_PA_ON_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_BOOST_MODE			(0x0D0)
-#define TOMTOM_A_CDC_BOOST_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_JTCK_MODE			(0x0D1)
-#define TOMTOM_A_CDC_JTCK_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_JTDI_MODE			(0x0D2)
-#define TOMTOM_A_CDC_JTDI_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_JTMS_MODE			(0x0D3)
-#define TOMTOM_A_CDC_JTMS_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_JTDO_MODE			(0x0D4)
-#define TOMTOM_A_CDC_JTDO_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_JTRST_MODE			(0x0D5)
-#define TOMTOM_A_CDC_JTRST_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_BIST_MODE_MODE			(0x0D6)
-#define TOMTOM_A_CDC_BIST_MODE_MODE__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_MAIN_CTL_1			(0x0E0)
-#define TOMTOM_A_CDC_MAD_MAIN_CTL_1__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_MAIN_CTL_2			(0x0E1)
-#define TOMTOM_A_CDC_MAD_MAIN_CTL_2__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_1			(0x0E2)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_1__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_2			(0x0E3)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_2__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_3			(0x0E4)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_3__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_4			(0x0E5)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_4__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_5			(0x0E6)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_5__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_6			(0x0E7)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_6__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_7			(0x0E8)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_7__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_8			(0x0E9)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_8__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR			(0x0EA)
-#define TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL			(0x0EB)
-#define TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL__POR				(0x40)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_1			(0x0EC)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_1__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_2			(0x0ED)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_2__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_3			(0x0EE)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_3__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_4			(0x0EF)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_4__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_5			(0x0F0)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_5__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_6			(0x0F1)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_6__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_7			(0x0F2)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_7__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_1			(0x0F3)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_1__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_2			(0x0F4)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_2__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_3			(0x0F5)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_3__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_4			(0x0F6)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_4__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_5			(0x0F7)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_5__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_6			(0x0F8)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_6__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_7			(0x0F9)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_7__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_8			(0x0FA)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_8__POR				(0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR			(0x0FB)
-#define TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR__POR			(0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL			(0x0FC)
-#define TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL__POR			(0x00)
-#define TOMTOM_A_CDC_MAD_INP_SEL			(0x0FD)
-#define TOMTOM_A_CDC_MAD_INP_SEL__POR				(0x00)
-#define TOMTOM_A_BIAS_REF_CTL			(0x100)
-#define TOMTOM_A_BIAS_REF_CTL__POR				(0x1C)
-#define TOMTOM_A_BIAS_CENTRAL_BG_CTL			(0x101)
-#define TOMTOM_A_BIAS_CENTRAL_BG_CTL__POR				(0x50)
-#define TOMTOM_A_BIAS_PRECHRG_CTL			(0x102)
-#define TOMTOM_A_BIAS_PRECHRG_CTL__POR				(0x07)
-#define TOMTOM_A_BIAS_CURR_CTL_1			(0x103)
-#define TOMTOM_A_BIAS_CURR_CTL_1__POR				(0x52)
-#define TOMTOM_A_BIAS_CURR_CTL_2			(0x104)
-#define TOMTOM_A_BIAS_CURR_CTL_2__POR				(0x00)
-#define TOMTOM_A_BIAS_OSC_BG_CTL			(0x105)
-#define TOMTOM_A_BIAS_OSC_BG_CTL__POR				(0x36)
-#define TOMTOM_A_CLK_BUFF_EN1			(0x108)
-#define TOMTOM_A_CLK_BUFF_EN1__POR				(0x04)
-#define TOMTOM_A_CLK_BUFF_EN2			(0x109)
-#define TOMTOM_A_CLK_BUFF_EN2__POR				(0x02)
-#define TOMTOM_A_LDO_L_MODE_1			(0x10A)
-#define TOMTOM_A_LDO_L_MODE_1__POR				(0x08)
-#define TOMTOM_A_LDO_L_MODE_2			(0x10B)
-#define TOMTOM_A_LDO_L_MODE_2__POR				(0x50)
-#define TOMTOM_A_LDO_L_CTRL_1			(0x10C)
-#define TOMTOM_A_LDO_L_CTRL_1__POR				(0x70)
-#define TOMTOM_A_LDO_L_CTRL_2			(0x10D)
-#define TOMTOM_A_LDO_L_CTRL_2__POR				(0x55)
-#define TOMTOM_A_LDO_L_CTRL_3			(0x10E)
-#define TOMTOM_A_LDO_L_CTRL_3__POR				(0x56)
-#define TOMTOM_A_LDO_L_CTRL_4			(0x10F)
-#define TOMTOM_A_LDO_L_CTRL_4__POR				(0x55)
-#define TOMTOM_A_LDO_H_MODE_1			(0x110)
-#define TOMTOM_A_LDO_H_MODE_1__POR				(0x65)
-#define TOMTOM_A_LDO_H_MODE_2			(0x111)
-#define TOMTOM_A_LDO_H_MODE_2__POR				(0xA8)
-#define TOMTOM_A_LDO_H_LOOP_CTL			(0x112)
-#define TOMTOM_A_LDO_H_LOOP_CTL__POR				(0x6B)
-#define TOMTOM_A_LDO_H_COMP_1			(0x113)
-#define TOMTOM_A_LDO_H_COMP_1__POR				(0x84)
-#define TOMTOM_A_LDO_H_COMP_2			(0x114)
-#define TOMTOM_A_LDO_H_COMP_2__POR				(0xE0)
-#define TOMTOM_A_LDO_H_BIAS_1			(0x115)
-#define TOMTOM_A_LDO_H_BIAS_1__POR				(0x6D)
-#define TOMTOM_A_LDO_H_BIAS_2			(0x116)
-#define TOMTOM_A_LDO_H_BIAS_2__POR				(0xA5)
-#define TOMTOM_A_LDO_H_BIAS_3			(0x117)
-#define TOMTOM_A_LDO_H_BIAS_3__POR				(0x60)
-#define TOMTOM_A_VBAT_CLK			(0x118)
-#define TOMTOM_A_VBAT_CLK__POR				(0x03)
-#define TOMTOM_A_VBAT_LOOP			(0x119)
-#define TOMTOM_A_VBAT_LOOP__POR				(0x02)
-#define TOMTOM_A_VBAT_REF			(0x11A)
-#define TOMTOM_A_VBAT_REF__POR				(0x20)
-#define TOMTOM_A_VBAT_ADC_TEST			(0x11B)
-#define TOMTOM_A_VBAT_ADC_TEST__POR				(0x00)
-#define TOMTOM_A_VBAT_FE			(0x11C)
-#define TOMTOM_A_VBAT_FE__POR				(0x48)
-#define TOMTOM_A_VBAT_BIAS_1			(0x11D)
-#define TOMTOM_A_VBAT_BIAS_1__POR				(0x03)
-#define TOMTOM_A_VBAT_BIAS_2			(0x11E)
-#define TOMTOM_A_VBAT_BIAS_2__POR				(0x00)
-#define TOMTOM_A_VBAT_ADC_DATA_MSB			(0x11F)
-#define TOMTOM_A_VBAT_ADC_DATA_MSB__POR				(0x00)
-#define TOMTOM_A_VBAT_ADC_DATA_LSB			(0x120)
-#define TOMTOM_A_VBAT_ADC_DATA_LSB__POR				(0x00)
-#define TOMTOM_A_FLL_NREF			(0x121)
-#define TOMTOM_A_FLL_NREF__POR				(0x12)
-#define TOMTOM_A_FLL_KDCO_TUNE			(0x122)
-#define TOMTOM_A_FLL_KDCO_TUNE__POR				(0x05)
-#define TOMTOM_A_FLL_LOCK_THRESH			(0x123)
-#define TOMTOM_A_FLL_LOCK_THRESH__POR				(0xC2)
-#define TOMTOM_A_FLL_LOCK_DET_COUNT			(0x124)
-#define TOMTOM_A_FLL_LOCK_DET_COUNT__POR				(0x40)
-#define TOMTOM_A_FLL_DAC_THRESHOLD			(0x125)
-#define TOMTOM_A_FLL_DAC_THRESHOLD__POR				(0xC8)
-#define TOMTOM_A_FLL_TEST_DCO_FREERUN			(0x126)
-#define TOMTOM_A_FLL_TEST_DCO_FREERUN__POR				(0x00)
-#define TOMTOM_A_FLL_TEST_ENABLE			(0x127)
-#define TOMTOM_A_FLL_TEST_ENABLE__POR				(0x00)
-#define TOMTOM_A_MICB_CFILT_1_CTL			(0x128)
-#define TOMTOM_A_MICB_CFILT_1_CTL__POR				(0x40)
-#define TOMTOM_A_MICB_CFILT_1_VAL			(0x129)
-#define TOMTOM_A_MICB_CFILT_1_VAL__POR				(0x80)
-#define TOMTOM_A_MICB_CFILT_1_PRECHRG			(0x12A)
-#define TOMTOM_A_MICB_CFILT_1_PRECHRG__POR				(0x38)
-#define TOMTOM_A_MICB_1_CTL			(0x12B)
-#define TOMTOM_A_MICB_1_CTL__POR				(0x16)
-#define TOMTOM_A_MICB_1_INT_RBIAS			(0x12C)
-#define TOMTOM_A_MICB_1_INT_RBIAS__POR				(0x24)
-#define TOMTOM_A_MICB_1_MBHC			(0x12D)
-#define TOMTOM_A_MICB_1_MBHC__POR				(0x01)
-#define TOMTOM_A_MICB_CFILT_2_CTL			(0x12E)
-#define TOMTOM_A_MICB_CFILT_2_CTL__POR				(0x41)
-#define TOMTOM_A_MICB_CFILT_2_VAL			(0x12F)
-#define TOMTOM_A_MICB_CFILT_2_VAL__POR				(0x80)
-#define TOMTOM_A_MICB_CFILT_2_PRECHRG			(0x130)
-#define TOMTOM_A_MICB_CFILT_2_PRECHRG__POR				(0x38)
-#define TOMTOM_A_MICB_2_CTL			(0x131)
-#define TOMTOM_A_MICB_2_CTL__POR				(0x16)
-#define TOMTOM_A_MICB_2_INT_RBIAS			(0x132)
-#define TOMTOM_A_MICB_2_INT_RBIAS__POR				(0x24)
-#define TOMTOM_A_MICB_2_MBHC			(0x133)
-#define TOMTOM_A_MICB_2_MBHC__POR				(0x02)
-#define TOMTOM_A_MICB_CFILT_3_CTL			(0x134)
-#define TOMTOM_A_MICB_CFILT_3_CTL__POR				(0x40)
-#define TOMTOM_A_MICB_CFILT_3_VAL			(0x135)
-#define TOMTOM_A_MICB_CFILT_3_VAL__POR				(0x80)
-#define TOMTOM_A_MICB_CFILT_3_PRECHRG			(0x136)
-#define TOMTOM_A_MICB_CFILT_3_PRECHRG__POR				(0x38)
-#define TOMTOM_A_MICB_3_CTL			(0x137)
-#define TOMTOM_A_MICB_3_CTL__POR				(0x16)
-#define TOMTOM_A_MICB_3_INT_RBIAS			(0x138)
-#define TOMTOM_A_MICB_3_INT_RBIAS__POR				(0x24)
-#define TOMTOM_A_MICB_3_MBHC			(0x139)
-#define TOMTOM_A_MICB_3_MBHC__POR				(0x00)
-#define TOMTOM_A_MICB_4_CTL			(0x13A)
-#define TOMTOM_A_MICB_4_CTL__POR				(0x16)
-#define TOMTOM_A_MICB_4_INT_RBIAS			(0x13B)
-#define TOMTOM_A_MICB_4_INT_RBIAS__POR				(0x24)
-#define TOMTOM_A_MICB_4_MBHC			(0x13C)
-#define TOMTOM_A_MICB_4_MBHC__POR				(0x01)
-#define TOMTOM_A_SPKR_DRV2_EN			(0x13D)
-#define TOMTOM_A_SPKR_DRV2_EN__POR				(0x6F)
-#define TOMTOM_A_SPKR_DRV2_GAIN			(0x13E)
-#define TOMTOM_A_SPKR_DRV2_GAIN__POR				(0x00)
-#define TOMTOM_A_SPKR_DRV2_DAC_CTL			(0x13F)
-#define TOMTOM_A_SPKR_DRV2_DAC_CTL__POR				(0x04)
-#define TOMTOM_A_SPKR_DRV2_OCP_CTL			(0x140)
-#define TOMTOM_A_SPKR_DRV2_OCP_CTL__POR				(0x97)
-#define TOMTOM_A_SPKR_DRV2_CLIP_DET			(0x141)
-#define TOMTOM_A_SPKR_DRV2_CLIP_DET__POR				(0x01)
-#define TOMTOM_A_SPKR_DRV2_DBG_DAC			(0x142)
-#define TOMTOM_A_SPKR_DRV2_DBG_DAC__POR				(0x05)
-#define TOMTOM_A_SPKR_DRV2_DBG_PA			(0x143)
-#define TOMTOM_A_SPKR_DRV2_DBG_PA__POR				(0x18)
-#define TOMTOM_A_SPKR_DRV2_DBG_PWRSTG			(0x144)
-#define TOMTOM_A_SPKR_DRV2_DBG_PWRSTG__POR				(0x00)
-#define TOMTOM_A_SPKR_DRV2_BIAS_LDO			(0x145)
-#define TOMTOM_A_SPKR_DRV2_BIAS_LDO__POR				(0x45)
-#define TOMTOM_A_SPKR_DRV2_BIAS_INT			(0x146)
-#define TOMTOM_A_SPKR_DRV2_BIAS_INT__POR				(0xA5)
-#define TOMTOM_A_SPKR_DRV2_BIAS_PA			(0x147)
-#define TOMTOM_A_SPKR_DRV2_BIAS_PA__POR				(0x55)
-#define TOMTOM_A_SPKR_DRV2_STATUS_OCP			(0x148)
-#define TOMTOM_A_SPKR_DRV2_STATUS_OCP__POR				(0x00)
-#define TOMTOM_A_SPKR_DRV2_STATUS_PA			(0x149)
-#define TOMTOM_A_SPKR_DRV2_STATUS_PA__POR				(0x00)
-#define TOMTOM_A_MBHC_INSERT_DETECT			(0x14A)
-#define TOMTOM_A_MBHC_INSERT_DETECT__POR				(0x00)
-#define TOMTOM_A_MBHC_INSERT_DET_STATUS			(0x14B)
-#define TOMTOM_A_MBHC_INSERT_DET_STATUS__POR				(0x00)
-#define TOMTOM_A_TX_COM_BIAS			(0x14C)
-#define TOMTOM_A_TX_COM_BIAS__POR				(0xF0)
-#define TOMTOM_A_MBHC_INSERT_DETECT2			(0x14D)
-#define TOMTOM_A_MBHC_INSERT_DETECT2__POR				(0xD0)
-#define TOMTOM_A_MBHC_SCALING_MUX_1			(0x14E)
-#define TOMTOM_A_MBHC_SCALING_MUX_1__POR				(0x00)
-#define TOMTOM_A_MBHC_SCALING_MUX_2			(0x14F)
-#define TOMTOM_A_MBHC_SCALING_MUX_2__POR				(0x80)
-#define TOMTOM_A_MAD_ANA_CTRL			(0x150)
-#define TOMTOM_A_MAD_ANA_CTRL__POR				(0xF1)
-#define TOMTOM_A_TX_SUP_SWITCH_CTRL_1			(0x151)
-#define TOMTOM_A_TX_SUP_SWITCH_CTRL_1__POR				(0x00)
-#define TOMTOM_A_TX_SUP_SWITCH_CTRL_2			(0x152)
-#define TOMTOM_A_TX_SUP_SWITCH_CTRL_2__POR				(0x80)
-#define TOMTOM_A_TX_1_GAIN			(0x153)
-#define TOMTOM_A_TX_1_GAIN__POR				(0x02)
-#define TOMTOM_A_TX_1_2_TEST_EN			(0x154)
-#define TOMTOM_A_TX_1_2_TEST_EN__POR				(0xCC)
-#define TOMTOM_A_TX_2_GAIN			(0x155)
-#define TOMTOM_A_TX_2_GAIN__POR				(0x02)
-#define TOMTOM_A_TX_1_2_ADC_IB			(0x156)
-#define TOMTOM_A_TX_1_2_ADC_IB__POR				(0x44)
-#define TOMTOM_A_TX_1_2_ATEST_REFCTRL			(0x157)
-#define TOMTOM_A_TX_1_2_ATEST_REFCTRL__POR				(0x00)
-#define TOMTOM_A_TX_1_2_TEST_CTL			(0x158)
-#define TOMTOM_A_TX_1_2_TEST_CTL__POR				(0x38)
-#define TOMTOM_A_TX_1_2_TEST_BLOCK_EN			(0x159)
-#define TOMTOM_A_TX_1_2_TEST_BLOCK_EN__POR				(0xFC)
-#define TOMTOM_A_TX_1_2_TXFE_CLKDIV			(0x15A)
-#define TOMTOM_A_TX_1_2_TXFE_CLKDIV__POR				(0x55)
-#define TOMTOM_A_TX_1_2_SAR_ERR_CH1			(0x15B)
-#define TOMTOM_A_TX_1_2_SAR_ERR_CH1__POR				(0x00)
-#define TOMTOM_A_TX_1_2_SAR_ERR_CH2			(0x15C)
-#define TOMTOM_A_TX_1_2_SAR_ERR_CH2__POR				(0x00)
-#define TOMTOM_A_TX_3_GAIN			(0x15D)
-#define TOMTOM_A_TX_3_GAIN__POR				(0x02)
-#define TOMTOM_A_TX_3_4_TEST_EN			(0x15E)
-#define TOMTOM_A_TX_3_4_TEST_EN__POR				(0xCC)
-#define TOMTOM_A_TX_4_GAIN			(0x15F)
-#define TOMTOM_A_TX_4_GAIN__POR				(0x02)
-#define TOMTOM_A_TX_3_4_ADC_IB			(0x160)
-#define TOMTOM_A_TX_3_4_ADC_IB__POR				(0x44)
-#define TOMTOM_A_TX_3_4_ATEST_REFCTRL			(0x161)
-#define TOMTOM_A_TX_3_4_ATEST_REFCTRL__POR				(0x00)
-#define TOMTOM_A_TX_3_4_TEST_CTL			(0x162)
-#define TOMTOM_A_TX_3_4_TEST_CTL__POR				(0x38)
-#define TOMTOM_A_TX_3_4_TEST_BLOCK_EN			(0x163)
-#define TOMTOM_A_TX_3_4_TEST_BLOCK_EN__POR				(0xFC)
-#define TOMTOM_A_TX_3_4_TXFE_CKDIV			(0x164)
-#define TOMTOM_A_TX_3_4_TXFE_CKDIV__POR				(0x55)
-#define TOMTOM_A_TX_3_4_SAR_ERR_CH3			(0x165)
-#define TOMTOM_A_TX_3_4_SAR_ERR_CH3__POR				(0x00)
-#define TOMTOM_A_TX_3_4_SAR_ERR_CH4			(0x166)
-#define TOMTOM_A_TX_3_4_SAR_ERR_CH4__POR				(0x00)
-#define TOMTOM_A_TX_5_GAIN			(0x167)
-#define TOMTOM_A_TX_5_GAIN__POR				(0x02)
-#define TOMTOM_A_TX_5_6_TEST_EN			(0x168)
-#define TOMTOM_A_TX_5_6_TEST_EN__POR				(0xCC)
-#define TOMTOM_A_TX_6_GAIN			(0x169)
-#define TOMTOM_A_TX_6_GAIN__POR				(0x02)
-#define TOMTOM_A_TX_5_6_ADC_IB			(0x16A)
-#define TOMTOM_A_TX_5_6_ADC_IB__POR				(0x44)
-#define TOMTOM_A_TX_5_6_ATEST_REFCTRL			(0x16B)
-#define TOMTOM_A_TX_5_6_ATEST_REFCTRL__POR				(0x00)
-#define TOMTOM_A_TX_5_6_TEST_CTL			(0x16C)
-#define TOMTOM_A_TX_5_6_TEST_CTL__POR				(0x38)
-#define TOMTOM_A_TX_5_6_TEST_BLOCK_EN			(0x16D)
-#define TOMTOM_A_TX_5_6_TEST_BLOCK_EN__POR				(0xFC)
-#define TOMTOM_A_TX_5_6_TXFE_CKDIV			(0x16E)
-#define TOMTOM_A_TX_5_6_TXFE_CKDIV__POR				(0x55)
-#define TOMTOM_A_TX_5_6_SAR_ERR_CH5			(0x16F)
-#define TOMTOM_A_TX_5_6_SAR_ERR_CH5__POR				(0x00)
-#define TOMTOM_A_TX_5_6_SAR_ERR_CH6			(0x170)
-#define TOMTOM_A_TX_5_6_SAR_ERR_CH6__POR				(0x00)
-#define TOMTOM_A_TX_7_MBHC_EN			(0x171)
-#define TOMTOM_A_TX_7_MBHC_EN__POR				(0x0C)
-#define TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL			(0x172)
-#define TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL__POR				(0x00)
-#define TOMTOM_A_TX_7_MBHC_ADC			(0x173)
-#define TOMTOM_A_TX_7_MBHC_ADC__POR				(0x44)
-#define TOMTOM_A_TX_7_MBHC_TEST_CTL			(0x174)
-#define TOMTOM_A_TX_7_MBHC_TEST_CTL__POR				(0x38)
-#define TOMTOM_A_TX_7_MBHC_SAR_ERR			(0x175)
-#define TOMTOM_A_TX_7_MBHC_SAR_ERR__POR				(0x00)
-#define TOMTOM_A_TX_7_TXFE_CLKDIV			(0x176)
-#define TOMTOM_A_TX_7_TXFE_CLKDIV__POR				(0x8B)
-#define TOMTOM_A_RCO_CTRL			(0x177)
-#define TOMTOM_A_RCO_CTRL__POR				(0x00)
-#define TOMTOM_A_RCO_CALIBRATION_CTRL1			(0x178)
-#define TOMTOM_A_RCO_CALIBRATION_CTRL1__POR				(0x00)
-#define TOMTOM_A_RCO_CALIBRATION_CTRL2			(0x179)
-#define TOMTOM_A_RCO_CALIBRATION_CTRL2__POR				(0x00)
-#define TOMTOM_A_RCO_CALIBRATION_CTRL3			(0x17A)
-#define TOMTOM_A_RCO_CALIBRATION_CTRL3__POR				(0x00)
-#define TOMTOM_A_RCO_TEST_CTRL			(0x17B)
-#define TOMTOM_A_RCO_TEST_CTRL__POR				(0x00)
-#define TOMTOM_A_RCO_CALIBRATION_RESULT1			(0x17C)
-#define TOMTOM_A_RCO_CALIBRATION_RESULT1__POR				(0x00)
-#define TOMTOM_A_RCO_CALIBRATION_RESULT2			(0x17D)
-#define TOMTOM_A_RCO_CALIBRATION_RESULT2__POR				(0x00)
-#define TOMTOM_A_BUCK_MODE_1			(0x181)
-#define TOMTOM_A_BUCK_MODE_1__POR				(0x21)
-#define TOMTOM_A_BUCK_MODE_2			(0x182)
-#define TOMTOM_A_BUCK_MODE_2__POR				(0xFF)
-#define TOMTOM_A_BUCK_MODE_3			(0x183)
-#define TOMTOM_A_BUCK_MODE_3__POR				(0xCE)
-#define TOMTOM_A_BUCK_MODE_4			(0x184)
-#define TOMTOM_A_BUCK_MODE_4__POR				(0x3A)
-#define TOMTOM_A_BUCK_MODE_5			(0x185)
-#define TOMTOM_A_BUCK_MODE_5__POR				(0x00)
-#define TOMTOM_A_BUCK_CTRL_VCL_1			(0x186)
-#define TOMTOM_A_BUCK_CTRL_VCL_1__POR				(0x08)
-#define TOMTOM_A_BUCK_CTRL_VCL_2			(0x187)
-#define TOMTOM_A_BUCK_CTRL_VCL_2__POR				(0xA3)
-#define TOMTOM_A_BUCK_CTRL_VCL_3			(0x188)
-#define TOMTOM_A_BUCK_CTRL_VCL_3__POR				(0x82)
-#define TOMTOM_A_BUCK_CTRL_CCL_1			(0x189)
-#define TOMTOM_A_BUCK_CTRL_CCL_1__POR				(0x5B)
-#define TOMTOM_A_BUCK_CTRL_CCL_2			(0x18A)
-#define TOMTOM_A_BUCK_CTRL_CCL_2__POR				(0xDC)
-#define TOMTOM_A_BUCK_CTRL_CCL_3			(0x18B)
-#define TOMTOM_A_BUCK_CTRL_CCL_3__POR				(0x6A)
-#define TOMTOM_A_BUCK_CTRL_CCL_4			(0x18C)
-#define TOMTOM_A_BUCK_CTRL_CCL_4__POR				(0x51)
-#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_1			(0x18D)
-#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_1__POR				(0x50)
-#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_2			(0x18E)
-#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_2__POR				(0x64)
-#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_3			(0x18F)
-#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_3__POR				(0x77)
-#define TOMTOM_A_BUCK_TMUX_A_D			(0x190)
-#define TOMTOM_A_BUCK_TMUX_A_D__POR				(0x00)
-#define TOMTOM_A_NCP_BUCKREF			(0x191)
-#define TOMTOM_A_NCP_BUCKREF__POR				(0x00)
-#define TOMTOM_A_NCP_EN			(0x192)
-#define TOMTOM_A_NCP_EN__POR				(0xFE)
-#define TOMTOM_A_NCP_CLK			(0x193)
-#define TOMTOM_A_NCP_CLK__POR				(0x94)
-#define TOMTOM_A_NCP_STATIC			(0x194)
-#define TOMTOM_A_NCP_STATIC__POR				(0x28)
-#define TOMTOM_A_NCP_VTH_LOW			(0x195)
-#define TOMTOM_A_NCP_VTH_LOW__POR				(0x88)
-#define TOMTOM_A_NCP_VTH_HIGH			(0x196)
-#define TOMTOM_A_NCP_VTH_HIGH__POR				(0xA0)
-#define TOMTOM_A_NCP_ATEST			(0x197)
-#define TOMTOM_A_NCP_ATEST__POR				(0x00)
-#define TOMTOM_A_NCP_DTEST			(0x198)
-#define TOMTOM_A_NCP_DTEST__POR				(0x10)
-#define TOMTOM_A_NCP_DLY1			(0x199)
-#define TOMTOM_A_NCP_DLY1__POR				(0x06)
-#define TOMTOM_A_NCP_DLY2			(0x19A)
-#define TOMTOM_A_NCP_DLY2__POR				(0x06)
-#define TOMTOM_A_RX_AUX_SW_CTL			(0x19B)
-#define TOMTOM_A_RX_AUX_SW_CTL__POR				(0x00)
-#define TOMTOM_A_RX_PA_AUX_IN_CONN			(0x19C)
-#define TOMTOM_A_RX_PA_AUX_IN_CONN__POR				(0x00)
-#define TOMTOM_A_RX_COM_TIMER_DIV			(0x19E)
-#define TOMTOM_A_RX_COM_TIMER_DIV__POR				(0xE8)
-#define TOMTOM_A_RX_COM_OCP_CTL			(0x19F)
-#define TOMTOM_A_RX_COM_OCP_CTL__POR				(0x1F)
-#define TOMTOM_A_RX_COM_OCP_COUNT			(0x1A0)
-#define TOMTOM_A_RX_COM_OCP_COUNT__POR				(0x77)
-#define TOMTOM_A_RX_COM_DAC_CTL			(0x1A1)
-#define TOMTOM_A_RX_COM_DAC_CTL__POR				(0x00)
-#define TOMTOM_A_RX_COM_BIAS			(0x1A2)
-#define TOMTOM_A_RX_COM_BIAS__POR				(0x20)
-#define TOMTOM_A_RX_HPH_AUTO_CHOP			(0x1A4)
-#define TOMTOM_A_RX_HPH_AUTO_CHOP__POR				(0x38)
-#define TOMTOM_A_RX_HPH_CHOP_CTL			(0x1A5)
-#define TOMTOM_A_RX_HPH_CHOP_CTL__POR				(0xA4)
-#define TOMTOM_A_RX_HPH_BIAS_PA			(0x1A6)
-#define TOMTOM_A_RX_HPH_BIAS_PA__POR				(0x7A)
-#define TOMTOM_A_RX_HPH_BIAS_LDO			(0x1A7)
-#define TOMTOM_A_RX_HPH_BIAS_LDO__POR				(0x87)
-#define TOMTOM_A_RX_HPH_BIAS_CNP			(0x1A8)
-#define TOMTOM_A_RX_HPH_BIAS_CNP__POR				(0x8A)
-#define TOMTOM_A_RX_HPH_BIAS_WG_OCP			(0x1A9)
-#define TOMTOM_A_RX_HPH_BIAS_WG_OCP__POR				(0x2A)
-#define TOMTOM_A_RX_HPH_OCP_CTL			(0x1AA)
-#define TOMTOM_A_RX_HPH_OCP_CTL__POR				(0x69)
-#define TOMTOM_A_RX_HPH_CNP_EN			(0x1AB)
-#define TOMTOM_A_RX_HPH_CNP_EN__POR				(0x80)
-#define TOMTOM_A_RX_HPH_CNP_WG_CTL			(0x1AC)
-#define TOMTOM_A_RX_HPH_CNP_WG_CTL__POR				(0xDA)
-#define TOMTOM_A_RX_HPH_CNP_WG_TIME			(0x1AD)
-#define TOMTOM_A_RX_HPH_CNP_WG_TIME__POR				(0x15)
-#define TOMTOM_A_RX_HPH_L_GAIN			(0x1AE)
-#define TOMTOM_A_RX_HPH_L_GAIN__POR				(0xC0)
-#define TOMTOM_A_RX_HPH_L_TEST			(0x1AF)
-#define TOMTOM_A_RX_HPH_L_TEST__POR				(0x02)
-#define TOMTOM_A_RX_HPH_L_PA_CTL			(0x1B0)
-#define TOMTOM_A_RX_HPH_L_PA_CTL__POR				(0x42)
-#define TOMTOM_A_RX_HPH_L_DAC_CTL			(0x1B1)
-#define TOMTOM_A_RX_HPH_L_DAC_CTL__POR				(0x00)
-#define TOMTOM_A_RX_HPH_L_ATEST			(0x1B2)
-#define TOMTOM_A_RX_HPH_L_ATEST__POR				(0x00)
-#define TOMTOM_A_RX_HPH_L_STATUS			(0x1B3)
-#define TOMTOM_A_RX_HPH_L_STATUS__POR				(0x00)
-#define TOMTOM_A_RX_HPH_R_GAIN			(0x1B4)
-#define TOMTOM_A_RX_HPH_R_GAIN__POR				(0x00)
-#define TOMTOM_A_RX_HPH_R_TEST			(0x1B5)
-#define TOMTOM_A_RX_HPH_R_TEST__POR				(0x02)
-#define TOMTOM_A_RX_HPH_R_PA_CTL			(0x1B6)
-#define TOMTOM_A_RX_HPH_R_PA_CTL__POR				(0x42)
-#define TOMTOM_A_RX_HPH_R_DAC_CTL			(0x1B7)
-#define TOMTOM_A_RX_HPH_R_DAC_CTL__POR				(0x00)
-#define TOMTOM_A_RX_HPH_R_ATEST			(0x1B8)
-#define TOMTOM_A_RX_HPH_R_ATEST__POR				(0x00)
-#define TOMTOM_A_RX_HPH_R_STATUS			(0x1B9)
-#define TOMTOM_A_RX_HPH_R_STATUS__POR				(0x00)
-#define TOMTOM_A_RX_EAR_BIAS_PA			(0x1BA)
-#define TOMTOM_A_RX_EAR_BIAS_PA__POR				(0x76)
-#define TOMTOM_A_RX_EAR_BIAS_CMBUFF			(0x1BB)
-#define TOMTOM_A_RX_EAR_BIAS_CMBUFF__POR				(0xA0)
-#define TOMTOM_A_RX_EAR_EN			(0x1BC)
-#define TOMTOM_A_RX_EAR_EN__POR				(0x00)
-#define TOMTOM_A_RX_EAR_GAIN			(0x1BD)
-#define TOMTOM_A_RX_EAR_GAIN__POR				(0x02)
-#define TOMTOM_A_RX_EAR_CMBUFF			(0x1BE)
-#define TOMTOM_A_RX_EAR_CMBUFF__POR				(0x05)
-#define TOMTOM_A_RX_EAR_ICTL			(0x1BF)
-#define TOMTOM_A_RX_EAR_ICTL__POR				(0x40)
-#define TOMTOM_A_RX_EAR_CCOMP			(0x1C0)
-#define TOMTOM_A_RX_EAR_CCOMP__POR				(0x08)
-#define TOMTOM_A_RX_EAR_VCM			(0x1C1)
-#define TOMTOM_A_RX_EAR_VCM__POR				(0x03)
-#define TOMTOM_A_RX_EAR_CNP			(0x1C2)
-#define TOMTOM_A_RX_EAR_CNP__POR				(0xC0)
-#define TOMTOM_A_RX_EAR_DAC_CTL_ATEST			(0x1C3)
-#define TOMTOM_A_RX_EAR_DAC_CTL_ATEST__POR				(0x00)
-#define TOMTOM_A_RX_EAR_STATUS			(0x1C5)
-#define TOMTOM_A_RX_EAR_STATUS__POR				(0x04)
-#define TOMTOM_A_RX_LINE_BIAS_PA			(0x1C6)
-#define TOMTOM_A_RX_LINE_BIAS_PA__POR				(0x78)
-#define TOMTOM_A_RX_BUCK_BIAS1			(0x1C7)
-#define TOMTOM_A_RX_BUCK_BIAS1__POR				(0x42)
-#define TOMTOM_A_RX_BUCK_BIAS2			(0x1C8)
-#define TOMTOM_A_RX_BUCK_BIAS2__POR				(0x84)
-#define TOMTOM_A_RX_LINE_COM			(0x1C9)
-#define TOMTOM_A_RX_LINE_COM__POR				(0x80)
-#define TOMTOM_A_RX_LINE_CNP_EN			(0x1CA)
-#define TOMTOM_A_RX_LINE_CNP_EN__POR				(0x00)
-#define TOMTOM_A_RX_LINE_CNP_WG_CTL			(0x1CB)
-#define TOMTOM_A_RX_LINE_CNP_WG_CTL__POR				(0x00)
-#define TOMTOM_A_RX_LINE_CNP_WG_TIME			(0x1CC)
-#define TOMTOM_A_RX_LINE_CNP_WG_TIME__POR				(0x04)
-#define TOMTOM_A_RX_LINE_1_GAIN			(0x1CD)
-#define TOMTOM_A_RX_LINE_1_GAIN__POR				(0x00)
-#define TOMTOM_A_RX_LINE_1_TEST			(0x1CE)
-#define TOMTOM_A_RX_LINE_1_TEST__POR				(0x02)
-#define TOMTOM_A_RX_LINE_1_DAC_CTL			(0x1CF)
-#define TOMTOM_A_RX_LINE_1_DAC_CTL__POR				(0x00)
-#define TOMTOM_A_RX_LINE_1_STATUS			(0x1D0)
-#define TOMTOM_A_RX_LINE_1_STATUS__POR				(0x00)
-#define TOMTOM_A_RX_LINE_2_GAIN			(0x1D1)
-#define TOMTOM_A_RX_LINE_2_GAIN__POR				(0x00)
-#define TOMTOM_A_RX_LINE_2_TEST			(0x1D2)
-#define TOMTOM_A_RX_LINE_2_TEST__POR				(0x02)
-#define TOMTOM_A_RX_LINE_2_DAC_CTL			(0x1D3)
-#define TOMTOM_A_RX_LINE_2_DAC_CTL__POR				(0x00)
-#define TOMTOM_A_RX_LINE_2_STATUS			(0x1D4)
-#define TOMTOM_A_RX_LINE_2_STATUS__POR				(0x00)
-#define TOMTOM_A_RX_LINE_3_GAIN			(0x1D5)
-#define TOMTOM_A_RX_LINE_3_GAIN__POR				(0x00)
-#define TOMTOM_A_RX_LINE_3_TEST			(0x1D6)
-#define TOMTOM_A_RX_LINE_3_TEST__POR				(0x02)
-#define TOMTOM_A_RX_LINE_3_DAC_CTL			(0x1D7)
-#define TOMTOM_A_RX_LINE_3_DAC_CTL__POR				(0x00)
-#define TOMTOM_A_RX_LINE_3_STATUS			(0x1D8)
-#define TOMTOM_A_RX_LINE_3_STATUS__POR				(0x00)
-#define TOMTOM_A_RX_LINE_4_GAIN			(0x1D9)
-#define TOMTOM_A_RX_LINE_4_GAIN__POR				(0x00)
-#define TOMTOM_A_RX_LINE_4_TEST			(0x1DA)
-#define TOMTOM_A_RX_LINE_4_TEST__POR				(0x02)
-#define TOMTOM_A_RX_LINE_4_DAC_CTL			(0x1DB)
-#define TOMTOM_A_RX_LINE_4_DAC_CTL__POR				(0x00)
-#define TOMTOM_A_RX_LINE_4_STATUS			(0x1DC)
-#define TOMTOM_A_RX_LINE_4_STATUS__POR				(0x00)
-#define TOMTOM_A_RX_LINE_CNP_DBG			(0x1DD)
-#define TOMTOM_A_RX_LINE_CNP_DBG__POR				(0x00)
-#define TOMTOM_A_SPKR_DRV1_EN			(0x1DF)
-#define TOMTOM_A_SPKR_DRV1_EN__POR				(0x6F)
-#define TOMTOM_A_SPKR_DRV1_GAIN			(0x1E0)
-#define TOMTOM_A_SPKR_DRV1_GAIN__POR				(0x00)
-#define TOMTOM_A_SPKR_DRV1_DAC_CTL			(0x1E1)
-#define TOMTOM_A_SPKR_DRV1_DAC_CTL__POR				(0x04)
-#define TOMTOM_A_SPKR_DRV1_OCP_CTL			(0x1E2)
-#define TOMTOM_A_SPKR_DRV1_OCP_CTL__POR				(0x97)
-#define TOMTOM_A_SPKR_DRV1_CLIP_DET			(0x1E3)
-#define TOMTOM_A_SPKR_DRV1_CLIP_DET__POR				(0x01)
-#define TOMTOM_A_SPKR_DRV1_IEC			(0x1E4)
-#define TOMTOM_A_SPKR_DRV1_IEC__POR				(0x00)
-#define TOMTOM_A_SPKR_DRV1_DBG_DAC			(0x1E5)
-#define TOMTOM_A_SPKR_DRV1_DBG_DAC__POR				(0x05)
-#define TOMTOM_A_SPKR_DRV1_DBG_PA			(0x1E6)
-#define TOMTOM_A_SPKR_DRV1_DBG_PA__POR				(0x18)
-#define TOMTOM_A_SPKR_DRV1_DBG_PWRSTG			(0x1E7)
-#define TOMTOM_A_SPKR_DRV1_DBG_PWRSTG__POR				(0x00)
-#define TOMTOM_A_SPKR_DRV1_BIAS_LDO			(0x1E8)
-#define TOMTOM_A_SPKR_DRV1_BIAS_LDO__POR				(0x45)
-#define TOMTOM_A_SPKR_DRV1_BIAS_INT			(0x1E9)
-#define TOMTOM_A_SPKR_DRV1_BIAS_INT__POR				(0xA5)
-#define TOMTOM_A_SPKR_DRV1_BIAS_PA			(0x1EA)
-#define TOMTOM_A_SPKR_DRV1_BIAS_PA__POR				(0x55)
-#define TOMTOM_A_SPKR_DRV1_STATUS_OCP			(0x1EB)
-#define TOMTOM_A_SPKR_DRV1_STATUS_OCP__POR				(0x00)
-#define TOMTOM_A_SPKR_DRV1_STATUS_PA			(0x1EC)
-#define TOMTOM_A_SPKR_DRV1_STATUS_PA__POR				(0x00)
-#define TOMTOM_A_SPKR1_PROT_EN			(0x1ED)
-#define TOMTOM_A_SPKR1_PROT_EN__POR				(0x00)
-#define TOMTOM_A_SPKR1_PROT_ADC_TEST_EN			(0x1EE)
-#define TOMTOM_A_SPKR1_PROT_ADC_TEST_EN__POR				(0x44)
-#define TOMTOM_A_SPKR1_PROT_ATEST			(0x1EF)
-#define TOMTOM_A_SPKR1_PROT_ATEST__POR				(0x00)
-#define TOMTOM_A_SPKR1_PROT_LDO_CTRL			(0x1F0)
-#define TOMTOM_A_SPKR1_PROT_LDO_CTRL__POR				(0x00)
-#define TOMTOM_A_SPKR1_PROT_ISENSE_CTRL			(0x1F1)
-#define TOMTOM_A_SPKR1_PROT_ISENSE_CTRL__POR				(0x00)
-#define TOMTOM_A_SPKR1_PROT_VSENSE_CTRL			(0x1F2)
-#define TOMTOM_A_SPKR1_PROT_VSENSE_CTRL__POR				(0x00)
-#define TOMTOM_A_SPKR2_PROT_EN			(0x1F3)
-#define TOMTOM_A_SPKR2_PROT_EN__POR				(0x00)
-#define TOMTOM_A_SPKR2_PROT_ADC_TEST_EN			(0x1F4)
-#define TOMTOM_A_SPKR2_PROT_ADC_TEST_EN__POR				(0x44)
-#define TOMTOM_A_SPKR2_PROT_ATEST			(0x1F5)
-#define TOMTOM_A_SPKR2_PROT_ATEST__POR				(0x00)
-#define TOMTOM_A_SPKR2_PROT_LDO_CTRL			(0x1F6)
-#define TOMTOM_A_SPKR2_PROT_LDO_CTRL__POR				(0x00)
-#define TOMTOM_A_SPKR2_PROT_ISENSE_CTRL			(0x1F7)
-#define TOMTOM_A_SPKR2_PROT_ISENSE_CTRL__POR				(0x00)
-#define TOMTOM_A_SPKR2_PROT_VSENSE_CTRL			(0x1F8)
-#define TOMTOM_A_SPKR2_PROT_VSENSE_CTRL__POR				(0x00)
-#define TOMTOM_A_MBHC_HPH			(0x1FE)
-#define TOMTOM_A_MBHC_HPH__POR				(0x44)
-#define TOMTOM_A_CDC_ANC1_B1_CTL			(0x200)
-#define TOMTOM_A_CDC_ANC1_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC2_B1_CTL			(0x280)
-#define TOMTOM_A_CDC_ANC2_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC1_SHIFT			(0x201)
-#define TOMTOM_A_CDC_ANC1_SHIFT__POR				(0x00)
-#define TOMTOM_A_CDC_ANC2_SHIFT			(0x281)
-#define TOMTOM_A_CDC_ANC2_SHIFT__POR				(0x00)
-#define TOMTOM_A_CDC_ANC1_IIR_B1_CTL			(0x202)
-#define TOMTOM_A_CDC_ANC1_IIR_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC2_IIR_B1_CTL			(0x282)
-#define TOMTOM_A_CDC_ANC2_IIR_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC1_IIR_B2_CTL			(0x203)
-#define TOMTOM_A_CDC_ANC1_IIR_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC2_IIR_B2_CTL			(0x283)
-#define TOMTOM_A_CDC_ANC2_IIR_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC1_IIR_B3_CTL			(0x204)
-#define TOMTOM_A_CDC_ANC1_IIR_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC2_IIR_B3_CTL			(0x284)
-#define TOMTOM_A_CDC_ANC2_IIR_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC1_LPF_B1_CTL			(0x206)
-#define TOMTOM_A_CDC_ANC1_LPF_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC2_LPF_B1_CTL			(0x286)
-#define TOMTOM_A_CDC_ANC2_LPF_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC1_LPF_B2_CTL			(0x207)
-#define TOMTOM_A_CDC_ANC1_LPF_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC2_LPF_B2_CTL			(0x287)
-#define TOMTOM_A_CDC_ANC2_LPF_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC1_SPARE			(0x209)
-#define TOMTOM_A_CDC_ANC1_SPARE__POR				(0x00)
-#define TOMTOM_A_CDC_ANC2_SPARE			(0x289)
-#define TOMTOM_A_CDC_ANC2_SPARE__POR				(0x00)
-#define TOMTOM_A_CDC_ANC1_SMLPF_CTL			(0x20A)
-#define TOMTOM_A_CDC_ANC1_SMLPF_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC2_SMLPF_CTL			(0x28A)
-#define TOMTOM_A_CDC_ANC2_SMLPF_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC1_DCFLT_CTL			(0x20B)
-#define TOMTOM_A_CDC_ANC1_DCFLT_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC2_DCFLT_CTL			(0x28B)
-#define TOMTOM_A_CDC_ANC2_DCFLT_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC1_GAIN_CTL			(0x20C)
-#define TOMTOM_A_CDC_ANC1_GAIN_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC2_GAIN_CTL			(0x28C)
-#define TOMTOM_A_CDC_ANC2_GAIN_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC1_B2_CTL			(0x20D)
-#define TOMTOM_A_CDC_ANC1_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_ANC2_B2_CTL			(0x28D)
-#define TOMTOM_A_CDC_ANC2_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_TX1_VOL_CTL_TIMER			(0x220)
-#define TOMTOM_A_CDC_TX1_VOL_CTL_TIMER__POR				(0x00)
-#define TOMTOM_A_CDC_TX2_VOL_CTL_TIMER			(0x228)
-#define TOMTOM_A_CDC_TX2_VOL_CTL_TIMER__POR				(0x00)
-#define TOMTOM_A_CDC_TX3_VOL_CTL_TIMER			(0x230)
-#define TOMTOM_A_CDC_TX3_VOL_CTL_TIMER__POR				(0x00)
-#define TOMTOM_A_CDC_TX4_VOL_CTL_TIMER			(0x238)
-#define TOMTOM_A_CDC_TX4_VOL_CTL_TIMER__POR				(0x00)
-#define TOMTOM_A_CDC_TX5_VOL_CTL_TIMER			(0x240)
-#define TOMTOM_A_CDC_TX5_VOL_CTL_TIMER__POR				(0x00)
-#define TOMTOM_A_CDC_TX6_VOL_CTL_TIMER			(0x248)
-#define TOMTOM_A_CDC_TX6_VOL_CTL_TIMER__POR				(0x00)
-#define TOMTOM_A_CDC_TX7_VOL_CTL_TIMER			(0x250)
-#define TOMTOM_A_CDC_TX7_VOL_CTL_TIMER__POR				(0x00)
-#define TOMTOM_A_CDC_TX8_VOL_CTL_TIMER			(0x258)
-#define TOMTOM_A_CDC_TX8_VOL_CTL_TIMER__POR				(0x00)
-#define TOMTOM_A_CDC_TX9_VOL_CTL_TIMER			(0x260)
-#define TOMTOM_A_CDC_TX9_VOL_CTL_TIMER__POR				(0x00)
-#define TOMTOM_A_CDC_TX10_VOL_CTL_TIMER			(0x268)
-#define TOMTOM_A_CDC_TX10_VOL_CTL_TIMER__POR				(0x00)
-#define TOMTOM_A_CDC_TX1_VOL_CTL_GAIN			(0x221)
-#define TOMTOM_A_CDC_TX1_VOL_CTL_GAIN__POR				(0x00)
-#define TOMTOM_A_CDC_TX2_VOL_CTL_GAIN			(0x229)
-#define TOMTOM_A_CDC_TX2_VOL_CTL_GAIN__POR				(0x00)
-#define TOMTOM_A_CDC_TX3_VOL_CTL_GAIN			(0x231)
-#define TOMTOM_A_CDC_TX3_VOL_CTL_GAIN__POR				(0x00)
-#define TOMTOM_A_CDC_TX4_VOL_CTL_GAIN			(0x239)
-#define TOMTOM_A_CDC_TX4_VOL_CTL_GAIN__POR				(0x00)
-#define TOMTOM_A_CDC_TX5_VOL_CTL_GAIN			(0x241)
-#define TOMTOM_A_CDC_TX5_VOL_CTL_GAIN__POR				(0x00)
-#define TOMTOM_A_CDC_TX6_VOL_CTL_GAIN			(0x249)
-#define TOMTOM_A_CDC_TX6_VOL_CTL_GAIN__POR				(0x00)
-#define TOMTOM_A_CDC_TX7_VOL_CTL_GAIN			(0x251)
-#define TOMTOM_A_CDC_TX7_VOL_CTL_GAIN__POR				(0x00)
-#define TOMTOM_A_CDC_TX8_VOL_CTL_GAIN			(0x259)
-#define TOMTOM_A_CDC_TX8_VOL_CTL_GAIN__POR				(0x00)
-#define TOMTOM_A_CDC_TX9_VOL_CTL_GAIN			(0x261)
-#define TOMTOM_A_CDC_TX9_VOL_CTL_GAIN__POR				(0x00)
-#define TOMTOM_A_CDC_TX10_VOL_CTL_GAIN			(0x269)
-#define TOMTOM_A_CDC_TX10_VOL_CTL_GAIN__POR				(0x00)
-#define TOMTOM_A_CDC_TX1_VOL_CTL_CFG			(0x222)
-#define TOMTOM_A_CDC_TX1_VOL_CTL_CFG__POR				(0x00)
-#define TOMTOM_A_CDC_TX2_VOL_CTL_CFG			(0x22A)
-#define TOMTOM_A_CDC_TX2_VOL_CTL_CFG__POR				(0x00)
-#define TOMTOM_A_CDC_TX3_VOL_CTL_CFG			(0x232)
-#define TOMTOM_A_CDC_TX3_VOL_CTL_CFG__POR				(0x00)
-#define TOMTOM_A_CDC_TX4_VOL_CTL_CFG			(0x23A)
-#define TOMTOM_A_CDC_TX4_VOL_CTL_CFG__POR				(0x00)
-#define TOMTOM_A_CDC_TX5_VOL_CTL_CFG			(0x242)
-#define TOMTOM_A_CDC_TX5_VOL_CTL_CFG__POR				(0x00)
-#define TOMTOM_A_CDC_TX6_VOL_CTL_CFG			(0x24A)
-#define TOMTOM_A_CDC_TX6_VOL_CTL_CFG__POR				(0x00)
-#define TOMTOM_A_CDC_TX7_VOL_CTL_CFG			(0x252)
-#define TOMTOM_A_CDC_TX7_VOL_CTL_CFG__POR				(0x00)
-#define TOMTOM_A_CDC_TX8_VOL_CTL_CFG			(0x25A)
-#define TOMTOM_A_CDC_TX8_VOL_CTL_CFG__POR				(0x00)
-#define TOMTOM_A_CDC_TX9_VOL_CTL_CFG			(0x262)
-#define TOMTOM_A_CDC_TX9_VOL_CTL_CFG__POR				(0x00)
-#define TOMTOM_A_CDC_TX10_VOL_CTL_CFG			(0x26A)
-#define TOMTOM_A_CDC_TX10_VOL_CTL_CFG__POR				(0x00)
-#define TOMTOM_A_CDC_TX1_MUX_CTL			(0x223)
-#define TOMTOM_A_CDC_TX1_MUX_CTL__POR				(0x48)
-#define TOMTOM_A_CDC_TX2_MUX_CTL			(0x22B)
-#define TOMTOM_A_CDC_TX2_MUX_CTL__POR				(0x48)
-#define TOMTOM_A_CDC_TX3_MUX_CTL			(0x233)
-#define TOMTOM_A_CDC_TX3_MUX_CTL__POR				(0x48)
-#define TOMTOM_A_CDC_TX4_MUX_CTL			(0x23B)
-#define TOMTOM_A_CDC_TX4_MUX_CTL__POR				(0x48)
-#define TOMTOM_A_CDC_TX5_MUX_CTL			(0x243)
-#define TOMTOM_A_CDC_TX5_MUX_CTL__POR				(0x48)
-#define TOMTOM_A_CDC_TX6_MUX_CTL			(0x24B)
-#define TOMTOM_A_CDC_TX6_MUX_CTL__POR				(0x48)
-#define TOMTOM_A_CDC_TX7_MUX_CTL			(0x253)
-#define TOMTOM_A_CDC_TX7_MUX_CTL__POR				(0x48)
-#define TOMTOM_A_CDC_TX8_MUX_CTL			(0x25B)
-#define TOMTOM_A_CDC_TX8_MUX_CTL__POR				(0x48)
-#define TOMTOM_A_CDC_TX9_MUX_CTL			(0x263)
-#define TOMTOM_A_CDC_TX9_MUX_CTL__POR				(0x48)
-#define TOMTOM_A_CDC_TX10_MUX_CTL			(0x26B)
-#define TOMTOM_A_CDC_TX10_MUX_CTL__POR				(0x48)
-#define TOMTOM_A_CDC_TX1_CLK_FS_CTL			(0x224)
-#define TOMTOM_A_CDC_TX1_CLK_FS_CTL__POR				(0x03)
-#define TOMTOM_A_CDC_TX2_CLK_FS_CTL			(0x22C)
-#define TOMTOM_A_CDC_TX2_CLK_FS_CTL__POR				(0x03)
-#define TOMTOM_A_CDC_TX3_CLK_FS_CTL			(0x234)
-#define TOMTOM_A_CDC_TX3_CLK_FS_CTL__POR				(0x03)
-#define TOMTOM_A_CDC_TX4_CLK_FS_CTL			(0x23C)
-#define TOMTOM_A_CDC_TX4_CLK_FS_CTL__POR				(0x03)
-#define TOMTOM_A_CDC_TX5_CLK_FS_CTL			(0x244)
-#define TOMTOM_A_CDC_TX5_CLK_FS_CTL__POR				(0x03)
-#define TOMTOM_A_CDC_TX6_CLK_FS_CTL			(0x24C)
-#define TOMTOM_A_CDC_TX6_CLK_FS_CTL__POR				(0x03)
-#define TOMTOM_A_CDC_TX7_CLK_FS_CTL			(0x254)
-#define TOMTOM_A_CDC_TX7_CLK_FS_CTL__POR				(0x03)
-#define TOMTOM_A_CDC_TX8_CLK_FS_CTL			(0x25C)
-#define TOMTOM_A_CDC_TX8_CLK_FS_CTL__POR				(0x03)
-#define TOMTOM_A_CDC_TX9_CLK_FS_CTL			(0x264)
-#define TOMTOM_A_CDC_TX9_CLK_FS_CTL__POR				(0x03)
-#define TOMTOM_A_CDC_TX10_CLK_FS_CTL			(0x26C)
-#define TOMTOM_A_CDC_TX10_CLK_FS_CTL__POR				(0x03)
-#define TOMTOM_A_CDC_TX1_DMIC_CTL			(0x225)
-#define TOMTOM_A_CDC_TX1_DMIC_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_TX2_DMIC_CTL			(0x22D)
-#define TOMTOM_A_CDC_TX2_DMIC_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_TX3_DMIC_CTL			(0x235)
-#define TOMTOM_A_CDC_TX3_DMIC_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_TX4_DMIC_CTL			(0x23D)
-#define TOMTOM_A_CDC_TX4_DMIC_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_TX5_DMIC_CTL			(0x245)
-#define TOMTOM_A_CDC_TX5_DMIC_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_TX6_DMIC_CTL			(0x24D)
-#define TOMTOM_A_CDC_TX6_DMIC_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_TX7_DMIC_CTL			(0x255)
-#define TOMTOM_A_CDC_TX7_DMIC_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_TX8_DMIC_CTL			(0x25D)
-#define TOMTOM_A_CDC_TX8_DMIC_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_TX9_DMIC_CTL			(0x265)
-#define TOMTOM_A_CDC_TX9_DMIC_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_TX10_DMIC_CTL			(0x26D)
-#define TOMTOM_A_CDC_TX10_DMIC_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL0			(0x270)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL0__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL1			(0x271)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL1__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL2			(0x272)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL2__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL3			(0x273)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL3__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL4			(0x274)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL4__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL5			(0x275)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL5__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL6			(0x276)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL6__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL7			(0x277)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL7__POR				(0x00)
-#define TOMTOM_A_CDC_DEBUG_B1_CTL			(0x278)
-#define TOMTOM_A_CDC_DEBUG_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_DEBUG_B2_CTL			(0x279)
-#define TOMTOM_A_CDC_DEBUG_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_DEBUG_B3_CTL			(0x27A)
-#define TOMTOM_A_CDC_DEBUG_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_DEBUG_B4_CTL			(0x27B)
-#define TOMTOM_A_CDC_DEBUG_B4_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_DEBUG_B5_CTL			(0x27C)
-#define TOMTOM_A_CDC_DEBUG_B5_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_DEBUG_B6_CTL			(0x27D)
-#define TOMTOM_A_CDC_DEBUG_B6_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_DEBUG_B7_CTL			(0x27E)
-#define TOMTOM_A_CDC_DEBUG_B7_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_SRC1_PDA_CFG			(0x2A0)
-#define TOMTOM_A_CDC_SRC1_PDA_CFG__POR				(0x00)
-#define TOMTOM_A_CDC_SRC2_PDA_CFG			(0x2A8)
-#define TOMTOM_A_CDC_SRC2_PDA_CFG__POR				(0x00)
-#define TOMTOM_A_CDC_SRC1_FS_CTL			(0x2A1)
-#define TOMTOM_A_CDC_SRC1_FS_CTL__POR				(0x1B)
-#define TOMTOM_A_CDC_SRC2_FS_CTL			(0x2A9)
-#define TOMTOM_A_CDC_SRC2_FS_CTL__POR				(0x1B)
-#define TOMTOM_A_CDC_RX1_B1_CTL			(0x2B0)
-#define TOMTOM_A_CDC_RX1_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX2_B1_CTL			(0x2B8)
-#define TOMTOM_A_CDC_RX2_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX3_B1_CTL			(0x2C0)
-#define TOMTOM_A_CDC_RX3_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX4_B1_CTL			(0x2C8)
-#define TOMTOM_A_CDC_RX4_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX5_B1_CTL			(0x2D0)
-#define TOMTOM_A_CDC_RX5_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX6_B1_CTL			(0x2D8)
-#define TOMTOM_A_CDC_RX6_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX7_B1_CTL			(0x2E0)
-#define TOMTOM_A_CDC_RX7_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX1_B2_CTL			(0x2B1)
-#define TOMTOM_A_CDC_RX1_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX2_B2_CTL			(0x2B9)
-#define TOMTOM_A_CDC_RX2_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX3_B2_CTL			(0x2C1)
-#define TOMTOM_A_CDC_RX3_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX4_B2_CTL			(0x2C9)
-#define TOMTOM_A_CDC_RX4_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX5_B2_CTL			(0x2D1)
-#define TOMTOM_A_CDC_RX5_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX6_B2_CTL			(0x2D9)
-#define TOMTOM_A_CDC_RX6_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX7_B2_CTL			(0x2E1)
-#define TOMTOM_A_CDC_RX7_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX1_B3_CTL			(0x2B2)
-#define TOMTOM_A_CDC_RX1_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX2_B3_CTL			(0x2BA)
-#define TOMTOM_A_CDC_RX2_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX3_B3_CTL			(0x2C2)
-#define TOMTOM_A_CDC_RX3_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX4_B3_CTL			(0x2CA)
-#define TOMTOM_A_CDC_RX4_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX5_B3_CTL			(0x2D2)
-#define TOMTOM_A_CDC_RX5_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX6_B3_CTL			(0x2DA)
-#define TOMTOM_A_CDC_RX6_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX7_B3_CTL			(0x2E2)
-#define TOMTOM_A_CDC_RX7_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX1_B4_CTL			(0x2B3)
-#define TOMTOM_A_CDC_RX1_B4_CTL__POR				(0x0B)
-#define TOMTOM_A_CDC_RX2_B4_CTL			(0x2BB)
-#define TOMTOM_A_CDC_RX2_B4_CTL__POR				(0x0B)
-#define TOMTOM_A_CDC_RX3_B4_CTL			(0x2C3)
-#define TOMTOM_A_CDC_RX3_B4_CTL__POR				(0x0B)
-#define TOMTOM_A_CDC_RX4_B4_CTL			(0x2CB)
-#define TOMTOM_A_CDC_RX4_B4_CTL__POR				(0x0B)
-#define TOMTOM_A_CDC_RX5_B4_CTL			(0x2D3)
-#define TOMTOM_A_CDC_RX5_B4_CTL__POR				(0x0B)
-#define TOMTOM_A_CDC_RX6_B4_CTL			(0x2DB)
-#define TOMTOM_A_CDC_RX6_B4_CTL__POR				(0x0B)
-#define TOMTOM_A_CDC_RX7_B4_CTL			(0x2E3)
-#define TOMTOM_A_CDC_RX7_B4_CTL__POR				(0x0B)
-#define TOMTOM_A_CDC_RX1_B5_CTL			(0x2B4)
-#define TOMTOM_A_CDC_RX1_B5_CTL__POR				(0x78)
-#define TOMTOM_A_CDC_RX2_B5_CTL			(0x2BC)
-#define TOMTOM_A_CDC_RX2_B5_CTL__POR				(0x78)
-#define TOMTOM_A_CDC_RX3_B5_CTL			(0x2C4)
-#define TOMTOM_A_CDC_RX3_B5_CTL__POR				(0x78)
-#define TOMTOM_A_CDC_RX4_B5_CTL			(0x2CC)
-#define TOMTOM_A_CDC_RX4_B5_CTL__POR				(0x78)
-#define TOMTOM_A_CDC_RX5_B5_CTL			(0x2D4)
-#define TOMTOM_A_CDC_RX5_B5_CTL__POR				(0x78)
-#define TOMTOM_A_CDC_RX6_B5_CTL			(0x2DC)
-#define TOMTOM_A_CDC_RX6_B5_CTL__POR				(0x78)
-#define TOMTOM_A_CDC_RX7_B5_CTL			(0x2E4)
-#define TOMTOM_A_CDC_RX7_B5_CTL__POR				(0x78)
-#define TOMTOM_A_CDC_RX1_B6_CTL			(0x2B5)
-#define TOMTOM_A_CDC_RX1_B6_CTL__POR				(0x80)
-#define TOMTOM_A_CDC_RX2_B6_CTL			(0x2BD)
-#define TOMTOM_A_CDC_RX2_B6_CTL__POR				(0x80)
-#define TOMTOM_A_CDC_RX3_B6_CTL			(0x2C5)
-#define TOMTOM_A_CDC_RX3_B6_CTL__POR				(0x80)
-#define TOMTOM_A_CDC_RX4_B6_CTL			(0x2CD)
-#define TOMTOM_A_CDC_RX4_B6_CTL__POR				(0x80)
-#define TOMTOM_A_CDC_RX5_B6_CTL			(0x2D5)
-#define TOMTOM_A_CDC_RX5_B6_CTL__POR				(0x80)
-#define TOMTOM_A_CDC_RX6_B6_CTL			(0x2DD)
-#define TOMTOM_A_CDC_RX6_B6_CTL__POR				(0x80)
-#define TOMTOM_A_CDC_RX7_B6_CTL			(0x2E5)
-#define TOMTOM_A_CDC_RX7_B6_CTL__POR				(0x80)
-#define TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL			(0x2B6)
-#define TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL			(0x2BE)
-#define TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL			(0x2C6)
-#define TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL			(0x2CE)
-#define TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL			(0x2D6)
-#define TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL			(0x2DE)
-#define TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL			(0x2E6)
-#define TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL			(0x2B7)
-#define TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL			(0x2BF)
-#define TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL			(0x2C7)
-#define TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL			(0x2CF)
-#define TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL			(0x2D7)
-#define TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL			(0x2DF)
-#define TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL			(0x2E7)
-#define TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_VBAT_CFG			(0x2E8)
-#define TOMTOM_A_CDC_VBAT_CFG__POR				(0x1A)
-#define TOMTOM_A_CDC_VBAT_ADC_CAL1			(0x2E9)
-#define TOMTOM_A_CDC_VBAT_ADC_CAL1__POR				(0x00)
-#define TOMTOM_A_CDC_VBAT_ADC_CAL2			(0x2EA)
-#define TOMTOM_A_CDC_VBAT_ADC_CAL2__POR				(0x00)
-#define TOMTOM_A_CDC_VBAT_ADC_CAL3			(0x2EB)
-#define TOMTOM_A_CDC_VBAT_ADC_CAL3__POR				(0x04)
-#define TOMTOM_A_CDC_VBAT_PK_EST1			(0x2EC)
-#define TOMTOM_A_CDC_VBAT_PK_EST1__POR				(0xE0)
-#define TOMTOM_A_CDC_VBAT_PK_EST2			(0x2ED)
-#define TOMTOM_A_CDC_VBAT_PK_EST2__POR				(0x01)
-#define TOMTOM_A_CDC_VBAT_PK_EST3			(0x2EE)
-#define TOMTOM_A_CDC_VBAT_PK_EST3__POR				(0x40)
-#define TOMTOM_A_CDC_VBAT_RF_PROC1			(0x2EF)
-#define TOMTOM_A_CDC_VBAT_RF_PROC1__POR				(0x2A)
-#define TOMTOM_A_CDC_VBAT_RF_PROC2			(0x2F0)
-#define TOMTOM_A_CDC_VBAT_RF_PROC2__POR				(0x86)
-#define TOMTOM_A_CDC_VBAT_TAC1			(0x2F1)
-#define TOMTOM_A_CDC_VBAT_TAC1__POR				(0x70)
-#define TOMTOM_A_CDC_VBAT_TAC2			(0x2F2)
-#define TOMTOM_A_CDC_VBAT_TAC2__POR				(0x18)
-#define TOMTOM_A_CDC_VBAT_TAC3			(0x2F3)
-#define TOMTOM_A_CDC_VBAT_TAC3__POR				(0x18)
-#define TOMTOM_A_CDC_VBAT_TAC4			(0x2F4)
-#define TOMTOM_A_CDC_VBAT_TAC4__POR				(0x03)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD1			(0x2F5)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD1__POR				(0x01)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD2			(0x2F6)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD2__POR				(0x00)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD3			(0x2F7)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD3__POR				(0x64)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD4			(0x2F8)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD4__POR				(0x01)
-#define TOMTOM_A_CDC_VBAT_DEBUG1			(0x2F9)
-#define TOMTOM_A_CDC_VBAT_DEBUG1__POR				(0x00)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD_MON			(0x2FA)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD_MON__POR				(0x00)
-#define TOMTOM_A_CDC_VBAT_GAIN_MON_VAL			(0x2FB)
-#define TOMTOM_A_CDC_VBAT_GAIN_MON_VAL__POR				(0x00)
-#define TOMTOM_A_CDC_CLK_ANC_RESET_CTL			(0x300)
-#define TOMTOM_A_CDC_CLK_ANC_RESET_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLK_RX_RESET_CTL			(0x301)
-#define TOMTOM_A_CDC_CLK_RX_RESET_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL			(0x302)
-#define TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL			(0x303)
-#define TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLK_RX_I2S_CTL			(0x306)
-#define TOMTOM_A_CDC_CLK_RX_I2S_CTL__POR				(0x03)
-#define TOMTOM_A_CDC_CLK_TX_I2S_CTL			(0x307)
-#define TOMTOM_A_CDC_CLK_TX_I2S_CTL__POR				(0x03)
-#define TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL			(0x308)
-#define TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL			(0x309)
-#define TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL			(0x30A)
-#define TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL			(0x30B)
-#define TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLK_OTHR_CTL			(0x30C)
-#define TOMTOM_A_CDC_CLK_OTHR_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL			(0x30E)
-#define TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLK_RX_B1_CTL			(0x30F)
-#define TOMTOM_A_CDC_CLK_RX_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLK_RX_B2_CTL			(0x310)
-#define TOMTOM_A_CDC_CLK_RX_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLK_MCLK_CTL			(0x311)
-#define TOMTOM_A_CDC_CLK_MCLK_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLK_PDM_CTL			(0x312)
-#define TOMTOM_A_CDC_CLK_PDM_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLK_SD_CTL			(0x313)
-#define TOMTOM_A_CDC_CLK_SD_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLSH_B1_CTL			(0x320)
-#define TOMTOM_A_CDC_CLSH_B1_CTL__POR				(0xE4)
-#define TOMTOM_A_CDC_CLSH_B2_CTL			(0x321)
-#define TOMTOM_A_CDC_CLSH_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLSH_B3_CTL			(0x322)
-#define TOMTOM_A_CDC_CLSH_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS			(0x323)
-#define TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS__POR				(0x00)
-#define TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD			(0x324)
-#define TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD__POR				(0x12)
-#define TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD			(0x325)
-#define TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD__POR				(0x0C)
-#define TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD			(0x326)
-#define TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD__POR			(0x18)
-#define TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD			(0x327)
-#define TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD__POR			(0x23)
-#define TOMTOM_A_CDC_CLSH_K_ADDR			(0x328)
-#define TOMTOM_A_CDC_CLSH_K_ADDR__POR				(0x00)
-#define TOMTOM_A_CDC_CLSH_K_DATA			(0x329)
-#define TOMTOM_A_CDC_CLSH_K_DATA__POR				(0xA4)
-#define TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L			(0x32A)
-#define TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L__POR				(0xD7)
-#define TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U			(0x32B)
-#define TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U__POR				(0x05)
-#define TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L			(0x32C)
-#define TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L__POR				(0x60)
-#define TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U			(0x32D)
-#define TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U__POR				(0x09)
-#define TOMTOM_A_CDC_CLSH_V_PA_HD_EAR			(0x32E)
-#define TOMTOM_A_CDC_CLSH_V_PA_HD_EAR__POR				(0x00)
-#define TOMTOM_A_CDC_CLSH_V_PA_HD_HPH			(0x32F)
-#define TOMTOM_A_CDC_CLSH_V_PA_HD_HPH__POR				(0x00)
-#define TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR			(0x330)
-#define TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR__POR				(0x00)
-#define TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH			(0x331)
-#define TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH__POR				(0x00)
-#define TOMTOM_A_CDC_IIR1_GAIN_B1_CTL			(0x340)
-#define TOMTOM_A_CDC_IIR1_GAIN_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_B1_CTL			(0x350)
-#define TOMTOM_A_CDC_IIR2_GAIN_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR1_GAIN_B2_CTL			(0x341)
-#define TOMTOM_A_CDC_IIR1_GAIN_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_B2_CTL			(0x351)
-#define TOMTOM_A_CDC_IIR2_GAIN_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR1_GAIN_B3_CTL			(0x342)
-#define TOMTOM_A_CDC_IIR1_GAIN_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_B3_CTL			(0x352)
-#define TOMTOM_A_CDC_IIR2_GAIN_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR1_GAIN_B4_CTL			(0x343)
-#define TOMTOM_A_CDC_IIR1_GAIN_B4_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_B4_CTL			(0x353)
-#define TOMTOM_A_CDC_IIR2_GAIN_B4_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR1_GAIN_B5_CTL			(0x344)
-#define TOMTOM_A_CDC_IIR1_GAIN_B5_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_B5_CTL			(0x354)
-#define TOMTOM_A_CDC_IIR2_GAIN_B5_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR1_GAIN_B6_CTL			(0x345)
-#define TOMTOM_A_CDC_IIR1_GAIN_B6_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_B6_CTL			(0x355)
-#define TOMTOM_A_CDC_IIR2_GAIN_B6_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR1_GAIN_B7_CTL			(0x346)
-#define TOMTOM_A_CDC_IIR1_GAIN_B7_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_B7_CTL			(0x356)
-#define TOMTOM_A_CDC_IIR2_GAIN_B7_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR1_GAIN_B8_CTL			(0x347)
-#define TOMTOM_A_CDC_IIR1_GAIN_B8_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_B8_CTL			(0x357)
-#define TOMTOM_A_CDC_IIR2_GAIN_B8_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR1_CTL			(0x348)
-#define TOMTOM_A_CDC_IIR1_CTL__POR				(0x40)
-#define TOMTOM_A_CDC_IIR2_CTL			(0x358)
-#define TOMTOM_A_CDC_IIR2_CTL__POR				(0x40)
-#define TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL			(0x349)
-#define TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL			(0x359)
-#define TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR1_COEF_B1_CTL			(0x34A)
-#define TOMTOM_A_CDC_IIR1_COEF_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR2_COEF_B1_CTL			(0x35A)
-#define TOMTOM_A_CDC_IIR2_COEF_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR1_COEF_B2_CTL			(0x34B)
-#define TOMTOM_A_CDC_IIR1_COEF_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_IIR2_COEF_B2_CTL			(0x35B)
-#define TOMTOM_A_CDC_IIR2_COEF_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_TOP_GAIN_UPDATE			(0x360)
-#define TOMTOM_A_CDC_TOP_GAIN_UPDATE__POR				(0x00)
-#define TOMTOM_A_CDC_PA_RAMP_B1_CTL			(0x361)
-#define TOMTOM_A_CDC_PA_RAMP_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_PA_RAMP_B2_CTL			(0x362)
-#define TOMTOM_A_CDC_PA_RAMP_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_PA_RAMP_B3_CTL			(0x363)
-#define TOMTOM_A_CDC_PA_RAMP_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_PA_RAMP_B4_CTL			(0x364)
-#define TOMTOM_A_CDC_PA_RAMP_B4_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL			(0x365)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL			(0x366)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_COMP0_B1_CTL			(0x368)
-#define TOMTOM_A_CDC_COMP0_B1_CTL__POR				(0x30)
-#define TOMTOM_A_CDC_COMP1_B1_CTL			(0x370)
-#define TOMTOM_A_CDC_COMP1_B1_CTL__POR				(0x30)
-#define TOMTOM_A_CDC_COMP2_B1_CTL			(0x378)
-#define TOMTOM_A_CDC_COMP2_B1_CTL__POR				(0x30)
-#define TOMTOM_A_CDC_COMP0_B2_CTL			(0x369)
-#define TOMTOM_A_CDC_COMP0_B2_CTL__POR				(0xB5)
-#define TOMTOM_A_CDC_COMP1_B2_CTL			(0x371)
-#define TOMTOM_A_CDC_COMP1_B2_CTL__POR				(0xB5)
-#define TOMTOM_A_CDC_COMP2_B2_CTL			(0x379)
-#define TOMTOM_A_CDC_COMP2_B2_CTL__POR				(0xB5)
-#define TOMTOM_A_CDC_COMP0_B3_CTL			(0x36A)
-#define TOMTOM_A_CDC_COMP0_B3_CTL__POR				(0x28)
-#define TOMTOM_A_CDC_COMP1_B3_CTL			(0x372)
-#define TOMTOM_A_CDC_COMP1_B3_CTL__POR				(0x28)
-#define TOMTOM_A_CDC_COMP2_B3_CTL			(0x37A)
-#define TOMTOM_A_CDC_COMP2_B3_CTL__POR				(0x28)
-#define TOMTOM_A_CDC_COMP0_B4_CTL			(0x36B)
-#define TOMTOM_A_CDC_COMP0_B4_CTL__POR				(0x37)
-#define TOMTOM_A_CDC_COMP1_B4_CTL			(0x373)
-#define TOMTOM_A_CDC_COMP1_B4_CTL__POR				(0x37)
-#define TOMTOM_A_CDC_COMP2_B4_CTL			(0x37B)
-#define TOMTOM_A_CDC_COMP2_B4_CTL__POR				(0x37)
-#define TOMTOM_A_CDC_COMP0_B5_CTL			(0x36C)
-#define TOMTOM_A_CDC_COMP0_B5_CTL__POR				(0x7F)
-#define TOMTOM_A_CDC_COMP1_B5_CTL			(0x374)
-#define TOMTOM_A_CDC_COMP1_B5_CTL__POR				(0x7F)
-#define TOMTOM_A_CDC_COMP2_B5_CTL			(0x37C)
-#define TOMTOM_A_CDC_COMP2_B5_CTL__POR				(0x7F)
-#define TOMTOM_A_CDC_COMP0_B6_CTL			(0x36D)
-#define TOMTOM_A_CDC_COMP0_B6_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_COMP1_B6_CTL			(0x375)
-#define TOMTOM_A_CDC_COMP1_B6_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_COMP2_B6_CTL			(0x37D)
-#define TOMTOM_A_CDC_COMP2_B6_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS			(0x36E)
-#define TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS__POR			(0x03)
-#define TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS			(0x376)
-#define TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS__POR			(0x03)
-#define TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS			(0x37E)
-#define TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS__POR			(0x03)
-#define TOMTOM_A_CDC_COMP0_FS_CFG			(0x36F)
-#define TOMTOM_A_CDC_COMP0_FS_CFG__POR				(0x03)
-#define TOMTOM_A_CDC_COMP1_FS_CFG			(0x377)
-#define TOMTOM_A_CDC_COMP1_FS_CFG__POR				(0x03)
-#define TOMTOM_A_CDC_COMP2_FS_CFG			(0x37F)
-#define TOMTOM_A_CDC_COMP2_FS_CFG__POR				(0x03)
-#define TOMTOM_A_CDC_CONN_RX1_B1_CTL			(0x380)
-#define TOMTOM_A_CDC_CONN_RX1_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX1_B2_CTL			(0x381)
-#define TOMTOM_A_CDC_CONN_RX1_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX1_B3_CTL			(0x382)
-#define TOMTOM_A_CDC_CONN_RX1_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX2_B1_CTL			(0x383)
-#define TOMTOM_A_CDC_CONN_RX2_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX2_B2_CTL			(0x384)
-#define TOMTOM_A_CDC_CONN_RX2_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX2_B3_CTL			(0x385)
-#define TOMTOM_A_CDC_CONN_RX2_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX3_B1_CTL			(0x386)
-#define TOMTOM_A_CDC_CONN_RX3_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX3_B2_CTL			(0x387)
-#define TOMTOM_A_CDC_CONN_RX3_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX4_B1_CTL			(0x388)
-#define TOMTOM_A_CDC_CONN_RX4_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX4_B2_CTL			(0x389)
-#define TOMTOM_A_CDC_CONN_RX4_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX5_B1_CTL			(0x38A)
-#define TOMTOM_A_CDC_CONN_RX5_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX5_B2_CTL			(0x38B)
-#define TOMTOM_A_CDC_CONN_RX5_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX6_B1_CTL			(0x38C)
-#define TOMTOM_A_CDC_CONN_RX6_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX6_B2_CTL			(0x38D)
-#define TOMTOM_A_CDC_CONN_RX6_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX7_B1_CTL			(0x38E)
-#define TOMTOM_A_CDC_CONN_RX7_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX7_B2_CTL			(0x38F)
-#define TOMTOM_A_CDC_CONN_RX7_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX7_B3_CTL			(0x390)
-#define TOMTOM_A_CDC_CONN_RX7_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_ANC_B1_CTL			(0x391)
-#define TOMTOM_A_CDC_CONN_ANC_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_ANC_B2_CTL			(0x392)
-#define TOMTOM_A_CDC_CONN_ANC_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_TX_B1_CTL			(0x393)
-#define TOMTOM_A_CDC_CONN_TX_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_TX_B2_CTL			(0x394)
-#define TOMTOM_A_CDC_CONN_TX_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_TX_B3_CTL			(0x395)
-#define TOMTOM_A_CDC_CONN_TX_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_TX_B4_CTL			(0x396)
-#define TOMTOM_A_CDC_CONN_TX_B4_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_EQ1_B1_CTL			(0x397)
-#define TOMTOM_A_CDC_CONN_EQ1_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_EQ1_B2_CTL			(0x398)
-#define TOMTOM_A_CDC_CONN_EQ1_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_EQ1_B3_CTL			(0x399)
-#define TOMTOM_A_CDC_CONN_EQ1_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_EQ1_B4_CTL			(0x39A)
-#define TOMTOM_A_CDC_CONN_EQ1_B4_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_EQ2_B1_CTL			(0x39B)
-#define TOMTOM_A_CDC_CONN_EQ2_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_EQ2_B2_CTL			(0x39C)
-#define TOMTOM_A_CDC_CONN_EQ2_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_EQ2_B3_CTL			(0x39D)
-#define TOMTOM_A_CDC_CONN_EQ2_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_EQ2_B4_CTL			(0x39E)
-#define TOMTOM_A_CDC_CONN_EQ2_B4_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_SRC1_B1_CTL			(0x39F)
-#define TOMTOM_A_CDC_CONN_SRC1_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_SRC1_B2_CTL			(0x3A0)
-#define TOMTOM_A_CDC_CONN_SRC1_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_SRC2_B1_CTL			(0x3A1)
-#define TOMTOM_A_CDC_CONN_SRC2_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_SRC2_B2_CTL			(0x3A2)
-#define TOMTOM_A_CDC_CONN_SRC2_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B1_CTL			(0x3A3)
-#define TOMTOM_A_CDC_CONN_TX_SB_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B2_CTL			(0x3A4)
-#define TOMTOM_A_CDC_CONN_TX_SB_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B3_CTL			(0x3A5)
-#define TOMTOM_A_CDC_CONN_TX_SB_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B4_CTL			(0x3A6)
-#define TOMTOM_A_CDC_CONN_TX_SB_B4_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B5_CTL			(0x3A7)
-#define TOMTOM_A_CDC_CONN_TX_SB_B5_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B6_CTL			(0x3A8)
-#define TOMTOM_A_CDC_CONN_TX_SB_B6_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B7_CTL			(0x3A9)
-#define TOMTOM_A_CDC_CONN_TX_SB_B7_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B8_CTL			(0x3AA)
-#define TOMTOM_A_CDC_CONN_TX_SB_B8_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B9_CTL			(0x3AB)
-#define TOMTOM_A_CDC_CONN_TX_SB_B9_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B10_CTL			(0x3AC)
-#define TOMTOM_A_CDC_CONN_TX_SB_B10_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B11_CTL			(0x3AD)
-#define TOMTOM_A_CDC_CONN_TX_SB_B11_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX_SB_B1_CTL			(0x3AE)
-#define TOMTOM_A_CDC_CONN_RX_SB_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_RX_SB_B2_CTL			(0x3AF)
-#define TOMTOM_A_CDC_CONN_RX_SB_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_CLSH_CTL			(0x3B0)
-#define TOMTOM_A_CDC_CONN_CLSH_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CONN_MISC			(0x3B1)
-#define TOMTOM_A_CDC_CONN_MISC__POR				(0x01)
-#define TOMTOM_A_CDC_CONN_RX8_B1_CTL			(0x3B3)
-#define TOMTOM_A_CDC_CONN_RX8_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL		(0x3B4)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL__POR				(0x81)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST	(0x3B5)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST__POR		(0x00)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD	(0x3B6)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD__POR		(0xFF)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS	(0x3B7)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS__POR		(0x00)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK		(0x3B8)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK__POR			(0x04)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING		(0x3B9)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING__POR			(0x04)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL		(0x3BA)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL__POR				(0x81)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST	(0x3BB)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST__POR		(0x00)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD	(0x3BC)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD__POR		(0xFF)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS	(0x3BD)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS__POR		(0x00)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK		(0x3BE)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK__POR			(0x04)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING	(0x3BF)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING__POR			(0x04)
-#define TOMTOM_A_CDC_MBHC_EN_CTL			(0x3C0)
-#define TOMTOM_A_CDC_MBHC_EN_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_FIR_B1_CFG			(0x3C1)
-#define TOMTOM_A_CDC_MBHC_FIR_B1_CFG__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_FIR_B2_CFG			(0x3C2)
-#define TOMTOM_A_CDC_MBHC_FIR_B2_CFG__POR				(0x06)
-#define TOMTOM_A_CDC_MBHC_TIMER_B1_CTL			(0x3C3)
-#define TOMTOM_A_CDC_MBHC_TIMER_B1_CTL__POR				(0x03)
-#define TOMTOM_A_CDC_MBHC_TIMER_B2_CTL			(0x3C4)
-#define TOMTOM_A_CDC_MBHC_TIMER_B2_CTL__POR				(0x09)
-#define TOMTOM_A_CDC_MBHC_TIMER_B3_CTL			(0x3C5)
-#define TOMTOM_A_CDC_MBHC_TIMER_B3_CTL__POR				(0x1E)
-#define TOMTOM_A_CDC_MBHC_TIMER_B4_CTL			(0x3C6)
-#define TOMTOM_A_CDC_MBHC_TIMER_B4_CTL__POR				(0x45)
-#define TOMTOM_A_CDC_MBHC_TIMER_B5_CTL			(0x3C7)
-#define TOMTOM_A_CDC_MBHC_TIMER_B5_CTL__POR				(0x04)
-#define TOMTOM_A_CDC_MBHC_TIMER_B6_CTL			(0x3C8)
-#define TOMTOM_A_CDC_MBHC_TIMER_B6_CTL__POR				(0x78)
-#define TOMTOM_A_CDC_MBHC_B1_STATUS			(0x3C9)
-#define TOMTOM_A_CDC_MBHC_B1_STATUS__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_B2_STATUS			(0x3CA)
-#define TOMTOM_A_CDC_MBHC_B2_STATUS__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_B3_STATUS			(0x3CB)
-#define TOMTOM_A_CDC_MBHC_B3_STATUS__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_B4_STATUS			(0x3CC)
-#define TOMTOM_A_CDC_MBHC_B4_STATUS__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_B5_STATUS			(0x3CD)
-#define TOMTOM_A_CDC_MBHC_B5_STATUS__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_B1_CTL			(0x3CE)
-#define TOMTOM_A_CDC_MBHC_B1_CTL__POR				(0xC0)
-#define TOMTOM_A_CDC_MBHC_B2_CTL			(0x3CF)
-#define TOMTOM_A_CDC_MBHC_B2_CTL__POR				(0x5D)
-#define TOMTOM_A_CDC_MBHC_VOLT_B1_CTL			(0x3D0)
-#define TOMTOM_A_CDC_MBHC_VOLT_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_VOLT_B2_CTL			(0x3D1)
-#define TOMTOM_A_CDC_MBHC_VOLT_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_VOLT_B3_CTL			(0x3D2)
-#define TOMTOM_A_CDC_MBHC_VOLT_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_VOLT_B4_CTL			(0x3D3)
-#define TOMTOM_A_CDC_MBHC_VOLT_B4_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_VOLT_B5_CTL			(0x3D4)
-#define TOMTOM_A_CDC_MBHC_VOLT_B5_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_VOLT_B6_CTL			(0x3D5)
-#define TOMTOM_A_CDC_MBHC_VOLT_B6_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_VOLT_B7_CTL			(0x3D6)
-#define TOMTOM_A_CDC_MBHC_VOLT_B7_CTL__POR				(0xFF)
-#define TOMTOM_A_CDC_MBHC_VOLT_B8_CTL			(0x3D7)
-#define TOMTOM_A_CDC_MBHC_VOLT_B8_CTL__POR				(0x07)
-#define TOMTOM_A_CDC_MBHC_VOLT_B9_CTL			(0x3D8)
-#define TOMTOM_A_CDC_MBHC_VOLT_B9_CTL__POR				(0xFF)
-#define TOMTOM_A_CDC_MBHC_VOLT_B10_CTL			(0x3D9)
-#define TOMTOM_A_CDC_MBHC_VOLT_B10_CTL__POR				(0x7F)
-#define TOMTOM_A_CDC_MBHC_VOLT_B11_CTL			(0x3DA)
-#define TOMTOM_A_CDC_MBHC_VOLT_B11_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_VOLT_B12_CTL			(0x3DB)
-#define TOMTOM_A_CDC_MBHC_VOLT_B12_CTL__POR				(0x80)
-#define TOMTOM_A_CDC_MBHC_CLK_CTL			(0x3DC)
-#define TOMTOM_A_CDC_MBHC_CLK_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_INT_CTL			(0x3DD)
-#define TOMTOM_A_CDC_MBHC_INT_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_DEBUG_CTL			(0x3DE)
-#define TOMTOM_A_CDC_MBHC_DEBUG_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_MBHC_SPARE			(0x3DF)
-#define TOMTOM_A_CDC_MBHC_SPARE__POR				(0x00)
-#define TOMTOM_A_CDC_RX8_B1_CTL			(0x3E0)
-#define TOMTOM_A_CDC_RX8_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX8_B2_CTL			(0x3E1)
-#define TOMTOM_A_CDC_RX8_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX8_B3_CTL			(0x3E2)
-#define TOMTOM_A_CDC_RX8_B3_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX8_B4_CTL			(0x3E3)
-#define TOMTOM_A_CDC_RX8_B4_CTL__POR				(0x0B)
-#define TOMTOM_A_CDC_RX8_B5_CTL			(0x3E4)
-#define TOMTOM_A_CDC_RX8_B5_CTL__POR				(0x78)
-#define TOMTOM_A_CDC_RX8_B6_CTL			(0x3E5)
-#define TOMTOM_A_CDC_RX8_B6_CTL__POR				(0x80)
-#define TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL			(0x3E6)
-#define TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL			(0x3E7)
-#define TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0			(0x3E8)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1			(0x3E9)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2			(0x3EA)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3			(0x3EB)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4			(0x3EC)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5			(0x3ED)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6			(0x3EE)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6__POR				(0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7			(0x3EF)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7__POR				(0x00)
-#define TOMTOM_A_CDC_BOOST_MODE_CTL			(0x3F0)
-#define TOMTOM_A_CDC_BOOST_MODE_CTL__POR				(0x00)
-#define TOMTOM_A_CDC_BOOST_THRESHOLD			(0x3F1)
-#define TOMTOM_A_CDC_BOOST_THRESHOLD__POR				(0x02)
-#define TOMTOM_A_CDC_BOOST_TAP_SEL			(0x3F2)
-#define TOMTOM_A_CDC_BOOST_TAP_SEL__POR				(0x00)
-#define TOMTOM_A_CDC_BOOST_HOLD_TIME			(0x3F3)
-#define TOMTOM_A_CDC_BOOST_HOLD_TIME__POR				(0x02)
-#define TOMTOM_A_CDC_BOOST_TRGR_EN			(0x3F4)
-#define TOMTOM_A_CDC_BOOST_TRGR_EN__POR				(0x00)
-
-/* SLIMBUS Slave Registers */
-#define TOMTOM_SLIM_PGD_PORT_INT_EN0                     (0x30)
-#define TOMTOM_SLIM_PGD_PORT_INT_STATUS_RX_0             (0x34)
-#define TOMTOM_SLIM_PGD_PORT_INT_STATUS_RX_1             (0x35)
-#define TOMTOM_SLIM_PGD_PORT_INT_STATUS_TX_0             (0x36)
-#define TOMTOM_SLIM_PGD_PORT_INT_STATUS_TX_1             (0x37)
-#define TOMTOM_SLIM_PGD_PORT_INT_CLR_RX_0                (0x38)
-#define TOMTOM_SLIM_PGD_PORT_INT_CLR_RX_1                (0x39)
-#define TOMTOM_SLIM_PGD_PORT_INT_CLR_TX_0                (0x3A)
-#define TOMTOM_SLIM_PGD_PORT_INT_CLR_TX_1                (0x3B)
-#define TOMTOM_SLIM_PGD_PORT_INT_RX_SOURCE0		(0x60)
-#define TOMTOM_SLIM_PGD_PORT_INT_TX_SOURCE0		(0x70)
-
-/* Macros for Packing Register Writes into a U32 */
-#define TOMTOM_PACKED_REG_SIZE sizeof(u32)
-
-#define TOMTOM_CODEC_PACK_ENTRY(reg, mask, val) ((val & 0xff)|\
-	((mask & 0xff) << 8)|((reg & 0xffff) << 16))
-#define TOMTOM_CODEC_UNPACK_ENTRY(packed, reg, mask, val) \
-	do { \
-		((reg) = ((packed >> 16) & (0xffff))); \
-		((mask) = ((packed >> 8) & (0xff))); \
-		((val) = ((packed) & (0xff))); \
-	} while (0)
-
-#define TOMTOM_SB_PGD_PORT_TX_BASE    0x50
-#define TOMTOM_SB_PGD_PORT_RX_BASE    0x40
-#define WCD9330_MAX_REGISTER 0x3FF
-extern const u8 tomtom_reg_readable[WCD9330_MAX_REGISTER + 1];
-#endif
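For reference, the packing macros removed above encode one register write in a u32 as reg[31:16] | mask[15:8] | val[7:0]. A minimal sketch of how they were used; the register and value numbers are arbitrary examples taken from the defines above:

	/* Illustrative only: pack a write to TOMTOM_A_CDC_IIR1_CTL (0x348). */
	u32 packed = TOMTOM_CODEC_PACK_ENTRY(0x348, 0xFF, 0x40);
	u32 reg, mask, val;

	TOMTOM_CODEC_UNPACK_ENTRY(packed, reg, mask, val);
	/* reg == 0x348, mask == 0xFF, val == 0x40 */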
diff --git a/include/linux/mfd/wcd9xxx/wcd9xxx-irq.h b/include/linux/mfd/wcd9xxx/wcd9xxx-irq.h
index 1e428a1..99ce603 100644
--- a/include/linux/mfd/wcd9xxx/wcd9xxx-irq.h
+++ b/include/linux/mfd/wcd9xxx/wcd9xxx-irq.h
@@ -32,4 +32,6 @@
 
 int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res);
 void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *wcd9xxx_res);
+int wcd9xxx_irq_drv_init(void);
+void wcd9xxx_irq_drv_exit(void);
 #endif
diff --git a/include/linux/mfd/wcd9xxx/wcd9xxx-utils.h b/include/linux/mfd/wcd9xxx/wcd9xxx-utils.h
index d0ac0ac..7a13dd1 100644
--- a/include/linux/mfd/wcd9xxx/wcd9xxx-utils.h
+++ b/include/linux/mfd/wcd9xxx/wcd9xxx-utils.h
@@ -33,108 +33,8 @@
 typedef int (*codec_type_fn)(struct wcd9xxx *,
 			     struct wcd9xxx_codec_type *);
 
-#ifdef CONFIG_WCD934X_CODEC
-extern int wcd934x_bringup(struct wcd9xxx *wcd9xxx);
-extern int wcd934x_bringdown(struct wcd9xxx *wcd9xxx);
-extern int wcd934x_get_codec_info(struct wcd9xxx *wcd9xxx,
-				  struct wcd9xxx_codec_type *wcd_type);
-#endif
+codec_bringdown_fn wcd9xxx_bringdown_fn(int type);
+codec_bringup_fn wcd9xxx_bringup_fn(int type);
+codec_type_fn wcd9xxx_get_codec_info_fn(int type);
 
-#ifdef CONFIG_WCD9335_CODEC
-extern int wcd9335_bringup(struct wcd9xxx *wcd9xxx);
-extern int wcd9335_bringdown(struct wcd9xxx *wcd9xxx);
-extern int wcd9335_get_codec_info(struct wcd9xxx *wcd9xxx,
-				  struct wcd9xxx_codec_type *wcd_type);
-#endif
-
-#ifdef CONFIG_WCD9330_CODEC
-extern int wcd9330_bringup(struct wcd9xxx *wcd9xxx);
-extern int wcd9330_bringdown(struct wcd9xxx *wcd9xxx);
-extern int wcd9330_get_codec_info(struct wcd9xxx *wcd9xxx,
-				  struct wcd9xxx_codec_type *wcd_type);
-#endif
-
-static inline codec_bringdown_fn wcd9xxx_bringdown_fn(int type)
-{
-	codec_bringdown_fn cdc_bdown_fn;
-
-	switch (type) {
-#ifdef CONFIG_WCD934X_CODEC
-	case WCD934X:
-		cdc_bdown_fn = wcd934x_bringdown;
-		break;
-#endif
-#ifdef CONFIG_WCD9335_CODEC
-	case WCD9335:
-		cdc_bdown_fn = wcd9335_bringdown;
-		break;
-#endif
-#ifdef CONFIG_WCD9330_CODEC
-	case WCD9330:
-		cdc_bdown_fn = wcd9330_bringdown;
-		break;
-#endif
-	default:
-		cdc_bdown_fn = NULL;
-		break;
-	}
-
-	return cdc_bdown_fn;
-}
-
-static inline codec_bringup_fn wcd9xxx_bringup_fn(int type)
-{
-	codec_bringup_fn cdc_bup_fn;
-
-	switch (type) {
-#ifdef CONFIG_WCD934X_CODEC
-	case WCD934X:
-		cdc_bup_fn = wcd934x_bringup;
-		break;
-#endif
-#ifdef CONFIG_WCD9335_CODEC
-	case WCD9335:
-		cdc_bup_fn = wcd9335_bringup;
-		break;
-#endif
-#ifdef CONFIG_WCD9330_CODEC
-	case WCD9330:
-		cdc_bup_fn = wcd9330_bringup;
-		break;
-#endif
-	default:
-		cdc_bup_fn = NULL;
-		break;
-	}
-
-	return cdc_bup_fn;
-}
-
-static inline codec_type_fn wcd9xxx_get_codec_info_fn(int type)
-{
-	codec_type_fn cdc_type_fn;
-
-	switch (type) {
-#ifdef CONFIG_WCD934X_CODEC
-	case WCD934X:
-		cdc_type_fn = wcd934x_get_codec_info;
-		break;
-#endif
-#ifdef CONFIG_WCD9335_CODEC
-	case WCD9335:
-		cdc_type_fn = wcd9335_get_codec_info;
-		break;
-#endif
-#ifdef CONFIG_WCD9330_CODEC
-	case WCD9330:
-		cdc_type_fn = wcd9330_get_codec_info;
-		break;
-#endif
-	default:
-		cdc_type_fn = NULL;
-		break;
-	}
-
-	return cdc_type_fn;
-}
 #endif
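With the inline selectors replaced by out-of-line declarations, callers still resolve the codec-specific handler by type and must handle a NULL return for unsupported codec types, as the removed switch statements show. A hedged sketch; the wcd9xxx field names and error handling are illustrative:

	codec_bringup_fn bringup = wcd9xxx_bringup_fn(wcd9xxx->type);

	if (!bringup) {
		dev_err(wcd9xxx->dev, "no bringup handler for codec type %d\n",
			wcd9xxx->type);
		return -EINVAL;
	}
	ret = bringup(wcd9xxx);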
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 257173e..f541da6 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -35,6 +35,8 @@
 #define PHY_ID_KSZ886X		0x00221430
 #define PHY_ID_KSZ8863		0x00221435
 
+#define PHY_ID_KSZ8795		0x00221550
+
 /* struct phy_device dev_flags definitions */
 #define MICREL_PHY_50MHZ_CLK	0x00000001
 #define MICREL_PHY_FXEN		0x00000002
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f7b0dab..6a14034 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1357,39 +1357,11 @@
 
 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
 
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
-{
-	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
 {
 	return !vma->vm_ops;
 }
 
-static inline int stack_guard_page_start(struct vm_area_struct *vma,
-					     unsigned long addr)
-{
-	return (vma->vm_flags & VM_GROWSDOWN) &&
-		(vma->vm_start == addr) &&
-		!vma_growsdown(vma->vm_prev, addr);
-}
-
-/* Is the vma a continuation of the stack vma below it? */
-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
-{
-	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
-}
-
-static inline int stack_guard_page_end(struct vm_area_struct *vma,
-					   unsigned long addr)
-{
-	return (vma->vm_flags & VM_GROWSUP) &&
-		(vma->vm_end == addr) &&
-		!vma_growsup(vma->vm_next, addr);
-}
-
 int vma_is_stack_for_current(struct vm_area_struct *vma);
 
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -2128,6 +2100,7 @@
 				pgoff_t offset,
 				unsigned long size);
 
+extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
 
@@ -2156,6 +2129,30 @@
 	return vma;
 }
 
+static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+{
+	unsigned long vm_start = vma->vm_start;
+
+	if (vma->vm_flags & VM_GROWSDOWN) {
+		vm_start -= stack_guard_gap;
+		if (vm_start > vma->vm_start)
+			vm_start = 0;
+	}
+	return vm_start;
+}
+
+static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+{
+	unsigned long vm_end = vma->vm_end;
+
+	if (vma->vm_flags & VM_GROWSUP) {
+		vm_end += stack_guard_gap;
+		if (vm_end < vma->vm_end)
+			vm_end = -PAGE_SIZE;
+	}
+	return vm_end;
+}
+
 static inline unsigned long vma_pages(struct vm_area_struct *vma)
 {
 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
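vm_start_gap()/vm_end_gap() above report a stack VMA's extent widened by stack_guard_gap, clamping on wrap-around (to 0 below a grows-down stack, to -PAGE_SIZE above a grows-up stack). A hedged, hypothetical helper showing how the gap below a grows-down stack could be tested with these primitives:

	/* Hypothetical helper, not part of this patch. */
	static bool addr_in_stack_guard(struct vm_area_struct *vma,
					unsigned long addr)
	{
		/* The guard region spans [vm_start_gap(vma), vma->vm_start). */
		return (vma->vm_flags & VM_GROWSDOWN) &&
		       addr >= vm_start_gap(vma) && addr < vma->vm_start;
	}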
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 0f2e651..b718105 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -233,6 +233,7 @@
 	bool lock_needed);
 extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
 	bool lock_needed, bool is_cmdq_dcmd);
+extern void mmc_recovery_fallback_lower_speed(struct mmc_host *host);
 
 /**
  *	mmc_claim_host - exclusively claim a host
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 9200069..f214b0c 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -480,6 +480,7 @@
 	int			clk_requests;	/* internal reference counter */
 	unsigned int		clk_delay;	/* number of MCI clk hold cycles */
 	bool			clk_gated;	/* clock gated */
+	struct workqueue_struct *clk_gate_wq;	/* clock gate work queue */
 	struct delayed_work	clk_gate_work; /* delayed clock gate */
 	unsigned int		clk_old;	/* old clock value cache */
 	spinlock_t		clk_lock;	/* lock for clk fields */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a47c29e..d92d9a6 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -865,11 +865,15 @@
  *	of useless work if you return NETDEV_TX_BUSY.
  *	Required; cannot be NULL.
  *
- * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
- *		netdev_features_t features);
- *	Adjusts the requested feature flags according to device-specific
- *	constraints, and returns the resulting flags. Must not modify
- *	the device state.
+ * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
+ *					   struct net_device *dev,

+ *					   netdev_features_t features);
+ *	Called by core transmit path to determine if device is capable of
+ *	performing offload operations on a given packet. This is to give
+ *	the device an opportunity to implement any restrictions that cannot
+ *	be otherwise expressed by feature flags. The check is called with
+ *	the set of features that the stack has calculated and it returns
+ *	those the driver believes to be appropriate.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
  *                         void *accel_priv, select_queue_fallback_t fallback);
@@ -1027,6 +1031,12 @@
  *	Called to release previously enslaved netdev.
  *
  *      Feature/offload setting functions.
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ *		netdev_features_t features);
+ *	Adjusts the requested feature flags according to device-specific
+ *	constraints, and returns the resulting flags. Must not modify
+ *	the device state.
+ *
  * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
  *	Called to update device configuration to new features. Passed
  *	feature set might be less than what was returned by ndo_fix_features()).
@@ -1099,15 +1109,6 @@
  *	Callback to use for xmit over the accelerated station. This
  *	is used in place of ndo_start_xmit on accelerated net
  *	devices.
- * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
- *					   struct net_device *dev
- *					   netdev_features_t features);
- *	Called by core transmit path to determine if device is capable of
- *	performing offload operations on a given packet. This is to give
- *	the device an opportunity to implement any restrictions that cannot
- *	be otherwise expressed by feature flags. The check is called with
- *	the set of features that the stack has calculated and it returns
- *	those the driver believes to be appropriate.
  * int (*ndo_set_tx_maxrate)(struct net_device *dev,
  *			     int queue_index, u32 maxrate);
  *	Called when a user wants to set a max-rate limitation of specific
@@ -2831,6 +2832,8 @@
 	unsigned int		processed;
 	unsigned int		time_squeeze;
 	unsigned int		received_rps;
+	unsigned int            gro_coalesced;
+
 #ifdef CONFIG_RPS
 	struct softnet_data	*rps_ipi_list;
 #endif
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index dde3b13..3ca2526 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -7,6 +7,23 @@
 #include <linux/sched.h>
 #include <asm/irq.h>
 
+/*
+ * The run state of the lockup detectors is controlled by the content of the
+ * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
+ * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
+ *
+ * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
+ * are variables that are only used as an 'interface' between the parameters
+ * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
+ * 'watchdog_thresh' variable is handled differently because its value is not
+ * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
+ * is equal to zero.
+ */
+#define NMI_WATCHDOG_ENABLED_BIT   0
+#define SOFT_WATCHDOG_ENABLED_BIT  1
+#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
+#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
+
 /**
  * touch_nmi_watchdog - restart NMI watchdog timeout.
  *
@@ -14,11 +31,8 @@
  * may be used to reset the timeout - for code which intentionally
  * disables interrupts for a long time. This call is stateless.
  */
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR_NMI)
-#include <asm/nmi.h>
-#endif
-
 #if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+#include <asm/nmi.h>
 extern void touch_nmi_watchdog(void);
 #else
 static inline void touch_nmi_watchdog(void)
@@ -106,9 +120,17 @@
 extern int soft_watchdog_enabled;
 extern int watchdog_user_enabled;
 extern int watchdog_thresh;
+extern unsigned long watchdog_enabled;
 extern unsigned long *watchdog_cpumask_bits;
+extern atomic_t watchdog_park_in_progress;
+#ifdef CONFIG_SMP
 extern int sysctl_softlockup_all_cpu_backtrace;
 extern int sysctl_hardlockup_all_cpu_backtrace;
+#else
+#define sysctl_softlockup_all_cpu_backtrace 0
+#define sysctl_hardlockup_all_cpu_backtrace 0
+#endif
+extern bool is_hardlockup(void);
 struct ctl_table;
 extern int proc_watchdog(struct ctl_table *, int ,
 			 void __user *, size_t *, loff_t *);
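The new watchdog_enabled export and the *_ENABLED bit masks above can be combined to test which lockup detectors are currently active; a minimal sketch, with illustrative helper names:

	/* Illustrative only. */
	static bool hard_lockup_detector_active(void)
	{
		return watchdog_enabled & NMI_WATCHDOG_ENABLED;   /* bit 0 */
	}

	static bool soft_lockup_detector_active(void)
	{
		return watchdog_enabled & SOFT_WATCHDOG_ENABLED;  /* bit 1 */
	}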
diff --git a/include/linux/pci.h b/include/linux/pci.h
index a38772a..1b71179 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -178,6 +178,11 @@
 	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
 	/* Get VPD from function 0 VPD */
 	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
+	/*
+	 * Resume before calling the driver's system suspend hooks, disabling
+	 * the direct_complete optimization.
+	 */
+	PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
 };
 
 enum pci_irq_reroute_variant {
diff --git a/include/linux/qcom_tspp.h b/include/linux/qcom_tspp.h
new file mode 100644
index 0000000..1b34c38
--- /dev/null
+++ b/include/linux/qcom_tspp.h
@@ -0,0 +1,108 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_TSPP_H_
+#define _MSM_TSPP_H_
+
+struct tspp_data_descriptor {
+	void *virt_base;   /* logical address of the actual data */
+	phys_addr_t phys_base; /* physical address of the actual data */
+	u32 size;          /* size of buffer in bytes */
+	int id;            /* unique identifier */
+	void *user;        /* user-defined data */
+};
+
+enum tspp_key_parity {
+	TSPP_KEY_PARITY_EVEN,
+	TSPP_KEY_PARITY_ODD
+};
+
+struct tspp_key {
+	enum tspp_key_parity parity;
+	int lsb;
+	int msb;
+};
+
+enum tspp_source {
+	TSPP_SOURCE_TSIF0,
+	TSPP_SOURCE_TSIF1,
+	TSPP_SOURCE_MEM,
+	TSPP_SOURCE_NONE = -1
+};
+
+enum tspp_mode {
+	TSPP_MODE_DISABLED,
+	TSPP_MODE_PES,
+	TSPP_MODE_RAW,
+	TSPP_MODE_RAW_NO_SUFFIX
+};
+
+enum tspp_tsif_mode {
+	TSPP_TSIF_MODE_LOOPBACK, /* loopback mode */
+	TSPP_TSIF_MODE_1,        /* without sync */
+	TSPP_TSIF_MODE_2         /* with sync signal */
+};
+
+struct tspp_filter {
+	int pid;
+	int mask;
+	enum tspp_mode mode;
+	unsigned int priority;	/* 0 - 15 */
+	int decrypt;
+	enum tspp_source source;
+};
+
+struct tspp_select_source {
+	enum tspp_source source;
+	enum tspp_tsif_mode mode;
+	int clk_inverse;
+	int data_inverse;
+	int sync_inverse;
+	int enable_inverse;
+};
+
+enum tsif_tts_source {
+	TSIF_TTS_TCR = 0,	/* Time stamps from TCR counter */
+	TSIF_TTS_LPASS_TIMER	/* Time stamps from AV/Qtimer Timer  */
+};
+
+typedef void (tspp_notifier)(int channel_id, void *user);
+typedef void* (tspp_allocator)(int channel_id, u32 size,
+	phys_addr_t *phys_base, void *user);
+typedef void (tspp_memfree)(int channel_id, u32 size,
+	void *virt_base, phys_addr_t phys_base, void *user);
+
+/* Kernel API functions */
+int tspp_open_stream(u32 dev, u32 channel_id,
+			struct tspp_select_source *source);
+int tspp_close_stream(u32 dev, u32 channel_id);
+int tspp_open_channel(u32 dev, u32 channel_id);
+int tspp_close_channel(u32 dev, u32 channel_id);
+int tspp_get_ref_clk_counter(u32 dev,
+	enum tspp_source source, u32 *tcr_counter);
+int tspp_add_filter(u32 dev, u32 channel_id, struct tspp_filter *filter);
+int tspp_remove_filter(u32 dev, u32 channel_id,	struct tspp_filter *filter);
+int tspp_set_key(u32 dev, u32 channel_id, struct tspp_key *key);
+int tspp_register_notification(u32 dev, u32 channel_id, tspp_notifier *notify,
+	void *data, u32 timer_ms);
+int tspp_unregister_notification(u32 dev, u32 channel_id);
+const struct tspp_data_descriptor *tspp_get_buffer(u32 dev, u32 channel_id);
+int tspp_release_buffer(u32 dev, u32 channel_id, u32 descriptor_id);
+int tspp_allocate_buffers(u32 dev, u32 channel_id, u32 count,
+	u32 size, u32 int_freq, tspp_allocator *alloc,
+	tspp_memfree *memfree, void *user);
+
+int tspp_get_tts_source(u32 dev, int *tts_source);
+int tspp_get_lpass_time_counter(u32 dev, enum tspp_source source,
+			u64 *lpass_time_counter);
+
+#endif /* _MSM_TSPP_H_ */
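A hedged sketch of how a kernel client might drive the TSPP API declared above. The device/channel numbers, PID and callback are illustrative, buffer allocation via tspp_allocate_buffers() is omitted for brevity, and the exact call ordering may differ in real drivers:

	static void demux_notify(int channel_id, void *user)
	{
		/* Called by TSPP when new data or the notification timer fires. */
	}

	static int tspp_example(void)
	{
		struct tspp_select_source src = {
			.source = TSPP_SOURCE_TSIF0,
			.mode = TSPP_TSIF_MODE_2,	/* with sync signal */
		};
		struct tspp_filter filter = {
			.pid = 0x100,			/* illustrative PID */
			.mask = 0x1FFF,
			.mode = TSPP_MODE_RAW,
			.priority = 0,
			.source = TSPP_SOURCE_TSIF0,
		};
		const struct tspp_data_descriptor *desc;
		int ret;

		ret = tspp_open_channel(0, 0);
		if (ret)
			return ret;
		ret = tspp_open_stream(0, 0, &src);
		if (ret)
			goto close_channel;
		ret = tspp_add_filter(0, 0, &filter);
		if (ret)
			goto close_stream;
		ret = tspp_register_notification(0, 0, demux_notify, NULL, 0);
		if (ret)
			goto remove_filter;

		/* Consume ready buffers, then hand them back to TSPP. */
		while ((desc = tspp_get_buffer(0, 0)) != NULL)
			tspp_release_buffer(0, 0, desc->id);

		tspp_unregister_notification(0, 0);
	remove_filter:
		tspp_remove_filter(0, 0, &filter);
	close_stream:
		tspp_close_stream(0, 0);
	close_channel:
		tspp_close_channel(0, 0);
		return ret;
	}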
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9e7ab05..864c7d7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -177,11 +177,26 @@
 extern u64 nr_running_integral(unsigned int cpu);
 #endif
 
+#ifdef CONFIG_SMP
 extern void sched_update_nr_prod(int cpu, long delta, bool inc);
 extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
 				     unsigned int *max_nr,
 				     unsigned int *big_max_nr);
 extern unsigned int sched_get_cpu_util(int cpu);
+#else
+static inline void sched_update_nr_prod(int cpu, long delta, bool inc)
+{
+}
+static inline void sched_get_nr_running_avg(int *avg, int *iowait_avg,
+				int *big_avg, unsigned int *max_nr,
+				unsigned int *big_max_nr)
+{
+}
+static inline unsigned int sched_get_cpu_util(int cpu)
+{
+	return 0;
+}
+#endif
 
 extern void calc_global_load(unsigned long ticks);
 
@@ -3905,6 +3920,7 @@
 #define SCHED_CPUFREQ_DL	(1U << 1)
 #define SCHED_CPUFREQ_IOWAIT	(1U << 2)
 #define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)
+#define SCHED_CPUFREQ_WALT (1U << 4)
 
 #define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
 
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 190bf3b..f0ba8e6 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -22,6 +22,8 @@
 extern unsigned int sysctl_sched_sync_hint_enable;
 extern unsigned int sysctl_sched_initial_task_util;
 extern unsigned int sysctl_sched_cstate_aware;
+extern unsigned int sysctl_sched_capacity_margin;
+extern unsigned int sysctl_sched_capacity_margin_down;
 #ifdef CONFIG_SCHED_WALT
 extern unsigned int sysctl_sched_use_walt_cpu_util;
 extern unsigned int sysctl_sched_use_walt_task_util;
@@ -33,6 +35,8 @@
 extern unsigned int sysctl_sched_use_walt_cpu_util;
 extern unsigned int sysctl_sched_use_walt_task_util;
 extern unsigned int sysctl_sched_boost;
+extern unsigned int sysctl_sched_group_upmigrate_pct;
+extern unsigned int sysctl_sched_group_downmigrate_pct;
 #endif
 
 #ifdef CONFIG_SCHED_HMP
@@ -53,8 +57,6 @@
 extern unsigned int sysctl_sched_spill_load_pct;
 extern unsigned int sysctl_sched_upmigrate_pct;
 extern unsigned int sysctl_sched_downmigrate_pct;
-extern unsigned int sysctl_sched_group_upmigrate_pct;
-extern unsigned int sysctl_sched_group_downmigrate_pct;
 extern unsigned int sysctl_early_detection_duration;
 extern unsigned int sysctl_sched_small_wakee_task_load_pct;
 extern unsigned int sysctl_sched_big_waker_task_load_pct;
@@ -67,6 +69,14 @@
 extern unsigned int sysctl_sched_prefer_sync_wakee_to_waker;
 extern unsigned int sysctl_sched_short_burst;
 extern unsigned int sysctl_sched_short_sleep;
+
+#elif defined(CONFIG_SCHED_WALT)
+
+extern int
+walt_proc_update_handler(struct ctl_table *table, int write,
+			 void __user *buffer, size_t *lenp,
+			 loff_t *ppos);
+
 #endif /* CONFIG_SCHED_HMP */
 
 enum sched_tunable_scaling {
@@ -148,6 +158,10 @@
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos);
 
+extern int sched_updown_migrate_handler(struct ctl_table *table,
+					int write, void __user *buffer,
+					size_t *lenp, loff_t *ppos);
+
 extern int sysctl_numa_balancing(struct ctl_table *table, int write,
 				 void __user *buffer, size_t *lenp,
 				 loff_t *ppos);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index adf4e51..8f84c84 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -59,6 +59,9 @@
 				      void __user *, size_t *, loff_t *);
 extern int proc_do_large_bitmap(struct ctl_table *, int,
 				void __user *, size_t *, loff_t *);
+extern int proc_douintvec_capacity(struct ctl_table *table, int write,
+				   void __user *buffer, size_t *lenp,
+				   loff_t *ppos);
 
 /*
  * Register a set of sysctl names by calling register_sysctl_table
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index e880054..2c225d4 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -29,7 +29,6 @@
  */
 struct tk_read_base {
 	struct clocksource	*clock;
-	cycle_t			(*read)(struct clocksource *cs);
 	cycle_t			mask;
 	cycle_t			cycle_last;
 	u32			mult;
@@ -58,7 +57,7 @@
  *			interval.
  * @xtime_remainder:	Shifted nano seconds left over when rounding
  *			@cycle_interval
- * @raw_interval:	Raw nano seconds accumulated per NTP interval.
+ * @raw_interval:	Shifted raw nano seconds accumulated per NTP interval.
  * @ntp_error:		Difference between accumulated time and NTP time in ntp
  *			shifted nano seconds.
  * @ntp_error_shift:	Shift conversion between clock shifted nano seconds and
@@ -100,7 +99,7 @@
 	cycle_t			cycle_interval;
 	u64			xtime_interval;
 	s64			xtime_remainder;
-	u32			raw_interval;
+	u64			raw_interval;
 	/* The ntp_tick_length() value currently being used.
 	 * This cached copy ensures we consistently apply the tick
 	 * length for an entire tick, as ntp_tick_length may change
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index bb5a21c..623b6f0 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -20,7 +20,7 @@
 #include <linux/msm_ion.h>
 #include <uapi/media/msm_vidc.h>
 
-#define HAL_BUFFER_MAX 0xb
+#define HAL_BUFFER_MAX 0xd
 
 enum smem_type {
 	SMEM_ION,
diff --git a/include/net/cnss_utils.h b/include/net/cnss_utils.h
new file mode 100644
index 0000000..6ff0fd0
--- /dev/null
+++ b/include/net/cnss_utils.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_UTILS_H_
+#define _CNSS_UTILS_H_
+
+enum cnss_utils_cc_src {
+	CNSS_UTILS_SOURCE_CORE,
+	CNSS_UTILS_SOURCE_11D,
+	CNSS_UTILS_SOURCE_USER
+};
+
+extern int cnss_utils_set_wlan_unsafe_channel(struct device *dev,
+					      u16 *unsafe_ch_list,
+					      u16 ch_count);
+extern int cnss_utils_get_wlan_unsafe_channel(struct device *dev,
+					      u16 *unsafe_ch_list,
+					      u16 *ch_count, u16 buf_len);
+extern int cnss_utils_wlan_set_dfs_nol(struct device *dev,
+				       const void *info, u16 info_len);
+extern int cnss_utils_wlan_get_dfs_nol(struct device *dev,
+				       void *info, u16 info_len);
+extern int cnss_utils_get_driver_load_cnt(struct device *dev);
+extern void cnss_utils_increment_driver_load_cnt(struct device *dev);
+extern int cnss_utils_set_wlan_mac_address(const u8 *in, uint32_t len);
+extern u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num);
+extern void cnss_utils_set_cc_source(struct device *dev,
+				     enum cnss_utils_cc_src cc_source);
+extern enum cnss_utils_cc_src cnss_utils_get_cc_source(struct device *dev);
+
+#endif
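A hedged sketch of how a WLAN driver might publish and read back the unsafe-channel list through the helpers above; the channel values and array sizes are illustrative:

	static int wlan_sync_unsafe_channels(struct device *dev)
	{
		u16 unsafe[] = { 52, 56, 60 };		/* illustrative channels */
		u16 readback[16];
		u16 count = 0;
		int ret;

		ret = cnss_utils_set_wlan_unsafe_channel(dev, unsafe,
							 ARRAY_SIZE(unsafe));
		if (ret)
			return ret;

		ret = cnss_utils_get_wlan_unsafe_channel(dev, readback, &count,
							 sizeof(readback));
		if (ret)
			return ret;

		pr_debug("%u unsafe channels stored\n", count);
		return 0;
	}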
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 91afb4a..615ce0a 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -776,6 +776,11 @@
 {
 	u32 hash;
 
+	/* @flowlabel may include more than a flow label, e.g., the traffic class.
+	 * Here we want only the flow label value.
+	 */
+	flowlabel &= IPV6_FLOWLABEL_MASK;
+
 	if (flowlabel ||
 	    net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
 	    (!autolabel &&
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 549cb84..3527c35 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -77,13 +77,6 @@
 	struct icnss_shadow_reg_cfg *shadow_reg_cfg;
 };
 
-/* MSA Memory Regions Information */
-struct icnss_mem_region_info {
-	uint64_t reg_addr;
-	uint32_t size;
-	uint8_t secure_flag;
-};
-
 /* driver modes */
 enum icnss_driver_mode {
 	ICNSS_MISSION,
diff --git a/include/soc/qcom/pm.h b/include/soc/qcom/pm.h
index a82ada6..58d011e 100644
--- a/include/soc/qcom/pm.h
+++ b/include/soc/qcom/pm.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
  * Author: San Mehat <san@android.com>
  *
  * This software is licensed under the terms of the GNU General Public
@@ -70,16 +70,6 @@
 };
 
 /**
- * lpm_cpu_pre_pc_cb(): API to get the L2 flag to pass to TZ
- *
- * @cpu: cpuid of the CPU going down.
- *
- * Returns the l2 flush flag enum that is passed down to TZ during power
- * collaps
- */
-enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu);
-
-/**
  * msm_pm_sleep_mode_allow() - API to determine if sleep mode is allowed.
  * @cpu:	CPU on which to check for the sleep mode.
  * @mode:	Sleep Mode to check for.
diff --git a/include/sound/wcd-dsp-mgr.h b/include/sound/wcd-dsp-mgr.h
index 2beb9b3..8a4c6d9 100644
--- a/include/sound/wcd-dsp-mgr.h
+++ b/include/sound/wcd-dsp-mgr.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,6 +15,7 @@
 #define __WCD_DSP_MGR_H__
 
 #include <linux/types.h>
+#include <linux/device.h>
 
 /*
  * These enums correspond to the component types
@@ -63,6 +64,9 @@
 	/* Suspend/Resume related */
 	WDSP_EVENT_SUSPEND,
 	WDSP_EVENT_RESUME,
+
+	/* Misc */
+	WDSP_EVENT_GET_DEVOPS
 };
 
 enum wdsp_signal {
@@ -109,6 +113,8 @@
  *			their own ops to manager driver
  * @get_dev_for_cmpnt: components can use this to get handle
  *		       to struct device * of any other component
+ * @get_devops_for_cmpnt: components can use this to get ops
+ *			  from other related components.
  * @signal_handler: callback to notify manager driver that signal
  *		    has occurred. Cannot be called from interrupt
  *		    context as this can sleep
@@ -126,6 +132,8 @@
 				  struct wdsp_cmpnt_ops *ops);
 	struct device *(*get_dev_for_cmpnt)(struct device *wdsp_dev,
 					    enum wdsp_cmpnt_type type);
+	int (*get_devops_for_cmpnt)(struct device *wdsp_dev,
+				    enum wdsp_cmpnt_type type, void *data);
 	int (*signal_handler)(struct device *wdsp_dev,
 			      enum wdsp_signal signal, void *arg);
 	int (*vote_for_dsp)(struct device *wdsp_dev, bool vote);
@@ -133,4 +141,6 @@
 	int (*resume)(struct device *wdsp_dev);
 };
 
+int wcd_dsp_mgr_init(void);
+void wcd_dsp_mgr_exit(void);
 #endif /* end of __WCD_DSP_MGR_H__ */
diff --git a/include/sound/wcd-spi.h b/include/sound/wcd-spi.h
index 1fff58d..b85c68e 100644
--- a/include/sound/wcd-spi.h
+++ b/include/sound/wcd-spi.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,23 +35,10 @@
 	u32 flags;
 };
 
-#ifdef CONFIG_SND_SOC_WCD_SPI
-
-int wcd_spi_data_write(struct spi_device *spi, struct wcd_spi_msg *msg);
-int wcd_spi_data_read(struct spi_device *spi, struct wcd_spi_msg *msg);
-
-#else
-
-int wcd_spi_data_write(struct spi_device *spi, struct wcd_spi_msg *msg)
-{
-	return -ENODEV;
-}
-
-int wcd_spi_data_read(struct spi_device *spi, struct wcd_spi_msg *msg)
-{
-	return -ENODEV;
-}
-
-#endif /* End of CONFIG_SND_SOC_WCD_SPI */
+struct wcd_spi_ops {
+	struct spi_device *spi_dev;
+	int (*read_dev)(struct spi_device *spi, struct wcd_spi_msg *msg);
+	int (*write_dev)(struct spi_device *spi, struct wcd_spi_msg *msg);
+};
 
 #endif /* End of __WCD_SPI_H__ */
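With the direct entry points replaced by an ops table, the SPI driver is expected to populate struct wcd_spi_ops for its client (the hand-off path, e.g. via WDSP_EVENT_GET_DEVOPS, depends on the rest of the series). A hedged sketch with illustrative function names:

	static int my_spi_read(struct spi_device *spi, struct wcd_spi_msg *msg)
	{
		/* Illustrative: perform the SPI read described by msg. */
		return 0;
	}

	static int my_spi_write(struct spi_device *spi, struct wcd_spi_msg *msg)
	{
		/* Illustrative: perform the SPI write described by msg. */
		return 0;
	}

	static void my_fill_spi_ops(struct spi_device *spi, struct wcd_spi_ops *ops)
	{
		ops->spi_dev = spi;
		ops->read_dev = my_spi_read;
		ops->write_dev = my_spi_write;
	}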
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 0c68ae22..33dfa76 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -50,6 +50,33 @@
 	TP_printk("skbaddr=%p", __entry->skbaddr)
 );
 
+TRACE_EVENT(print_skb_gso,
+
+	TP_PROTO(struct sk_buff *skb, __be16 src, __be16 dest),
+
+	TP_ARGS(skb, src, dest),
+
+	TP_STRUCT__entry(
+		__field(void *,	skbaddr)
+		__field(int, len)
+		__field(int, data_len)
+		__field(__be16, src)
+		__field(__be16, dest)
+	),
+
+	TP_fast_assign(
+		__entry->skbaddr = skb;
+		__entry->len = skb->len;
+		__entry->data_len = skb->data_len;
+		__entry->src = src;
+		__entry->dest = dest;
+	),
+
+	TP_printk("GSO: skbaddr=%pK, len=%d, data_len=%d, src=%u, dest=%u",
+		__entry->skbaddr, __entry->len, __entry->data_len,
+		be16_to_cpu(__entry->src), be16_to_cpu(__entry->dest))
+);
+
 TRACE_EVENT(skb_copy_datagram_iovec,
 
 	TP_PROTO(const struct sk_buff *skb, int len),
diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h
index d9155a9..7945af0 100644
--- a/include/uapi/drm/msm_drm_pp.h
+++ b/include/uapi/drm/msm_drm_pp.h
@@ -281,4 +281,26 @@
 	__u32 cfg_param_053;
 };
 
+#define DITHER_MATRIX_SZ 16
+
+/**
+ * struct drm_msm_dither - dither feature structure
+ * @flags: for customizing operations
+ * @temporal_en: temporal dither enable
+ * @c0_bitdepth: c0 component bit depth
+ * @c1_bitdepth: c1 component bit depth
+ * @c2_bitdepth: c2 component bit depth
+ * @c3_bitdepth: c3 component bit depth
+ * @matrix: dither strength matrix
+ */
+struct drm_msm_dither {
+	__u64 flags;
+	__u32 temporal_en;
+	__u32 c0_bitdepth;
+	__u32 c1_bitdepth;
+	__u32 c2_bitdepth;
+	__u32 c3_bitdepth;
+	__u32 matrix[DITHER_MATRIX_SZ];
+};
+
 #endif /* _MSM_DRM_PP_H_ */
diff --git a/include/uapi/drm/sde_drm.h b/include/uapi/drm/sde_drm.h
index 44b42a6..439a925 100644
--- a/include/uapi/drm/sde_drm.h
+++ b/include/uapi/drm/sde_drm.h
@@ -67,6 +67,48 @@
 #define SDE_DRM_BITMASK_COUNT       64
 
 /**
+ * Framebuffer modes for "fb_translation_mode" PLANE property
+ *
+ * @SDE_DRM_FB_NON_SEC:          IOMMU configuration for this framebuffer mode
+ *                               is non-secure domain and requires
+ *                               both stage I and stage II translations when
+ *                               this buffer is accessed by the display HW.
+ *                               This is the default mode of all framebuffers.
+ * @SDE_DRM_FB_SEC:              IOMMU configuration for this framebuffer mode
+ *                               is secure domain and requires
+ *                               both stage I and stage II translations when
+ *                               this buffer is accessed by the display HW.
+ * @SDE_DRM_FB_NON_SEC_DIR_TRANS: IOMMU configuration for this framebuffer mode
+ *                               is non-secure domain and requires
+ *                               only stage II translation when
+ *                               this buffer is accessed by the display HW.
+ * @SDE_DRM_FB_SEC_DIR_TRANS:    IOMMU configuration for this framebuffer mode
+ *                               is secure domain and requires
+ *                               only stage II translation when
+ *                               this buffer is accessed by the display HW.
+ */
+
+#define SDE_DRM_FB_NON_SEC              0
+#define SDE_DRM_FB_SEC                  1
+#define SDE_DRM_FB_NON_SEC_DIR_TRANS    2
+#define SDE_DRM_FB_SEC_DIR_TRANS        3
+
+/**
+ * Secure levels for "security_level" CRTC property.
+ *                        CRTC property which specifies what plane types
+ *                        can be attached to this CRTC. Plane component
+ *                        derives the plane type based on the FB_MODE.
+ * @SDE_DRM_SEC_NON_SEC:  Both Secure and non-secure plane types can be
+ *                        attached to this CRTC. This is the default state of
+ *                        the CRTC.
+ * @SDE_DRM_SEC_ONLY:     Only secure planes can be added to this CRTC. If a
+ *                        CRTC is instructed to be in this mode it follows the
+ *                        platform dependent restrictions.
+ */
+#define SDE_DRM_SEC_NON_SEC            0
+#define SDE_DRM_SEC_ONLY               1
+
+/**
  * struct sde_drm_pix_ext_v1 - version 1 of pixel ext structure
  * @num_ext_pxls_lr: Number of total horizontal pixels
  * @num_ext_pxls_tb: Number of total vertical lines
diff --git a/include/uapi/linux/dvb/dmx.h b/include/uapi/linux/dvb/dmx.h
index 427e489..175534a 100644
--- a/include/uapi/linux/dvb/dmx.h
+++ b/include/uapi/linux/dvb/dmx.h
@@ -32,6 +32,11 @@
 
 #define DMX_FILTER_SIZE 16
 
+/* Min recording chunk upon which event is generated */
+#define DMX_REC_BUFF_CHUNK_MIN_SIZE		(100*188)
+
+#define DMX_MAX_DECODER_BUFFER_NUM		(32)
+
 enum dmx_output
 {
 	DMX_OUT_DECODER, /* Streaming directly to decoder. */
@@ -108,6 +113,44 @@
 #define DMX_KERNEL_CLIENT   0x8000
 };
 
+enum dmx_video_codec {
+	DMX_VIDEO_CODEC_MPEG2,
+	DMX_VIDEO_CODEC_H264,
+	DMX_VIDEO_CODEC_VC1
+};
+
+/* Index entries types */
+#define DMX_IDX_RAI                         0x00000001
+#define DMX_IDX_PUSI                        0x00000002
+#define DMX_IDX_MPEG_SEQ_HEADER             0x00000004
+#define DMX_IDX_MPEG_GOP                    0x00000008
+#define DMX_IDX_MPEG_FIRST_SEQ_FRAME_START  0x00000010
+#define DMX_IDX_MPEG_FIRST_SEQ_FRAME_END    0x00000020
+#define DMX_IDX_MPEG_I_FRAME_START          0x00000040
+#define DMX_IDX_MPEG_I_FRAME_END            0x00000080
+#define DMX_IDX_MPEG_P_FRAME_START          0x00000100
+#define DMX_IDX_MPEG_P_FRAME_END            0x00000200
+#define DMX_IDX_MPEG_B_FRAME_START          0x00000400
+#define DMX_IDX_MPEG_B_FRAME_END            0x00000800
+#define DMX_IDX_H264_SPS                    0x00001000
+#define DMX_IDX_H264_PPS                    0x00002000
+#define DMX_IDX_H264_FIRST_SPS_FRAME_START  0x00004000
+#define DMX_IDX_H264_FIRST_SPS_FRAME_END    0x00008000
+#define DMX_IDX_H264_IDR_START              0x00010000
+#define DMX_IDX_H264_IDR_END                0x00020000
+#define DMX_IDX_H264_NON_IDR_START          0x00040000
+#define DMX_IDX_H264_NON_IDR_END            0x00080000
+#define DMX_IDX_VC1_SEQ_HEADER              0x00100000
+#define DMX_IDX_VC1_ENTRY_POINT             0x00200000
+#define DMX_IDX_VC1_FIRST_SEQ_FRAME_START   0x00400000
+#define DMX_IDX_VC1_FIRST_SEQ_FRAME_END     0x00800000
+#define DMX_IDX_VC1_FRAME_START             0x01000000
+#define DMX_IDX_VC1_FRAME_END               0x02000000
+#define DMX_IDX_H264_ACCESS_UNIT_DEL        0x04000000
+#define DMX_IDX_H264_SEI                    0x08000000
+#define DMX_IDX_H264_IDR_ISLICE_START       0x10000000
+#define DMX_IDX_H264_NON_IDR_PSLICE_START   0x20000000
+#define DMX_IDX_H264_NON_IDR_BSLICE_START   0x40000000
 
 struct dmx_pes_filter_params
 {
@@ -116,11 +159,457 @@
 	dmx_output_t   output;
 	dmx_pes_type_t pes_type;
 	__u32          flags;
+
+	/*
+	 * The following configures when the event
+	 * DMX_EVENT_NEW_REC_CHUNK will be triggered.
+	 * When new recorded data is received with size
+	 * equal to or larger than this value, a new event
+	 * will be triggered. This is relevant when
+	 * output is DMX_OUT_TS_TAP or DMX_OUT_TSDEMUX_TAP;
+	 * size must be at least DMX_REC_BUFF_CHUNK_MIN_SIZE
+	 * and smaller than buffer size.
+	 */
+	__u32          rec_chunk_size;
+
+	enum dmx_video_codec video_codec;
+};
+
+struct dmx_buffer_status {
+	/* size of buffer in bytes */
+	unsigned int size;
+
+	/* fullness of buffer in bytes */
+	unsigned int fullness;
+
+	/*
+	 * How many bytes are free
+	 * It's the same as: size-fullness-1
+	 */
+	unsigned int free_bytes;
+
+	/* read pointer offset in bytes */
+	unsigned int read_offset;
+
+	/* write pointer offset in bytes */
+	unsigned int write_offset;
+
+	/* non-zero if data error occurred */
+	int error;
+};
+
+/* Events associated with each demux filter */
+enum dmx_event {
+	/* New PES packet is ready to be consumed */
+	DMX_EVENT_NEW_PES = 0x00000001,
+
+	/* New section is ready to be consumed */
+	DMX_EVENT_NEW_SECTION = 0x00000002,
+
+	/* New recording chunk is ready to be consumed */
+	DMX_EVENT_NEW_REC_CHUNK = 0x00000004,
+
+	/* New PCR value is ready */
+	DMX_EVENT_NEW_PCR = 0x00000008,
+
+	/* Overflow */
+	DMX_EVENT_BUFFER_OVERFLOW = 0x00000010,
+
+	/* Section was dropped due to CRC error */
+	DMX_EVENT_SECTION_CRC_ERROR = 0x00000020,
+
+	/* End-of-stream, no more data from this filter */
+	DMX_EVENT_EOS = 0x00000040,
+
+	/* New Elementary Stream data is ready */
+	DMX_EVENT_NEW_ES_DATA = 0x00000080,
+
+	/* Data markers */
+	DMX_EVENT_MARKER = 0x00000100,
+
+	/* New indexing entry is ready */
+	DMX_EVENT_NEW_INDEX_ENTRY = 0x00000200,
+
+	/*
+	 * Section filter timer expired. This is notified
+	 * when timeout is configured to section filter
+	 * (dmx_sct_filter_params) and no sections were
+	 * received for the given time.
+	 */
+	DMX_EVENT_SECTION_TIMEOUT = 0x00000400,
+
+	/* Scrambling bits change between clear and scrambled */
+	DMX_EVENT_SCRAMBLING_STATUS_CHANGE = 0x00000800
+};
+
+enum dmx_oob_cmd {
+	/* End-of-stream, no more data from this filter */
+	DMX_OOB_CMD_EOS,
+
+	/* Data markers */
+	DMX_OOB_CMD_MARKER,
+};
+
+/* Flags passed in filter events */
+
+/* Continuity counter error was detected */
+#define DMX_FILTER_CC_ERROR			0x01
+
+/* Discontinuity indicator was set */
+#define DMX_FILTER_DISCONTINUITY_INDICATOR	0x02
+
+/* PES length in PES header is not correct */
+#define DMX_FILTER_PES_LENGTH_ERROR		0x04
+
+
+/* PES info associated with DMX_EVENT_NEW_PES event */
+struct dmx_pes_event_info {
+	/* Offset at which PES information starts */
+	__u32 base_offset;
+
+	/*
+	 * Start offset at which PES data
+	 * from the stream starts.
+	 * Equal to base_offset if PES data
+	 * starts from the beginning.
+	 */
+	__u32 start_offset;
+
+	/* Total length holding the PES information */
+	__u32 total_length;
+
+	/* Actual length holding the PES data */
+	__u32 actual_length;
+
+	/* Local receiver timestamp in 27MHz */
+	__u64 stc;
+
+	/* Flags passed in filter events */
+	__u32 flags;
+
+	/*
+	 * Number of TS packets with Transport Error Indicator (TEI)
+	 * found while constructing the PES.
+	 */
+	__u32 transport_error_indicator_counter;
+
+	/* Number of continuity errors found while constructing the PES */
+	__u32 continuity_error_counter;
+
+	/* Total number of TS packets holding the PES */
+	__u32 ts_packets_num;
+};
+
+/* Section info associated with DMX_EVENT_NEW_SECTION event */
+struct dmx_section_event_info {
+	/* Offset at which section information starts */
+	__u32 base_offset;
+
+	/*
+	 * Start offset at which section data
+	 * from the stream starts.
+	 * Equal to base_offset if section data
+	 * starts from the beginning.
+	 */
+	__u32 start_offset;
+
+	/* Total length holding the section information */
+	__u32 total_length;
+
+	/* Actual length holding the section data */
+	__u32 actual_length;
+
+	/* Flags passed in filter events */
+	__u32 flags;
+};
+
+/* Recording info associated with DMX_EVENT_NEW_REC_CHUNK event */
+struct dmx_rec_chunk_event_info {
+	/* Offset at which recording chunk starts */
+	__u32 offset;
+
+	/* Size of recording chunk in bytes */
+	__u32 size;
+};
+
+/* PCR info associated with DMX_EVENT_NEW_PCR event */
+struct dmx_pcr_event_info {
+	/* Local timestamp in 27MHz
+	 * when PCR packet was received
+	 */
+	__u64 stc;
+
+	/* PCR value in 27MHz */
+	__u64 pcr;
+
+	/* Flags passed in filter events */
+	__u32 flags;
+};
+
+/*
+ * Elementary stream data information associated
+ * with DMX_EVENT_NEW_ES_DATA event
+ */
+struct dmx_es_data_event_info {
+	/* Buffer user-space handle */
+	int buf_handle;
+
+	/*
+	 * Cookie to provide when releasing the buffer
+	 * using the DMX_RELEASE_DECODER_BUFFER ioctl command
+	 */
+	int cookie;
+
+	/* Offset of data from the beginning of the buffer */
+	__u32 offset;
+
+	/* Length of data in buffer (in bytes) */
+	__u32 data_len;
+
+	/* Indication whether PTS value is valid */
+	int pts_valid;
+
+	/* PTS value associated with the buffer */
+	__u64 pts;
+
+	/* Indication whether DTS value is valid */
+	int dts_valid;
+
+	/* DTS value associated with the buffer */
+	__u64 dts;
+
+	/* STC value associated with the buffer in 27MHz */
+	__u64 stc;
+
+	/*
+	 * Number of TS packets with Transport Error Indicator (TEI) set
+	 * in the TS packet header since last reported event
+	 */
+	__u32 transport_error_indicator_counter;
+
+	/* Number of continuity errors since last reported event */
+	__u32 continuity_error_counter;
+
+	/* Total number of TS packets processed since last reported event */
+	__u32 ts_packets_num;
+
+	/*
+	 * Number of dropped bytes due to insufficient buffer space,
+	 * since last reported event
+	 */
+	__u32 ts_dropped_bytes;
+};
+
+/* Marker details associated with DMX_EVENT_MARKER event */
+struct dmx_marker_event_info {
+	/* Marker id */
+	__u64 id;
+};
+
+/* Indexing information associated with DMX_EVENT_NEW_INDEX_ENTRY event */
+struct dmx_index_event_info {
+	/* Index entry type, one of DMX_IDX_* */
+	__u64 type;
+
+	/*
+	 * The PID the index entry belongs to.
+	 * In the case of a recording filter, multiple PIDs may exist in the same
+	 * filter (through the DMX_ADD_PID ioctl) and each can be indexed separately.
+	 */
+	__u16 pid;
+
+	/*
+	 * The TS packet number in the recorded data at which
+	 * the indexing event is found.
+	 */
+	__u64 match_tsp_num;
+
+	/*
+	 * The TS packet number in the recorded data preceding
+	 * match_tsp_num and has PUSI set.
+	 */
+	__u64 last_pusi_tsp_num;
+
+	/* STC associated with match_tsp_num, in 27MHz */
+	__u64 stc;
+};
+
+/* Scrambling information associated with DMX_EVENT_SCRAMBLING_STATUS_CHANGE */
+struct dmx_scrambling_status_event_info {
+	/*
+	 * The PID whose scrambling bit status changed.
+	 * In case of recording filter, multiple PIDs may exist in the same
+	 * filter through DMX_ADD_PID ioctl, and each may have a
+	 * different scrambling bits status.
+	 */
+	__u16 pid;
+
+	/* old value of scrambling bits */
+	__u8 old_value;
+
+	/* new value of scrambling bits */
+	__u8 new_value;
+};
+
+/*
+ * Filter's event returned through DMX_GET_EVENT.
+ * A poll() with POLLPRI blocks until events are available.
+ */
+struct dmx_filter_event {
+	enum dmx_event type;
+
+	union {
+		struct dmx_pes_event_info pes;
+		struct dmx_section_event_info section;
+		struct dmx_rec_chunk_event_info recording_chunk;
+		struct dmx_pcr_event_info pcr;
+		struct dmx_es_data_event_info es_data;
+		struct dmx_marker_event_info marker;
+		struct dmx_index_event_info index;
+		struct dmx_scrambling_status_event_info scrambling_status;
+	} params;
+};
+
+/* Filter's buffer requirement returned in dmx_caps */
+struct dmx_buffer_requirement {
+	/* Buffer size alignment, 0 means no special requirement */
+	__u32 size_alignment;
+
+	/* Maximum buffer size allowed */
+	__u32 max_size;
+
+	/* Maximum number of linear buffers handled by demux */
+	__u32 max_buffer_num;
+
+	/* Feature support bitmap as detailed below */
+	__u32 flags;
+
+/* Buffer must be allocated as physically contiguous memory */
+#define DMX_BUFFER_CONTIGUOUS_MEM		0x1
+
+/* If the filter's data is decrypted, the buffer should be a secured one */
+#define DMX_BUFFER_SECURED_IF_DECRYPTED		0x2
+
+/* Buffer can be allocated externally */
+#define DMX_BUFFER_EXTERNAL_SUPPORT		0x4
+
+/* Buffer can be allocated internally */
+#define DMX_BUFFER_INTERNAL_SUPPORT		0x8
+
+/* Filter output can be directed to a linear buffer group */
+#define DMX_BUFFER_LINEAR_GROUP_SUPPORT		0x10
+
+/* Buffer may be allocated as cached buffer */
+#define DMX_BUFFER_CACHED		0x20
+};
+
+/* Out-of-band (OOB) command */
+struct dmx_oob_command {
+	enum dmx_oob_cmd type;
+
+	union {
+		struct dmx_marker_event_info marker;
+	} params;
 };
 
 typedef struct dmx_caps {
 	__u32 caps;
+
+/* Indicates whether demux supports playback from memory in pull mode */
+#define DMX_CAP_PULL_MODE				0x01
+
+/* Indicates whether demux supports indexing of recorded video stream */
+#define DMX_CAP_VIDEO_INDEXING			0x02
+
+/* Indicates whether demux supports sending data directly to video decoder */
+#define DMX_CAP_VIDEO_DECODER_DATA		0x04
+
+/* Indicates whether demux supports sending data directly to audio decoder */
+#define DMX_CAP_AUDIO_DECODER_DATA		0x08
+
+/* Indicates whether demux supports sending data directly to subtitle decoder */
+#define DMX_CAP_SUBTITLE_DECODER_DATA	0x10
+
+/* Indicates whether TS insertion is supported */
+#define DMX_CAP_TS_INSERTION	0x20
+
+/* Indicates whether playback from secured input is supported */
+#define DMX_CAP_SECURED_INPUT_PLAYBACK	0x40
+
+/* Indicates whether automatic buffer flush upon overflow is allowed */
+#define DMX_CAP_AUTO_BUFFER_FLUSH	0x80
+
+	/* Number of decoders demux can output data to */
 	int num_decoders;
+
+	/* Number of demux devices */
+	int num_demux_devices;
+
+	/* Max number of PID filters */
+	int num_pid_filters;
+
+	/* Max number of section filters */
+	int num_section_filters;
+
+	/*
+	 * Max number of section filters using same PID,
+	 * 0 if not supported
+	 */
+	int num_section_filters_per_pid;
+
+	/*
+	 * Length of section filter, not including section
+	 * length field (2 bytes).
+	 */
+	int section_filter_length;
+
+	/* Max number of demod-based inputs */
+	int num_demod_inputs;
+
+	/* Max number of memory-based inputs */
+	int num_memory_inputs;
+
+	/* Overall bitrate from all inputs concurrently. Mbit/sec */
+	int max_bitrate;
+
+	/* Max bitrate from single demod input. Mbit/sec */
+	int demod_input_max_bitrate;
+
+	/* Max bitrate from single memory input. Mbit/sec */
+	int memory_input_max_bitrate;
+
+	/* Max number of supported cipher operations per PID */
+	int num_cipher_ops;
+
+	/* Max possible value of STC reported by demux, in 27MHz */
+	__u64 max_stc;
+
+	/*
+	 * For indexing support (DMX_CAP_VIDEO_INDEXING capability) this is
+	 * the max number of video PIDs that can be indexed for a single
+	 * recording filter. If 0, there is no limitation.
+	 */
+	int recording_max_video_pids_indexed;
+
+	struct dmx_buffer_requirement section;
+
+	/* For PES not sent to decoder */
+	struct dmx_buffer_requirement pes;
+
+	/* For PES sent to decoder */
+	struct dmx_buffer_requirement decoder;
+
+	/* Recording buffer for recording of 188 bytes packets */
+	struct dmx_buffer_requirement recording_188_tsp;
+
+	/* Recording buffer for recording of 192 bytes packets */
+	struct dmx_buffer_requirement recording_192_tsp;
+
+	/* DVR input buffer for playback of 188 bytes packets */
+	struct dmx_buffer_requirement playback_188_tsp;
+
+	/* DVR input buffer for playback of 192 bytes packets */
+	struct dmx_buffer_requirement playback_192_tsp;
 } dmx_caps_t;
 
 typedef enum dmx_source {
@@ -134,12 +623,229 @@
 	DMX_SOURCE_DVR3
 } dmx_source_t;
 
+enum dmx_tsp_format_t {
+	DMX_TSP_FORMAT_188 = 0,
+	DMX_TSP_FORMAT_192_TAIL,
+	DMX_TSP_FORMAT_192_HEAD,
+	DMX_TSP_FORMAT_204,
+};
+
+enum dmx_playback_mode_t {
+	/*
+	 * In push mode, if one of the output buffers
+	 * is full, that buffer overflows and the demux
+	 * continues processing the incoming stream.
+	 * This is the default mode. When playing from the frontend,
+	 * this is the only mode that is allowed.
+	 */
+	DMX_PB_MODE_PUSH = 0,
+
+	/*
+	 * In pull mode, if one of the output buffers
+	 * is full, the demux stalls waiting for free space,
+	 * which causes the DVR input buffer fullness
+	 * to accumulate.
+	 * This mode is possible only when playing
+	 * from the DVR.
+	 */
+	DMX_PB_MODE_PULL,
+};
+
 struct dmx_stc {
 	unsigned int num;	/* input : which STC? 0..N */
 	unsigned int base;	/* output: divisor for stc to get 90 kHz clock */
 	__u64 stc;		/* output: stc in 'base'*90 kHz units */
 };
 
+enum dmx_buffer_mode {
+	/*
+	 * demux buffers are allocated internally
+	 * by the demux driver. This is the default mode.
+	 * DMX_SET_BUFFER_SIZE can be used to set the size of
+	 * this buffer.
+	 */
+	DMX_BUFFER_MODE_INTERNAL,
+
+	/*
+	 * demux buffers are allocated externally and provided
+	 * to demux through DMX_SET_BUFFER.
+	 * When this mode is used DMX_SET_BUFFER_SIZE and
+	 * mmap are prohibited.
+	 */
+	DMX_BUFFER_MODE_EXTERNAL,
+};
+
+struct dmx_buffer {
+	unsigned int size;
+	int handle;
+
+	/*
+	 * The following indication is relevant only when setting the
+	 * DVR input buffer. It indicates whether the input buffer
+	 * being set is a secured one or not. Secured (locked) buffers
+	 * are required for playback from secured input. In that case
+	 * the write() syscall is not allowed.
+	 */
+	int is_protected;
+};
+
+struct dmx_decoder_buffers {
+	/*
+	 * Specify if linear buffer support is requested. If set, buffers_num
+	 * must be greater than 1
+	 */
+	int is_linear;
+
+	/*
+	 * Specify the number of external buffers allocated by the user.
+	 * If set to 0, internal buffer allocation is requested.
+	 */
+	__u32 buffers_num;
+
+	/* Specify buffer size, either external or internal */
+	__u32 buffers_size;
+
+	/* Array of externally allocated buffer handles */
+	int handles[DMX_MAX_DECODER_BUFFER_NUM];
+};
+
+struct dmx_secure_mode {
+	/*
+	 * Specifies whether the filter is secure or not.
+	 * The filter should be set as secured if the filter's data *may* include
+	 * encrypted data that would require decryption configured through
+	 * the DMX_SET_CIPHER ioctl. The setting may be done only while the
+	 * filter is in the idle state.
+	 */
+	int is_secured;
+};
+
+struct dmx_cipher_operation {
+	/* Indication whether the operation is encryption or decryption */
+	int encrypt;
+
+	/* The ID of the key used for decryption or encryption */
+	__u32 key_ladder_id;
+};
+
+#define DMX_MAX_CIPHER_OPERATIONS_COUNT	5
+struct dmx_cipher_operations {
+	/*
+	 * The PID to perform the cipher operations on.
+	 * In case of recording filter, multiple PIDs may exist in the same
+	 * filter through DMX_ADD_PID ioctl, and each may have different
+	 * cipher operations.
+	 */
+	__u16 pid;
+
+	/* Total number of operations */
+	__u8 operations_count;
+
+	/*
+	 * Cipher operation to perform on the given PID.
+	 * The operations are performed in the order they are given.
+	 */
+	struct dmx_cipher_operation operations[DMX_MAX_CIPHER_OPERATIONS_COUNT];
+};
+
+struct dmx_events_mask {
+	/*
+	 * Bitmask of events to be disabled (dmx_event).
+	 * Disabled events will not be notified to the user.
+	 * By default all events are enabled except for
+	 * DMX_EVENT_NEW_ES_DATA.
+	 * Overflow event can't be disabled.
+	 */
+	__u32 disable_mask;
+
+	/*
+	 * Bitmask of events that will not wake up the user
+	 * when the user calls poll with the POLLPRI flag.
+	 * Events that are used as a wake-up source should not be
+	 * disabled in disable_mask, or they will not be usable
+	 * as a wake-up source.
+	 * By default all enabled events are set as wake-up events.
+	 * Overflow event can't be disabled as a wake-up source.
+	 */
+	__u32 no_wakeup_mask;
+
+	/*
+	 * Number of ready wake-up events which will trigger
+	 * a wake-up when the user calls poll with the POLLPRI flag.
+	 * Default is set to 1.
+	 */
+	__u32 wakeup_threshold;
+};
+
+struct dmx_indexing_params {
+	/*
+	 * PID to index. In case of recording filter, multiple PIDs
+	 * may exist in the same filter through DMX_ADD_PID ioctl.
+	 * It is assumed that the PID was already added using DMX_ADD_PID
+	 * or an error will be reported.
+	 */
+	__u16 pid;
+
+	/* enable or disable indexing, default is disabled */
+	int enable;
+
+	/* combination of DMX_IDX_* bits */
+	__u64 types;
+};
+
+struct dmx_set_ts_insertion {
+	/*
+	 * Unique identifier managed by the caller.
+	 * This identifier can be used later to remove the
+	 * insertion using DMX_ABORT_TS_INSERTION ioctl.
+	 */
+	__u32 identifier;
+
+	/*
+	 * Repetition time in msec; the minimum allowed value is 25 msec.
+	 * A repetition time of 0 means a one-shot insertion is done.
+	 * Insertion is based on the wall clock.
+	 */
+	__u32 repetition_time;
+
+	/*
+	 * TS packets buffer to be inserted.
+	 * The buffer is inserted as-is into the recording buffer
+	 * without any modification.
+	 * It is advised to set the discontinuity flag in the very
+	 * first TS packet in the buffer.
+	 */
+	const __u8 *ts_packets;
+
+	/*
+	 * Size in bytes of the TS packets buffer to be inserted.
+	 * Should be in multiples of 188 or 192 bytes
+	 * depending on recording filter output format.
+	 */
+	size_t size;
+};
+
+struct dmx_abort_ts_insertion {
+	/*
+	 * Identifier of the insertion buffer previously set
+	 * using DMX_SET_TS_INSERTION.
+	 */
+	__u32 identifier;
+};
+
+struct dmx_scrambling_bits {
+	/*
+	 * The PID whose scrambling bit value is returned.
+	 * In case of recording filter, multiple PIDs may exist in the same
+	 * filter through DMX_ADD_PID ioctl, and each may have a different
+	 * scrambling bits status.
+	 */
+	__u16 pid;
+
+	/* Current value of scrambling bits: 0, 1, 2 or 3 */
+	__u8 value;
+};
+
 #define DMX_START                _IO('o', 41)
 #define DMX_STOP                 _IO('o', 42)
 #define DMX_SET_FILTER           _IOW('o', 43, struct dmx_sct_filter_params)
@@ -151,5 +857,27 @@
 #define DMX_GET_STC              _IOWR('o', 50, struct dmx_stc)
 #define DMX_ADD_PID              _IOW('o', 51, __u16)
 #define DMX_REMOVE_PID           _IOW('o', 52, __u16)
+#define DMX_SET_TS_PACKET_FORMAT _IOW('o', 53, enum dmx_tsp_format_t)
+#define DMX_SET_TS_OUT_FORMAT	 _IOW('o', 54, enum dmx_tsp_format_t)
+#define DMX_SET_DECODER_BUFFER_SIZE	_IO('o', 55)
+#define DMX_GET_BUFFER_STATUS	 _IOR('o', 56, struct dmx_buffer_status)
+#define DMX_RELEASE_DATA		 _IO('o', 57)
+#define DMX_FEED_DATA			 _IO('o', 58)
+#define DMX_SET_PLAYBACK_MODE	 _IOW('o', 59, enum dmx_playback_mode_t)
+#define DMX_GET_EVENT		 _IOR('o', 60, struct dmx_filter_event)
+#define DMX_SET_BUFFER_MODE	 _IOW('o', 61, enum dmx_buffer_mode)
+#define DMX_SET_BUFFER		 _IOW('o', 62, struct dmx_buffer)
+#define DMX_SET_DECODER_BUFFER	 _IOW('o', 63, struct dmx_decoder_buffers)
+#define DMX_REUSE_DECODER_BUFFER _IO('o', 64)
+#define DMX_SET_SECURE_MODE	_IOW('o', 65, struct dmx_secure_mode)
+#define DMX_SET_EVENTS_MASK	_IOW('o', 66, struct dmx_events_mask)
+#define DMX_GET_EVENTS_MASK	_IOR('o', 67, struct dmx_events_mask)
+#define DMX_PUSH_OOB_COMMAND	_IOW('o', 68, struct dmx_oob_command)
+#define DMX_SET_INDEXING_PARAMS _IOW('o', 69, struct dmx_indexing_params)
+#define DMX_SET_TS_INSERTION _IOW('o', 70, struct dmx_set_ts_insertion)
+#define DMX_ABORT_TS_INSERTION _IOW('o', 71, struct dmx_abort_ts_insertion)
+#define DMX_GET_SCRAMBLING_BITS _IOWR('o', 72, struct dmx_scrambling_bits)
+#define DMX_SET_CIPHER _IOW('o', 73, struct dmx_cipher_operations)
+#define DMX_FLUSH_BUFFER _IO('o', 74)
 
 #endif /* _UAPI_DVBDMX_H_ */
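
A minimal user-space sketch (illustrative only, not part of the patch) of how a
client might consume the new filter events: wait with poll(POLLPRI), then fetch
one event with DMX_GET_EVENT. The demux device path (typically something like
/dev/dvb/adapter0/demux0) and the assumption that a section filter was already
configured and started with DMX_SET_FILTER/DMX_START are assumptions for the
example.

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/dvb/dmx.h>

	static int wait_for_section_event(const char *dev)
	{
		struct dmx_filter_event event;
		struct pollfd pfd;
		int fd = open(dev, O_RDWR);

		if (fd < 0)
			return -1;

		/* Filter setup (DMX_SET_FILTER + DMX_START) is assumed here */

		pfd.fd = fd;
		pfd.events = POLLPRI;

		/* POLLPRI blocks until the demux has at least one pending event */
		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI) &&
		    ioctl(fd, DMX_GET_EVENT, &event) == 0 &&
		    event.type == DMX_EVENT_NEW_SECTION)
			printf("section: %u bytes at offset %u\n",
			       event.params.section.actual_length,
			       event.params.section.base_offset);

		close(fd);
		return 0;
	}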
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 48cfe31..939ad08 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -69,8 +69,12 @@
 #define IPA_IOCTL_ADD_FLT_RULE_AFTER 44
 #define IPA_IOCTL_GET_HW_VERSION 45
 #define IPA_IOCTL_ADD_RT_RULE_EXT 46
-#define IPA_IOCTL_NAT_MODIFY_PDN 47
-#define IPA_IOCTL_MAX 48
+#define IPA_IOCTL_ADD_VLAN_IFACE 47
+#define IPA_IOCTL_DEL_VLAN_IFACE 48
+#define IPA_IOCTL_ADD_L2TP_VLAN_MAPPING 49
+#define IPA_IOCTL_DEL_L2TP_VLAN_MAPPING 50
+#define IPA_IOCTL_NAT_MODIFY_PDN 51
+#define IPA_IOCTL_MAX 52
 
 /**
  * max size of the header to be inserted
@@ -127,6 +131,7 @@
 #define IPA_FLT_MAC_SRC_ADDR_802_3	(1ul << 19)
 #define IPA_FLT_MAC_DST_ADDR_802_3	(1ul << 20)
 #define IPA_FLT_MAC_ETHER_TYPE		(1ul << 21)
+#define IPA_FLT_MAC_DST_ADDR_L2TP	(1ul << 22)
 
 /**
  * maximal number of NAT PDNs in the PDN config table
@@ -250,9 +255,12 @@
 
 	IPA_CLIENT_TEST4_PROD			= 70,
 	IPA_CLIENT_TEST4_CONS			= 71,
+
+	/* RESERVED PROD				= 72, */
+	IPA_CLIENT_DUMMY_CONS			= 73
 };
 
-#define IPA_CLIENT_MAX (IPA_CLIENT_TEST4_CONS + 1)
+#define IPA_CLIENT_MAX (IPA_CLIENT_DUMMY_CONS + 1)
 
 #define IPA_CLIENT_IS_APPS_CONS(client) \
 	((client) == IPA_CLIENT_APPS_LAN_CONS || \
@@ -451,7 +459,16 @@
 	IPA_SSR_EVENT_MAX
 };
 
-#define IPA_EVENT_MAX_NUM ((int)IPA_SSR_EVENT_MAX)
+enum ipa_vlan_l2tp_event {
+	ADD_VLAN_IFACE = IPA_SSR_EVENT_MAX,
+	DEL_VLAN_IFACE,
+	ADD_L2TP_VLAN_MAPPING,
+	DEL_L2TP_VLAN_MAPPING,
+	IPA_VLAN_L2TP_EVENT_MAX,
+};
+
+#define IPA_EVENT_MAX_NUM (IPA_VLAN_L2TP_EVENT_MAX)
+#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
 
 /**
  * enum ipa_rm_resource_name - IPA RM clients identification names
@@ -784,8 +801,10 @@
 	IPA_HDR_PROC_ETHII_TO_802_3,
 	IPA_HDR_PROC_802_3_TO_ETHII,
 	IPA_HDR_PROC_802_3_TO_802_3,
+	IPA_HDR_PROC_L2TP_HEADER_ADD,
+	IPA_HDR_PROC_L2TP_HEADER_REMOVE
 };
-#define IPA_HDR_PROC_MAX (IPA_HDR_PROC_802_3_TO_802_3 + 1)
+#define IPA_HDR_PROC_MAX (IPA_HDR_PROC_L2TP_HEADER_REMOVE + 1)
 
 /**
  * struct ipa_rt_rule - attributes of a routing rule
@@ -856,10 +875,45 @@
 };
 
 /**
+ * struct ipa_l2tp_header_add_procparams -
+ * @eth_hdr_retained: Specifies if Ethernet header is retained or not
+ * @input_ip_version: Specifies if input header is IPV4(0) or IPV6(1)
+ * @output_ip_version: Specifies if template header is IPV4(0) or IPV6(1)
+ */
+struct ipa_l2tp_header_add_procparams {
+	uint32_t eth_hdr_retained:1;
+	uint32_t input_ip_version:1;
+	uint32_t output_ip_version:1;
+	uint32_t reserved:29;
+};
+
+/**
+ * struct ipa_l2tp_header_remove_procparams -
+ * @hdr_len_remove: Specifies how much of the header needs to
+ *		be removed in bytes
+ * @eth_hdr_retained: Specifies if Ethernet header is retained or not
+ */
+struct ipa_l2tp_header_remove_procparams {
+	uint32_t hdr_len_remove:8;
+	uint32_t eth_hdr_retained:1;
+	uint32_t reserved:23;
+};
+
+/**
+ * union ipa_l2tp_hdr_proc_ctx_params -
+ * @hdr_add_param: parameters for header add
+ * @hdr_remove_param: parameters for header remove
+ */
+union ipa_l2tp_hdr_proc_ctx_params {
+	struct ipa_l2tp_header_add_procparams hdr_add_param;
+	struct ipa_l2tp_header_remove_procparams hdr_remove_param;
+};
+/**
  * struct ipa_hdr_proc_ctx_add - processing context descriptor includes
  * in and out parameters
  * @type: processing context type
  * @hdr_hdl: in parameter, handle to header
+ * @l2tp_params: l2tp parameters
  * @proc_ctx_hdl: out parameter, handle to proc_ctx, valid when status is 0
  * @status:	out parameter, status of header add operation,
  *		0 for success,
@@ -870,8 +924,11 @@
 	uint32_t hdr_hdl;
 	uint32_t proc_ctx_hdl;
 	int status;
+	union ipa_l2tp_hdr_proc_ctx_params l2tp_params;
 };
 
+#define IPA_L2TP_HDR_PROC_SUPPORT
+
 /**
  * struct ipa_ioc_add_hdr - processing context addition parameters (support
  * multiple processing context and commit)
@@ -1445,6 +1502,30 @@
 };
 
 /**
+ * struct ipa_ioc_vlan_iface_info - add vlan interface
+ * @name: interface name
+ * @vlan_id: VLAN ID
+ */
+struct ipa_ioc_vlan_iface_info {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t vlan_id;
+};
+
+/**
+ * struct ipa_ioc_l2tp_vlan_mapping_info - l2tp->vlan mapping info
+ * @iptype: l2tp tunnel IP type
+ * @l2tp_iface_name: l2tp interface name
+ * @l2tp_session_id: l2tp session id
+ * @vlan_iface_name: vlan interface name
+ */
+struct ipa_ioc_l2tp_vlan_mapping_info {
+	enum ipa_ip_type iptype;
+	char l2tp_iface_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t l2tp_session_id;
+	char vlan_iface_name[IPA_RESOURCE_NAME_MAX];
+};
+
+/**
  * struct ipa_msg_meta - Format of the message meta-data.
  * @msg_type: the type of the message
  * @rsvd: reserved bits for future use.
@@ -1721,6 +1802,21 @@
 				IPA_IOCTL_GET_HW_VERSION, \
 				enum ipa_hw_type *)
 
+#define IPA_IOC_ADD_VLAN_IFACE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_VLAN_IFACE, \
+				struct ipa_ioc_vlan_iface_info *)
+
+#define IPA_IOC_DEL_VLAN_IFACE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_VLAN_IFACE, \
+				struct ipa_ioc_vlan_iface_info *)
+
+#define IPA_IOC_ADD_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_L2TP_VLAN_MAPPING, \
+				struct ipa_ioc_l2tp_vlan_mapping_info *)
+
+#define IPA_IOC_DEL_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_L2TP_VLAN_MAPPING, \
+				struct ipa_ioc_l2tp_vlan_mapping_info *)
 /*
  * unique magic number of the Tethering bridge ioctls
  */
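
A sketch (not part of the patch) of how a user-space agent could register a
VLAN interface through the new IPA_IOC_ADD_VLAN_IFACE ioctl. The /dev/ipa
device node path is an assumption and may differ per target; error handling is
kept minimal.

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/msm_ipa.h>

	static int ipa_add_vlan_iface(const char *ifname, unsigned char vlan_id)
	{
		struct ipa_ioc_vlan_iface_info info;
		int fd, ret;

		memset(&info, 0, sizeof(info));
		/* name is bounded by IPA_RESOURCE_NAME_MAX; keep it NUL terminated */
		strncpy(info.name, ifname, sizeof(info.name) - 1);
		info.vlan_id = vlan_id;

		fd = open("/dev/ipa", O_RDWR);
		if (fd < 0)
			return -1;

		ret = ioctl(fd, IPA_IOC_ADD_VLAN_IFACE, &info);
		close(fd);
		return ret;
	}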
diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h
index 8be21e0..d0b5fa9 100644
--- a/include/uapi/linux/netfilter/nf_log.h
+++ b/include/uapi/linux/netfilter/nf_log.h
@@ -9,4 +9,6 @@
 #define NF_LOG_MACDECODE	0x20	/* Decode MAC header */
 #define NF_LOG_MASK		0x2f
 
+#define NF_LOG_PREFIXLEN	128
+
 #endif /* _NETFILTER_NF_LOG_H */
diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h
index 361297e..576c704e 100644
--- a/include/uapi/linux/usb/ch11.h
+++ b/include/uapi/linux/usb/ch11.h
@@ -22,6 +22,9 @@
  */
 #define USB_MAXCHILDREN		31
 
+/* See USB 3.1 spec Table 10-5 */
+#define USB_SS_MAXPORTS		15
+
 /*
  * Hub request types
  */
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index cf96ac1..e5c4ddf 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -779,6 +779,9 @@
 #define V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE \
 	V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE
 	V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE = 30,
+#define V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO \
+	V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO
+	V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO = 31,
 };
 
 #define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_DELIVERY_MODE	\
diff --git a/include/uapi/media/msm_vidc.h b/include/uapi/media/msm_vidc.h
index 038dd48..4fe325d 100644
--- a/include/uapi/media/msm_vidc.h
+++ b/include/uapi/media/msm_vidc.h
@@ -170,6 +170,16 @@
 	unsigned int data[1];
 };
 
+struct msm_vidc_ubwc_cr_stats_info {
+	unsigned int stats_tile_32;
+	unsigned int stats_tile_64;
+	unsigned int stats_tile_96;
+	unsigned int stats_tile_128;
+	unsigned int stats_tile_160;
+	unsigned int stats_tile_192;
+	unsigned int stats_tile_256;
+};
+
 struct msm_vidc_yuv_stats_payload {
 	unsigned int frame_qp;
 	unsigned int texture;
@@ -250,6 +260,12 @@
 #define MSM_VIDC_EXTRADATA_PQ_INFO \
 	MSM_VIDC_EXTRADATA_PQ_INFO
 	MSM_VIDC_EXTRADATA_PQ_INFO = 0x00000017,
+#define MSM_VIDC_EXTRADATA_COLOUR_REMAPPING_INFO_SEI \
+	MSM_VIDC_EXTRADATA_COLOUR_REMAPPING_INFO_SEI
+	MSM_VIDC_EXTRADATA_COLOUR_REMAPPING_INFO_SEI = 0x00000018,
+#define MSM_VIDC_EXTRADATA_UBWC_CR_STAT_INFO \
+	MSM_VIDC_EXTRADATA_UBWC_CR_STAT_INFO
+	MSM_VIDC_EXTRADATA_UBWC_CR_STAT_INFO = 0x00000019,
 	MSM_VIDC_EXTRADATA_INPUT_CROP = 0x0700000E,
 #define MSM_VIDC_EXTRADATA_OUTPUT_CROP \
 	MSM_VIDC_EXTRADATA_OUTPUT_CROP
diff --git a/kernel/Makefile b/kernel/Makefile
index eb26e12c..314e7d6 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -84,6 +84,7 @@
 obj-$(CONFIG_KGDB) += debug/
 obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
 obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o
+obj-$(CONFIG_HARDLOCKUP_DETECTOR) += watchdog_hld.o
 obj-$(CONFIG_SECCOMP) += seccomp.o
 obj-$(CONFIG_RELAY) += relay.o
 obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index fe158bd..44c17f4 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2164,7 +2164,6 @@
 	case BPF_PROG_TYPE_SOCKET_FILTER:
 	case BPF_PROG_TYPE_SCHED_CLS:
 	case BPF_PROG_TYPE_SCHED_ACT:
-	case BPF_PROG_TYPE_CGROUP_SKB:
 		return true;
 	default:
 		return false;
diff --git a/kernel/configs/android-base-arm64.cfg b/kernel/configs/android-base-arm64.cfg
new file mode 100644
index 0000000..43f23d6
--- /dev/null
+++ b/kernel/configs/android-base-arm64.cfg
@@ -0,0 +1,5 @@
+#  KEEP ALPHABETICALLY SORTED
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_SWP_EMULATION=y
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index fb6017e..301e1a6 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -12,7 +12,6 @@
 CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_ARMV8_DEPRECATED=y
 CONFIG_ASHMEM=y
 CONFIG_AUDIT=y
 CONFIG_BLK_DEV_INITRD=y
@@ -22,7 +21,6 @@
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_CGROUP_BPF=y
-CONFIG_CP15_BARRIER_EMULATION=y
 CONFIG_DEFAULT_SECURITY_SELINUX=y
 CONFIG_EMBEDDED=y
 CONFIG_FB=y
@@ -156,9 +154,7 @@
 CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY_SELINUX=y
-CONFIG_SETEND_EMULATION=y
 CONFIG_STAGING=y
-CONFIG_SWP_EMULATION=y
 CONFIG_SYNC=y
 CONFIG_TUN=y
 CONFIG_UID_SYS_STATS=y
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 78b72d5..3577ec6a 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -886,6 +886,9 @@
 	if (!cpu_present(cpu))
 		return -EINVAL;
 
+	if (!tasks_frozen && !cpu_isolated(cpu) && num_online_uniso_cpus() == 1)
+		return -EBUSY;
+
 	cpu_hotplug_begin();
 
 	cpuhp_tasks_frozen = tasks_frozen;
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index dac3724..007482b 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -36,10 +36,32 @@
 	affinity = &available_cpus;
 
 	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		/*
+		 * The order of preference for selecting a fallback CPU is
+		 *
+		 * (1) online and un-isolated CPU from default affinity
+		 * (2) online and un-isolated CPU
+		 * (3) online CPU
+		 */
 		cpumask_andnot(&available_cpus, cpu_online_mask,
 							cpu_isolated_mask);
-		if (cpumask_empty(affinity))
+		if (cpumask_intersects(&available_cpus, irq_default_affinity))
+			cpumask_and(&available_cpus, &available_cpus,
+							irq_default_affinity);
+		else if (cpumask_empty(&available_cpus))
 			affinity = cpu_online_mask;
+
+		/*
+		 * We are overriding the affinity with all online and
+		 * un-isolated CPUs. The irq_set_affinity_locked() call
+		 * below notifies this mask to the PM QOS affinity listener.
+		 * That results in applying the CPU_DMA_LATENCY QOS
+		 * to all the CPUs specified in the mask. But the low-level
+		 * irqchip driver sets the affinity of an irq
+		 * to only one CPU. So pick only one CPU from the
+		 * prepared mask while overriding the user affinity.
+		 */
+		affinity = cpumask_of(cpumask_any(affinity));
 		ret = true;
 	}
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 4f64490..c1195eb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1311,8 +1311,10 @@
 			ret = __irq_set_trigger(desc,
 						new->flags & IRQF_TRIGGER_MASK);
 
-			if (ret)
+			if (ret) {
+				irq_release_resources(desc);
 				goto out_mask;
+			}
 		}
 
 		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index feaa813..88a02e3 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -114,6 +114,11 @@
 		goto free_cpumask;
 	}
 
+	if (cpumask_subset(new_value, cpu_isolated_mask)) {
+		err = -EINVAL;
+		goto free_cpumask;
+	}
+
 	/*
 	 * Do not allow disabling IRQs completely - it's a too easy
 	 * way to make the system unusable accidentally :-) At least
diff --git a/kernel/kthread.c b/kernel/kthread.c
index b65854c..80bf7ba 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -160,9 +160,11 @@
 {
 	__set_current_state(TASK_PARKED);
 	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
+		preempt_disable();
 		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
 			complete(&self->parked);
-		schedule();
+		schedule_preempt_disabled();
+		preempt_enable();
 		__set_current_state(TASK_PARKED);
 	}
 	clear_bit(KTHREAD_IS_PARKED, &self->flags);
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 0854263..12fe782 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -284,6 +284,9 @@
 				if (req->node.prio > qos_val[cpu])
 					qos_val[cpu] = req->node.prio;
 				break;
+			case PM_QOS_SUM:
+					qos_val[cpu] += req->node.prio;
+				break;
 			default:
 				BUG();
 				break;
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 27a7574..3d12ce8 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -17,9 +17,9 @@
 
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o swait.o completion.o idle.o sched_avg.o
+obj-y += wait.o swait.o completion.o idle.o
 obj-$(CONFIG_SCHED_HMP) += hmp.o boost.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o sched_avg.o
 obj-$(CONFIG_SCHED_WALT) += walt.o boost.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 30a1b34..c2433b3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5787,7 +5787,7 @@
 	BUG_ON(cpu_online(smp_processor_id()));
 
 	if (mm != &init_mm) {
-		switch_mm_irqs_off(mm, &init_mm, current);
+		switch_mm(mm, &init_mm, current);
 		finish_arch_post_lock_switch();
 	}
 	mmdrop(mm);
@@ -8052,6 +8052,20 @@
 	int ret;
 
 	set_cpu_active(cpu, false);
+	/*
+	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
+	 * users of this state to go away such that all new such users will
+	 * observe it.
+	 *
+	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
+	 * not imply sync_sched(), so wait for both.
+	 *
+	 * Do the sync before parking smpboot threads to handle the RCU boost case.
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPT))
+		synchronize_rcu_mult(call_rcu, call_rcu_sched);
+	else
+		synchronize_rcu();
 
 	if (!sched_smp_initialized)
 		return 0;
@@ -9123,6 +9137,32 @@
 	return ret;
 }
 
+#ifdef CONFIG_PROC_SYSCTL
+int sched_updown_migrate_handler(struct ctl_table *table, int write,
+				 void __user *buffer, size_t *lenp,
+				 loff_t *ppos)
+{
+	int ret;
+	unsigned int *data = (unsigned int *)table->data;
+	unsigned int old_val;
+	static DEFINE_MUTEX(mutex);
+
+	mutex_lock(&mutex);
+	old_val = *data;
+
+	ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);
+
+	if (!ret && write &&
+	    sysctl_sched_capacity_margin > sysctl_sched_capacity_margin_down) {
+		ret = -EINVAL;
+		*data = old_val;
+	}
+	mutex_unlock(&mutex);
+
+	return ret;
+}
+#endif
+
 #ifdef CONFIG_CGROUP_SCHED
 
 inline struct task_group *css_tg(struct cgroup_subsys_state *css)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 0a0e9aa..e756b83 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -36,6 +36,10 @@
 	raw_spinlock_t update_lock;  /* For shared policies */
 	u64 last_freq_update_time;
 	s64 freq_update_delay_ns;
+	u64 last_ws;
+	u64 curr_cycles;
+	u64 last_cyc_update_time;
+	unsigned long avg_cap;
 	unsigned int next_freq;
 	unsigned int cached_raw_freq;
 	unsigned long hispeed_util;
@@ -199,19 +203,63 @@
 	sg_cpu->iowait_boost >>= 1;
 }
 
+static unsigned long freq_to_util(struct sugov_policy *sg_policy,
+				  unsigned int freq)
+{
+	return mult_frac(sg_policy->max, freq,
+			 sg_policy->policy->cpuinfo.max_freq);
+}
+
+#define KHZ 1000
+static void sugov_track_cycles(struct sugov_policy *sg_policy,
+				unsigned int prev_freq,
+				u64 upto)
+{
+	u64 delta_ns, cycles;
+	/* Track cycles in current window */
+	delta_ns = upto - sg_policy->last_cyc_update_time;
+	cycles = (prev_freq * delta_ns) / (NSEC_PER_SEC / KHZ);
+	sg_policy->curr_cycles += cycles;
+	sg_policy->last_cyc_update_time = upto;
+}
+
+static void sugov_calc_avg_cap(struct sugov_policy *sg_policy, u64 curr_ws,
+				unsigned int prev_freq)
+{
+	u64 last_ws = sg_policy->last_ws;
+	unsigned int avg_freq;
+
+	WARN_ON(curr_ws < last_ws);
+	if (curr_ws <= last_ws)
+		return;
+
+	/* If we skipped some windows */
+	if (curr_ws > (last_ws + sched_ravg_window)) {
+		avg_freq = prev_freq;
+		/* Reset tracking history */
+		sg_policy->last_cyc_update_time = curr_ws;
+	} else {
+		sugov_track_cycles(sg_policy, prev_freq, curr_ws);
+		avg_freq = sg_policy->curr_cycles;
+		avg_freq /= sched_ravg_window / (NSEC_PER_SEC / KHZ);
+	}
+	sg_policy->avg_cap = freq_to_util(sg_policy, avg_freq);
+	sg_policy->curr_cycles = 0;
+	sg_policy->last_ws = curr_ws;
+}
+
 #define NL_RATIO 75
 #define HISPEED_LOAD 90
 static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
 			      unsigned long *max)
 {
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
-	unsigned long cap_cur = capacity_curr_of(sg_cpu->cpu);
 	bool is_migration = sg_cpu->flags & SCHED_CPUFREQ_INTERCLUSTER_MIG;
 	unsigned long nl = sg_cpu->walt_load.nl;
 	unsigned long cpu_util = sg_cpu->util;
 	bool is_hiload;
 
-	is_hiload = (cpu_util >= mult_frac(cap_cur,
+	is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap,
 					   HISPEED_LOAD,
 					   100));
 
@@ -247,6 +295,8 @@
 	} else {
 		sugov_get_util(&util, &max, sg_cpu->cpu);
 		sugov_iowait_boost(sg_cpu, &util, &max);
+		sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
+				   sg_policy->policy->cur);
 		sugov_walt_adjust(sg_cpu, &util, &max);
 		next_f = get_next_freq(sg_policy, util, max);
 	}
@@ -322,12 +372,11 @@
 	raw_spin_lock(&sg_policy->update_lock);
 
 	if (sg_policy->max != max) {
-		hs_util = mult_frac(max,
-				    sg_policy->tunables->hispeed_freq,
-				    sg_policy->policy->cpuinfo.max_freq);
+		sg_policy->max = max;
+		hs_util = freq_to_util(sg_policy,
+					sg_policy->tunables->hispeed_freq);
 		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
 		sg_policy->hispeed_util = hs_util;
-		sg_policy->max = max;
 	}
 
 	sg_cpu->util = util;
@@ -337,6 +386,9 @@
 	sugov_set_iowait_boost(sg_cpu, time, flags);
 	sg_cpu->last_update = time;
 
+	sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
+			   sg_policy->policy->cur);
+
 	trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util, max,
 				sg_cpu->walt_load.nl,
 				sg_cpu->walt_load.pl, flags);
@@ -354,6 +406,10 @@
 	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
 
 	mutex_lock(&sg_policy->work_lock);
+	raw_spin_lock(&sg_policy->update_lock);
+	sugov_track_cycles(sg_policy, sg_policy->policy->cur,
+			   sched_ktime_clock());
+	raw_spin_unlock(&sg_policy->update_lock);
 	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
 				CPUFREQ_RELATION_L);
 	mutex_unlock(&sg_policy->work_lock);
@@ -438,11 +494,12 @@
 
 	tunables->hispeed_freq = val;
 	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
-		hs_util = mult_frac(sg_policy->max,
-				    sg_policy->tunables->hispeed_freq,
-				    sg_policy->policy->cpuinfo.max_freq);
+		raw_spin_lock(&sg_policy->update_lock);
+		hs_util = freq_to_util(sg_policy,
+					sg_policy->tunables->hispeed_freq);
 		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
 		sg_policy->hispeed_util = hs_util;
+		raw_spin_unlock(&sg_policy->update_lock);
 	}
 
 	return count;
@@ -725,6 +782,10 @@
 
 	if (!policy->fast_switch_enabled) {
 		mutex_lock(&sg_policy->work_lock);
+		raw_spin_lock(&sg_policy->update_lock);
+		sugov_track_cycles(sg_policy, sg_policy->policy->cur,
+				   sched_ktime_clock());
+		raw_spin_unlock(&sg_policy->update_lock);
 		cpufreq_policy_apply_limits(policy);
 		mutex_unlock(&sg_policy->work_lock);
 	}
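
To see what sugov_calc_avg_cap() computes, take made-up numbers: a 20 ms WALT
window (sched_ravg_window = 20,000,000 ns) in which the CPU ran 10 ms at
1,000,000 kHz and 10 ms at 2,000,000 kHz. sugov_track_cycles() accumulates
freq_khz * delta_ns / (NSEC_PER_SEC / KHZ), i.e. 10,000,000 + 20,000,000 =
30,000,000; dividing by the window length in the same units (20,000,000 /
1,000,000 = 20) gives avg_freq = 1,500,000 kHz, the time-weighted average of
the two frequencies. freq_to_util() then scales this into capacity units, so
with cpuinfo.max_freq = 2,000,000 kHz and a max capacity of 1024 the window's
avg_cap is 768, and the HISPEED_LOAD (90%) check in sugov_walt_adjust() fires
once cpu_util reaches about 691, instead of comparing against the instantaneous
current capacity as before.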
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 45f404b..d06ac7d 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -221,8 +221,8 @@
  * The margin used when comparing utilization with CPU capacity:
  * util * 1024 < capacity * margin
  */
-unsigned int capacity_margin = 1078; /* ~5% margin */
-unsigned int capacity_margin_down = 1205; /* ~15% margin */
+unsigned int sysctl_sched_capacity_margin = 1078; /* ~5% margin */
+unsigned int sysctl_sched_capacity_margin_down = 1205; /* ~15% margin */
 
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
@@ -5918,9 +5918,9 @@
 	util += boosted_task_util(p);
 
 	if (capacity_orig_of(task_cpu(p)) > capacity_orig_of(cpu))
-		margin = capacity_margin_down;
+		margin = sysctl_sched_capacity_margin_down;
 	else
-		margin = capacity_margin;
+		margin = sysctl_sched_capacity_margin;
 
 	return (capacity_orig_of(cpu) * 1024) > (util * margin);
 }
@@ -5948,7 +5948,7 @@
 static bool __cpu_overutilized(int cpu, int delta)
 {
 	return (capacity_orig_of(cpu) * 1024) <
-	       ((cpu_util(cpu) + delta) * capacity_margin);
+	       ((cpu_util(cpu) + delta) * sysctl_sched_capacity_margin);
 }
 
 bool cpu_overutilized(int cpu)
@@ -6085,10 +6085,14 @@
 	struct sched_group *fit_group = NULL, *spare_group = NULL;
 	unsigned long min_load = ULONG_MAX, this_load = 0;
 	unsigned long fit_capacity = ULONG_MAX;
-	unsigned long max_spare_capacity = capacity_margin - SCHED_CAPACITY_SCALE;
+	unsigned long max_spare_capacity;
+
 	int load_idx = sd->forkexec_idx;
 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
+	max_spare_capacity = sysctl_sched_capacity_margin -
+			     SCHED_CAPACITY_SCALE;
+
 	if (sd_flag & SD_BALANCE_WAKE)
 		load_idx = sd->wake_idx;
 
@@ -8430,7 +8434,8 @@
 		mcc->cpu = cpu;
 #ifdef CONFIG_SCHED_DEBUG
 		raw_spin_unlock_irqrestore(&mcc->lock, flags);
-		pr_info("CPU%d: update max cpu_capacity %lu\n", cpu, capacity);
+		printk_deferred(KERN_INFO "CPU%d: update max cpu_capacity %lu\n",
+				cpu, capacity);
 		goto skip_unlock;
 #endif
 	}
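
A sketch of the margin arithmetic with the default values shown above:
task_fits() requires capacity_orig * 1024 > util * margin. On a CPU with
capacity_orig = 1024, the upmigrate margin of 1078 admits a task only while its
utilization stays below 1024 * 1024 / 1078, roughly 972, i.e. about 95% of
capacity; when the task is already on a bigger CPU, the downmigrate margin of
1205 lets it fit a smaller CPU only once utilization drops below about
1024 * 1024 / 1205, roughly 870, i.e. about 85%. The gap between the two
margins provides hysteresis between up- and down-migration.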
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 566e103..2524954 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1779,6 +1779,7 @@
 	unsigned long prev_window_util;
 	unsigned long nl;
 	unsigned long pl;
+	u64 ws;
 };
 
 static inline unsigned long cpu_util_cum(int cpu, int delta)
@@ -1828,6 +1829,7 @@
 			walt_load->prev_window_util = util;
 			walt_load->nl = nl;
 			walt_load->pl = 0;
+			walt_load->ws = rq->window_start;
 		}
 	}
 #endif
@@ -2207,6 +2209,15 @@
 }
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
+#ifdef CONFIG_SCHED_WALT
+u64 sched_ktime_clock(void);
+#else /* CONFIG_SCHED_WALT */
+static inline u64 sched_ktime_clock(void)
+{
+	return 0;
+}
+#endif /* CONFIG_SCHED_WALT */
+
 #ifdef CONFIG_CPU_FREQ
 DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
 
@@ -2239,8 +2250,10 @@
 #ifdef CONFIG_SCHED_WALT
 	/*
 	 * Skip if we've already reported, but not if this is an inter-cluster
-	 * migration
+	 * migration. Also only allow WALT update sites.
 	 */
+	if (!(flags & SCHED_CPUFREQ_WALT))
+		return;
 	if (!sched_disable_window_stats &&
 		(rq->load_reported_window == rq->window_start) &&
 		!(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG))
@@ -2251,7 +2264,7 @@
 	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
 					cpu_of(rq)));
 	if (data)
-		data->func(data, sched_clock(), flags);
+		data->func(data, sched_ktime_clock(), flags);
 }
 
 static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
@@ -2336,7 +2349,6 @@
 extern unsigned int  __read_mostly sysctl_sched_spill_nr_run;
 extern unsigned int  __read_mostly sched_load_granule;
 
-extern u64 sched_ktime_clock(void);
 extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
 extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
 extern int update_preferred_cluster(struct related_thread_group *grp,
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 7f86c0b..4238924 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -162,16 +162,14 @@
 unsigned int sched_get_cpu_util(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	u64 util = 0;
-	unsigned long capacity = SCHED_CAPACITY_SCALE, flags;
+	u64 util;
+	unsigned long capacity, flags;
 	unsigned int busy;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
 
-#ifdef CONFIG_SMP
 	util = rq->cfs.avg.util_avg;
 	capacity = capacity_orig_of(cpu);
-#endif
 
 #ifdef CONFIG_SCHED_WALT
 	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 50f889b..ae45283 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -871,8 +871,10 @@
 	migrate_top_tasks(p, src_rq, dest_rq);
 
 	if (!same_freq_domain(new_cpu, task_cpu(p))) {
-		cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
-		cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+		cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG |
+					     SCHED_CPUFREQ_WALT);
+		cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG |
+					    SCHED_CPUFREQ_WALT);
 	}
 
 	if (p == src_rq->ed_task) {
@@ -3040,10 +3042,40 @@
 
 	for_each_sched_cluster(cluster)
 		for_each_cpu(cpu, &cluster->cpus)
-			cpufreq_update_util(cpu_rq(cpu), 0);
+			cpufreq_update_util(cpu_rq(cpu), SCHED_CPUFREQ_WALT);
 
 	for_each_cpu(cpu, cpu_possible_mask)
 		raw_spin_unlock(&cpu_rq(cpu)->lock);
 
 	core_ctl_check(this_rq()->window_start);
 }
+
+#ifndef CONFIG_SCHED_HMP
+int walt_proc_update_handler(struct ctl_table *table, int write,
+			     void __user *buffer, size_t *lenp,
+			     loff_t *ppos)
+{
+	int ret;
+	unsigned int *data = (unsigned int *)table->data;
+	static DEFINE_MUTEX(mutex);
+
+	mutex_lock(&mutex);
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret || !write) {
+		mutex_unlock(&mutex);
+		return ret;
+	}
+
+	if (data == &sysctl_sched_group_upmigrate_pct)
+		sched_group_upmigrate =
+		    pct_to_real(sysctl_sched_group_upmigrate_pct);
+	else if (data == &sysctl_sched_group_downmigrate_pct)
+		sched_group_downmigrate =
+		    pct_to_real(sysctl_sched_group_downmigrate_pct);
+	else
+		ret = -EINVAL;
+	mutex_unlock(&mutex);
+
+	return ret;
+}
+#endif
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index f153332..887933f 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -194,8 +194,6 @@
 	return (p->ravg.sum_history[0] == EXITING_TASK_MARKER);
 }
 
-extern u64 sched_ktime_clock(void);
-
 static inline struct sched_cluster *cpu_cluster(int cpu)
 {
 	return cpu_rq(cpu)->cluster;
@@ -335,11 +333,6 @@
 static inline void set_window_start(struct rq *rq) { }
 static inline int sched_cpu_high_irqload(int cpu) { return 0; }
 
-static inline u64 sched_ktime_clock(void)
-{
-	return 0;
-}
-
 static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
 					  u64 wallclock)
 {
diff --git a/kernel/signal.c b/kernel/signal.c
index 0b14157..deb04d5 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -503,7 +503,8 @@
 	return !tsk->ptrace;
 }
 
-static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
+			   bool *resched_timer)
 {
 	struct sigqueue *q, *first = NULL;
 
@@ -525,6 +526,12 @@
 still_pending:
 		list_del_init(&first->list);
 		copy_siginfo(info, &first->info);
+
+		*resched_timer =
+			(first->flags & SIGQUEUE_PREALLOC) &&
+			(info->si_code == SI_TIMER) &&
+			(info->si_sys_private);
+
 		__sigqueue_free(first);
 	} else {
 		/*
@@ -541,12 +548,12 @@
 }
 
 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
-			siginfo_t *info)
+			siginfo_t *info, bool *resched_timer)
 {
 	int sig = next_signal(pending, mask);
 
 	if (sig)
-		collect_signal(sig, pending, info);
+		collect_signal(sig, pending, info, resched_timer);
 	return sig;
 }
 
@@ -558,15 +565,16 @@
  */
 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 {
+	bool resched_timer = false;
 	int signr;
 
 	/* We only dequeue private signals from ourselves, we don't let
 	 * signalfd steal them
 	 */
-	signr = __dequeue_signal(&tsk->pending, mask, info);
+	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
 	if (!signr) {
 		signr = __dequeue_signal(&tsk->signal->shared_pending,
-					 mask, info);
+					 mask, info, &resched_timer);
 		/*
 		 * itimer signal ?
 		 *
@@ -611,7 +619,7 @@
 		 */
 		current->jobctl |= JOBCTL_STOP_DEQUEUED;
 	}
-	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
+	if (resched_timer) {
 		/*
 		 * Release the siglock to ensure proper locking order
 		 * of timer locks outside of siglocks.  Note, we leave
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 534431a..b076cba 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -300,6 +300,31 @@
 		.mode           = 0644,
 		.proc_handler   = proc_dointvec,
 	},
+	{
+		.procname	= "sched_group_upmigrate",
+		.data		= &sysctl_sched_group_upmigrate_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+#ifdef CONFIG_SCHED_HMP
+		.proc_handler	= sched_hmp_proc_update_handler,
+#else
+		.proc_handler	= walt_proc_update_handler,
+#endif
+		.extra1		= &sysctl_sched_group_downmigrate_pct,
+	},
+	{
+		.procname	= "sched_group_downmigrate",
+		.data		= &sysctl_sched_group_downmigrate_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+#ifdef CONFIG_SCHED_HMP
+		.proc_handler	= sched_hmp_proc_update_handler,
+#else
+		.proc_handler	= walt_proc_update_handler,
+#endif
+		.extra1		= &zero,
+		.extra2		= &sysctl_sched_group_upmigrate_pct,
+	},
 #endif
 #ifdef CONFIG_SCHED_HMP
 	{
@@ -377,22 +402,6 @@
 		.extra2		= &one_hundred,
 	},
 	{
-		.procname	= "sched_group_upmigrate",
-		.data		= &sysctl_sched_group_upmigrate_pct,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_hmp_proc_update_handler,
-		.extra1		= &zero,
-	},
-	{
-		.procname	= "sched_group_downmigrate",
-		.data		= &sysctl_sched_group_downmigrate_pct,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_hmp_proc_update_handler,
-		.extra1		= &zero,
-	},
-	{
 		.procname	= "sched_init_task_load",
 		.data		= &sysctl_sched_init_task_load_pct,
 		.maxlen		= sizeof(unsigned int),
@@ -577,6 +586,20 @@
 		.extra1		= &min_wakeup_granularity_ns,
 		.extra2		= &max_wakeup_granularity_ns,
 	},
+	{
+		.procname	= "sched_upmigrate",
+		.data		= &sysctl_sched_capacity_margin,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_updown_migrate_handler,
+	},
+	{
+		.procname	= "sched_downmigrate",
+		.data		= &sysctl_sched_capacity_margin_down,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_updown_migrate_handler,
+	},
 #ifdef CONFIG_SMP
 	{
 		.procname	= "sched_tunable_scaling",
@@ -3172,6 +3195,39 @@
 	}
 }
 
+static int do_proc_douintvec_capacity_conv(bool *negp, unsigned long *lvalp,
+					   int *valp, int write, void *data)
+{
+	if (write) {
+		if (*negp)
+			return -EINVAL;
+		*valp = SCHED_FIXEDPOINT_SCALE * 100 / *lvalp;
+	} else {
+		*negp = false;
+		*lvalp = SCHED_FIXEDPOINT_SCALE * 100 / *valp;
+	}
+
+	return 0;
+}
+
+/**
+ * proc_douintvec_capacity - read a vector of integers in percentage and convert
+ *			    into sched capacity
+ * @table: the sysctl table
+ * @write: %TRUE if this is a write to the sysctl file
+ * @buffer: the user buffer
+ * @lenp: the size of the user buffer
+ * @ppos: file position
+ *
+ * Returns 0 on success.
+ */
+int proc_douintvec_capacity(struct ctl_table *table, int write,
+			    void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	return do_proc_dointvec(table, write, buffer, lenp, ppos,
+				do_proc_douintvec_capacity_conv, NULL);
+}
+
 #else /* CONFIG_PROC_SYSCTL */
 
 int proc_dostring(struct ctl_table *table, int write,
@@ -3229,6 +3285,11 @@
     return -ENOSYS;
 }
 
+int proc_douintvec_capacity(struct ctl_table *table, int write,
+			    void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	return -ENOSYS;
+}
 
 #endif /* CONFIG_PROC_SYSCTL */
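
As a worked example of the conversion in do_proc_douintvec_capacity_conv()
(with SCHED_FIXEDPOINT_SCALE = 1024): writing 80 to
/proc/sys/kernel/sched_downmigrate stores 1024 * 100 / 80 = 1280 in
sysctl_sched_capacity_margin_down, and reading the file back yields
1024 * 100 / 1280 = 80 again; writing 95 to sched_upmigrate stores
102400 / 95 = 1077, close to the built-in default of 1078. Because a smaller
percentage maps to a larger margin, the check in sched_updown_migrate_handler()
(the up margin must not exceed the down margin) amounts to requiring that
sched_upmigrate is never set below sched_downmigrate.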
 
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index fa80192..5dfdf39 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -568,7 +568,7 @@
 {
 	struct alarm_base *base = &alarm_bases[alarm->type];
 
-	start = ktime_add(start, base->gettime());
+	start = ktime_add_safe(start, base->gettime());
 	alarm_start(alarm, start);
 }
 EXPORT_SYMBOL_GPL(alarm_start_relative);
@@ -654,7 +654,7 @@
 		overrun++;
 	}
 
-	alarm->node.expires = ktime_add(alarm->node.expires, interval);
+	alarm->node.expires = ktime_add_safe(alarm->node.expires, interval);
 	return overrun;
 }
 EXPORT_SYMBOL_GPL(alarm_forward);
@@ -840,13 +840,21 @@
 
 	/* start the timer */
 	timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
+
+	/*
+	 * Rate limit to the tick as a hot fix to prevent DOS. Will be
+	 * mopped up later.
+	 */
+	if (ktime_to_ns(timr->it.alarm.interval) < TICK_NSEC)
+		timr->it.alarm.interval = ktime_set(0, TICK_NSEC);
+
 	exp = timespec_to_ktime(new_setting->it_value);
 	/* Convert (if necessary) to absolute time */
 	if (flags != TIMER_ABSTIME) {
 		ktime_t now;
 
 		now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
-		exp = ktime_add(now, exp);
+		exp = ktime_add_safe(now, exp);
 	}
 
 	alarm_start(&timr->it.alarm.alarmtimer, exp);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index bfe589e..234d3e4 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -116,6 +116,26 @@
 	tk->offs_boot = ktime_add(tk->offs_boot, delta);
 }
 
+/*
+ * tk_clock_read - atomic clocksource read() helper
+ *
+ * This helper is necessary to use in the read paths because, while the
+ * seqlock ensures we don't return a bad value while structures are updated,
+ * it doesn't protect from potential crashes. There is the possibility that
+ * the tkr's clocksource may change between the read reference, and the
+ * clock reference passed to the read function.  This can cause crashes if
+ * the wrong clocksource is passed to the wrong read function.
+ * This isn't necessary to use when holding the timekeeper_lock or doing
+ * a read of the fast-timekeeper tkrs (which is protected by its own locking
+ * and update logic).
+ */
+static inline u64 tk_clock_read(struct tk_read_base *tkr)
+{
+	struct clocksource *clock = READ_ONCE(tkr->clock);
+
+	return clock->read(clock);
+}
+
 #ifdef CONFIG_DEBUG_TIMEKEEPING
 #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
 
@@ -173,7 +193,7 @@
 	 */
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
-		now = tkr->read(tkr->clock);
+		now = tk_clock_read(tkr);
 		last = tkr->cycle_last;
 		mask = tkr->mask;
 		max = tkr->clock->max_cycles;
@@ -207,7 +227,7 @@
 	cycle_t cycle_now, delta;
 
 	/* read clocksource */
-	cycle_now = tkr->read(tkr->clock);
+	cycle_now = tk_clock_read(tkr);
 
 	/* calculate the delta since the last update_wall_time */
 	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
@@ -236,12 +256,10 @@
 	++tk->cs_was_changed_seq;
 	old_clock = tk->tkr_mono.clock;
 	tk->tkr_mono.clock = clock;
-	tk->tkr_mono.read = clock->read;
 	tk->tkr_mono.mask = clock->mask;
-	tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
+	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
 
 	tk->tkr_raw.clock = clock;
-	tk->tkr_raw.read = clock->read;
 	tk->tkr_raw.mask = clock->mask;
 	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
 
@@ -260,8 +278,7 @@
 	/* Go back from cycles -> shifted ns */
 	tk->xtime_interval = (u64) interval * clock->mult;
 	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
-	tk->raw_interval =
-		((u64) interval * clock->mult) >> clock->shift;
+	tk->raw_interval = interval * clock->mult;
 
 	 /* if changing clocks, convert xtime_nsec shift units */
 	if (old_clock) {
@@ -405,7 +422,7 @@
 
 		now += timekeeping_delta_to_ns(tkr,
 				clocksource_delta(
-					tkr->read(tkr->clock),
+					tk_clock_read(tkr),
 					tkr->cycle_last,
 					tkr->mask));
 	} while (read_seqcount_retry(&tkf->seq, seq));
@@ -462,6 +479,10 @@
 	return cycles_at_suspend;
 }
 
+static struct clocksource dummy_clock = {
+	.read = dummy_clock_read,
+};
+
 /**
  * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
  * @tk: Timekeeper to snapshot.
@@ -478,13 +499,13 @@
 	struct tk_read_base *tkr = &tk->tkr_mono;
 
 	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
-	cycles_at_suspend = tkr->read(tkr->clock);
-	tkr_dummy.read = dummy_clock_read;
+	cycles_at_suspend = tk_clock_read(tkr);
+	tkr_dummy.clock = &dummy_clock;
 	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
 
 	tkr = &tk->tkr_raw;
 	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
-	tkr_dummy.read = dummy_clock_read;
+	tkr_dummy.clock = &dummy_clock;
 	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
 }
 
@@ -650,11 +671,10 @@
  */
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
-	struct clocksource *clock = tk->tkr_mono.clock;
 	cycle_t cycle_now, delta;
 	s64 nsec;
 
-	cycle_now = tk->tkr_mono.read(clock);
+	cycle_now = tk_clock_read(&tk->tkr_mono);
 	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 	tk->tkr_mono.cycle_last = cycle_now;
 	tk->tkr_raw.cycle_last  = cycle_now;
@@ -930,8 +950,7 @@
 
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
-
-		now = tk->tkr_mono.read(tk->tkr_mono.clock);
+		now = tk_clock_read(&tk->tkr_mono);
 		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
 		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
 		base_real = ktime_add(tk->tkr_mono.base,
@@ -1110,7 +1129,7 @@
 		 * Check whether the system counter value provided by the
 		 * device driver is on the current timekeeping interval.
 		 */
-		now = tk->tkr_mono.read(tk->tkr_mono.clock);
+		now = tk_clock_read(&tk->tkr_mono);
 		interval_start = tk->tkr_mono.cycle_last;
 		if (!cycle_between(interval_start, cycles, now)) {
 			clock_was_set_seq = tk->clock_was_set_seq;
@@ -1668,7 +1687,7 @@
 	 * The less preferred source will only be tried if there is no better
 	 * usable source. The rtc part is handled separately in rtc core code.
 	 */
-	cycle_now = tk->tkr_mono.read(clock);
+	cycle_now = tk_clock_read(&tk->tkr_mono);
 	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
 		cycle_now > tk->tkr_mono.cycle_last) {
 		u64 num, max = ULLONG_MAX;
@@ -2032,7 +2051,7 @@
 						unsigned int *clock_set)
 {
 	cycle_t interval = tk->cycle_interval << shift;
-	u64 raw_nsecs;
+	u64 snsec_per_sec;
 
 	/* If the offset is smaller than a shifted interval, do nothing */
 	if (offset < interval)
@@ -2047,14 +2066,15 @@
 	*clock_set |= accumulate_nsecs_to_secs(tk);
 
 	/* Accumulate raw time */
-	raw_nsecs = (u64)tk->raw_interval << shift;
-	raw_nsecs += tk->raw_time.tv_nsec;
-	if (raw_nsecs >= NSEC_PER_SEC) {
-		u64 raw_secs = raw_nsecs;
-		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
-		tk->raw_time.tv_sec += raw_secs;
+	tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
+	tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
+	snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+	while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
+		tk->tkr_raw.xtime_nsec -= snsec_per_sec;
+		tk->raw_time.tv_sec++;
 	}
-	tk->raw_time.tv_nsec = raw_nsecs;
+	tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
+	tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
 
 	/* Accumulate error between NTP and clock interval */
 	tk->ntp_error += tk->ntp_tick << shift;
@@ -2086,7 +2106,7 @@
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 	offset = real_tk->cycle_interval;
 #else
-	offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
 				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 #endif
 
diff --git a/kernel/trace/ipc_logging.c b/kernel/trace/ipc_logging.c
index 6d310ab..31e6a8e 100644
--- a/kernel/trace/ipc_logging.c
+++ b/kernel/trace/ipc_logging.c
@@ -314,8 +314,11 @@
 		ilctxt->write_page->hdr.end_time = t_now;
 
 		ilctxt->write_page = get_next_page(ilctxt, ilctxt->write_page);
-		if (WARN_ON(ilctxt->write_page == NULL))
+		if (WARN_ON(ilctxt->write_page == NULL)) {
+			spin_unlock(&ilctxt->context_lock_lhb1);
+			read_unlock_irqrestore(&context_list_lock_lha1, flags);
 			return;
+		}
 		ilctxt->write_page->hdr.write_offset = 0;
 		ilctxt->write_page->hdr.start_time = t_now;
 		memcpy((ilctxt->write_page->data +
diff --git a/kernel/ucount.c b/kernel/ucount.c
index f4ac185..c761cdb 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -231,11 +231,10 @@
 	 * properly.
 	 */
 	user_header = register_sysctl("user", empty);
+	kmemleak_ignore(user_header);
 	BUG_ON(!user_header);
 	BUG_ON(!setup_userns_sysctls(&init_user_ns));
 #endif
 	return 0;
 }
 subsys_initcall(user_namespace_sysctl_init);
-
-
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 44ae68a..cffb5f2 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -25,33 +25,15 @@
 
 #include <asm/irq_regs.h>
 #include <linux/kvm_para.h>
-#include <linux/perf_event.h>
 #include <linux/kthread.h>
 #include <soc/qcom/watchdog.h>
 
-/*
- * The run state of the lockup detectors is controlled by the content of the
- * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
- * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
- *
- * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
- * are variables that are only used as an 'interface' between the parameters
- * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
- * 'watchdog_thresh' variable is handled differently because its value is not
- * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
- * is equal zero.
- */
-#define NMI_WATCHDOG_ENABLED_BIT   0
-#define SOFT_WATCHDOG_ENABLED_BIT  1
-#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
-#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
-
 static DEFINE_MUTEX(watchdog_proc_mutex);
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
 #else
-static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
+unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
 #endif
 int __read_mostly nmi_watchdog_enabled;
 int __read_mostly soft_watchdog_enabled;
@@ -61,9 +43,6 @@
 #ifdef CONFIG_SMP
 int __read_mostly sysctl_softlockup_all_cpu_backtrace;
 int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
-#else
-#define sysctl_softlockup_all_cpu_backtrace 0
-#define sysctl_hardlockup_all_cpu_backtrace 0
 #endif
 static struct cpumask watchdog_cpumask __read_mostly;
 unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
@@ -72,6 +51,8 @@
 #define for_each_watchdog_cpu(cpu) \
 	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
 
+atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+
 /*
  * The 'watchdog_running' variable is set to 1 when the watchdog threads
  * are registered/started and is set to 0 when the watchdog threads are
@@ -103,55 +84,9 @@
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
 static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
 static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-static DEFINE_PER_CPU(bool, hard_watchdog_warn);
-static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
-#endif
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
-static cpumask_t __read_mostly watchdog_cpus;
-#endif
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
-static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
-#endif
 static unsigned long soft_lockup_nmi_warn;
 
-/* boot commands */
-/*
- * Should we panic when a soft-lockup or hard-lockup occurs:
- */
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-unsigned int __read_mostly hardlockup_panic =
-			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
-static unsigned long __maybe_unused hardlockup_allcpu_dumped;
-/*
- * We may not want to enable hard lockup detection by default in all cases,
- * for example when running the kernel as a guest on a hypervisor. In these
- * cases this function can be called to disable hard lockup detection. This
- * function should only be executed once by the boot processor before the
- * kernel command line parameters are parsed, because otherwise it is not
- * possible to override this in hardlockup_panic_setup().
- */
-void hardlockup_detector_disable(void)
-{
-	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
-}
-
-static int __init hardlockup_panic_setup(char *str)
-{
-	if (!strncmp(str, "panic", 5))
-		hardlockup_panic = 1;
-	else if (!strncmp(str, "nopanic", 7))
-		hardlockup_panic = 0;
-	else if (!strncmp(str, "0", 1))
-		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
-	else if (!strncmp(str, "1", 1))
-		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
-	return 1;
-}
-__setup("nmi_watchdog=", hardlockup_panic_setup);
-#endif
-
 unsigned int __read_mostly softlockup_panic =
 			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
 
@@ -272,32 +207,14 @@
 	wq_watchdog_touch(-1);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-void touch_nmi_watchdog(void)
-{
-	/*
-	 * Using __raw here because some code paths have
-	 * preemption enabled.  If preemption is enabled
-	 * then interrupts should be enabled too, in which
-	 * case we shouldn't have to worry about the watchdog
-	 * going off.
-	 */
-	raw_cpu_write(watchdog_nmi_touch, true);
-	touch_softlockup_watchdog();
-}
-EXPORT_SYMBOL(touch_nmi_watchdog);
-
-#endif
-
 void touch_softlockup_watchdog_sync(void)
 {
 	__this_cpu_write(softlockup_touch_sync, true);
 	__this_cpu_write(watchdog_touch_ts, 0);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 /* watchdog detector functions */
-static bool is_hardlockup(void)
+bool is_hardlockup(void)
 {
 	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
 
@@ -307,80 +224,6 @@
 	__this_cpu_write(hrtimer_interrupts_saved, hrint);
 	return false;
 }
-#endif
-
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
-static unsigned int watchdog_next_cpu(unsigned int cpu)
-{
-	cpumask_t cpus = watchdog_cpus;
-	unsigned int next_cpu;
-
-	next_cpu = cpumask_next(cpu, &cpus);
-	if (next_cpu >= nr_cpu_ids)
-		next_cpu = cpumask_first(&cpus);
-
-	if (next_cpu == cpu)
-		return nr_cpu_ids;
-
-	return next_cpu;
-}
-
-static int is_hardlockup_other_cpu(unsigned int cpu)
-{
-	unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);
-
-	if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
-		return 1;
-
-	per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
-	return 0;
-}
-
-static void watchdog_check_hardlockup_other_cpu(void)
-{
-	unsigned int next_cpu;
-
-	/*
-	 * Test for hardlockups every 3 samples.  The sample period is
-	 *  watchdog_thresh * 2 / 5, so 3 samples gets us back to slightly over
-	 *  watchdog_thresh (over by 20%).
-	 */
-	if (__this_cpu_read(hrtimer_interrupts) % 3 != 0)
-		return;
-
-	/* check for a hardlockup on the next cpu */
-	next_cpu = watchdog_next_cpu(smp_processor_id());
-	if (next_cpu >= nr_cpu_ids)
-		return;
-
-	smp_rmb();
-
-	if (per_cpu(watchdog_nmi_touch, next_cpu) == true) {
-		per_cpu(watchdog_nmi_touch, next_cpu) = false;
-		return;
-	}
-
-	if (is_hardlockup_other_cpu(next_cpu)) {
-		/* only warn once */
-		if (per_cpu(hard_watchdog_warn, next_cpu) == true)
-			return;
-
-		if (hardlockup_panic) {
-			pr_err("Watchdog detected hard LOCKUP on cpu %u",
-					next_cpu);
-			msm_trigger_wdog_bite();
-		}
-		else
-			WARN(1, "Watchdog detected hard LOCKUP on cpu %u", next_cpu);
-
-		per_cpu(hard_watchdog_warn, next_cpu) = true;
-	} else {
-		per_cpu(hard_watchdog_warn, next_cpu) = false;
-	}
-}
-#else
-static inline void watchdog_check_hardlockup_other_cpu(void) { return; }
-#endif
 
 static int is_softlockup(unsigned long touch_ts)
 {
@@ -394,80 +237,22 @@
 	return 0;
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
-
-static struct perf_event_attr wd_hw_attr = {
-	.type		= PERF_TYPE_HARDWARE,
-	.config		= PERF_COUNT_HW_CPU_CYCLES,
-	.size		= sizeof(struct perf_event_attr),
-	.pinned		= 1,
-	.disabled	= 1,
-};
-
-/* Callback function for perf event subsystem */
-static void watchdog_overflow_callback(struct perf_event *event,
-		 struct perf_sample_data *data,
-		 struct pt_regs *regs)
-{
-	/* Ensure the watchdog never gets throttled */
-	event->hw.interrupts = 0;
-
-	if (__this_cpu_read(watchdog_nmi_touch) == true) {
-		__this_cpu_write(watchdog_nmi_touch, false);
-		return;
-	}
-
-	/* check for a hardlockup
-	 * This is done by making sure our timer interrupt
-	 * is incrementing.  The timer interrupt should have
-	 * fired multiple times before we overflow'd.  If it hasn't
-	 * then this is a good indication the cpu is stuck
-	 */
-	if (is_hardlockup()) {
-		int this_cpu = smp_processor_id();
-
-		/* only print hardlockups once */
-		if (__this_cpu_read(hard_watchdog_warn) == true)
-			return;
-
-		pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
-		if (hardlockup_panic)
-			msm_trigger_wdog_bite();
-
-		print_modules();
-		print_irqtrace_events(current);
-		if (regs)
-			show_regs(regs);
-		else
-			dump_stack();
-
-		/*
-		 * Perform all-CPU dump only once to avoid multiple hardlockups
-		 * generating interleaving traces
-		 */
-		if (sysctl_hardlockup_all_cpu_backtrace &&
-				!test_and_set_bit(0, &hardlockup_allcpu_dumped))
-			trigger_allbutself_cpu_backtrace();
-
-		if (hardlockup_panic)
-			nmi_panic(regs, "Hard LOCKUP");
-
-		__this_cpu_write(hard_watchdog_warn, true);
-		return;
-	}
-
-	__this_cpu_write(hard_watchdog_warn, false);
-	return;
-}
-#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
-
 static void watchdog_interrupt_count(void)
 {
 	__this_cpu_inc(hrtimer_interrupts);
 }
 
-static int watchdog_nmi_enable(unsigned int cpu);
-static void watchdog_nmi_disable(unsigned int cpu);
+/*
+ * These two functions are mostly architecture specific;
+ * define them as weak here.
+ */
+int __weak watchdog_nmi_enable(unsigned int cpu)
+{
+	return 0;
+}
+void __weak watchdog_nmi_disable(unsigned int cpu)
+{
+}
 
 static int watchdog_enable_all_cpus(void);
 static void watchdog_disable_all_cpus(void);
@@ -480,12 +265,12 @@
 	int duration;
 	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
+	if (atomic_read(&watchdog_park_in_progress) != 0)
+		return HRTIMER_NORESTART;
+
 	/* kick the hardlockup detector */
 	watchdog_interrupt_count();
 
-	/* test for hardlockups on the next cpu */
-	watchdog_check_hardlockup_other_cpu();
-
 	/* kick the softlockup detector */
 	wake_up_process(__this_cpu_read(softlockup_watchdog));
 
@@ -694,144 +479,6 @@
 		watchdog_nmi_disable(cpu);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
-/*
- * People like the simple clean cpu node info on boot.
- * Reduce the watchdog noise by only printing messages
- * that are different from what cpu0 displayed.
- */
-static unsigned long cpu0_err;
-
-static int watchdog_nmi_enable(unsigned int cpu)
-{
-	struct perf_event_attr *wd_attr;
-	struct perf_event *event = per_cpu(watchdog_ev, cpu);
-
-	/* nothing to do if the hard lockup detector is disabled */
-	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
-		goto out;
-
-	/* is it already setup and enabled? */
-	if (event && event->state > PERF_EVENT_STATE_OFF)
-		goto out;
-
-	/* it is setup but not enabled */
-	if (event != NULL)
-		goto out_enable;
-
-	wd_attr = &wd_hw_attr;
-	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
-
-	/* Try to register using hardware perf events */
-	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
-
-	/* save cpu0 error for future comparision */
-	if (cpu == 0 && IS_ERR(event))
-		cpu0_err = PTR_ERR(event);
-
-	if (!IS_ERR(event)) {
-		/* only print for cpu0 or different than cpu0 */
-		if (cpu == 0 || cpu0_err)
-			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
-		goto out_save;
-	}
-
-	/*
-	 * Disable the hard lockup detector if _any_ CPU fails to set up
-	 * set up the hardware perf event. The watchdog() function checks
-	 * the NMI_WATCHDOG_ENABLED bit periodically.
-	 *
-	 * The barriers are for syncing up watchdog_enabled across all the
-	 * cpus, as clear_bit() does not use barriers.
-	 */
-	smp_mb__before_atomic();
-	clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
-	smp_mb__after_atomic();
-
-	/* skip displaying the same error again */
-	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
-		return PTR_ERR(event);
-
-	/* vary the KERN level based on the returned errno */
-	if (PTR_ERR(event) == -EOPNOTSUPP)
-		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
-	else if (PTR_ERR(event) == -ENOENT)
-		pr_warn("disabled (cpu%i): hardware events not enabled\n",
-			 cpu);
-	else
-		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
-			cpu, PTR_ERR(event));
-
-	pr_info("Shutting down hard lockup detector on all cpus\n");
-
-	return PTR_ERR(event);
-
-	/* success path */
-out_save:
-	per_cpu(watchdog_ev, cpu) = event;
-out_enable:
-	perf_event_enable(per_cpu(watchdog_ev, cpu));
-out:
-	return 0;
-}
-
-static void watchdog_nmi_disable(unsigned int cpu)
-{
-	struct perf_event *event = per_cpu(watchdog_ev, cpu);
-
-	if (event) {
-		perf_event_disable(event);
-		per_cpu(watchdog_ev, cpu) = NULL;
-
-		/* should be in cleanup, but blocks oprofile */
-		perf_event_release_kernel(event);
-	}
-	if (cpu == 0) {
-		/* watchdog_nmi_enable() expects this to be zero initially. */
-		cpu0_err = 0;
-	}
-}
-
-#else
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
-static int watchdog_nmi_enable(unsigned int cpu)
-{
-	/*
-	 * The new cpu will be marked online before the first hrtimer interrupt
-	 * runs on it.  If another cpu tests for a hardlockup on the new cpu
-	 * before it has run its first hrtimer, it will get a false positive.
-	 * Touch the watchdog on the new cpu to delay the first check for at
-	 * least 3 sampling periods to guarantee one hrtimer has run on the new
-	 * cpu.
-	 */
-	per_cpu(watchdog_nmi_touch, cpu) = true;
-	smp_wmb();
-	cpumask_set_cpu(cpu, &watchdog_cpus);
-	return 0;
-}
-
-static void watchdog_nmi_disable(unsigned int cpu)
-{
-	unsigned int next_cpu = watchdog_next_cpu(cpu);
-
-	/*
-	 * Offlining this cpu will cause the cpu before this one to start
-	 * checking the one after this one.  If this cpu just finished checking
-	 * the next cpu and updating hrtimer_interrupts_saved, and then the
-	 * previous cpu checks it within one sample period, it will trigger a
-	 * false positive.  Touch the watchdog on the next cpu to prevent it.
-	 */
-	if (next_cpu < nr_cpu_ids)
-		per_cpu(watchdog_nmi_touch, next_cpu) = true;
-	smp_wmb();
-	cpumask_clear_cpu(cpu, &watchdog_cpus);
-}
-#else
-static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
-static void watchdog_nmi_disable(unsigned int cpu) { return; }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU */
-#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
-
 static struct smp_hotplug_thread watchdog_threads = {
 	.store			= &softlockup_watchdog,
 	.thread_should_run	= watchdog_should_run,
@@ -859,12 +506,16 @@
 {
 	int cpu, ret = 0;
 
+	atomic_set(&watchdog_park_in_progress, 1);
+
 	for_each_watchdog_cpu(cpu) {
 		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
 		if (ret)
 			break;
 	}
 
+	atomic_set(&watchdog_park_in_progress, 0);
+
 	return ret;
 }
 
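
The two hunks above bracket the kthread-park loop with an atomic
watchdog_park_in_progress flag; the hrtimer and NMI paths check it and skip
their lockup tests while threads are being parked. A hedged userspace sketch
of that guard using C11 atomics (names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int park_in_progress;

    static int timer_fn(void)
    {
        if (atomic_load(&park_in_progress) != 0)
            return 0;                    /* skip checks while parking */
        /* ... normal lockup checks would run here ... */
        return 1;
    }

    static void park_all(void)
    {
        atomic_store(&park_in_progress, 1);
        /* ... park the per-CPU watchdog threads ... */
        atomic_store(&park_in_progress, 0);
    }

    int main(void)
    {
        park_all();
        printf("checks ran: %d\n", timer_fn());   /* 1 after parking ends */
        return 0;
    }
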
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
new file mode 100644
index 0000000..12b8dd6
--- /dev/null
+++ b/kernel/watchdog_hld.c
@@ -0,0 +1,230 @@
+/*
+ * Detect hard lockups on a system
+ *
+ * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
+ *
+ * Note: Most of this code is borrowed heavily from the original softlockup
+ * detector, so thanks to Ingo for the initial implementation.
+ * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
+ * to those contributors as well.
+ */
+
+#define pr_fmt(fmt) "NMI watchdog: " fmt
+
+#include <linux/nmi.h>
+#include <linux/module.h>
+#include <asm/irq_regs.h>
+#include <linux/perf_event.h>
+
+static DEFINE_PER_CPU(bool, hard_watchdog_warn);
+static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
+static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+
+/* boot commands */
+/*
+ * Should we panic when a soft-lockup or hard-lockup occurs:
+ */
+unsigned int __read_mostly hardlockup_panic =
+			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
+static unsigned long hardlockup_allcpu_dumped;
+/*
+ * We may not want to enable hard lockup detection by default in all cases,
+ * for example when running the kernel as a guest on a hypervisor. In these
+ * cases this function can be called to disable hard lockup detection. This
+ * function should only be executed once by the boot processor before the
+ * kernel command line parameters are parsed, because otherwise it is not
+ * possible to override this in hardlockup_panic_setup().
+ */
+void hardlockup_detector_disable(void)
+{
+	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+}
+
+static int __init hardlockup_panic_setup(char *str)
+{
+	if (!strncmp(str, "panic", 5))
+		hardlockup_panic = 1;
+	else if (!strncmp(str, "nopanic", 7))
+		hardlockup_panic = 0;
+	else if (!strncmp(str, "0", 1))
+		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+	else if (!strncmp(str, "1", 1))
+		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
+	return 1;
+}
+__setup("nmi_watchdog=", hardlockup_panic_setup);
+
+void touch_nmi_watchdog(void)
+{
+	/*
+	 * Using __raw here because some code paths have
+	 * preemption enabled.  If preemption is enabled
+	 * then interrupts should be enabled too, in which
+	 * case we shouldn't have to worry about the watchdog
+	 * going off.
+	 */
+	raw_cpu_write(watchdog_nmi_touch, true);
+	touch_softlockup_watchdog();
+}
+EXPORT_SYMBOL(touch_nmi_watchdog);
+
+static struct perf_event_attr wd_hw_attr = {
+	.type		= PERF_TYPE_HARDWARE,
+	.config		= PERF_COUNT_HW_CPU_CYCLES,
+	.size		= sizeof(struct perf_event_attr),
+	.pinned		= 1,
+	.disabled	= 1,
+};
+
+/* Callback function for perf event subsystem */
+static void watchdog_overflow_callback(struct perf_event *event,
+		 struct perf_sample_data *data,
+		 struct pt_regs *regs)
+{
+	/* Ensure the watchdog never gets throttled */
+	event->hw.interrupts = 0;
+
+	if (atomic_read(&watchdog_park_in_progress) != 0)
+		return;
+
+	if (__this_cpu_read(watchdog_nmi_touch) == true) {
+		__this_cpu_write(watchdog_nmi_touch, false);
+		return;
+	}
+
+	/* check for a hardlockup
+	 * This is done by making sure our timer interrupt
+	 * is incrementing.  The timer interrupt should have
+	 * fired multiple times before we overflow'd.  If it hasn't
+	 * then this is a good indication the cpu is stuck
+	 */
+	if (is_hardlockup()) {
+		int this_cpu = smp_processor_id();
+
+		/* only print hardlockups once */
+		if (__this_cpu_read(hard_watchdog_warn) == true)
+			return;
+
+		pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+		print_modules();
+		print_irqtrace_events(current);
+		if (regs)
+			show_regs(regs);
+		else
+			dump_stack();
+
+		/*
+		 * Perform all-CPU dump only once to avoid multiple hardlockups
+		 * generating interleaving traces
+		 */
+		if (sysctl_hardlockup_all_cpu_backtrace &&
+				!test_and_set_bit(0, &hardlockup_allcpu_dumped))
+			trigger_allbutself_cpu_backtrace();
+
+		if (hardlockup_panic)
+			nmi_panic(regs, "Hard LOCKUP");
+
+		__this_cpu_write(hard_watchdog_warn, true);
+		return;
+	}
+
+	__this_cpu_write(hard_watchdog_warn, false);
+	return;
+}
+
+/*
+ * People like the simple clean cpu node info on boot.
+ * Reduce the watchdog noise by only printing messages
+ * that are different from what cpu0 displayed.
+ */
+static unsigned long cpu0_err;
+
+int watchdog_nmi_enable(unsigned int cpu)
+{
+	struct perf_event_attr *wd_attr;
+	struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+	/* nothing to do if the hard lockup detector is disabled */
+	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+		goto out;
+
+	/* is it already setup and enabled? */
+	if (event && event->state > PERF_EVENT_STATE_OFF)
+		goto out;
+
+	/* it is setup but not enabled */
+	if (event != NULL)
+		goto out_enable;
+
+	wd_attr = &wd_hw_attr;
+	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
+
+	/* Try to register using hardware perf events */
+	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
+
+	/* save cpu0 error for future comparison */
+	if (cpu == 0 && IS_ERR(event))
+		cpu0_err = PTR_ERR(event);
+
+	if (!IS_ERR(event)) {
+		/* only print for cpu0 or different than cpu0 */
+		if (cpu == 0 || cpu0_err)
+			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
+		goto out_save;
+	}
+
+	/*
+	 * Disable the hard lockup detector if _any_ CPU fails to set up
+	 * the hardware perf event. The watchdog() function checks
+	 * the NMI_WATCHDOG_ENABLED bit periodically.
+	 *
+	 * The barriers are for syncing up watchdog_enabled across all the
+	 * cpus, as clear_bit() does not use barriers.
+	 */
+	smp_mb__before_atomic();
+	clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
+	smp_mb__after_atomic();
+
+	/* skip displaying the same error again */
+	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
+		return PTR_ERR(event);
+
+	/* vary the KERN level based on the returned errno */
+	if (PTR_ERR(event) == -EOPNOTSUPP)
+		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
+	else if (PTR_ERR(event) == -ENOENT)
+		pr_warn("disabled (cpu%i): hardware events not enabled\n",
+			 cpu);
+	else
+		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
+			cpu, PTR_ERR(event));
+
+	pr_info("Shutting down hard lockup detector on all cpus\n");
+
+	return PTR_ERR(event);
+
+	/* success path */
+out_save:
+	per_cpu(watchdog_ev, cpu) = event;
+out_enable:
+	perf_event_enable(per_cpu(watchdog_ev, cpu));
+out:
+	return 0;
+}
+
+void watchdog_nmi_disable(unsigned int cpu)
+{
+	struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+	if (event) {
+		perf_event_disable(event);
+		per_cpu(watchdog_ev, cpu) = NULL;
+
+		/* should be in cleanup, but blocks oprofile */
+		perf_event_release_kernel(event);
+	}
+	if (cpu == 0) {
+		/* watchdog_nmi_enable() expects this to be zero initially. */
+		cpu0_err = 0;
+	}
+}
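
The new file consolidates the perf-based hard-lockup detector; the heuristic
itself is unchanged: if the per-CPU hrtimer interrupt counter has not advanced
since the previous NMI sample, the CPU is presumed stuck. A self-contained
sketch of that counter comparison (plain C, illustrative variables only):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long hrtimer_interrupts;
    static unsigned long hrtimer_interrupts_saved;

    static bool sample_is_hardlockup(void)
    {
        unsigned long hrint = hrtimer_interrupts;

        if (hrtimer_interrupts_saved == hrint)
            return true;                 /* timer made no progress */

        hrtimer_interrupts_saved = hrint;
        return false;
    }

    int main(void)
    {
        hrtimer_interrupts = 1;                      /* timer fired once */
        printf("%d\n", sample_is_hardlockup());      /* 0: progress seen */
        printf("%d\n", sample_is_hardlockup());      /* 1: stuck since last sample */
        return 0;
    }
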
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6878aa8..2f9f7aa 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -775,27 +775,15 @@
 	  The overhead should be minimal.  A periodic hrtimer runs to
 	  generate interrupts and kick the watchdog task every 4 seconds.
 	  An NMI is generated every 10 seconds or so to check for hardlockups.
-	  If NMIs are not available on the platform, every 12 seconds the
-	  hrtimer interrupt on one cpu will be used to check for hardlockups
-	  on the next cpu.
 
 	  The frequency of hrtimer and NMI events and the soft and hard lockup
 	  thresholds can be controlled through the sysctl watchdog_thresh.
 
-config HARDLOCKUP_DETECTOR_NMI
+config HARDLOCKUP_DETECTOR
 	def_bool y
 	depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
 	depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 
-config HARDLOCKUP_DETECTOR_OTHER_CPU
-	def_bool y
-	depends on LOCKUP_DETECTOR && SMP
-	depends on !HARDLOCKUP_DETECTOR_NMI && !HAVE_NMI_WATCHDOG
-
-config HARDLOCKUP_DETECTOR
-	def_bool y
-	depends on HARDLOCKUP_DETECTOR_NMI || HARDLOCKUP_DETECTOR_OTHER_CPU
-
 config BOOTPARAM_HARDLOCKUP_PANIC
 	bool "Panic (Reboot) On Hard Lockups"
 	depends on HARDLOCKUP_DETECTOR
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 8f13cf7..79069d7 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -22,14 +22,14 @@
  *	the values[M, M+1, ..., N] into the ints array in get_options.
  */
 
-static int get_range(char **str, int *pint)
+static int get_range(char **str, int *pint, int n)
 {
 	int x, inc_counter, upper_range;
 
 	(*str)++;
 	upper_range = simple_strtol((*str), NULL, 0);
 	inc_counter = upper_range - *pint;
-	for (x = *pint; x < upper_range; x++)
+	for (x = *pint; n && x < upper_range; x++, n--)
 		*pint++ = x;
 	return inc_counter;
 }
@@ -96,7 +96,7 @@
 			break;
 		if (res == 3) {
 			int range_nums;
-			range_nums = get_range((char **)&str, ints + i);
+			range_nums = get_range((char **)&str, ints + i, nints - i);
 			if (range_nums < 0)
 				break;
 			/*
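
The cmdline fix above passes the number of free slots into get_range() so a
"M-N" range can never write past the caller's array. A standalone sketch of
the bounded expansion (an illustrative reimplementation, not the kernel
function itself):

    #include <stdio.h>
    #include <stdlib.h>

    static int get_range_bounded(const char *str, int *pint, int n)
    {
        int x, upper_range, inc_counter;

        upper_range = (int)strtol(str, NULL, 0);
        inc_counter = upper_range - *pint;
        for (x = *pint; n && x < upper_range; x++, n--)
            *pint++ = x;                 /* never writes past the n slots left */
        return inc_counter;
    }

    int main(void)
    {
        int ints[4] = { 2 };             /* start of the range, already parsed */

        get_range_bounded("100", ints, 4);
        for (int i = 0; i < 4; i++)
            printf("%d ", ints[i]);      /* prints 2 3 4 5, then stops */
        printf("\n");
        return 0;
    }
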
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 74a54b7..9f79547 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -43,7 +43,7 @@
 u32 crc32c(u32 crc, const void *address, unsigned int length)
 {
 	SHASH_DESC_ON_STACK(shash, tfm);
-	u32 *ctx = (u32 *)shash_desc_ctx(shash);
+	u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
 	int err;
 
 	shash->tfm = tfm;
@@ -53,7 +53,9 @@
 	err = crypto_shash_update(shash, address, length);
 	BUG_ON(err);
 
-	return *ctx;
+	ret = *ctx;
+	barrier_data(ctx);
+	return ret;
 }
 
 EXPORT_SYMBOL(crc32c);
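
The libcrc32c change copies the digest out of the on-stack shash context and
only then issues barrier_data() on it, so the compiler has to keep the context
alive until the value has been read. A hedged sketch of the same pattern;
barrier_data() below is a local stand-in macro built from an asm memory
clobber, not the kernel header definition:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define barrier_data(ptr) __asm__ __volatile__("" : : "r"(ptr) : "memory")

    static uint32_t read_digest(void)
    {
        uint8_t ctx[16];
        uint32_t ret;

        memset(ctx, 0xab, sizeof(ctx));  /* stand-in for the hash update */
        memcpy(&ret, ctx, sizeof(ret));  /* copy the result while ctx is live */
        barrier_data(ctx);               /* ctx must still be considered used here */
        return ret;
    }

    int main(void)
    {
        printf("0x%08x\n", read_digest());
        return 0;
    }
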
diff --git a/mm/gup.c b/mm/gup.c
index ec4f827..c63a034 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -370,11 +370,6 @@
 	/* mlock all present pages, but do not fault in new pages */
 	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
 		return -ENOENT;
-	/* For mm_populate(), just skip the stack guard page. */
-	if ((*flags & FOLL_POPULATE) &&
-			(stack_guard_page_start(vma, address) ||
-			 stack_guard_page_end(vma, address + PAGE_SIZE)))
-		return -ENOENT;
 	if (*flags & FOLL_WRITE)
 		fault_flags |= FAULT_FLAG_WRITE;
 	if (*flags & FOLL_REMOTE)
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 35d2db8..4df20e1 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -13,6 +13,7 @@
  *
  */
 
+#include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/printk.h>
@@ -359,6 +360,8 @@
 	if (likely(!kasan_report_enabled()))
 		return;
 
+	disable_trace_on_warning();
+
 	info.access_addr = (void *)addr;
 	info.access_size = size;
 	info.is_write = is_write;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4bd4480..ce7d416 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1176,7 +1176,10 @@
 	 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
 	 * correctly, we save a copy of the page flags at this time.
 	 */
-	page_flags = p->flags;
+	if (PageHuge(p))
+		page_flags = hpage->flags;
+	else
+		page_flags = p->flags;
 
 	/*
 	 * unpoison always clear PG_hwpoison inside page lock
diff --git a/mm/memory.c b/mm/memory.c
index 91e1653..49d9b42 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2699,40 +2699,6 @@
 }
 
 /*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
-	address &= PAGE_MASK;
-	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-		struct vm_area_struct *prev = vma->vm_prev;
-
-		/*
-		 * Is there a mapping abutting this one below?
-		 *
-		 * That's only ok if it's the same stack mapping
-		 * that has gotten split..
-		 */
-		if (prev && prev->vm_end == address)
-			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
-		return expand_downwards(vma, address - PAGE_SIZE);
-	}
-	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
-		struct vm_area_struct *next = vma->vm_next;
-
-		/* As VM_GROWSDOWN but s/below/above/ */
-		if (next && next->vm_start == address + PAGE_SIZE)
-			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
-		return expand_upwards(vma, address + PAGE_SIZE);
-	}
-	return 0;
-}
-
-/*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2748,10 +2714,6 @@
 	if (vma->vm_flags & VM_SHARED)
 		return VM_FAULT_SIGBUS;
 
-	/* Check if we need to add a guard page to the stack */
-	if (check_stack_guard_page(vma, fe->address) < 0)
-		return VM_FAULT_SIGSEGV;
-
 	/*
 	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
 	 * pte_offset_map() on pmds where a huge pmd might be created
diff --git a/mm/mmap.c b/mm/mmap.c
index 143d62f..b8f91e0e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -183,6 +183,7 @@
 	unsigned long retval;
 	unsigned long newbrk, oldbrk;
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *next;
 	unsigned long min_brk;
 	bool populate;
 
@@ -228,7 +229,8 @@
 	}
 
 	/* Check against existing mmap mappings. */
-	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+	next = find_vma(mm, oldbrk);
+	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
 		goto out;
 
 	/* Ok, looks good - let it rip. */
@@ -251,10 +253,22 @@
 
 static long vma_compute_subtree_gap(struct vm_area_struct *vma)
 {
-	unsigned long max, subtree_gap;
-	max = vma->vm_start;
-	if (vma->vm_prev)
-		max -= vma->vm_prev->vm_end;
+	unsigned long max, prev_end, subtree_gap;
+
+	/*
+	 * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
+	 * allow two stack_guard_gaps between them here, and when choosing
+	 * an unmapped area; whereas when expanding we only require one.
+	 * That's a little inconsistent, but keeps the code here simpler.
+	 */
+	max = vm_start_gap(vma);
+	if (vma->vm_prev) {
+		prev_end = vm_end_gap(vma->vm_prev);
+		if (max > prev_end)
+			max -= prev_end;
+		else
+			max = 0;
+	}
 	if (vma->vm_rb.rb_left) {
 		subtree_gap = rb_entry(vma->vm_rb.rb_left,
 				struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -350,7 +364,7 @@
 			anon_vma_unlock_read(anon_vma);
 		}
 
-		highest_address = vma->vm_end;
+		highest_address = vm_end_gap(vma);
 		vma = vma->vm_next;
 		i++;
 	}
@@ -539,7 +553,7 @@
 	if (vma->vm_next)
 		vma_gap_update(vma->vm_next);
 	else
-		mm->highest_vm_end = vma->vm_end;
+		mm->highest_vm_end = vm_end_gap(vma);
 
 	/*
 	 * vma->vm_prev wasn't known when we followed the rbtree to find the
@@ -854,7 +868,7 @@
 			vma_gap_update(vma);
 		if (end_changed) {
 			if (!next)
-				mm->highest_vm_end = end;
+				mm->highest_vm_end = vm_end_gap(vma);
 			else if (!adjust_next)
 				vma_gap_update(next);
 		}
@@ -939,7 +953,7 @@
 			 * mm->highest_vm_end doesn't need any update
 			 * in remove_next == 1 case.
 			 */
-			VM_WARN_ON(mm->highest_vm_end != end);
+			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
 		}
 	}
 	if (insert && file)
@@ -1792,7 +1806,7 @@
 
 	while (true) {
 		/* Visit left subtree if it looks promising */
-		gap_end = vma->vm_start;
+		gap_end = vm_start_gap(vma);
 		if (gap_end >= low_limit && vma->vm_rb.rb_left) {
 			struct vm_area_struct *left =
 				rb_entry(vma->vm_rb.rb_left,
@@ -1803,12 +1817,13 @@
 			}
 		}
 
-		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
 check_current:
 		/* Check if current node has a suitable gap */
 		if (gap_start > high_limit)
 			return -ENOMEM;
-		if (gap_end >= low_limit && gap_end - gap_start >= length)
+		if (gap_end >= low_limit &&
+		    gap_end > gap_start && gap_end - gap_start >= length)
 			goto found;
 
 		/* Visit right subtree if it looks promising */
@@ -1830,8 +1845,8 @@
 			vma = rb_entry(rb_parent(prev),
 				       struct vm_area_struct, vm_rb);
 			if (prev == vma->vm_rb.rb_left) {
-				gap_start = vma->vm_prev->vm_end;
-				gap_end = vma->vm_start;
+				gap_start = vm_end_gap(vma->vm_prev);
+				gap_end = vm_start_gap(vma);
 				goto check_current;
 			}
 		}
@@ -1895,7 +1910,7 @@
 
 	while (true) {
 		/* Visit right subtree if it looks promising */
-		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
 		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
 			struct vm_area_struct *right =
 				rb_entry(vma->vm_rb.rb_right,
@@ -1908,10 +1923,11 @@
 
 check_current:
 		/* Check if current node has a suitable gap */
-		gap_end = vma->vm_start;
+		gap_end = vm_start_gap(vma);
 		if (gap_end < low_limit)
 			return -ENOMEM;
-		if (gap_start <= high_limit && gap_end - gap_start >= length)
+		if (gap_start <= high_limit &&
+		    gap_end > gap_start && gap_end - gap_start >= length)
 			goto found;
 
 		/* Visit left subtree if it looks promising */
@@ -1934,7 +1950,7 @@
 				       struct vm_area_struct, vm_rb);
 			if (prev == vma->vm_rb.rb_right) {
 				gap_start = vma->vm_prev ?
-					vma->vm_prev->vm_end : 0;
+					vm_end_gap(vma->vm_prev) : 0;
 				goto check_current;
 			}
 		}
@@ -1972,7 +1988,7 @@
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct vm_unmapped_area_info info;
 
 	if (len > TASK_SIZE - mmap_min_addr)
@@ -1983,9 +1999,10 @@
 
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			return addr;
 	}
 
@@ -2008,7 +2025,7 @@
 			  const unsigned long len, const unsigned long pgoff,
 			  const unsigned long flags)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	struct vm_unmapped_area_info info;
@@ -2023,9 +2040,10 @@
 	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-				(!vma || addr + len <= vma->vm_start))
+				(!vma || addr + len <= vm_start_gap(vma)) &&
+				(!prev || addr >= vm_end_gap(prev)))
 			return addr;
 	}
 
@@ -2160,21 +2178,19 @@
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma,
+			     unsigned long size, unsigned long grow)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct rlimit *rlim = current->signal->rlim;
-	unsigned long new_start, actual_size;
+	unsigned long new_start;
 
 	/* address space limit tests */
 	if (!may_expand_vm(mm, vma->vm_flags, grow))
 		return -ENOMEM;
 
 	/* Stack limit test */
-	actual_size = size;
-	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
-		actual_size -= PAGE_SIZE;
-	if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+	if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
 	/* mlock limit tests */
@@ -2212,16 +2228,32 @@
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	struct vm_area_struct *next;
+	unsigned long gap_addr;
 	int error = 0;
 
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
-	/* Guard against wrapping around to address 0. */
-	if (address < PAGE_ALIGN(address+4))
-		address = PAGE_ALIGN(address+4);
-	else
+	/* Guard against exceeding limits of the address space. */
+	address &= PAGE_MASK;
+	if (address >= TASK_SIZE)
 		return -ENOMEM;
+	address += PAGE_SIZE;
+
+	/* Enforce stack_guard_gap */
+	gap_addr = address + stack_guard_gap;
+
+	/* Guard against overflow */
+	if (gap_addr < address || gap_addr > TASK_SIZE)
+		gap_addr = TASK_SIZE;
+
+	next = vma->vm_next;
+	if (next && next->vm_start < gap_addr) {
+		if (!(next->vm_flags & VM_GROWSUP))
+			return -ENOMEM;
+		/* Check that both stack segments have the same anon_vma? */
+	}
 
 	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
@@ -2266,7 +2298,7 @@
 				if (vma->vm_next)
 					vma_gap_update(vma->vm_next);
 				else
-					mm->highest_vm_end = address;
+					mm->highest_vm_end = vm_end_gap(vma);
 				spin_unlock(&mm->page_table_lock);
 
 				perf_event_mmap(vma);
@@ -2287,6 +2319,8 @@
 				   unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	struct vm_area_struct *prev;
+	unsigned long gap_addr;
 	int error;
 
 	address &= PAGE_MASK;
@@ -2294,6 +2328,17 @@
 	if (error)
 		return error;
 
+	/* Enforce stack_guard_gap */
+	gap_addr = address - stack_guard_gap;
+	if (gap_addr > address)
+		return -ENOMEM;
+	prev = vma->vm_prev;
+	if (prev && prev->vm_end > gap_addr) {
+		if (!(prev->vm_flags & VM_GROWSDOWN))
+			return -ENOMEM;
+		/* Check that both stack segments have the same anon_vma? */
+	}
+
 	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
@@ -2348,28 +2393,25 @@
 	return error;
 }
 
-/*
- * Note how expand_stack() refuses to expand the stack all the way to
- * abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard page, after all
- * (the guard page itself is not added here, that is done by the
- * actual page faulting logic)
- *
- * This matches the behavior of the guard page logic (see mm/memory.c:
- * check_stack_guard_page()), which only allows the guard page to be
- * removed under these circumstances.
- */
+/* enforced gap between the expanding stack and other mappings. */
+unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
+static int __init cmdline_parse_stack_guard_gap(char *p)
+{
+	unsigned long val;
+	char *endptr;
+
+	val = simple_strtoul(p, &endptr, 10);
+	if (!*endptr)
+		stack_guard_gap = val << PAGE_SHIFT;
+
+	return 0;
+}
+__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
+
 #ifdef CONFIG_STACK_GROWSUP
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-	struct vm_area_struct *next;
-
-	address &= PAGE_MASK;
-	next = vma->vm_next;
-	if (next && next->vm_start == address + PAGE_SIZE) {
-		if (!(next->vm_flags & VM_GROWSUP))
-			return -ENOMEM;
-	}
 	return expand_upwards(vma, address);
 }
 
@@ -2391,14 +2433,6 @@
 #else
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-	struct vm_area_struct *prev;
-
-	address &= PAGE_MASK;
-	prev = vma->vm_prev;
-	if (prev && prev->vm_end == address) {
-		if (!(prev->vm_flags & VM_GROWSDOWN))
-			return -ENOMEM;
-	}
 	return expand_downwards(vma, address);
 }
 
@@ -2496,7 +2530,7 @@
 		vma->vm_prev = prev;
 		vma_gap_update(vma);
 	} else
-		mm->highest_vm_end = prev ? prev->vm_end : 0;
+		mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
 	tail_vma->vm_next = NULL;
 
 	/* Kill the cache */
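
The mmap hunks above replace the old single guard page with a configurable
stack_guard_gap and route every gap computation through vm_start_gap() and
vm_end_gap(), which are defined elsewhere in the same fix. A hedged standalone
sketch of what those helpers are assumed to do (types, flag values and
addresses below are illustrative):

    #include <stdio.h>

    #define VM_GROWSDOWN 0x1UL
    #define VM_GROWSUP   0x2UL
    #define PAGE_SHIFT   12

    struct vma {
        unsigned long vm_start, vm_end, vm_flags;
    };

    static unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;

    static unsigned long vm_start_gap(const struct vma *v)
    {
        unsigned long start = v->vm_start;

        if (v->vm_flags & VM_GROWSDOWN) {
            start -= stack_guard_gap;
            if (start > v->vm_start)     /* underflow: clamp to 0 */
                start = 0;
        }
        return start;
    }

    static unsigned long vm_end_gap(const struct vma *v)
    {
        unsigned long end = v->vm_end;

        if (v->vm_flags & VM_GROWSUP) {
            end += stack_guard_gap;
            if (end < v->vm_end)         /* overflow: clamp to max */
                end = ~0UL;
        }
        return end;
    }

    int main(void)
    {
        struct vma stack = { 0x40000000UL, 0x40021000UL, VM_GROWSDOWN };
        unsigned long addr = 0x3fff0000UL, len = 0x10000UL;

        /* A fixed-address request just below the stack now fails the
         * "addr + len <= vm_start_gap(vma)" test used in the hunks above. */
        printf("fits: %d\n", addr + len <= vm_start_gap(&stack));
        (void)vm_end_gap(&stack);
        return 0;
    }
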
diff --git a/mm/shmem.c b/mm/shmem.c
index e9c2b6e..142887f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -412,6 +412,7 @@
 		struct shrink_control *sc, unsigned long nr_to_split)
 {
 	LIST_HEAD(list), *pos, *next;
+	LIST_HEAD(to_remove);
 	struct inode *inode;
 	struct shmem_inode_info *info;
 	struct page *page;
@@ -438,9 +439,8 @@
 		/* Check if there's anything to gain */
 		if (round_up(inode->i_size, PAGE_SIZE) ==
 				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
-			list_del_init(&info->shrinklist);
+			list_move(&info->shrinklist, &to_remove);
 			removed++;
-			iput(inode);
 			goto next;
 		}
 
@@ -451,6 +451,13 @@
 	}
 	spin_unlock(&sbinfo->shrinklist_lock);
 
+	list_for_each_safe(pos, next, &to_remove) {
+		info = list_entry(pos, struct shmem_inode_info, shrinklist);
+		inode = &info->vfs_inode;
+		list_del_init(&info->shrinklist);
+		iput(inode);
+	}
+
 	list_for_each_safe(pos, next, &list) {
 		int ret;
 
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index 310ac0b..454d6d7 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -48,6 +48,9 @@
 		if (!page)
 			goto not_enough_page;
 		ctrl->map[idx] = page;
+
+		if (!(idx % SWAP_CLUSTER_MAX))
+			cond_resched();
 	}
 	return 0;
 not_enough_page:
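
The swap_cgroup hunk adds a periodic cond_resched() so a very large swap map
allocation loop cannot monopolize the CPU. A userspace analogy of the same
idea, yielding every few iterations (assumed illustration, not kernel code):

    #include <sched.h>
    #include <stdio.h>

    #define CLUSTER 32                   /* stand-in for SWAP_CLUSTER_MAX */

    int main(void)
    {
        unsigned long idx, yields = 0;

        for (idx = 0; idx < 1000; idx++) {
            /* ... allocate one map page here ... */
            if (!(idx % CLUSTER)) {
                sched_yield();           /* let other tasks run */
                yields++;
            }
        }
        printf("yielded %lu times\n", yields);
        return 0;
    }
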
diff --git a/net/core/dev.c b/net/core/dev.c
index dff8012..a143dbd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -141,6 +141,8 @@
 #include <linux/netfilter_ingress.h>
 #include <linux/sctp.h>
 #include <linux/crash_dump.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
 
 #include "net-sysfs.h"
 
@@ -2988,6 +2990,10 @@
 	if (netif_needs_gso(skb, features)) {
 		struct sk_buff *segs;
 
+		__be16 src_port = tcp_hdr(skb)->source;
+		__be16 dest_port = tcp_hdr(skb)->dest;
+
+		trace_print_skb_gso(skb, src_port, dest_port);
 		segs = skb_gso_segment(skb, features);
 		if (IS_ERR(segs)) {
 			goto out_kfree_skb;
@@ -4400,6 +4406,7 @@
 	}
 
 out:
+	__this_cpu_add(softnet_data.gro_coalesced, NAPI_GRO_CB(skb)->count > 1);
 	return netif_receive_skb_internal(skb);
 }
 
@@ -4836,9 +4843,15 @@
 		while (remsd) {
 			struct softnet_data *next = remsd->rps_ipi_next;
 
-			if (cpu_online(remsd->cpu))
+			if (cpu_online(remsd->cpu)) {
 				smp_call_function_single_async(remsd->cpu,
 							   &remsd->csd);
+			} else {
+				pr_err("%s() cpu offline\n", __func__);
+				rps_lock(remsd);
+				remsd->backlog.state = 0;
+				rps_unlock(remsd);
+			}
 			remsd = next;
 		}
 	} else
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 047a175..072c1f4 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1394,9 +1394,12 @@
 	if (regs.len > reglen)
 		regs.len = reglen;
 
-	regbuf = vzalloc(reglen);
-	if (reglen && !regbuf)
-		return -ENOMEM;
+	regbuf = NULL;
+	if (reglen) {
+		regbuf = vzalloc(reglen);
+		if (!regbuf)
+			return -ENOMEM;
+	}
 
 	ops->get_regs(dev, &regs, regbuf);
 
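
The ethtool change only calls vzalloc() when the register dump length is
non-zero, so a zero-length request keeps regbuf NULL instead of depending on
what a zero-size allocation returns. A small sketch of that guard
(illustrative, with calloc standing in for vzalloc):

    #include <stdio.h>
    #include <stdlib.h>

    static int get_regs(size_t reglen, void **regbufp)
    {
        void *regbuf = NULL;

        if (reglen) {                    /* allocate only when there is data */
            regbuf = calloc(1, reglen);
            if (!regbuf)
                return -1;               /* out of memory */
        }
        /* ... fill regbuf with reglen bytes of register data ... */
        *regbufp = regbuf;               /* may legitimately be NULL */
        return 0;
    }

    int main(void)
    {
        void *buf;

        printf("%d\n", get_regs(0, &buf));   /* 0: success with a NULL buffer */
        free(buf);
        return 0;
    }
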
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 14d0934..699c4e7 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -158,12 +158,12 @@
 	rcu_read_unlock();
 #endif
 
-	seq_printf(seq,
-		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
-		   sd->processed, sd->dropped, sd->time_squeeze, 0,
-		   0, 0, 0, 0, /* was fastroute */
-		   0,	/* was cpu_collision */
-		   sd->received_rps, flow_limit_count);
+	seq_printf
+	(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+	 sd->processed, sd->dropped, sd->time_squeeze, 0,
+	 0, 0, 0, 0, /* was fastroute */
+	 0, /* was cpu_collision */
+	 sd->received_rps, flow_limit_count, sd->gro_coalesced);
 	return 0;
 }
 
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 89a8cac4..51b27ae 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1263,7 +1263,7 @@
 /*
  *	ax25 -> ASCII conversion
  */
-static char *ax2asc2(ax25_address *a, char *buf)
+static void ax2asc2(ax25_address *a, char *buf)
 {
 	char c, *s;
 	int n;
@@ -1285,10 +1285,10 @@
 	*s++ = n + '0';
 	*s++ = '\0';
 
-	if (*buf == '\0' || *buf == '-')
-		return "*";
-
-	return buf;
+	if (*buf == '\0' || *buf == '-') {
+		buf[0] = '*';
+		buf[1] = '\0';
+	}
 }
 #endif /* CONFIG_AX25 */
 
@@ -1322,7 +1322,7 @@
 	}
 #endif
 	sprintf(tbuf, "%pI4", n->primary_key);
-	seq_printf(seq, "%-16s 0x%-10x0x%-10x%s     *        %s\n",
+	seq_printf(seq, "%-16s 0x%-10x0x%-10x%-17s     *        %s\n",
 		   tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name);
 	read_unlock(&n->lock);
 }
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index f6c50af..3d063eb 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -117,7 +117,7 @@
 	     (fwmark > 0 && skb->mark == fwmark)) &&
 	    (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
 
-		spin_lock(&tcp_probe.lock);
+		spin_lock_bh(&tcp_probe.lock);
 		/* If log fills, just silently drop */
 		if (tcp_probe_avail() > 1) {
 			struct tcp_log *p = tcp_probe.log + tcp_probe.head;
@@ -157,7 +157,7 @@
 			tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
 		}
 		tcp_probe.lastcwnd = tp->snd_cwnd;
-		spin_unlock(&tcp_probe.lock);
+		spin_unlock_bh(&tcp_probe.lock);
 
 		wake_up(&tcp_probe.wait);
 	}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 58d7c1d..d600735 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3427,9 +3427,15 @@
 			}
 
 			if (idev) {
-				if (idev->if_flags & IF_READY)
-					/* device is already configured. */
+				if (idev->if_flags & IF_READY) {
+					/* device is already configured -
+					 * but resend MLD reports, we might
+					 * have roamed and need to update
+					 * multicast snooping switches
+					 */
+					ipv6_mc_up(idev);
 					break;
+				}
 				idev->if_flags |= IF_READY;
 			}
 
@@ -4044,6 +4050,12 @@
 
 	if (bump_id)
 		rt_genid_bump_ipv6(dev_net(dev));
+
+	/* Make sure that a new temporary address will be created
+	 * before this temporary address becomes deprecated.
+	 */
+	if (ifp->flags & IFA_F_TEMPORARY)
+		addrconf_verify_rtnl();
 }
 
 static void addrconf_dad_run(struct inet6_dev *idev)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 9217390..edf15f0 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -167,18 +167,22 @@
 	if (np->sndflow)
 		fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
 
-	addr_type = ipv6_addr_type(&usin->sin6_addr);
-
-	if (addr_type == IPV6_ADDR_ANY) {
+	if (ipv6_addr_any(&usin->sin6_addr)) {
 		/*
 		 *	connect to self
 		 */
-		usin->sin6_addr.s6_addr[15] = 0x01;
+		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+					       &usin->sin6_addr);
+		else
+			usin->sin6_addr = in6addr_loopback;
 	}
 
+	addr_type = ipv6_addr_type(&usin->sin6_addr);
+
 	daddr = &usin->sin6_addr;
 
-	if (addr_type == IPV6_ADDR_MAPPED) {
+	if (addr_type & IPV6_ADDR_MAPPED) {
 		struct sockaddr_in sin;
 
 		if (__ipv6_only_sock(sk)) {
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index e604013..7a5b9812 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -68,6 +68,7 @@
 {
 	u32 *v = (u32 *)loc.v32;
 
+	__ila_hash_secret_init();
 	return jhash_2words(v[0], v[1], hashrnd);
 }
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d472a5f..fafad39 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -67,9 +67,6 @@
 	struct in6_addr *nexthop;
 	int ret;
 
-	skb->protocol = htons(ETH_P_IPV6);
-	skb->dev = dev;
-
 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 
@@ -153,6 +150,9 @@
 	struct net_device *dev = skb_dst(skb)->dev;
 	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 
+	skb->protocol = htons(ETH_P_IPV6);
+	skb->dev = dev;
+
 	if (unlikely(idev->cnf.disable_ipv6)) {
 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
 		kfree_skb(skb);
@@ -867,7 +867,6 @@
 	if (skb->sk && dst_allfrag(skb_dst(skb)))
 		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
 
-	skb->dev = skb_dst(skb)->dev;
 	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 	err = -EMSGSIZE;
 
@@ -1028,6 +1027,9 @@
 		}
 	}
 #endif
+	if (ipv6_addr_v4mapped(&fl6->saddr) &&
+	    !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr)))
+		return -EAFNOSUPPORT;
 
 	return 0;
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f8a6036..9828dc2 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -148,8 +148,13 @@
 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 	 */
 
-	if (ipv6_addr_any(&usin->sin6_addr))
-		usin->sin6_addr.s6_addr[15] = 0x1;
+	if (ipv6_addr_any(&usin->sin6_addr)) {
+		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+					       &usin->sin6_addr);
+		else
+			usin->sin6_addr = in6addr_loopback;
+	}
 
 	addr_type = ipv6_addr_type(&usin->sin6_addr);
 
@@ -188,7 +193,7 @@
 	 *	TCP over IPv4
 	 */
 
-	if (addr_type == IPV6_ADDR_MAPPED) {
+	if (addr_type & IPV6_ADDR_MAPPED) {
 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
 		struct sockaddr_in sin;
 
@@ -1237,9 +1242,6 @@
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_do_rcv(sk, skb);
 
-	if (tcp_filter(sk, skb))
-		goto discard;
-
 	/*
 	 *	socket locking is here for SMP purposes as backlog rcv
 	 *	is currently called with bh processing disabled.
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 26d5718..c925fd9 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1049,6 +1049,10 @@
 			if (addr_len < SIN6_LEN_RFC2133)
 				return -EINVAL;
 			daddr = &sin6->sin6_addr;
+			if (ipv6_addr_any(daddr) &&
+			    ipv6_addr_v4mapped(&np->saddr))
+				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+						       daddr);
 			break;
 		case AF_INET:
 			goto do_udp_sendmsg;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index fd6541f..07001b6 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -865,6 +865,8 @@
 	default:
 		return -EINVAL;
 	}
+	sdata->u.ap.req_smps = sdata->smps_mode;
+
 	sdata->needed_rx_chains = sdata->local->rx_chains;
 
 	mutex_lock(&local->mtx);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index a31d307..62d13ea 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -66,6 +66,8 @@
 		    2 + (IEEE80211_MAX_SUPP_RATES - 8) +
 		    2 + sizeof(struct ieee80211_ht_cap) +
 		    2 + sizeof(struct ieee80211_ht_operation) +
+		    2 + sizeof(struct ieee80211_vht_cap) +
+		    2 + sizeof(struct ieee80211_vht_operation) +
 		    ifibss->ie_len;
 	presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL);
 	if (!presp)
@@ -487,14 +489,14 @@
 	struct beacon_data *presp, *old_presp;
 	struct cfg80211_bss *cbss;
 	const struct cfg80211_bss_ies *ies;
-	u16 capability = 0;
+	u16 capability = WLAN_CAPABILITY_IBSS;
 	u64 tsf;
 	int ret = 0;
 
 	sdata_assert_lock(sdata);
 
 	if (ifibss->privacy)
-		capability = WLAN_CAPABILITY_PRIVACY;
+		capability |= WLAN_CAPABILITY_PRIVACY;
 
 	cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan,
 				ifibss->bssid, ifibss->ssid,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index acaaf61..c45a0fc 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1585,12 +1585,16 @@
 	 */
 	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
 	    !ieee80211_has_morefrags(hdr->frame_control) &&
+	    !ieee80211_is_back_req(hdr->frame_control) &&
 	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
 	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
 	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
-	    /* PM bit is only checked in frames where it isn't reserved,
+	    /*
+	     * PM bit is only checked in frames where it isn't reserved,
 	     * in AP mode it's reserved in non-bufferable management frames
 	     * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
+	     * BAR frames should be ignored as specified in
+	     * IEEE 802.11-2012 10.2.1.2.
 	     */
 	    (!ieee80211_is_mgmt(hdr->frame_control) ||
 	     ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
@@ -2467,7 +2471,8 @@
 		if (is_multicast_ether_addr(hdr->addr1)) {
 			mpp_addr = hdr->addr3;
 			proxied_addr = mesh_hdr->eaddr1;
-		} else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) {
+		} else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
+			    MESH_FLAGS_AE_A5_A6) {
 			/* has_a4 already checked in ieee80211_rx_mesh_check */
 			mpp_addr = hdr->addr4;
 			proxied_addr = mesh_hdr->eaddr2;
@@ -3949,6 +3954,7 @@
 	stats->last_rate = sta_stats_encode_rate(status);
 
 	stats->fragments++;
+	stats->packets++;
 
 	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
 		stats->last_signal = status->signal;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 8e05032..b2c823ff 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2148,7 +2148,7 @@
 			struct ieee80211_sta_rx_stats *cpurxs;
 
 			cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
-			sinfo->rx_packets += cpurxs->dropped;
+			sinfo->rx_dropped_misc += cpurxs->dropped;
 		}
 	}
 
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 42ce9bd..5c71d60 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -17,6 +17,7 @@
 #include <asm/unaligned.h>
 #include <net/mac80211.h>
 #include <crypto/aes.h>
+#include <crypto/algapi.h>
 
 #include "ieee80211_i.h"
 #include "michael.h"
@@ -153,7 +154,7 @@
 	data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
 	key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
 	michael_mic(key, hdr, data, data_len, mic);
-	if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0)
+	if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN))
 		goto mic_fail;
 
 	/* remove Michael MIC from payload */
@@ -1047,7 +1048,7 @@
 		bip_aad(skb, aad);
 		ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
 				   skb->data + 24, skb->len - 24, mic);
-		if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+		if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
 			key->u.aes_cmac.icverrors++;
 			return RX_DROP_UNUSABLE;
 		}
@@ -1097,7 +1098,7 @@
 		bip_aad(skb, aad);
 		ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
 				       skb->data + 24, skb->len - 24, mic);
-		if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+		if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
 			key->u.aes_cmac.icverrors++;
 			return RX_DROP_UNUSABLE;
 		}
@@ -1201,7 +1202,7 @@
 		if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
 				       skb->data + 24, skb->len - 24,
 				       mic) < 0 ||
-		    memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+		    crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
 			key->u.aes_gmac.icverrors++;
 			return RX_DROP_UNUSABLE;
 		}
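
The wpa.c hunks swap memcmp() for crypto_memneq() when verifying the Michael
MIC and the BIP/GMAC MICs, so the comparison time no longer depends on where
the first mismatching byte sits. A sketch of a constant-time inequality check
(an illustrative reimplementation, not the kernel helper):

    #include <stddef.h>
    #include <stdio.h>

    static int const_time_memneq(const void *a, const void *b, size_t len)
    {
        const unsigned char *pa = a, *pb = b;
        unsigned char diff = 0;
        size_t i;

        for (i = 0; i < len; i++)
            diff |= pa[i] ^ pb[i];       /* accumulate, never exit early */

        return diff != 0;
    }

    int main(void)
    {
        unsigned char mic[8]      = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned char received[8] = { 1, 2, 3, 4, 5, 6, 7, 9 };

        printf("%d\n", const_time_memneq(mic, received, sizeof(mic)));  /* 1 */
        return 0;
    }
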
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index c3fc14e..3a8dc39 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1630,8 +1630,6 @@
 		ports[ports_c++] = SIP_PORT;
 
 	for (i = 0; i < ports_c; i++) {
-		memset(&sip[i], 0, sizeof(sip[i]));
-
 		nf_ct_helper_init(&sip[4 * i], AF_INET, IPPROTO_UDP, "sip",
 				  SIP_PORT, ports[i], i, sip_exp_policy,
 				  SIP_EXPECT_MAX,
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 3dca90d..ffb9e8a 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -13,7 +13,6 @@
 /* Internal logging interface, which relies on the real
    LOG target modules */
 
-#define NF_LOG_PREFIXLEN		128
 #define NFLOGGER_NAME_LEN		64
 
 static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index e5194f6f..778fcdb 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3637,10 +3637,18 @@
 		goto err5;
 	}
 
+	if (set->size &&
+	    !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) {
+		err = -ENFILE;
+		goto err6;
+	}
+
 	nft_trans_elem(trans) = elem;
 	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 	return 0;
 
+err6:
+	set->ops->remove(set, &elem);
 err5:
 	kfree(trans);
 err4:
@@ -3687,15 +3695,9 @@
 		return -EBUSY;
 
 	nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
-		if (set->size &&
-		    !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact))
-			return -ENFILE;
-
 		err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags);
-		if (err < 0) {
-			atomic_dec(&set->nelems);
+		if (err < 0)
 			break;
-		}
 	}
 	return err;
 }
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 1b01404..c7704e9 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -38,7 +38,8 @@
 
 static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
 	[NFTA_LOG_GROUP]	= { .type = NLA_U16 },
-	[NFTA_LOG_PREFIX]	= { .type = NLA_STRING },
+	[NFTA_LOG_PREFIX]	= { .type = NLA_STRING,
+				    .len = NF_LOG_PREFIXLEN - 1 },
 	[NFTA_LOG_SNAPLEN]	= { .type = NLA_U32 },
 	[NFTA_LOG_QTHRESHOLD]	= { .type = NLA_U16 },
 	[NFTA_LOG_LEVEL]	= { .type = NLA_U32 },
diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
index 18c737a..7fc3407 100644
--- a/net/rxrpc/key.c
+++ b/net/rxrpc/key.c
@@ -217,7 +217,7 @@
 				       unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned int toklen = *_toklen, n_parts, loop, tmp;
+	unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
 
 	/* there must be at least one name, and at least #names+1 length
 	 * words */
@@ -247,16 +247,16 @@
 		toklen -= 4;
 		if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
 			return -EINVAL;
-		if (tmp > toklen)
+		paddedlen = (tmp + 3) & ~3;
+		if (paddedlen > toklen)
 			return -EINVAL;
 		princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
 		if (!princ->name_parts[loop])
 			return -ENOMEM;
 		memcpy(princ->name_parts[loop], xdr, tmp);
 		princ->name_parts[loop][tmp] = 0;
-		tmp = (tmp + 3) & ~3;
-		toklen -= tmp;
-		xdr += tmp >> 2;
+		toklen -= paddedlen;
+		xdr += paddedlen >> 2;
 	}
 
 	if (toklen < 4)
@@ -265,16 +265,16 @@
 	toklen -= 4;
 	if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
 		return -EINVAL;
-	if (tmp > toklen)
+	paddedlen = (tmp + 3) & ~3;
+	if (paddedlen > toklen)
 		return -EINVAL;
 	princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
 	if (!princ->realm)
 		return -ENOMEM;
 	memcpy(princ->realm, xdr, tmp);
 	princ->realm[tmp] = 0;
-	tmp = (tmp + 3) & ~3;
-	toklen -= tmp;
-	xdr += tmp >> 2;
+	toklen -= paddedlen;
+	xdr += paddedlen >> 2;
 
 	_debug("%s/...@%s", princ->name_parts[0], princ->realm);
 
@@ -293,7 +293,7 @@
 					 unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned int toklen = *_toklen, len;
+	unsigned int toklen = *_toklen, len, paddedlen;
 
 	/* there must be at least one tag and one length word */
 	if (toklen <= 8)
@@ -307,15 +307,17 @@
 	toklen -= 8;
 	if (len > max_data_size)
 		return -EINVAL;
+	paddedlen = (len + 3) & ~3;
+	if (paddedlen > toklen)
+		return -EINVAL;
 	td->data_len = len;
 
 	if (len > 0) {
 		td->data = kmemdup(xdr, len, GFP_KERNEL);
 		if (!td->data)
 			return -ENOMEM;
-		len = (len + 3) & ~3;
-		toklen -= len;
-		xdr += len >> 2;
+		toklen -= paddedlen;
+		xdr += paddedlen >> 2;
 	}
 
 	_debug("tag %x len %x", td->tag, td->data_len);
@@ -387,7 +389,7 @@
 				    const __be32 **_xdr, unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned int toklen = *_toklen, len;
+	unsigned int toklen = *_toklen, len, paddedlen;
 
 	/* there must be at least one length word */
 	if (toklen <= 4)
@@ -399,6 +401,9 @@
 	toklen -= 4;
 	if (len > AFSTOKEN_K5_TIX_MAX)
 		return -EINVAL;
+	paddedlen = (len + 3) & ~3;
+	if (paddedlen > toklen)
+		return -EINVAL;
 	*_tktlen = len;
 
 	_debug("ticket len %u", len);
@@ -407,9 +412,8 @@
 		*_ticket = kmemdup(xdr, len, GFP_KERNEL);
 		if (!*_ticket)
 			return -ENOMEM;
-		len = (len + 3) & ~3;
-		toklen -= len;
-		xdr += len >> 2;
+		toklen -= paddedlen;
+		xdr += paddedlen >> 2;
 	}
 
 	*_xdr = xdr;
@@ -552,7 +556,7 @@
 {
 	const __be32 *xdr = prep->data, *token;
 	const char *cp;
-	unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
+	unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
 	size_t datalen = prep->datalen;
 	int ret;
 
@@ -578,22 +582,21 @@
 	if (len < 1 || len > AFSTOKEN_CELL_MAX)
 		goto not_xdr;
 	datalen -= 4;
-	tmp = (len + 3) & ~3;
-	if (tmp > datalen)
+	paddedlen = (len + 3) & ~3;
+	if (paddedlen > datalen)
 		goto not_xdr;
 
 	cp = (const char *) xdr;
 	for (loop = 0; loop < len; loop++)
 		if (!isprint(cp[loop]))
 			goto not_xdr;
-	if (len < tmp)
-		for (; loop < tmp; loop++)
-			if (cp[loop])
-				goto not_xdr;
+	for (; loop < paddedlen; loop++)
+		if (cp[loop])
+			goto not_xdr;
 	_debug("cellname: [%u/%u] '%*.*s'",
-	       len, tmp, len, len, (const char *) xdr);
-	datalen -= tmp;
-	xdr += tmp >> 2;
+	       len, paddedlen, len, len, (const char *) xdr);
+	datalen -= paddedlen;
+	xdr += paddedlen >> 2;
 
 	/* get the token count */
 	if (datalen < 12)
@@ -614,10 +617,11 @@
 		sec_ix = ntohl(*xdr);
 		datalen -= 4;
 		_debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
-		if (toklen < 20 || toklen > datalen)
+		paddedlen = (toklen + 3) & ~3;
+		if (toklen < 20 || toklen > datalen || paddedlen > datalen)
 			goto not_xdr;
-		datalen -= (toklen + 3) & ~3;
-		xdr += (toklen + 3) >> 2;
+		datalen -= paddedlen;
+		xdr += paddedlen >> 2;
 
 	} while (--loop > 0);
 
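
The rxrpc key-parsing hunks compute the XDR padded length up front and check
it against the bytes remaining before anything is copied or the cursor is
advanced, closing an out-of-bounds walk on oversized elements. A minimal
self-contained sketch of that check (illustrative names and layout):

    #include <stdio.h>
    #include <string.h>

    static int read_xdr_string(const unsigned char **cur, unsigned int *remaining,
                               char *out, unsigned int len)
    {
        unsigned int paddedlen = (len + 3) & ~3u;   /* XDR rounds up to 4 bytes */

        if (paddedlen < len || paddedlen > *remaining)
            return -1;                   /* reject before touching the data */

        memcpy(out, *cur, len);
        out[len] = '\0';
        *cur += paddedlen;
        *remaining -= paddedlen;
        return 0;
    }

    int main(void)
    {
        unsigned char buf[8] = "abcde\0\0";
        const unsigned char *cur = buf;
        unsigned int remaining = sizeof(buf);
        char out[16];
        int rc = read_xdr_string(&cur, &remaining, out, 5);

        printf("%d %s\n", rc, out);                             /* 0 abcde */
        printf("%d\n", read_xdr_string(&cur, &remaining, out, 5)); /* -1: exhausted */
        return 0;
    }
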
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 744cfe6c5..c2225cc 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1206,12 +1206,7 @@
 			qdisc_len = q->q.qlen;
 			if (q->ops->change(q, &req.attr))
 				pr_err("%s(): qdisc change failed", __func__);
-		} else {
-			WARN_ONCE(1, "%s(): called on queue which does %s",
-				  __func__, "not support change() operation");
 		}
-	} else {
-		WARN_ONCE(1, "%s(): called on bad queue", __func__);
 	}
 	return qdisc_len;
 }
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 7e869d0..4f5a2b5 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -68,7 +68,7 @@
 		goto out;
 	}
 
-	segs = skb_segment(skb, features | NETIF_F_HW_CSUM);
+	segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG);
 	if (IS_ERR(segs))
 		goto out;
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 14346dc..e1719c6 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -235,8 +235,12 @@
 					      sctp_assoc_t id)
 {
 	struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
-	struct sctp_transport *transport;
+	struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
 	union sctp_addr *laddr = (union sctp_addr *)addr;
+	struct sctp_transport *transport;
+
+	if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
+		return NULL;
 
 	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
 					       laddr,
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 28bf4fe..ab8a2d5 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -110,6 +110,10 @@
 	char addr_string[16];
 
 	tn->own_addr = addr;
+
+	/* Ensure that the new address is visible before we reinit. */
+	smp_mb();
+
 	tipc_named_reinit(net);
 	tipc_sk_reinit(net);
 
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 9d2f4c2..2775332 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -263,6 +263,11 @@
 	write_lock_bh(&n->lock);
 }
 
+static void tipc_node_write_unlock_fast(struct tipc_node *n)
+{
+	write_unlock_bh(&n->lock);
+}
+
 static void tipc_node_write_unlock(struct tipc_node *n)
 {
 	struct net *net = n->net;
@@ -417,7 +422,7 @@
 	}
 	tipc_node_write_lock(n);
 	list_add_tail(subscr, &n->publ_list);
-	tipc_node_write_unlock(n);
+	tipc_node_write_unlock_fast(n);
 	tipc_node_put(n);
 }
 
@@ -435,7 +440,7 @@
 	}
 	tipc_node_write_lock(n);
 	list_del_init(subscr);
-	tipc_node_write_unlock(n);
+	tipc_node_write_unlock_fast(n);
 	tipc_node_put(n);
 }
 
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 215849c..f89c0c2 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -91,7 +91,8 @@
 static void tipc_conn_kref_release(struct kref *kref)
 {
 	struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
-	struct sockaddr_tipc *saddr = con->server->saddr;
+	struct tipc_server *s = con->server;
+	struct sockaddr_tipc *saddr = s->saddr;
 	struct socket *sock = con->sock;
 	struct sock *sk;
 
@@ -106,6 +107,11 @@
 		tipc_sock_release(con);
 		sock_release(sock);
 		con->sock = NULL;
+
+		spin_lock_bh(&s->idr_lock);
+		idr_remove(&s->conn_idr, con->conid);
+		s->idr_in_use--;
+		spin_unlock_bh(&s->idr_lock);
 	}
 
 	tipc_clean_outqueues(con);
@@ -128,8 +134,10 @@
 
 	spin_lock_bh(&s->idr_lock);
 	con = idr_find(&s->conn_idr, conid);
-	if (con)
+	if (con && test_bit(CF_CONNECTED, &con->flags))
 		conn_get(con);
+	else
+		con = NULL;
 	spin_unlock_bh(&s->idr_lock);
 	return con;
 }
@@ -198,15 +206,8 @@
 
 static void tipc_close_conn(struct tipc_conn *con)
 {
-	struct tipc_server *s = con->server;
-
 	if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
 
-		spin_lock_bh(&s->idr_lock);
-		idr_remove(&s->conn_idr, con->conid);
-		s->idr_in_use--;
-		spin_unlock_bh(&s->idr_lock);
-
 		/* We shouldn't flush pending works as we may be in the
 		 * thread. In fact the races with pending rx/tx work structs
 		 * are harmless for us here as we have already deleted this
@@ -458,6 +459,11 @@
 	if (!con)
 		return -EINVAL;
 
+	if (!test_bit(CF_CONNECTED, &con->flags)) {
+		conn_put(con);
+		return 0;
+	}
+
 	e = tipc_alloc_entry(data, len);
 	if (!e) {
 		conn_put(con);
@@ -471,12 +477,8 @@
 	list_add_tail(&e->list, &con->outqueue);
 	spin_unlock_bh(&con->outqueue_lock);
 
-	if (test_bit(CF_CONNECTED, &con->flags)) {
-		if (!queue_work(s->send_wq, &con->swork))
-			conn_put(con);
-	} else {
+	if (!queue_work(s->send_wq, &con->swork))
 		conn_put(con);
-	}
 	return 0;
 }
 
@@ -500,7 +502,7 @@
 	int ret;
 
 	spin_lock_bh(&con->outqueue_lock);
-	while (1) {
+	while (test_bit(CF_CONNECTED, &con->flags)) {
 		e = list_entry(con->outqueue.next, struct outqueue_entry,
 			       list);
 		if ((struct list_head *) e == &con->outqueue)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 41f0138..25bc5c3 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -335,8 +335,6 @@
 	INIT_LIST_HEAD(&tsk->publications);
 	msg = &tsk->phdr;
 	tn = net_generic(sock_net(sk), tipc_net_id);
-	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
-		      NAMED_H_SIZE, 0);
 
 	/* Finish initializing socket data structures */
 	sock->ops = ops;
@@ -346,6 +344,13 @@
 		pr_warn("Socket create failed; port number exhausted\n");
 		return -EINVAL;
 	}
+
+	/* Ensure tsk is visible before we read own_addr. */
+	smp_mb();
+
+	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
+		      NAMED_H_SIZE, 0);
+
 	msg_set_origport(msg, tsk->portid);
 	setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
 	sk->sk_backlog_rcv = tipc_backlog_rcv;
@@ -2264,24 +2269,27 @@
 void tipc_sk_reinit(struct net *net)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	const struct bucket_table *tbl;
-	struct rhash_head *pos;
+	struct rhashtable_iter iter;
 	struct tipc_sock *tsk;
 	struct tipc_msg *msg;
-	int i;
 
-	rcu_read_lock();
-	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
-	for (i = 0; i < tbl->size; i++) {
-		rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
+	rhashtable_walk_enter(&tn->sk_rht, &iter);
+
+	do {
+		tsk = ERR_PTR(rhashtable_walk_start(&iter));
+		if (tsk)
+			continue;
+
+		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
 			spin_lock_bh(&tsk->sk.sk_lock.slock);
 			msg = &tsk->phdr;
 			msg_set_prevnode(msg, tn->own_addr);
 			msg_set_orignode(msg, tn->own_addr);
 			spin_unlock_bh(&tsk->sk.sk_lock.slock);
 		}
-	}
-	rcu_read_unlock();
+
+		rhashtable_walk_stop(&iter);
+	} while (tsk == ERR_PTR(-EAGAIN));
 }
 
 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
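
The tipc_sk_reinit() rewrite above replaces direct bucket-table iteration with the rhashtable walker API, restarting the whole walk whenever the table is resized underneath it (signalled by ERR_PTR(-EAGAIN)). A reduced sketch of that control flow, with a hypothetical visit() callback standing in for the per-socket header update:

#include <linux/err.h>
#include <linux/rhashtable.h>

static void walk_all(struct rhashtable *ht, void (*visit)(void *obj))
{
	struct rhashtable_iter iter;
	void *obj;

	rhashtable_walk_enter(ht, &iter);
	do {
		/* rhashtable_walk_start() returns 0 or -EAGAIN */
		obj = ERR_PTR(rhashtable_walk_start(&iter));
		if (obj)
			continue;	/* resize in progress, retry the walk */

		while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj))
			visit(obj);

		rhashtable_walk_stop(&iter);
	} while (obj == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);	/* pairs with walk_enter */
}
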
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0dd0224..9d94e65 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -54,6 +54,8 @@
 
 static void tipc_subscrp_delete(struct tipc_subscription *sub);
 static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
+static void tipc_subscrp_put(struct tipc_subscription *subscription);
+static void tipc_subscrp_get(struct tipc_subscription *subscription);
 
 /**
  * htohl - convert value to endianness used by destination
@@ -123,6 +125,7 @@
 {
 	struct tipc_name_seq seq;
 
+	tipc_subscrp_get(sub);
 	tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
 	if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
 		return;
@@ -132,30 +135,23 @@
 
 	tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
 				node);
+	tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrp_timeout(unsigned long data)
 {
 	struct tipc_subscription *sub = (struct tipc_subscription *)data;
-	struct tipc_subscriber *subscriber = sub->subscriber;
 
 	/* Notify subscriber of timeout */
 	tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
 				TIPC_SUBSCR_TIMEOUT, 0, 0);
 
-	spin_lock_bh(&subscriber->lock);
-	tipc_subscrp_delete(sub);
-	spin_unlock_bh(&subscriber->lock);
-
-	tipc_subscrb_put(subscriber);
+	tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrb_kref_release(struct kref *kref)
 {
-	struct tipc_subscriber *subcriber = container_of(kref,
-					    struct tipc_subscriber, kref);
-
-	kfree(subcriber);
+	kfree(container_of(kref, struct tipc_subscriber, kref));
 }
 
 static void tipc_subscrb_put(struct tipc_subscriber *subscriber)
@@ -168,6 +164,59 @@
 	kref_get(&subscriber->kref);
 }
 
+static void tipc_subscrp_kref_release(struct kref *kref)
+{
+	struct tipc_subscription *sub = container_of(kref,
+						     struct tipc_subscription,
+						     kref);
+	struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+	struct tipc_subscriber *subscriber = sub->subscriber;
+
+	spin_lock_bh(&subscriber->lock);
+	tipc_nametbl_unsubscribe(sub);
+	list_del(&sub->subscrp_list);
+	atomic_dec(&tn->subscription_count);
+	spin_unlock_bh(&subscriber->lock);
+	kfree(sub);
+	tipc_subscrb_put(subscriber);
+}
+
+static void tipc_subscrp_put(struct tipc_subscription *subscription)
+{
+	kref_put(&subscription->kref, tipc_subscrp_kref_release);
+}
+
+static void tipc_subscrp_get(struct tipc_subscription *subscription)
+{
+	kref_get(&subscription->kref);
+}
+
+/* tipc_subscrb_subscrp_delete - delete a specific subscription or all
+ * subscriptions for a given subscriber.
+ */
+static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
+					struct tipc_subscr *s)
+{
+	struct list_head *subscription_list = &subscriber->subscrp_list;
+	struct tipc_subscription *sub, *temp;
+
+	spin_lock_bh(&subscriber->lock);
+	list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) {
+		if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
+			continue;
+
+		tipc_subscrp_get(sub);
+		spin_unlock_bh(&subscriber->lock);
+		tipc_subscrp_delete(sub);
+		tipc_subscrp_put(sub);
+		spin_lock_bh(&subscriber->lock);
+
+		if (s)
+			break;
+	}
+	spin_unlock_bh(&subscriber->lock);
+}
+
 static struct tipc_subscriber *tipc_subscrb_create(int conid)
 {
 	struct tipc_subscriber *subscriber;
@@ -177,8 +226,8 @@
 		pr_warn("Subscriber rejected, no memory\n");
 		return NULL;
 	}
-	kref_init(&subscriber->kref);
 	INIT_LIST_HEAD(&subscriber->subscrp_list);
+	kref_init(&subscriber->kref);
 	subscriber->conid = conid;
 	spin_lock_init(&subscriber->lock);
 
@@ -187,55 +236,22 @@
 
 static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
 {
-	struct tipc_subscription *sub, *temp;
-	u32 timeout;
-
-	spin_lock_bh(&subscriber->lock);
-	/* Destroy any existing subscriptions for subscriber */
-	list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
-				 subscrp_list) {
-		timeout = htohl(sub->evt.s.timeout, sub->swap);
-		if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) {
-			tipc_subscrp_delete(sub);
-			tipc_subscrb_put(subscriber);
-		}
-	}
-	spin_unlock_bh(&subscriber->lock);
-
+	tipc_subscrb_subscrp_delete(subscriber, NULL);
 	tipc_subscrb_put(subscriber);
 }
 
 static void tipc_subscrp_delete(struct tipc_subscription *sub)
 {
-	struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+	u32 timeout = htohl(sub->evt.s.timeout, sub->swap);
 
-	tipc_nametbl_unsubscribe(sub);
-	list_del(&sub->subscrp_list);
-	kfree(sub);
-	atomic_dec(&tn->subscription_count);
+	if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer))
+		tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrp_cancel(struct tipc_subscr *s,
 				struct tipc_subscriber *subscriber)
 {
-	struct tipc_subscription *sub, *temp;
-	u32 timeout;
-
-	spin_lock_bh(&subscriber->lock);
-	/* Find first matching subscription, exit if not found */
-	list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
-				 subscrp_list) {
-		if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
-			timeout = htohl(sub->evt.s.timeout, sub->swap);
-			if ((timeout == TIPC_WAIT_FOREVER) ||
-			    del_timer(&sub->timer)) {
-				tipc_subscrp_delete(sub);
-				tipc_subscrb_put(subscriber);
-			}
-			break;
-		}
-	}
-	spin_unlock_bh(&subscriber->lock);
+	tipc_subscrb_subscrp_delete(subscriber, s);
 }
 
 static struct tipc_subscription *tipc_subscrp_create(struct net *net,
@@ -272,6 +288,7 @@
 	sub->swap = swap;
 	memcpy(&sub->evt.s, s, sizeof(*s));
 	atomic_inc(&tn->subscription_count);
+	kref_init(&sub->kref);
 	return sub;
 }
 
@@ -288,17 +305,16 @@
 
 	spin_lock_bh(&subscriber->lock);
 	list_add(&sub->subscrp_list, &subscriber->subscrp_list);
-	tipc_subscrb_get(subscriber);
 	sub->subscriber = subscriber;
 	tipc_nametbl_subscribe(sub);
+	tipc_subscrb_get(subscriber);
 	spin_unlock_bh(&subscriber->lock);
 
-	timeout = htohl(sub->evt.s.timeout, swap);
-	if (timeout == TIPC_WAIT_FOREVER)
-		return;
-
 	setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
-	mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
+	timeout = htohl(sub->evt.s.timeout, swap);
+
+	if (timeout != TIPC_WAIT_FOREVER)
+		mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
 }
 
 /* Handle one termination request for the subscriber */
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index be60103..ffdc214 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -57,6 +57,7 @@
  * @evt: template for events generated by subscription
  */
 struct tipc_subscription {
+	struct kref kref;
 	struct tipc_subscriber *subscriber;
 	struct net *net;
 	struct timer_list timer;
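
The subscription changes above move all teardown work (name-table unsubscribe, list removal, kfree) into the release callback of the new kref added to struct tipc_subscription, so the timer path, the event path and explicit cancellation can each drop their reference without racing over which one frees the object. The general shape of that pattern, with illustrative object and field names rather than the TIPC structures:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct item {
	struct kref kref;
	/* ... payload ... */
};

/* The last kref_put() runs this exactly once, no matter which path
 * dropped the final reference.
 */
static void item_release(struct kref *kref)
{
	struct item *it = container_of(kref, struct item, kref);

	/* unlink from any lists here, then free */
	kfree(it);
}

static struct item *item_create(void)
{
	struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);

	if (it)
		kref_init(&it->kref);	/* refcount starts at 1 */
	return it;
}

static void item_get(struct item *it)
{
	kref_get(&it->kref);		/* taken before each async use */
}

static void item_put(struct item *it)
{
	kref_put(&it->kref, item_release);
}
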
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 60ee74c..7c8b406 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -456,6 +456,8 @@
 	if (iftype == NL80211_IFTYPE_MESH_POINT)
 		skb_copy_bits(skb, hdrlen, &mesh_flags, 1);
 
+	mesh_flags &= MESH_FLAGS_AE;
+
 	switch (hdr->frame_control &
 		cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
 	case cpu_to_le16(IEEE80211_FCTL_TODS):
@@ -471,9 +473,9 @@
 			     iftype != NL80211_IFTYPE_STATION))
 			return -1;
 		if (iftype == NL80211_IFTYPE_MESH_POINT) {
-			if (mesh_flags & MESH_FLAGS_AE_A4)
+			if (mesh_flags == MESH_FLAGS_AE_A4)
 				return -1;
-			if (mesh_flags & MESH_FLAGS_AE_A5_A6) {
+			if (mesh_flags == MESH_FLAGS_AE_A5_A6) {
 				skb_copy_bits(skb, hdrlen +
 					offsetof(struct ieee80211s_hdr, eaddr1),
 					tmp.h_dest, 2 * ETH_ALEN);
@@ -489,9 +491,9 @@
 		     ether_addr_equal(tmp.h_source, addr)))
 			return -1;
 		if (iftype == NL80211_IFTYPE_MESH_POINT) {
-			if (mesh_flags & MESH_FLAGS_AE_A5_A6)
+			if (mesh_flags == MESH_FLAGS_AE_A5_A6)
 				return -1;
-			if (mesh_flags & MESH_FLAGS_AE_A4)
+			if (mesh_flags == MESH_FLAGS_AE_A4)
 				skb_copy_bits(skb, hdrlen +
 					offsetof(struct ieee80211s_hdr, eaddr1),
 					tmp.h_source, ETH_ALEN);
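
The util.c change above masks the mesh flags down to the Address-Extension bits and then tests them with exact comparisons, so a header that sets both AE bits no longer satisfies either branch. A small sketch of the ToDS-side test; the constants mirror the IEEE 802.11s AE values and the helper name is illustrative:

#include <stdbool.h>
#include <stdint.h>

#define MESH_FLAGS_AE_A4	0x1
#define MESH_FLAGS_AE_A5_A6	0x2
#define MESH_FLAGS_AE		0x3

/* Only an exact AE value selects a branch: with '==' instead of '&',
 * a header that sets both AE bits matches neither test, whereas the
 * old bitwise check would have treated it like A4 addressing.
 */
static bool tods_ae_is_a4(uint8_t mesh_flags)
{
	mesh_flags &= MESH_FLAGS_AE;	/* ignore the non-AE flag bits */
	return mesh_flags == MESH_FLAGS_AE_A4;
}
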
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index bbba7be..6faddfb 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -2509,7 +2509,7 @@
 	struct snd_pcm_substream *substream;
 	const struct snd_pcm_chmap_elem *map;
 
-	if (snd_BUG_ON(!info->chmap))
+	if (!info->chmap)
 		return -EINVAL;
 	substream = snd_pcm_chmap_substream(info, idx);
 	if (!substream)
@@ -2541,7 +2541,7 @@
 	unsigned int __user *dst;
 	int c, count = 0;
 
-	if (snd_BUG_ON(!info->chmap))
+	if (!info->chmap)
 		return -EINVAL;
 	if (size < 8)
 		return -ENOMEM;
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 00060c4..9741757 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -606,7 +606,9 @@
 		cycle = increment_cycle_count(cycle, 1);
 		if (handle_out_packet(s, cycle, i) < 0) {
 			s->packet_index = -1;
-			amdtp_stream_pcm_abort(s);
+			if (in_interrupt())
+				amdtp_stream_pcm_abort(s);
+			WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
 			return;
 		}
 	}
@@ -658,7 +660,9 @@
 	/* Queueing error or detecting invalid payload. */
 	if (i < packets) {
 		s->packet_index = -1;
-		amdtp_stream_pcm_abort(s);
+		if (in_interrupt())
+			amdtp_stream_pcm_abort(s);
+		WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
 		return;
 	}
 
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index c1bc7fa..f7c054b 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -124,7 +124,7 @@
 	/* For a PCM substream processing. */
 	struct snd_pcm_substream *pcm;
 	struct tasklet_struct period_tasklet;
-	unsigned int pcm_buffer_pointer;
+	snd_pcm_uframes_t pcm_buffer_pointer;
 	unsigned int pcm_period_pointer;
 
 	/* To wait for first packet. */
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 17224de..f800858 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -878,28 +878,24 @@
 config SND_SOC_WCD934X_DSD
         tristate
 
-config SND_SOC_WCD9320
-        tristate
-
-config SND_SOC_WCD9330
-        tristate
-	depends on WCD9330_CODEC
-
 config SND_SOC_WCD9335
         tristate
-	depends on WCD9335_CODEC
+	depends on WCD9XXX_CODEC_CORE
+	select SND_SOC_WCD9XXX
 	select SND_SOC_WCD_MBHC
 	select SND_SOC_WCD_MBHC_LEGACY
+	select SND_SOC_WCD_CPE
 
 config SND_SOC_WCD934X
         tristate
-	depends on WCD934X_CODEC
-	select SND_SOC_WCD9XXX_V2
+	depends on WCD9XXX_CODEC_CORE
+	select SND_SOC_WCD9XXX
 	select AUDIO_EXT_CLK
 	select SND_SOC_WCD_DSP_MGR
 	select SND_SOC_WCD_SPI
 	select SND_SOC_WCD934X_MBHC
         select SND_SOC_WCD934X_DSD
+	select WCD_DSP_GLINK
 
 config SND_SOC_WCD934X_MBHC
         tristate
@@ -907,10 +903,14 @@
 	select SND_SOC_WCD_MBHC
 	select SND_SOC_WCD_MBHC_ADC
 
+config REGMAP_SWR
+	tristate
+	default y
+
 config SND_SOC_WSA881X
         tristate
+	depends on REGMAP_SWR
 	select MSM_CDC_PINCTRL
-	select REGMAP_SWR
 
 config SND_SOC_WSA881X_ANALOG
         tristate
@@ -918,19 +918,15 @@
 
 config SND_SOC_WCD9XXX
 	tristate
-	default y if SND_SOC_WCD9320=y || SND_SOC_WCD9330=y || SND_SOC_WCD9335=y
-
-config SND_SOC_WCD9XXX_V2
-	tristate
-	default y if SND_SOC_WCD9335=y
+	default y if SND_SOC_WCD9335=y || SND_SOC_WCD934X=y
 
 config SND_SOC_WCD_CPE
 	tristate
-	default y if SND_SOC_WCD9330=y || SND_SOC_WCD9335=y
+	default y if SND_SOC_WCD9335=y
 
 config AUDIO_EXT_CLK
 	tristate
-	default y if SND_SOC_WCD9335=y || SND_SOC_WCD9330=y || SND_SOC_SDM660_CDC=y
+	default y if SND_SOC_WCD9335=y || SND_SOC_SDM660_CDC=y
 
 config SND_SOC_WCD_MBHC
 	tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 8c84460..d5e4ab2 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -158,30 +158,27 @@
 snd-soc-twl6040-objs := twl6040.o
 snd-soc-uda134x-objs := uda134x.o
 snd-soc-uda1380-objs := uda1380.o
-snd-soc-wcd9320-objs := wcd9320.o wcd9320-tables.o
-snd-soc-wcd9330-objs := wcd9330.o wcd9330-tables.o
 snd-soc-wcd9335-objs := wcd9335.o
 snd-soc-wcd934x-objs := wcd934x.o
-snd-soc-wcd9xxx-objs := wcd9xxx-resmgr.o wcd9xxx-mbhc.o wcd9xxx-common.o wcdcal-hwdep.o
-snd-soc-wcd9xxx-v2-objs := wcd9xxx-common-v2.o wcd9xxx-resmgr-v2.o wcdcal-hwdep.o
+snd-soc-wcd9xxx-objs := wcd9xxx-common-v2.o wcd9xxx-resmgr-v2.o \
+			wcdcal-hwdep.o wcd-dsp-mgr.o wcd-dsp-utils.o \
+			wcd9xxx-soc-init.o
 ifeq ($(CONFIG_COMMON_CLK_MSM), y)
-	audio-ext-clock-objs := audio-ext-clk.o
+	snd-soc-wcd9xxx-objs += audio-ext-clk.o
 endif
 
 ifeq ($(CONFIG_COMMON_CLK_QCOM), y)
-	audio-ext-clock-up-objs := audio-ext-clk-up.o
+	snd-soc-wcd9xxx-objs += audio-ext-clk-up.o
 endif
 snd-soc-wcd-cpe-objs := wcd_cpe_services.o wcd_cpe_core.o
 snd-soc-wsa881x-objs := wsa881x.o wsa881x-tables.o wsa881x-regmap.o wsa881x-temp-sensor.o
-ifeq ($(CONFIG_SND_SOC_WCD_MBHC_LEGACY), y)
+ifneq (,$(filter $(CONFIG_SND_SOC_WCD_MBHC_LEGACY),y m))
 	snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o wcd-mbhc-legacy.o
-else ifeq ($(CONFIG_SND_SOC_WCD_MBHC_ADC), y)
+else ifneq (,$(filter $(CONFIG_SND_SOC_WCD_MBHC_ADC),y m))
 	snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o wcd-mbhc-adc.o
 endif
 snd-soc-wsa881x-analog-objs := wsa881x-analog.o wsa881x-tables-analog.o
 snd-soc-wsa881x-analog-objs += wsa881x-regmap-analog.o wsa881x-irq.o
-snd-soc-wcd-dsp-utils-objs := wcd-dsp-utils.o
-snd-soc-wcd-dsp-mgr-objs := wcd-dsp-mgr.o
 snd-soc-wcd-spi-objs := wcd-spi.o
 snd-soc-wl1273-objs := wl1273.o
 snd-soc-wm-adsp-objs := wm_adsp.o
@@ -407,23 +404,14 @@
 obj-$(CONFIG_SND_SOC_UDA134X)	+= snd-soc-uda134x.o
 obj-$(CONFIG_SND_SOC_UDA1380)	+= snd-soc-uda1380.o
 obj-$(CONFIG_SND_SOC_WCD9320)	+= snd-soc-wcd9320.o
-obj-$(CONFIG_SND_SOC_WCD9330)	+= snd-soc-wcd9330.o
 obj-$(CONFIG_SND_SOC_WCD9335)	+= snd-soc-wcd9335.o
 obj-$(CONFIG_SND_SOC_WCD934X)	+= wcd934x/
-ifeq ($(CONFIG_COMMON_CLK_MSM), y)
-	obj-$(CONFIG_AUDIO_EXT_CLK)	+= audio-ext-clock.o
-endif
-ifeq ($(CONFIG_COMMON_CLK_QCOM), y)
-	obj-$(CONFIG_AUDIO_EXT_CLK)     += audio-ext-clock-up.o
-endif
-obj-$(CONFIG_SND_SOC_WCD9XXX)   += snd-soc-wcd9xxx.o
-obj-$(CONFIG_SND_SOC_WCD9XXX_V2) += snd-soc-wcd9xxx-v2.o
+obj-$(CONFIG_SND_SOC_WCD9XXX) += snd-soc-wcd9xxx.o
 obj-$(CONFIG_SND_SOC_WCD_CPE)   += snd-soc-wcd-cpe.o
 obj-$(CONFIG_SND_SOC_WCD_MBHC)  += snd-soc-wcd-mbhc.o
 obj-$(CONFIG_SND_SOC_WSA881X)	+= snd-soc-wsa881x.o
 obj-$(CONFIG_SND_SOC_WSA881X_ANALOG)	+= snd-soc-wsa881x-analog.o
 obj-$(CONFIG_SND_SOC_WL1273)	+= snd-soc-wl1273.o
-obj-$(CONFIG_SND_SOC_WCD_DSP_MGR)	+= snd-soc-wcd-dsp-mgr.o snd-soc-wcd-dsp-utils.o
 obj-$(CONFIG_SND_SOC_WCD_SPI)  += snd-soc-wcd-spi.o
 obj-$(CONFIG_SND_SOC_WM0010)	+= snd-soc-wm0010.o
 obj-$(CONFIG_SND_SOC_WM1250_EV1) += snd-soc-wm1250-ev1.o
diff --git a/sound/soc/codecs/audio-ext-clk-up.c b/sound/soc/codecs/audio-ext-clk-up.c
index f12f4ca..31c063d 100644
--- a/sound/soc/codecs/audio-ext-clk-up.c
+++ b/sound/soc/codecs/audio-ext-clk-up.c
@@ -23,6 +23,7 @@
 #include <linux/of_gpio.h>
 #include <dt-bindings/clock/qcom,audio-ext-clk.h>
 #include <sound/q6afe-v2.h>
+#include "audio-ext-clk-up.h"
 
 enum audio_clk_mux {
 	AP_CLK2,
@@ -176,6 +177,15 @@
 	struct pinctrl_info *pnctrl_info = &audio_lpass_mclk->pnctrl_info;
 	int ret;
 
+	lpass_mclk.enable = 1;
+	ret = afe_set_lpass_clock_v2(AFE_PORT_ID_PRIMARY_MI2S_RX,
+				&lpass_mclk);
+	if (ret < 0) {
+		pr_err("%s afe_set_digital_codec_core_clock failed\n",
+			__func__);
+		return ret;
+	}
+
 	if (pnctrl_info->pinctrl) {
 		ret = pinctrl_select_state(pnctrl_info->pinctrl,
 				pnctrl_info->active);
@@ -186,15 +196,6 @@
 		}
 	}
 
-	lpass_mclk.enable = 1;
-	ret = afe_set_lpass_clock_v2(AFE_PORT_ID_PRIMARY_MI2S_RX,
-				&lpass_mclk);
-	if (ret < 0) {
-		pr_err("%s afe_set_digital_codec_core_clock failed\n",
-			__func__);
-		return ret;
-	}
-
 	if (pnctrl_info->base)
 		iowrite32(1, pnctrl_info->base);
 	return 0;
@@ -611,17 +612,15 @@
 	.remove = audio_ref_clk_remove,
 };
 
-static int __init audio_ref_clk_platform_init(void)
+int audio_ref_clk_platform_init(void)
 {
 	return platform_driver_register(&audio_ref_clk_driver);
 }
-module_init(audio_ref_clk_platform_init);
 
-static void __exit audio_ref_clk_platform_exit(void)
+void audio_ref_clk_platform_exit(void)
 {
 	platform_driver_unregister(&audio_ref_clk_driver);
 }
-module_exit(audio_ref_clk_platform_exit);
 
 MODULE_DESCRIPTION("Audio Ref Up Clock module platform driver");
 MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/audio-ext-clk-up.h b/sound/soc/codecs/audio-ext-clk-up.h
new file mode 100644
index 0000000..8a0232e
--- /dev/null
+++ b/sound/soc/codecs/audio-ext-clk-up.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __AUDIO_EXT_CLK_UP_H_
+#define __AUDIO_EXT_CLK_UP_H_
+
+int audio_ref_clk_platform_init(void);
+void audio_ref_clk_platform_exit(void);
+
+#endif
diff --git a/sound/soc/codecs/audio-ext-clk.c b/sound/soc/codecs/audio-ext-clk.c
index ef795df..72f16f5 100644
--- a/sound/soc/codecs/audio-ext-clk.c
+++ b/sound/soc/codecs/audio-ext-clk.c
@@ -23,6 +23,7 @@
 #include <linux/of_gpio.h>
 #include <dt-bindings/clock/audio-ext-clk.h>
 #include <sound/q6afe-v2.h>
+#include "audio-ext-clk-up.h"
 
 struct pinctrl_info {
 	struct pinctrl *pinctrl;
@@ -333,17 +334,15 @@
 	.remove = audio_ref_clk_remove,
 };
 
-static int __init audio_ref_clk_platform_init(void)
+int audio_ref_clk_platform_init(void)
 {
 	return platform_driver_register(&audio_ref_clk_driver);
 }
-module_init(audio_ref_clk_platform_init);
 
-static void __exit audio_ref_clk_platform_exit(void)
+void audio_ref_clk_platform_exit(void)
 {
 	platform_driver_unregister(&audio_ref_clk_driver);
 }
-module_exit(audio_ref_clk_platform_exit);
 
 MODULE_DESCRIPTION("Audio Ref Clock module platform driver");
 MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
index 62fdb94..15d62c3 100644
--- a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
@@ -1039,7 +1039,6 @@
 		__func__, reg);
 	sdw_rd_addr_base = MSM_SDW_AHB_BRIDGE_RD_ADDR_0;
 	sdw_rd_data_base = MSM_SDW_AHB_BRIDGE_RD_DATA_0;
-
 	/*
 	 * Add sleep as SWR slave access read takes time.
 	 * Allow for RD_DONE to complete for previous register if any.
@@ -1054,6 +1053,8 @@
 		dev_err(msm_sdw->dev, "%s: RD Addr Failure\n", __func__);
 		goto err;
 	}
+	/* Add sleep to allow the SWR register read value to update. */
+	usleep_range(100, 105);
 	/* Check for RD value */
 	ret = regmap_bulk_read(msm_sdw->regmap, sdw_rd_data_base,
 			       (u8 *)&val, 4);
@@ -1079,12 +1080,12 @@
 	sdw_wr_addr_base = MSM_SDW_AHB_BRIDGE_WR_ADDR_0;
 	sdw_wr_data_base = MSM_SDW_AHB_BRIDGE_WR_DATA_0;
 
-	/*
-	 * Add sleep as SWR slave write takes time.
-	 * Allow for any previous pending write to complete.
-	 */
-	usleep_range(50, 55);
 	for (i = 0; i < len; i += 2) {
+		/*
+		 * Add sleep as SWR slave write takes time.
+		 * Allow for any previous pending write to complete.
+		 */
+		usleep_range(100, 105);
 		/* First Write the Data to register */
 		ret = regmap_bulk_write(msm_sdw->regmap,
 			sdw_wr_data_base, bulk_reg[i].buf, 4);
diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c
index 93c2fd1..661db2b 100644
--- a/sound/soc/codecs/wcd-dsp-mgr.c
+++ b/sound/soc/codecs/wcd-dsp-mgr.c
@@ -610,6 +610,25 @@
 	return cmpnt->cdev;
 }
 
+static int wdsp_get_devops_for_cmpnt(struct device *wdsp_dev,
+				     enum wdsp_cmpnt_type type,
+				     void *data)
+{
+	struct wdsp_mgr_priv *wdsp;
+	int ret = 0;
+
+	if (!wdsp_dev || type >= WDSP_CMPNT_TYPE_MAX)
+		return -EINVAL;
+
+	wdsp = dev_get_drvdata(wdsp_dev);
+	ret = wdsp_unicast_event(wdsp, type,
+				 WDSP_EVENT_GET_DEVOPS, data);
+	if (ret)
+		WDSP_ERR(wdsp, "get_dev_ops failed for cmpnt type %d",
+			 type);
+	return ret;
+}
+
 static void wdsp_collect_ramdumps(struct wdsp_mgr_priv *wdsp)
 {
 	struct wdsp_img_section img_section;
@@ -941,6 +960,7 @@
 static struct wdsp_mgr_ops wdsp_ops = {
 	.register_cmpnt_ops = wdsp_register_cmpnt_ops,
 	.get_dev_for_cmpnt = wdsp_get_dev_for_cmpnt,
+	.get_devops_for_cmpnt = wdsp_get_devops_for_cmpnt,
 	.signal_handler = wdsp_signal_handler,
 	.vote_for_dsp = wdsp_vote_for_dsp,
 	.suspend = wdsp_suspend,
@@ -1217,7 +1237,16 @@
 	.probe = wdsp_mgr_probe,
 	.remove = wdsp_mgr_remove,
 };
-module_platform_driver(wdsp_mgr_driver);
+
+int wcd_dsp_mgr_init(void)
+{
+	return platform_driver_register(&wdsp_mgr_driver);
+}
+
+void wcd_dsp_mgr_exit(void)
+{
+	platform_driver_unregister(&wdsp_mgr_driver);
+}
 
 MODULE_DESCRIPTION("WCD DSP manager driver");
 MODULE_DEVICE_TABLE(of, wdsp_mgr_dt_match);
diff --git a/sound/soc/codecs/wcd-mbhc-adc.h b/sound/soc/codecs/wcd-mbhc-adc.h
index 112d508..3116108 100644
--- a/sound/soc/codecs/wcd-mbhc-adc.h
+++ b/sound/soc/codecs/wcd-mbhc-adc.h
@@ -24,7 +24,7 @@
 	MUX_CTL_NONE,
 };
 
-#ifdef CONFIG_SND_SOC_WCD_MBHC_ADC
+#if IS_ENABLED(CONFIG_SND_SOC_WCD_MBHC_ADC)
 void wcd_mbhc_adc_init(struct wcd_mbhc *mbhc);
 #else
 static inline void wcd_mbhc_adc_init(struct wcd_mbhc *mbhc)
diff --git a/sound/soc/codecs/wcd-mbhc-v2-api.h b/sound/soc/codecs/wcd-mbhc-v2-api.h
index fab2b49..7b6e945 100644
--- a/sound/soc/codecs/wcd-mbhc-v2-api.h
+++ b/sound/soc/codecs/wcd-mbhc-v2-api.h
@@ -14,7 +14,7 @@
 
 #include "wcd-mbhc-v2.h"
 
-#ifdef CONFIG_SND_SOC_WCD_MBHC
+#if IS_ENABLED(CONFIG_SND_SOC_WCD_MBHC)
 int wcd_mbhc_start(struct wcd_mbhc *mbhc,
 		       struct wcd_mbhc_config *mbhc_cfg);
 void wcd_mbhc_stop(struct wcd_mbhc *mbhc);
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index cb96f2b..3b2426d 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -2058,9 +2058,12 @@
 	mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->hph_right_ocp, mbhc);
 	if (mbhc->mbhc_cb && mbhc->mbhc_cb->register_notifier)
 		mbhc->mbhc_cb->register_notifier(mbhc, &mbhc->nblock, false);
-	if (mbhc->mbhc_fn->wcd_cancel_hs_detect_plug)
+	if (mbhc->mbhc_fn->wcd_cancel_hs_detect_plug) {
+		WCD_MBHC_RSC_LOCK(mbhc);
 		mbhc->mbhc_fn->wcd_cancel_hs_detect_plug(mbhc,
 					&mbhc->correct_plug_swch);
+		WCD_MBHC_RSC_UNLOCK(mbhc);
+	}
 	mutex_destroy(&mbhc->codec_resource_lock);
 	mutex_destroy(&mbhc->hphl_pa_lock);
 	mutex_destroy(&mbhc->hphr_pa_lock);
diff --git a/sound/soc/codecs/wcd-spi.c b/sound/soc/codecs/wcd-spi.c
index a08b598..957d642 100644
--- a/sound/soc/codecs/wcd-spi.c
+++ b/sound/soc/codecs/wcd-spi.c
@@ -837,7 +837,7 @@
  * about the write are encapsulated in @msg. Write size should be multiple
  * of 4 bytes and write address should be 4-byte aligned.
  */
-int wcd_spi_data_write(struct spi_device *spi,
+static int wcd_spi_data_write(struct spi_device *spi,
 		       struct wcd_spi_msg *msg)
 {
 	if (!spi || !msg) {
@@ -850,7 +850,6 @@
 			    __func__, msg->remote_addr, msg->len);
 	return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_WRITE);
 }
-EXPORT_SYMBOL(wcd_spi_data_write);
 
 /*
  * wcd_spi_data_read: Read data from WCD SPI
@@ -861,7 +860,7 @@
  * about the read are encapsulated in @msg. Read size should be multiple
  * of 4 bytes and read address should be 4-byte aligned.
  */
-int wcd_spi_data_read(struct spi_device *spi,
+static int wcd_spi_data_read(struct spi_device *spi,
 		      struct wcd_spi_msg *msg)
 {
 	if (!spi || !msg) {
@@ -874,7 +873,6 @@
 			    __func__, msg->remote_addr, msg->len);
 	return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_READ);
 }
-EXPORT_SYMBOL(wcd_spi_data_read);
 
 static int wdsp_spi_dload_section(struct spi_device *spi,
 				  void *data)
@@ -925,6 +923,7 @@
 {
 	struct spi_device *spi = to_spi_device(dev);
 	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+	struct wcd_spi_ops *spi_ops;
 	int ret = 0;
 
 	dev_dbg(&spi->dev, "%s: event type %d\n",
@@ -979,6 +978,20 @@
 		ret = wcd_spi_wait_for_resume(wcd_spi);
 		break;
 
+	case WDSP_EVENT_GET_DEVOPS:
+		if (!data) {
+			dev_err(&spi->dev, "%s: invalid data\n",
+				__func__);
+			ret = -EINVAL;
+			break;
+		}
+
+		spi_ops = (struct wcd_spi_ops *) data;
+		spi_ops->spi_dev = spi;
+		spi_ops->read_dev = wcd_spi_data_read;
+		spi_ops->write_dev = wcd_spi_data_write;
+		break;
+
 	default:
 		dev_dbg(&spi->dev, "%s: Unhandled event %d\n",
 			__func__, event);
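
The wcd-spi change above stops exporting wcd_spi_data_read()/wcd_spi_data_write() and instead hands the SPI device and the two callbacks to interested components through the new WDSP_EVENT_GET_DEVOPS event. A reduced sketch of that hand-off; the wcd_spi_ops layout below only mirrors the assignments in the hunk (the real definition lives in a header not shown here), and the consumer function is illustrative:

#include <linux/errno.h>

struct spi_device;
struct wcd_spi_msg;

struct wcd_spi_ops {
	struct spi_device *spi_dev;
	int (*read_dev)(struct spi_device *spi, struct wcd_spi_msg *msg);
	int (*write_dev)(struct spi_device *spi, struct wcd_spi_msg *msg);
};

/* Consumer side, after the DSP manager has routed WDSP_EVENT_GET_DEVOPS
 * to the SPI component and the ops structure has been filled in: calls
 * go through the function pointers instead of exported symbols.
 */
static int consumer_read(struct wcd_spi_ops *ops, struct wcd_spi_msg *msg)
{
	if (!ops || !ops->read_dev)
		return -EINVAL;
	return ops->read_dev(ops->spi_dev, msg);
}
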
diff --git a/sound/soc/codecs/wcd9330-tables.c b/sound/soc/codecs/wcd9330-tables.c
deleted file mode 100644
index 1866fb3..0000000
--- a/sound/soc/codecs/wcd9330-tables.c
+++ /dev/null
@@ -1,1675 +0,0 @@
-/* Copyright (c) 2014, 2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/mfd/wcd9xxx/wcd9330_registers.h>
-#include "wcd9330.h"
-
-const u8 tomtom_reg_readable[WCD9330_MAX_REGISTER + 1] = {
-	[TOMTOM_A_CHIP_CTL] = 1,
-	[TOMTOM_A_CHIP_STATUS] = 1,
-	[TOMTOM_A_CHIP_ID_BYTE_0] = 1,
-	[TOMTOM_A_CHIP_ID_BYTE_1] = 1,
-	[TOMTOM_A_CHIP_ID_BYTE_2] = 1,
-	[TOMTOM_A_CHIP_ID_BYTE_3] = 1,
-	[TOMTOM_A_CHIP_I2C_SLAVE_ID] = 1,
-	[TOMTOM_A_SLAVE_ID_1] = 1,
-	[TOMTOM_A_SLAVE_ID_2] = 1,
-	[TOMTOM_A_SLAVE_ID_3] = 1,
-	[TOMTOM_A_PIN_CTL_OE0] = 1,
-	[TOMTOM_A_PIN_CTL_OE1] = 1,
-	[TOMTOM_A_PIN_CTL_OE2] = 1,
-	[TOMTOM_A_PIN_CTL_DATA0] = 1,
-	[TOMTOM_A_PIN_CTL_DATA1] = 1,
-	[TOMTOM_A_PIN_CTL_DATA2] = 1,
-	[TOMTOM_A_HDRIVE_GENERIC] = 1,
-	[TOMTOM_A_HDRIVE_OVERRIDE] = 1,
-	[TOMTOM_A_ANA_CSR_WAIT_STATE] = 1,
-	[TOMTOM_A_PROCESS_MONITOR_CTL0] = 1,
-	[TOMTOM_A_PROCESS_MONITOR_CTL1] = 1,
-	[TOMTOM_A_PROCESS_MONITOR_CTL2] = 1,
-	[TOMTOM_A_PROCESS_MONITOR_CTL3] = 1,
-	[TOMTOM_A_QFUSE_CTL] = 1,
-	[TOMTOM_A_QFUSE_STATUS] = 1,
-	[TOMTOM_A_QFUSE_DATA_OUT0] = 1,
-	[TOMTOM_A_QFUSE_DATA_OUT1] = 1,
-	[TOMTOM_A_QFUSE_DATA_OUT2] = 1,
-	[TOMTOM_A_QFUSE_DATA_OUT3] = 1,
-	[TOMTOM_A_QFUSE_DATA_OUT4] = 1,
-	[TOMTOM_A_QFUSE_DATA_OUT5] = 1,
-	[TOMTOM_A_QFUSE_DATA_OUT6] = 1,
-	[TOMTOM_A_QFUSE_DATA_OUT7] = 1,
-	[TOMTOM_A_CDC_CTL] = 1,
-	[TOMTOM_A_LEAKAGE_CTL] = 1,
-	[TOMTOM_A_SVASS_MEM_PTR0] = 1,
-	[TOMTOM_A_SVASS_MEM_PTR1] = 1,
-	[TOMTOM_A_SVASS_MEM_PTR2] = 1,
-	[TOMTOM_A_SVASS_MEM_CTL] = 1,
-	[TOMTOM_A_SVASS_MEM_BANK] = 1,
-	[TOMTOM_A_DMIC_B1_CTL] = 1,
-	[TOMTOM_A_DMIC_B2_CTL] = 1,
-	[TOMTOM_A_SVASS_CLKRST_CTL] = 1,
-	[TOMTOM_A_SVASS_CPAR_CFG] = 1,
-	[TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD] = 1,
-	[TOMTOM_A_SVASS_CPAR_WDOG_CFG] = 1,
-	[TOMTOM_A_SVASS_CFG] = 1,
-	[TOMTOM_A_SVASS_SPE_CFG] = 1,
-	[TOMTOM_A_SVASS_STATUS] = 1,
-	[TOMTOM_A_SVASS_INT_MASK] = 1,
-	[TOMTOM_A_SVASS_INT_STATUS] = 1,
-	[TOMTOM_A_SVASS_INT_CLR] = 0,
-	[TOMTOM_A_SVASS_DEBUG] = 1,
-	[TOMTOM_A_SVASS_SPE_BKUP_INT] = 0,
-	[TOMTOM_A_SVASS_MEM_ACC] = 1,
-	[TOMTOM_A_MEM_LEAKAGE_CTL] = 1,
-	[TOMTOM_A_SVASS_SPE_INBOX_TRG] = 0,
-	[TOMTOM_A_SVASS_SPE_INBOX_0] = 0,
-	[TOMTOM_A_SVASS_SPE_INBOX_1] = 0,
-	[TOMTOM_A_SVASS_SPE_INBOX_2] = 0,
-	[TOMTOM_A_SVASS_SPE_INBOX_3] = 0,
-	[TOMTOM_A_SVASS_SPE_INBOX_4] = 0,
-	[TOMTOM_A_SVASS_SPE_INBOX_5] = 0,
-	[TOMTOM_A_SVASS_SPE_INBOX_6] = 0,
-	[TOMTOM_A_SVASS_SPE_INBOX_7] = 0,
-	[TOMTOM_A_SVASS_SPE_INBOX_8] = 0,
-	[TOMTOM_A_SVASS_SPE_INBOX_9] = 0,
-	[TOMTOM_A_SVASS_SPE_INBOX_10] = 0,
-	[TOMTOM_A_SVASS_SPE_INBOX_11] = 0,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_0] = 1,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_1] = 1,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_2] = 1,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_3] = 1,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_4] = 1,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_5] = 1,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_6] = 1,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_7] = 1,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_8] = 1,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_9] = 1,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_10] = 1,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_11] = 1,
-	[TOMTOM_A_INTR_MODE] = 1,
-	[TOMTOM_A_INTR1_MASK0] = 1,
-	[TOMTOM_A_INTR1_MASK1] = 1,
-	[TOMTOM_A_INTR1_MASK2] = 1,
-	[TOMTOM_A_INTR1_MASK3] = 1,
-	[TOMTOM_A_INTR1_STATUS0] = 1,
-	[TOMTOM_A_INTR1_STATUS1] = 1,
-	[TOMTOM_A_INTR1_STATUS2] = 1,
-	[TOMTOM_A_INTR1_STATUS3] = 1,
-	[TOMTOM_A_INTR1_CLEAR0] = 0,
-	[TOMTOM_A_INTR1_CLEAR1] = 0,
-	[TOMTOM_A_INTR1_CLEAR2] = 0,
-	[TOMTOM_A_INTR1_CLEAR3] = 0,
-	[TOMTOM_A_INTR1_LEVEL0] = 1,
-	[TOMTOM_A_INTR1_LEVEL1] = 1,
-	[TOMTOM_A_INTR1_LEVEL2] = 1,
-	[TOMTOM_A_INTR1_LEVEL3] = 1,
-	[TOMTOM_A_INTR1_TEST0] = 1,
-	[TOMTOM_A_INTR1_TEST1] = 1,
-	[TOMTOM_A_INTR1_TEST2] = 1,
-	[TOMTOM_A_INTR1_TEST3] = 1,
-	[TOMTOM_A_INTR1_SET0] = 1,
-	[TOMTOM_A_INTR1_SET1] = 1,
-	[TOMTOM_A_INTR1_SET2] = 1,
-	[TOMTOM_A_INTR1_SET3] = 1,
-	[TOMTOM_A_INTR2_MASK0] = 1,
-	[TOMTOM_A_INTR2_STATUS0] = 1,
-	[TOMTOM_A_INTR2_CLEAR0] = 0,
-	[TOMTOM_A_INTR2_LEVEL0] = 1,
-	[TOMTOM_A_INTR2_TEST0] = 1,
-	[TOMTOM_A_INTR2_SET0] = 1,
-	[TOMTOM_A_CDC_TX_I2S_SCK_MODE] = 1,
-	[TOMTOM_A_CDC_TX_I2S_WS_MODE] = 1,
-	[TOMTOM_A_CDC_DMIC_DATA0_MODE] = 1,
-	[TOMTOM_A_CDC_DMIC_CLK0_MODE] = 1,
-	[TOMTOM_A_CDC_DMIC_DATA1_MODE] = 1,
-	[TOMTOM_A_CDC_DMIC_CLK1_MODE] = 1,
-	[TOMTOM_A_CDC_RX_I2S_SCK_MODE] = 1,
-	[TOMTOM_A_CDC_RX_I2S_WS_MODE] = 1,
-	[TOMTOM_A_CDC_DMIC_DATA2_MODE] = 1,
-	[TOMTOM_A_CDC_DMIC_CLK2_MODE] = 1,
-	[TOMTOM_A_CDC_INTR1_MODE] = 1,
-	[TOMTOM_A_CDC_SB_NRZ_SEL_MODE] = 1,
-	[TOMTOM_A_CDC_INTR2_MODE] = 1,
-	[TOMTOM_A_CDC_RF_PA_ON_MODE] = 1,
-	[TOMTOM_A_CDC_BOOST_MODE] = 1,
-	[TOMTOM_A_CDC_JTCK_MODE] = 1,
-	[TOMTOM_A_CDC_JTDI_MODE] = 1,
-	[TOMTOM_A_CDC_JTMS_MODE] = 1,
-	[TOMTOM_A_CDC_JTDO_MODE] = 1,
-	[TOMTOM_A_CDC_JTRST_MODE] = 1,
-	[TOMTOM_A_CDC_BIST_MODE_MODE] = 1,
-	[TOMTOM_A_CDC_MAD_MAIN_CTL_1] = 1,
-	[TOMTOM_A_CDC_MAD_MAIN_CTL_2] = 1,
-	[TOMTOM_A_CDC_MAD_AUDIO_CTL_1] = 1,
-	[TOMTOM_A_CDC_MAD_AUDIO_CTL_2] = 1,
-	[TOMTOM_A_CDC_MAD_AUDIO_CTL_3] = 1,
-	[TOMTOM_A_CDC_MAD_AUDIO_CTL_4] = 1,
-	[TOMTOM_A_CDC_MAD_AUDIO_CTL_5] = 1,
-	[TOMTOM_A_CDC_MAD_AUDIO_CTL_6] = 1,
-	[TOMTOM_A_CDC_MAD_AUDIO_CTL_7] = 1,
-	[TOMTOM_A_CDC_MAD_AUDIO_CTL_8] = 1,
-	[TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR] = 1,
-	[TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL] = 1,
-	[TOMTOM_A_CDC_MAD_ULTR_CTL_1] = 1,
-	[TOMTOM_A_CDC_MAD_ULTR_CTL_2] = 1,
-	[TOMTOM_A_CDC_MAD_ULTR_CTL_3] = 1,
-	[TOMTOM_A_CDC_MAD_ULTR_CTL_4] = 1,
-	[TOMTOM_A_CDC_MAD_ULTR_CTL_5] = 1,
-	[TOMTOM_A_CDC_MAD_ULTR_CTL_6] = 1,
-	[TOMTOM_A_CDC_MAD_ULTR_CTL_7] = 1,
-	[TOMTOM_A_CDC_MAD_BEACON_CTL_1] = 1,
-	[TOMTOM_A_CDC_MAD_BEACON_CTL_2] = 1,
-	[TOMTOM_A_CDC_MAD_BEACON_CTL_3] = 1,
-	[TOMTOM_A_CDC_MAD_BEACON_CTL_4] = 1,
-	[TOMTOM_A_CDC_MAD_BEACON_CTL_5] = 1,
-	[TOMTOM_A_CDC_MAD_BEACON_CTL_6] = 1,
-	[TOMTOM_A_CDC_MAD_BEACON_CTL_7] = 1,
-	[TOMTOM_A_CDC_MAD_BEACON_CTL_8] = 1,
-	[TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR] = 1,
-	[TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL] = 1,
-	[TOMTOM_A_CDC_MAD_INP_SEL] = 1,
-	[TOMTOM_A_BIAS_REF_CTL] = 1,
-	[TOMTOM_A_BIAS_CENTRAL_BG_CTL] = 1,
-	[TOMTOM_A_BIAS_PRECHRG_CTL] = 1,
-	[TOMTOM_A_BIAS_CURR_CTL_1] = 1,
-	[TOMTOM_A_BIAS_CURR_CTL_2] = 1,
-	[TOMTOM_A_BIAS_OSC_BG_CTL] = 1,
-	[TOMTOM_A_CLK_BUFF_EN1] = 1,
-	[TOMTOM_A_CLK_BUFF_EN2] = 1,
-	[TOMTOM_A_LDO_L_MODE_1] = 1,
-	[TOMTOM_A_LDO_L_MODE_2] = 1,
-	[TOMTOM_A_LDO_L_CTRL_1] = 1,
-	[TOMTOM_A_LDO_L_CTRL_2] = 1,
-	[TOMTOM_A_LDO_L_CTRL_3] = 1,
-	[TOMTOM_A_LDO_L_CTRL_4] = 1,
-	[TOMTOM_A_LDO_H_MODE_1] = 1,
-	[TOMTOM_A_LDO_H_MODE_2] = 1,
-	[TOMTOM_A_LDO_H_LOOP_CTL] = 1,
-	[TOMTOM_A_LDO_H_COMP_1] = 1,
-	[TOMTOM_A_LDO_H_COMP_2] = 1,
-	[TOMTOM_A_LDO_H_BIAS_1] = 1,
-	[TOMTOM_A_LDO_H_BIAS_2] = 1,
-	[TOMTOM_A_LDO_H_BIAS_3] = 1,
-	[TOMTOM_A_VBAT_CLK] = 1,
-	[TOMTOM_A_VBAT_LOOP] = 1,
-	[TOMTOM_A_VBAT_REF] = 1,
-	[TOMTOM_A_VBAT_ADC_TEST] = 1,
-	[TOMTOM_A_VBAT_FE] = 1,
-	[TOMTOM_A_VBAT_BIAS_1] = 1,
-	[TOMTOM_A_VBAT_BIAS_2] = 1,
-	[TOMTOM_A_VBAT_ADC_DATA_MSB] = 1,
-	[TOMTOM_A_VBAT_ADC_DATA_LSB] = 1,
-	[TOMTOM_A_FLL_NREF] = 1,
-	[TOMTOM_A_FLL_KDCO_TUNE] = 1,
-	[TOMTOM_A_FLL_LOCK_THRESH] = 1,
-	[TOMTOM_A_FLL_LOCK_DET_COUNT] = 1,
-	[TOMTOM_A_FLL_DAC_THRESHOLD] = 1,
-	[TOMTOM_A_FLL_TEST_DCO_FREERUN] = 1,
-	[TOMTOM_A_FLL_TEST_ENABLE] = 1,
-	[TOMTOM_A_MICB_CFILT_1_CTL] = 1,
-	[TOMTOM_A_MICB_CFILT_1_VAL] = 1,
-	[TOMTOM_A_MICB_CFILT_1_PRECHRG] = 1,
-	[TOMTOM_A_MICB_1_CTL] = 1,
-	[TOMTOM_A_MICB_1_INT_RBIAS] = 1,
-	[TOMTOM_A_MICB_1_MBHC] = 1,
-	[TOMTOM_A_MICB_CFILT_2_CTL] = 1,
-	[TOMTOM_A_MICB_CFILT_2_VAL] = 1,
-	[TOMTOM_A_MICB_CFILT_2_PRECHRG] = 1,
-	[TOMTOM_A_MICB_2_CTL] = 1,
-	[TOMTOM_A_MICB_2_INT_RBIAS] = 1,
-	[TOMTOM_A_MICB_2_MBHC] = 1,
-	[TOMTOM_A_MICB_CFILT_3_CTL] = 1,
-	[TOMTOM_A_MICB_CFILT_3_VAL] = 1,
-	[TOMTOM_A_MICB_CFILT_3_PRECHRG] = 1,
-	[TOMTOM_A_MICB_3_CTL] = 1,
-	[TOMTOM_A_MICB_3_INT_RBIAS] = 1,
-	[TOMTOM_A_MICB_3_MBHC] = 1,
-	[TOMTOM_A_MICB_4_CTL] = 1,
-	[TOMTOM_A_MICB_4_INT_RBIAS] = 1,
-	[TOMTOM_A_MICB_4_MBHC] = 1,
-	[TOMTOM_A_SPKR_DRV2_EN] = 1,
-	[TOMTOM_A_SPKR_DRV2_GAIN] = 1,
-	[TOMTOM_A_SPKR_DRV2_DAC_CTL] = 1,
-	[TOMTOM_A_SPKR_DRV2_OCP_CTL] = 1,
-	[TOMTOM_A_SPKR_DRV2_CLIP_DET] = 1,
-	[TOMTOM_A_SPKR_DRV2_DBG_DAC] = 1,
-	[TOMTOM_A_SPKR_DRV2_DBG_PA] = 1,
-	[TOMTOM_A_SPKR_DRV2_DBG_PWRSTG] = 1,
-	[TOMTOM_A_SPKR_DRV2_BIAS_LDO] = 1,
-	[TOMTOM_A_SPKR_DRV2_BIAS_INT] = 1,
-	[TOMTOM_A_SPKR_DRV2_BIAS_PA] = 1,
-	[TOMTOM_A_SPKR_DRV2_STATUS_OCP] = 1,
-	[TOMTOM_A_SPKR_DRV2_STATUS_PA] = 1,
-	[TOMTOM_A_MBHC_INSERT_DETECT] = 1,
-	[TOMTOM_A_MBHC_INSERT_DET_STATUS] = 1,
-	[TOMTOM_A_TX_COM_BIAS] = 1,
-	[TOMTOM_A_MBHC_INSERT_DETECT2] = 1,
-	[TOMTOM_A_MBHC_SCALING_MUX_1] = 1,
-	[TOMTOM_A_MBHC_SCALING_MUX_2] = 1,
-	[TOMTOM_A_MAD_ANA_CTRL] = 1,
-	[TOMTOM_A_TX_SUP_SWITCH_CTRL_1] = 1,
-	[TOMTOM_A_TX_SUP_SWITCH_CTRL_2] = 1,
-	[TOMTOM_A_TX_1_GAIN] = 1,
-	[TOMTOM_A_TX_1_2_TEST_EN] = 1,
-	[TOMTOM_A_TX_2_GAIN] = 1,
-	[TOMTOM_A_TX_1_2_ADC_IB] = 1,
-	[TOMTOM_A_TX_1_2_ATEST_REFCTRL] = 1,
-	[TOMTOM_A_TX_1_2_TEST_CTL] = 1,
-	[TOMTOM_A_TX_1_2_TEST_BLOCK_EN] = 1,
-	[TOMTOM_A_TX_1_2_TXFE_CLKDIV] = 1,
-	[TOMTOM_A_TX_1_2_SAR_ERR_CH1] = 1,
-	[TOMTOM_A_TX_1_2_SAR_ERR_CH2] = 1,
-	[TOMTOM_A_TX_3_GAIN] = 1,
-	[TOMTOM_A_TX_3_4_TEST_EN] = 1,
-	[TOMTOM_A_TX_4_GAIN] = 1,
-	[TOMTOM_A_TX_3_4_ADC_IB] = 1,
-	[TOMTOM_A_TX_3_4_ATEST_REFCTRL] = 1,
-	[TOMTOM_A_TX_3_4_TEST_CTL] = 1,
-	[TOMTOM_A_TX_3_4_TEST_BLOCK_EN] = 1,
-	[TOMTOM_A_TX_3_4_TXFE_CKDIV] = 1,
-	[TOMTOM_A_TX_3_4_SAR_ERR_CH3] = 1,
-	[TOMTOM_A_TX_3_4_SAR_ERR_CH4] = 1,
-	[TOMTOM_A_TX_5_GAIN] = 1,
-	[TOMTOM_A_TX_5_6_TEST_EN] = 1,
-	[TOMTOM_A_TX_6_GAIN] = 1,
-	[TOMTOM_A_TX_5_6_ADC_IB] = 1,
-	[TOMTOM_A_TX_5_6_ATEST_REFCTRL] = 1,
-	[TOMTOM_A_TX_5_6_TEST_CTL] = 1,
-	[TOMTOM_A_TX_5_6_TEST_BLOCK_EN] = 1,
-	[TOMTOM_A_TX_5_6_TXFE_CKDIV] = 1,
-	[TOMTOM_A_TX_5_6_SAR_ERR_CH5] = 1,
-	[TOMTOM_A_TX_5_6_SAR_ERR_CH6] = 1,
-	[TOMTOM_A_TX_7_MBHC_EN] = 1,
-	[TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL] = 1,
-	[TOMTOM_A_TX_7_MBHC_ADC] = 1,
-	[TOMTOM_A_TX_7_MBHC_TEST_CTL] = 1,
-	[TOMTOM_A_TX_7_MBHC_SAR_ERR] = 1,
-	[TOMTOM_A_TX_7_TXFE_CLKDIV] = 1,
-	[TOMTOM_A_RCO_CTRL] = 1,
-	[TOMTOM_A_RCO_CALIBRATION_CTRL1] = 1,
-	[TOMTOM_A_RCO_CALIBRATION_CTRL2] = 1,
-	[TOMTOM_A_RCO_CALIBRATION_CTRL3] = 1,
-	[TOMTOM_A_RCO_TEST_CTRL] = 1,
-	[TOMTOM_A_RCO_CALIBRATION_RESULT1] = 1,
-	[TOMTOM_A_RCO_CALIBRATION_RESULT2] = 1,
-	[TOMTOM_A_BUCK_MODE_1] = 1,
-	[TOMTOM_A_BUCK_MODE_2] = 1,
-	[TOMTOM_A_BUCK_MODE_3] = 1,
-	[TOMTOM_A_BUCK_MODE_4] = 1,
-	[TOMTOM_A_BUCK_MODE_5] = 1,
-	[TOMTOM_A_BUCK_CTRL_VCL_1] = 1,
-	[TOMTOM_A_BUCK_CTRL_VCL_2] = 1,
-	[TOMTOM_A_BUCK_CTRL_VCL_3] = 1,
-	[TOMTOM_A_BUCK_CTRL_CCL_1] = 1,
-	[TOMTOM_A_BUCK_CTRL_CCL_2] = 1,
-	[TOMTOM_A_BUCK_CTRL_CCL_3] = 1,
-	[TOMTOM_A_BUCK_CTRL_CCL_4] = 1,
-	[TOMTOM_A_BUCK_CTRL_PWM_DRVR_1] = 1,
-	[TOMTOM_A_BUCK_CTRL_PWM_DRVR_2] = 1,
-	[TOMTOM_A_BUCK_CTRL_PWM_DRVR_3] = 1,
-	[TOMTOM_A_BUCK_TMUX_A_D] = 1,
-	[TOMTOM_A_NCP_BUCKREF] = 1,
-	[TOMTOM_A_NCP_EN] = 1,
-	[TOMTOM_A_NCP_CLK] = 1,
-	[TOMTOM_A_NCP_STATIC] = 1,
-	[TOMTOM_A_NCP_VTH_LOW] = 1,
-	[TOMTOM_A_NCP_VTH_HIGH] = 1,
-	[TOMTOM_A_NCP_ATEST] = 1,
-	[TOMTOM_A_NCP_DTEST] = 1,
-	[TOMTOM_A_NCP_DLY1] = 1,
-	[TOMTOM_A_NCP_DLY2] = 1,
-	[TOMTOM_A_RX_AUX_SW_CTL] = 1,
-	[TOMTOM_A_RX_PA_AUX_IN_CONN] = 1,
-	[TOMTOM_A_RX_COM_TIMER_DIV] = 1,
-	[TOMTOM_A_RX_COM_OCP_CTL] = 1,
-	[TOMTOM_A_RX_COM_OCP_COUNT] = 1,
-	[TOMTOM_A_RX_COM_DAC_CTL] = 1,
-	[TOMTOM_A_RX_COM_BIAS] = 1,
-	[TOMTOM_A_RX_HPH_AUTO_CHOP] = 1,
-	[TOMTOM_A_RX_HPH_CHOP_CTL] = 1,
-	[TOMTOM_A_RX_HPH_BIAS_PA] = 1,
-	[TOMTOM_A_RX_HPH_BIAS_LDO] = 1,
-	[TOMTOM_A_RX_HPH_BIAS_CNP] = 1,
-	[TOMTOM_A_RX_HPH_BIAS_WG_OCP] = 1,
-	[TOMTOM_A_RX_HPH_OCP_CTL] = 1,
-	[TOMTOM_A_RX_HPH_CNP_EN] = 1,
-	[TOMTOM_A_RX_HPH_CNP_WG_CTL] = 1,
-	[TOMTOM_A_RX_HPH_CNP_WG_TIME] = 1,
-	[TOMTOM_A_RX_HPH_L_GAIN] = 1,
-	[TOMTOM_A_RX_HPH_L_TEST] = 1,
-	[TOMTOM_A_RX_HPH_L_PA_CTL] = 1,
-	[TOMTOM_A_RX_HPH_L_DAC_CTL] = 1,
-	[TOMTOM_A_RX_HPH_L_ATEST] = 1,
-	[TOMTOM_A_RX_HPH_L_STATUS] = 1,
-	[TOMTOM_A_RX_HPH_R_GAIN] = 1,
-	[TOMTOM_A_RX_HPH_R_TEST] = 1,
-	[TOMTOM_A_RX_HPH_R_PA_CTL] = 1,
-	[TOMTOM_A_RX_HPH_R_DAC_CTL] = 1,
-	[TOMTOM_A_RX_HPH_R_ATEST] = 1,
-	[TOMTOM_A_RX_HPH_R_STATUS] = 1,
-	[TOMTOM_A_RX_EAR_BIAS_PA] = 1,
-	[TOMTOM_A_RX_EAR_BIAS_CMBUFF] = 1,
-	[TOMTOM_A_RX_EAR_EN] = 1,
-	[TOMTOM_A_RX_EAR_GAIN] = 1,
-	[TOMTOM_A_RX_EAR_CMBUFF] = 1,
-	[TOMTOM_A_RX_EAR_ICTL] = 1,
-	[TOMTOM_A_RX_EAR_CCOMP] = 1,
-	[TOMTOM_A_RX_EAR_VCM] = 1,
-	[TOMTOM_A_RX_EAR_CNP] = 1,
-	[TOMTOM_A_RX_EAR_DAC_CTL_ATEST] = 1,
-	[TOMTOM_A_RX_EAR_STATUS] = 1,
-	[TOMTOM_A_RX_LINE_BIAS_PA] = 1,
-	[TOMTOM_A_RX_BUCK_BIAS1] = 1,
-	[TOMTOM_A_RX_BUCK_BIAS2] = 1,
-	[TOMTOM_A_RX_LINE_COM] = 1,
-	[TOMTOM_A_RX_LINE_CNP_EN] = 1,
-	[TOMTOM_A_RX_LINE_CNP_WG_CTL] = 1,
-	[TOMTOM_A_RX_LINE_CNP_WG_TIME] = 1,
-	[TOMTOM_A_RX_LINE_1_GAIN] = 1,
-	[TOMTOM_A_RX_LINE_1_TEST] = 1,
-	[TOMTOM_A_RX_LINE_1_DAC_CTL] = 1,
-	[TOMTOM_A_RX_LINE_1_STATUS] = 1,
-	[TOMTOM_A_RX_LINE_2_GAIN] = 1,
-	[TOMTOM_A_RX_LINE_2_TEST] = 1,
-	[TOMTOM_A_RX_LINE_2_DAC_CTL] = 1,
-	[TOMTOM_A_RX_LINE_2_STATUS] = 1,
-	[TOMTOM_A_RX_LINE_3_GAIN] = 1,
-	[TOMTOM_A_RX_LINE_3_TEST] = 1,
-	[TOMTOM_A_RX_LINE_3_DAC_CTL] = 1,
-	[TOMTOM_A_RX_LINE_3_STATUS] = 1,
-	[TOMTOM_A_RX_LINE_4_GAIN] = 1,
-	[TOMTOM_A_RX_LINE_4_TEST] = 1,
-	[TOMTOM_A_RX_LINE_4_DAC_CTL] = 1,
-	[TOMTOM_A_RX_LINE_4_STATUS] = 1,
-	[TOMTOM_A_RX_LINE_CNP_DBG] = 1,
-	[TOMTOM_A_SPKR_DRV1_EN] = 1,
-	[TOMTOM_A_SPKR_DRV1_GAIN] = 1,
-	[TOMTOM_A_SPKR_DRV1_DAC_CTL] = 1,
-	[TOMTOM_A_SPKR_DRV1_OCP_CTL] = 1,
-	[TOMTOM_A_SPKR_DRV1_CLIP_DET] = 1,
-	[TOMTOM_A_SPKR_DRV1_IEC] = 1,
-	[TOMTOM_A_SPKR_DRV1_DBG_DAC] = 1,
-	[TOMTOM_A_SPKR_DRV1_DBG_PA] = 1,
-	[TOMTOM_A_SPKR_DRV1_DBG_PWRSTG] = 1,
-	[TOMTOM_A_SPKR_DRV1_BIAS_LDO] = 1,
-	[TOMTOM_A_SPKR_DRV1_BIAS_INT] = 1,
-	[TOMTOM_A_SPKR_DRV1_BIAS_PA] = 1,
-	[TOMTOM_A_SPKR_DRV1_STATUS_OCP] = 1,
-	[TOMTOM_A_SPKR_DRV1_STATUS_PA] = 1,
-	[TOMTOM_A_SPKR1_PROT_EN] = 1,
-	[TOMTOM_A_SPKR1_PROT_ADC_TEST_EN] = 1,
-	[TOMTOM_A_SPKR1_PROT_ATEST] = 1,
-	[TOMTOM_A_SPKR1_PROT_LDO_CTRL] = 1,
-	[TOMTOM_A_SPKR1_PROT_ISENSE_CTRL] = 1,
-	[TOMTOM_A_SPKR1_PROT_VSENSE_CTRL] = 1,
-	[TOMTOM_A_SPKR2_PROT_EN] = 1,
-	[TOMTOM_A_SPKR2_PROT_ADC_TEST_EN] = 1,
-	[TOMTOM_A_SPKR2_PROT_ATEST] = 1,
-	[TOMTOM_A_SPKR2_PROT_LDO_CTRL] = 1,
-	[TOMTOM_A_SPKR2_PROT_ISENSE_CTRL] = 1,
-	[TOMTOM_A_SPKR2_PROT_VSENSE_CTRL] = 1,
-	[TOMTOM_A_MBHC_HPH] = 1,
-	[TOMTOM_A_CDC_ANC1_B1_CTL] = 1,
-	[TOMTOM_A_CDC_ANC2_B1_CTL] = 1,
-	[TOMTOM_A_CDC_ANC1_SHIFT] = 1,
-	[TOMTOM_A_CDC_ANC2_SHIFT] = 1,
-	[TOMTOM_A_CDC_ANC1_IIR_B1_CTL] = 1,
-	[TOMTOM_A_CDC_ANC2_IIR_B1_CTL] = 1,
-	[TOMTOM_A_CDC_ANC1_IIR_B2_CTL] = 1,
-	[TOMTOM_A_CDC_ANC2_IIR_B2_CTL] = 1,
-	[TOMTOM_A_CDC_ANC1_IIR_B3_CTL] = 1,
-	[TOMTOM_A_CDC_ANC2_IIR_B3_CTL] = 1,
-	[TOMTOM_A_CDC_ANC1_LPF_B1_CTL] = 1,
-	[TOMTOM_A_CDC_ANC2_LPF_B1_CTL] = 1,
-	[TOMTOM_A_CDC_ANC1_LPF_B2_CTL] = 1,
-	[TOMTOM_A_CDC_ANC2_LPF_B2_CTL] = 1,
-	[TOMTOM_A_CDC_ANC1_SPARE] = 1,
-	[TOMTOM_A_CDC_ANC2_SPARE] = 1,
-	[TOMTOM_A_CDC_ANC1_SMLPF_CTL] = 1,
-	[TOMTOM_A_CDC_ANC2_SMLPF_CTL] = 1,
-	[TOMTOM_A_CDC_ANC1_DCFLT_CTL] = 1,
-	[TOMTOM_A_CDC_ANC2_DCFLT_CTL] = 1,
-	[TOMTOM_A_CDC_ANC1_GAIN_CTL] = 1,
-	[TOMTOM_A_CDC_ANC2_GAIN_CTL] = 1,
-	[TOMTOM_A_CDC_ANC1_B2_CTL] = 1,
-	[TOMTOM_A_CDC_ANC2_B2_CTL] = 1,
-	[TOMTOM_A_CDC_TX1_VOL_CTL_TIMER] = 1,
-	[TOMTOM_A_CDC_TX2_VOL_CTL_TIMER] = 1,
-	[TOMTOM_A_CDC_TX3_VOL_CTL_TIMER] = 1,
-	[TOMTOM_A_CDC_TX4_VOL_CTL_TIMER] = 1,
-	[TOMTOM_A_CDC_TX5_VOL_CTL_TIMER] = 1,
-	[TOMTOM_A_CDC_TX6_VOL_CTL_TIMER] = 1,
-	[TOMTOM_A_CDC_TX7_VOL_CTL_TIMER] = 1,
-	[TOMTOM_A_CDC_TX8_VOL_CTL_TIMER] = 1,
-	[TOMTOM_A_CDC_TX9_VOL_CTL_TIMER] = 1,
-	[TOMTOM_A_CDC_TX10_VOL_CTL_TIMER] = 1,
-	[TOMTOM_A_CDC_TX1_VOL_CTL_GAIN] = 1,
-	[TOMTOM_A_CDC_TX2_VOL_CTL_GAIN] = 1,
-	[TOMTOM_A_CDC_TX3_VOL_CTL_GAIN] = 1,
-	[TOMTOM_A_CDC_TX4_VOL_CTL_GAIN] = 1,
-	[TOMTOM_A_CDC_TX5_VOL_CTL_GAIN] = 1,
-	[TOMTOM_A_CDC_TX6_VOL_CTL_GAIN] = 1,
-	[TOMTOM_A_CDC_TX7_VOL_CTL_GAIN] = 1,
-	[TOMTOM_A_CDC_TX8_VOL_CTL_GAIN] = 1,
-	[TOMTOM_A_CDC_TX9_VOL_CTL_GAIN] = 1,
-	[TOMTOM_A_CDC_TX10_VOL_CTL_GAIN] = 1,
-	[TOMTOM_A_CDC_TX1_VOL_CTL_CFG] = 1,
-	[TOMTOM_A_CDC_TX2_VOL_CTL_CFG] = 1,
-	[TOMTOM_A_CDC_TX3_VOL_CTL_CFG] = 1,
-	[TOMTOM_A_CDC_TX4_VOL_CTL_CFG] = 1,
-	[TOMTOM_A_CDC_TX5_VOL_CTL_CFG] = 1,
-	[TOMTOM_A_CDC_TX6_VOL_CTL_CFG] = 1,
-	[TOMTOM_A_CDC_TX7_VOL_CTL_CFG] = 1,
-	[TOMTOM_A_CDC_TX8_VOL_CTL_CFG] = 1,
-	[TOMTOM_A_CDC_TX9_VOL_CTL_CFG] = 1,
-	[TOMTOM_A_CDC_TX10_VOL_CTL_CFG] = 1,
-	[TOMTOM_A_CDC_TX1_MUX_CTL] = 1,
-	[TOMTOM_A_CDC_TX2_MUX_CTL] = 1,
-	[TOMTOM_A_CDC_TX3_MUX_CTL] = 1,
-	[TOMTOM_A_CDC_TX4_MUX_CTL] = 1,
-	[TOMTOM_A_CDC_TX5_MUX_CTL] = 1,
-	[TOMTOM_A_CDC_TX6_MUX_CTL] = 1,
-	[TOMTOM_A_CDC_TX7_MUX_CTL] = 1,
-	[TOMTOM_A_CDC_TX8_MUX_CTL] = 1,
-	[TOMTOM_A_CDC_TX9_MUX_CTL] = 1,
-	[TOMTOM_A_CDC_TX10_MUX_CTL] = 1,
-	[TOMTOM_A_CDC_TX1_CLK_FS_CTL] = 1,
-	[TOMTOM_A_CDC_TX2_CLK_FS_CTL] = 1,
-	[TOMTOM_A_CDC_TX3_CLK_FS_CTL] = 1,
-	[TOMTOM_A_CDC_TX4_CLK_FS_CTL] = 1,
-	[TOMTOM_A_CDC_TX5_CLK_FS_CTL] = 1,
-	[TOMTOM_A_CDC_TX6_CLK_FS_CTL] = 1,
-	[TOMTOM_A_CDC_TX7_CLK_FS_CTL] = 1,
-	[TOMTOM_A_CDC_TX8_CLK_FS_CTL] = 1,
-	[TOMTOM_A_CDC_TX9_CLK_FS_CTL] = 1,
-	[TOMTOM_A_CDC_TX10_CLK_FS_CTL] = 1,
-	[TOMTOM_A_CDC_TX1_DMIC_CTL] = 1,
-	[TOMTOM_A_CDC_TX2_DMIC_CTL] = 1,
-	[TOMTOM_A_CDC_TX3_DMIC_CTL] = 1,
-	[TOMTOM_A_CDC_TX4_DMIC_CTL] = 1,
-	[TOMTOM_A_CDC_TX5_DMIC_CTL] = 1,
-	[TOMTOM_A_CDC_TX6_DMIC_CTL] = 1,
-	[TOMTOM_A_CDC_TX7_DMIC_CTL] = 1,
-	[TOMTOM_A_CDC_TX8_DMIC_CTL] = 1,
-	[TOMTOM_A_CDC_TX9_DMIC_CTL] = 1,
-	[TOMTOM_A_CDC_TX10_DMIC_CTL] = 1,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_VAL0] = 1,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_VAL1] = 1,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_VAL2] = 1,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_VAL3] = 1,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_VAL4] = 1,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_VAL5] = 1,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_VAL6] = 1,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_VAL7] = 1,
-	[TOMTOM_A_CDC_DEBUG_B1_CTL] = 1,
-	[TOMTOM_A_CDC_DEBUG_B2_CTL] = 1,
-	[TOMTOM_A_CDC_DEBUG_B3_CTL] = 1,
-	[TOMTOM_A_CDC_DEBUG_B4_CTL] = 1,
-	[TOMTOM_A_CDC_DEBUG_B5_CTL] = 1,
-	[TOMTOM_A_CDC_DEBUG_B6_CTL] = 1,
-	[TOMTOM_A_CDC_DEBUG_B7_CTL] = 1,
-	[TOMTOM_A_CDC_SRC1_PDA_CFG] = 1,
-	[TOMTOM_A_CDC_SRC2_PDA_CFG] = 1,
-	[TOMTOM_A_CDC_SRC1_FS_CTL] = 1,
-	[TOMTOM_A_CDC_SRC2_FS_CTL] = 1,
-	[TOMTOM_A_CDC_RX1_B1_CTL] = 1,
-	[TOMTOM_A_CDC_RX2_B1_CTL] = 1,
-	[TOMTOM_A_CDC_RX3_B1_CTL] = 1,
-	[TOMTOM_A_CDC_RX4_B1_CTL] = 1,
-	[TOMTOM_A_CDC_RX5_B1_CTL] = 1,
-	[TOMTOM_A_CDC_RX6_B1_CTL] = 1,
-	[TOMTOM_A_CDC_RX7_B1_CTL] = 1,
-	[TOMTOM_A_CDC_RX1_B2_CTL] = 1,
-	[TOMTOM_A_CDC_RX2_B2_CTL] = 1,
-	[TOMTOM_A_CDC_RX3_B2_CTL] = 1,
-	[TOMTOM_A_CDC_RX4_B2_CTL] = 1,
-	[TOMTOM_A_CDC_RX5_B2_CTL] = 1,
-	[TOMTOM_A_CDC_RX6_B2_CTL] = 1,
-	[TOMTOM_A_CDC_RX7_B2_CTL] = 1,
-	[TOMTOM_A_CDC_RX1_B3_CTL] = 1,
-	[TOMTOM_A_CDC_RX2_B3_CTL] = 1,
-	[TOMTOM_A_CDC_RX3_B3_CTL] = 1,
-	[TOMTOM_A_CDC_RX4_B3_CTL] = 1,
-	[TOMTOM_A_CDC_RX5_B3_CTL] = 1,
-	[TOMTOM_A_CDC_RX6_B3_CTL] = 1,
-	[TOMTOM_A_CDC_RX7_B3_CTL] = 1,
-	[TOMTOM_A_CDC_RX1_B4_CTL] = 1,
-	[TOMTOM_A_CDC_RX2_B4_CTL] = 1,
-	[TOMTOM_A_CDC_RX3_B4_CTL] = 1,
-	[TOMTOM_A_CDC_RX4_B4_CTL] = 1,
-	[TOMTOM_A_CDC_RX5_B4_CTL] = 1,
-	[TOMTOM_A_CDC_RX6_B4_CTL] = 1,
-	[TOMTOM_A_CDC_RX7_B4_CTL] = 1,
-	[TOMTOM_A_CDC_RX1_B5_CTL] = 1,
-	[TOMTOM_A_CDC_RX2_B5_CTL] = 1,
-	[TOMTOM_A_CDC_RX3_B5_CTL] = 1,
-	[TOMTOM_A_CDC_RX4_B5_CTL] = 1,
-	[TOMTOM_A_CDC_RX5_B5_CTL] = 1,
-	[TOMTOM_A_CDC_RX6_B5_CTL] = 1,
-	[TOMTOM_A_CDC_RX7_B5_CTL] = 1,
-	[TOMTOM_A_CDC_RX1_B6_CTL] = 1,
-	[TOMTOM_A_CDC_RX2_B6_CTL] = 1,
-	[TOMTOM_A_CDC_RX3_B6_CTL] = 1,
-	[TOMTOM_A_CDC_RX4_B6_CTL] = 1,
-	[TOMTOM_A_CDC_RX5_B6_CTL] = 1,
-	[TOMTOM_A_CDC_RX6_B6_CTL] = 1,
-	[TOMTOM_A_CDC_RX7_B6_CTL] = 1,
-	[TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL] = 1,
-	[TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL] = 1,
-	[TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL] = 1,
-	[TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL] = 1,
-	[TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL] = 1,
-	[TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL] = 1,
-	[TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL] = 1,
-	[TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL] = 1,
-	[TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL] = 1,
-	[TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL] = 1,
-	[TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL] = 1,
-	[TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL] = 1,
-	[TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL] = 1,
-	[TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL] = 1,
-	[TOMTOM_A_CDC_VBAT_CFG] = 1,
-	[TOMTOM_A_CDC_VBAT_ADC_CAL1] = 1,
-	[TOMTOM_A_CDC_VBAT_ADC_CAL2] = 1,
-	[TOMTOM_A_CDC_VBAT_ADC_CAL3] = 1,
-	[TOMTOM_A_CDC_VBAT_PK_EST1] = 1,
-	[TOMTOM_A_CDC_VBAT_PK_EST2] = 1,
-	[TOMTOM_A_CDC_VBAT_PK_EST3] = 1,
-	[TOMTOM_A_CDC_VBAT_RF_PROC1] = 1,
-	[TOMTOM_A_CDC_VBAT_RF_PROC2] = 1,
-	[TOMTOM_A_CDC_VBAT_TAC1] = 1,
-	[TOMTOM_A_CDC_VBAT_TAC2] = 1,
-	[TOMTOM_A_CDC_VBAT_TAC3] = 1,
-	[TOMTOM_A_CDC_VBAT_TAC4] = 1,
-	[TOMTOM_A_CDC_VBAT_GAIN_UPD1] = 1,
-	[TOMTOM_A_CDC_VBAT_GAIN_UPD2] = 1,
-	[TOMTOM_A_CDC_VBAT_GAIN_UPD3] = 1,
-	[TOMTOM_A_CDC_VBAT_GAIN_UPD4] = 1,
-	[TOMTOM_A_CDC_VBAT_DEBUG1] = 1,
-	[TOMTOM_A_CDC_VBAT_GAIN_UPD_MON] = 0,
-	[TOMTOM_A_CDC_VBAT_GAIN_MON_VAL] = 1,
-	[TOMTOM_A_CDC_CLK_ANC_RESET_CTL] = 1,
-	[TOMTOM_A_CDC_CLK_RX_RESET_CTL] = 1,
-	[TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CLK_RX_I2S_CTL] = 1,
-	[TOMTOM_A_CDC_CLK_TX_I2S_CTL] = 1,
-	[TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CLK_OTHR_CTL] = 1,
-	[TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL] = 1,
-	[TOMTOM_A_CDC_CLK_RX_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CLK_RX_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CLK_MCLK_CTL] = 1,
-	[TOMTOM_A_CDC_CLK_PDM_CTL] = 1,
-	[TOMTOM_A_CDC_CLK_SD_CTL] = 1,
-	[TOMTOM_A_CDC_CLSH_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CLSH_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CLSH_B3_CTL] = 1,
-	[TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS] = 1,
-	[TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD] = 1,
-	[TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD] = 1,
-	[TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD] = 1,
-	[TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD] = 1,
-	[TOMTOM_A_CDC_CLSH_K_ADDR] = 1,
-	[TOMTOM_A_CDC_CLSH_K_DATA] = 1,
-	[TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L] = 1,
-	[TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U] = 1,
-	[TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L] = 1,
-	[TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U] = 1,
-	[TOMTOM_A_CDC_CLSH_V_PA_HD_EAR] = 1,
-	[TOMTOM_A_CDC_CLSH_V_PA_HD_HPH] = 1,
-	[TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR] = 1,
-	[TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH] = 1,
-	[TOMTOM_A_CDC_IIR1_GAIN_B1_CTL] = 1,
-	[TOMTOM_A_CDC_IIR2_GAIN_B1_CTL] = 1,
-	[TOMTOM_A_CDC_IIR1_GAIN_B2_CTL] = 1,
-	[TOMTOM_A_CDC_IIR2_GAIN_B2_CTL] = 1,
-	[TOMTOM_A_CDC_IIR1_GAIN_B3_CTL] = 1,
-	[TOMTOM_A_CDC_IIR2_GAIN_B3_CTL] = 1,
-	[TOMTOM_A_CDC_IIR1_GAIN_B4_CTL] = 1,
-	[TOMTOM_A_CDC_IIR2_GAIN_B4_CTL] = 1,
-	[TOMTOM_A_CDC_IIR1_GAIN_B5_CTL] = 1,
-	[TOMTOM_A_CDC_IIR2_GAIN_B5_CTL] = 1,
-	[TOMTOM_A_CDC_IIR1_GAIN_B6_CTL] = 1,
-	[TOMTOM_A_CDC_IIR2_GAIN_B6_CTL] = 1,
-	[TOMTOM_A_CDC_IIR1_GAIN_B7_CTL] = 1,
-	[TOMTOM_A_CDC_IIR2_GAIN_B7_CTL] = 1,
-	[TOMTOM_A_CDC_IIR1_GAIN_B8_CTL] = 1,
-	[TOMTOM_A_CDC_IIR2_GAIN_B8_CTL] = 1,
-	[TOMTOM_A_CDC_IIR1_CTL] = 1,
-	[TOMTOM_A_CDC_IIR2_CTL] = 1,
-	[TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL] = 1,
-	[TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL] = 1,
-	[TOMTOM_A_CDC_IIR1_COEF_B1_CTL] = 1,
-	[TOMTOM_A_CDC_IIR2_COEF_B1_CTL] = 1,
-	[TOMTOM_A_CDC_IIR1_COEF_B2_CTL] = 1,
-	[TOMTOM_A_CDC_IIR2_COEF_B2_CTL] = 1,
-	[TOMTOM_A_CDC_TOP_GAIN_UPDATE] = 1,
-	[TOMTOM_A_CDC_PA_RAMP_B1_CTL] = 1,
-	[TOMTOM_A_CDC_PA_RAMP_B2_CTL] = 1,
-	[TOMTOM_A_CDC_PA_RAMP_B3_CTL] = 1,
-	[TOMTOM_A_CDC_PA_RAMP_B4_CTL] = 1,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL] = 1,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL] = 1,
-	[TOMTOM_A_CDC_COMP0_B1_CTL] = 1,
-	[TOMTOM_A_CDC_COMP1_B1_CTL] = 1,
-	[TOMTOM_A_CDC_COMP2_B1_CTL] = 1,
-	[TOMTOM_A_CDC_COMP0_B2_CTL] = 1,
-	[TOMTOM_A_CDC_COMP1_B2_CTL] = 1,
-	[TOMTOM_A_CDC_COMP2_B2_CTL] = 1,
-	[TOMTOM_A_CDC_COMP0_B3_CTL] = 1,
-	[TOMTOM_A_CDC_COMP1_B3_CTL] = 1,
-	[TOMTOM_A_CDC_COMP2_B3_CTL] = 1,
-	[TOMTOM_A_CDC_COMP0_B4_CTL] = 1,
-	[TOMTOM_A_CDC_COMP1_B4_CTL] = 1,
-	[TOMTOM_A_CDC_COMP2_B4_CTL] = 1,
-	[TOMTOM_A_CDC_COMP0_B5_CTL] = 1,
-	[TOMTOM_A_CDC_COMP1_B5_CTL] = 1,
-	[TOMTOM_A_CDC_COMP2_B5_CTL] = 1,
-	[TOMTOM_A_CDC_COMP0_B6_CTL] = 1,
-	[TOMTOM_A_CDC_COMP1_B6_CTL] = 1,
-	[TOMTOM_A_CDC_COMP2_B6_CTL] = 1,
-	[TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS] = 1,
-	[TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS] = 1,
-	[TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS] = 1,
-	[TOMTOM_A_CDC_COMP0_FS_CFG] = 1,
-	[TOMTOM_A_CDC_COMP1_FS_CFG] = 1,
-	[TOMTOM_A_CDC_COMP2_FS_CFG] = 1,
-	[TOMTOM_A_CDC_CONN_RX1_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX1_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX1_B3_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX2_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX2_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX2_B3_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX3_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX3_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX4_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX4_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX5_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX5_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX6_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX6_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX7_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX7_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX7_B3_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_ANC_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_ANC_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_TX_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_TX_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_TX_B3_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_TX_B4_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_EQ1_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_EQ1_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_EQ1_B3_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_EQ1_B4_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_EQ2_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_EQ2_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_EQ2_B3_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_EQ2_B4_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_SRC1_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_SRC1_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_SRC2_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_SRC2_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_TX_SB_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_TX_SB_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_TX_SB_B3_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_TX_SB_B4_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_TX_SB_B5_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_TX_SB_B6_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_TX_SB_B7_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_TX_SB_B8_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_TX_SB_B9_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_TX_SB_B10_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_TX_SB_B11_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX_SB_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_RX_SB_B2_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_CLSH_CTL] = 1,
-	[TOMTOM_A_CDC_CONN_MISC] = 1,
-	[TOMTOM_A_CDC_CONN_RX8_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST] = 1,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD] = 1,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS] = 1,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK] = 1,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING] = 1,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL] = 1,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST] = 1,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD] = 1,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS] = 1,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK] = 1,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING] = 1,
-	[TOMTOM_A_CDC_MBHC_EN_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_FIR_B1_CFG] = 1,
-	[TOMTOM_A_CDC_MBHC_FIR_B2_CFG] = 1,
-	[TOMTOM_A_CDC_MBHC_TIMER_B1_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_TIMER_B2_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_TIMER_B3_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_TIMER_B4_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_TIMER_B5_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_TIMER_B6_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_B1_STATUS] = 1,
-	[TOMTOM_A_CDC_MBHC_B2_STATUS] = 1,
-	[TOMTOM_A_CDC_MBHC_B3_STATUS] = 1,
-	[TOMTOM_A_CDC_MBHC_B4_STATUS] = 1,
-	[TOMTOM_A_CDC_MBHC_B5_STATUS] = 1,
-	[TOMTOM_A_CDC_MBHC_B1_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_B2_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_VOLT_B1_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_VOLT_B2_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_VOLT_B3_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_VOLT_B4_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_VOLT_B5_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_VOLT_B6_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_VOLT_B7_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_VOLT_B8_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_VOLT_B9_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_VOLT_B10_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_VOLT_B11_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_VOLT_B12_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_CLK_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_INT_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_DEBUG_CTL] = 1,
-	[TOMTOM_A_CDC_MBHC_SPARE] = 1,
-	[TOMTOM_A_CDC_RX8_B1_CTL] = 1,
-	[TOMTOM_A_CDC_RX8_B2_CTL] = 1,
-	[TOMTOM_A_CDC_RX8_B3_CTL] = 1,
-	[TOMTOM_A_CDC_RX8_B4_CTL] = 1,
-	[TOMTOM_A_CDC_RX8_B5_CTL] = 1,
-	[TOMTOM_A_CDC_RX8_B6_CTL] = 1,
-	[TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL] = 1,
-	[TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL] = 1,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0] = 1,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1] = 1,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2] = 1,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3] = 1,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4] = 1,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5] = 1,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6] = 1,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7] = 1,
-	[TOMTOM_A_CDC_BOOST_MODE_CTL] = 1,
-	[TOMTOM_A_CDC_BOOST_THRESHOLD] = 1,
-	[TOMTOM_A_CDC_BOOST_TAP_SEL] = 1,
-	[TOMTOM_A_CDC_BOOST_HOLD_TIME] = 1,
-	[TOMTOM_A_CDC_BOOST_TRGR_EN] = 1,
-};
-
-const u8 tomtom_reset_reg_defaults[TOMTOM_CACHE_SIZE] = {
-	[TOMTOM_A_CHIP_CTL] = TOMTOM_A_CHIP_CTL__POR,
-	[TOMTOM_A_CHIP_STATUS] = TOMTOM_A_CHIP_STATUS__POR,
-	[TOMTOM_A_CHIP_ID_BYTE_0] = TOMTOM_A_CHIP_ID_BYTE_0__POR,
-	[TOMTOM_A_CHIP_ID_BYTE_1] = TOMTOM_A_CHIP_ID_BYTE_1__POR,
-	[TOMTOM_A_CHIP_ID_BYTE_2] = TOMTOM_A_CHIP_ID_BYTE_2__POR,
-	[TOMTOM_A_CHIP_ID_BYTE_3] = TOMTOM_A_CHIP_ID_BYTE_3__POR,
-	[TOMTOM_A_CHIP_I2C_SLAVE_ID] = TOMTOM_A_CHIP_I2C_SLAVE_ID__POR,
-	[TOMTOM_A_SLAVE_ID_1] = TOMTOM_A_SLAVE_ID_1__POR,
-	[TOMTOM_A_SLAVE_ID_2] = TOMTOM_A_SLAVE_ID_2__POR,
-	[TOMTOM_A_SLAVE_ID_3] = TOMTOM_A_SLAVE_ID_3__POR,
-	[TOMTOM_A_PIN_CTL_OE0] = TOMTOM_A_PIN_CTL_OE0__POR,
-	[TOMTOM_A_PIN_CTL_OE1] = TOMTOM_A_PIN_CTL_OE1__POR,
-	[TOMTOM_A_PIN_CTL_OE2] = TOMTOM_A_PIN_CTL_OE2__POR,
-	[TOMTOM_A_PIN_CTL_DATA0] = TOMTOM_A_PIN_CTL_DATA0__POR,
-	[TOMTOM_A_PIN_CTL_DATA1] = TOMTOM_A_PIN_CTL_DATA1__POR,
-	[TOMTOM_A_PIN_CTL_DATA2] = TOMTOM_A_PIN_CTL_DATA2__POR,
-	[TOMTOM_A_HDRIVE_GENERIC] = TOMTOM_A_HDRIVE_GENERIC__POR,
-	[TOMTOM_A_HDRIVE_OVERRIDE] = TOMTOM_A_HDRIVE_OVERRIDE__POR,
-	[TOMTOM_A_ANA_CSR_WAIT_STATE] = TOMTOM_A_ANA_CSR_WAIT_STATE__POR,
-	[TOMTOM_A_PROCESS_MONITOR_CTL0] = TOMTOM_A_PROCESS_MONITOR_CTL0__POR,
-	[TOMTOM_A_PROCESS_MONITOR_CTL1] = TOMTOM_A_PROCESS_MONITOR_CTL1__POR,
-	[TOMTOM_A_PROCESS_MONITOR_CTL2] = TOMTOM_A_PROCESS_MONITOR_CTL2__POR,
-	[TOMTOM_A_PROCESS_MONITOR_CTL3] = TOMTOM_A_PROCESS_MONITOR_CTL3__POR,
-	[TOMTOM_A_QFUSE_CTL] = TOMTOM_A_QFUSE_CTL__POR,
-	[TOMTOM_A_QFUSE_STATUS] = TOMTOM_A_QFUSE_STATUS__POR,
-	[TOMTOM_A_QFUSE_DATA_OUT0] = TOMTOM_A_QFUSE_DATA_OUT0__POR,
-	[TOMTOM_A_QFUSE_DATA_OUT1] = TOMTOM_A_QFUSE_DATA_OUT1__POR,
-	[TOMTOM_A_QFUSE_DATA_OUT2] = TOMTOM_A_QFUSE_DATA_OUT2__POR,
-	[TOMTOM_A_QFUSE_DATA_OUT3] = TOMTOM_A_QFUSE_DATA_OUT3__POR,
-	[TOMTOM_A_QFUSE_DATA_OUT4] = TOMTOM_A_QFUSE_DATA_OUT4__POR,
-	[TOMTOM_A_QFUSE_DATA_OUT5] = TOMTOM_A_QFUSE_DATA_OUT5__POR,
-	[TOMTOM_A_QFUSE_DATA_OUT6] = TOMTOM_A_QFUSE_DATA_OUT6__POR,
-	[TOMTOM_A_QFUSE_DATA_OUT7] = TOMTOM_A_QFUSE_DATA_OUT7__POR,
-	[TOMTOM_A_CDC_CTL] = TOMTOM_A_CDC_CTL__POR,
-	[TOMTOM_A_LEAKAGE_CTL] = TOMTOM_A_LEAKAGE_CTL__POR,
-	[TOMTOM_A_SVASS_MEM_PTR0] = TOMTOM_A_SVASS_MEM_PTR0__POR,
-	[TOMTOM_A_SVASS_MEM_PTR1] = TOMTOM_A_SVASS_MEM_PTR1__POR,
-	[TOMTOM_A_SVASS_MEM_PTR2] = TOMTOM_A_SVASS_MEM_PTR2__POR,
-	[TOMTOM_A_SVASS_MEM_CTL] = TOMTOM_A_SVASS_MEM_CTL__POR,
-	[TOMTOM_A_SVASS_MEM_BANK] = TOMTOM_A_SVASS_MEM_BANK__POR,
-	[TOMTOM_A_DMIC_B1_CTL] = TOMTOM_A_DMIC_B1_CTL__POR,
-	[TOMTOM_A_DMIC_B2_CTL] = TOMTOM_A_DMIC_B2_CTL__POR,
-	[TOMTOM_A_SVASS_CLKRST_CTL] = TOMTOM_A_SVASS_CLKRST_CTL__POR,
-	[TOMTOM_A_SVASS_CPAR_CFG] = TOMTOM_A_SVASS_CPAR_CFG__POR,
-	[TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD] =
-				TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD__POR,
-	[TOMTOM_A_SVASS_CPAR_WDOG_CFG] = TOMTOM_A_SVASS_CPAR_WDOG_CFG__POR,
-	[TOMTOM_A_SVASS_CFG] = TOMTOM_A_SVASS_CFG__POR,
-	[TOMTOM_A_SVASS_SPE_CFG] = TOMTOM_A_SVASS_SPE_CFG__POR,
-	[TOMTOM_A_SVASS_STATUS] = TOMTOM_A_SVASS_STATUS__POR,
-	[TOMTOM_A_SVASS_INT_MASK] = TOMTOM_A_SVASS_INT_MASK__POR,
-	[TOMTOM_A_SVASS_INT_STATUS] = TOMTOM_A_SVASS_INT_STATUS__POR,
-	[TOMTOM_A_SVASS_INT_CLR] = TOMTOM_A_SVASS_INT_CLR__POR,
-	[TOMTOM_A_SVASS_DEBUG] = TOMTOM_A_SVASS_DEBUG__POR,
-	[TOMTOM_A_SVASS_SPE_BKUP_INT] = TOMTOM_A_SVASS_SPE_BKUP_INT__POR,
-	[TOMTOM_A_SVASS_MEM_ACC] = TOMTOM_A_SVASS_MEM_ACC__POR,
-	[TOMTOM_A_MEM_LEAKAGE_CTL] = TOMTOM_A_MEM_LEAKAGE_CTL__POR,
-	[TOMTOM_A_SVASS_SPE_INBOX_TRG] = TOMTOM_A_SVASS_SPE_INBOX_TRG__POR,
-	[TOMTOM_A_SVASS_SPE_INBOX_0] = TOMTOM_A_SVASS_SPE_INBOX_0__POR,
-	[TOMTOM_A_SVASS_SPE_INBOX_1] = TOMTOM_A_SVASS_SPE_INBOX_1__POR,
-	[TOMTOM_A_SVASS_SPE_INBOX_2] = TOMTOM_A_SVASS_SPE_INBOX_2__POR,
-	[TOMTOM_A_SVASS_SPE_INBOX_3] = TOMTOM_A_SVASS_SPE_INBOX_3__POR,
-	[TOMTOM_A_SVASS_SPE_INBOX_4] = TOMTOM_A_SVASS_SPE_INBOX_4__POR,
-	[TOMTOM_A_SVASS_SPE_INBOX_5] = TOMTOM_A_SVASS_SPE_INBOX_5__POR,
-	[TOMTOM_A_SVASS_SPE_INBOX_6] = TOMTOM_A_SVASS_SPE_INBOX_6__POR,
-	[TOMTOM_A_SVASS_SPE_INBOX_7] = TOMTOM_A_SVASS_SPE_INBOX_7__POR,
-	[TOMTOM_A_SVASS_SPE_INBOX_8] = TOMTOM_A_SVASS_SPE_INBOX_8__POR,
-	[TOMTOM_A_SVASS_SPE_INBOX_9] = TOMTOM_A_SVASS_SPE_INBOX_9__POR,
-	[TOMTOM_A_SVASS_SPE_INBOX_10] = TOMTOM_A_SVASS_SPE_INBOX_10__POR,
-	[TOMTOM_A_SVASS_SPE_INBOX_11] = TOMTOM_A_SVASS_SPE_INBOX_11__POR,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_0] = TOMTOM_A_SVASS_SPE_OUTBOX_0__POR,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_1] = TOMTOM_A_SVASS_SPE_OUTBOX_1__POR,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_2] = TOMTOM_A_SVASS_SPE_OUTBOX_2__POR,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_3] = TOMTOM_A_SVASS_SPE_OUTBOX_3__POR,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_4] = TOMTOM_A_SVASS_SPE_OUTBOX_4__POR,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_5] = TOMTOM_A_SVASS_SPE_OUTBOX_5__POR,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_6] = TOMTOM_A_SVASS_SPE_OUTBOX_6__POR,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_7] = TOMTOM_A_SVASS_SPE_OUTBOX_7__POR,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_8] = TOMTOM_A_SVASS_SPE_OUTBOX_8__POR,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_9] = TOMTOM_A_SVASS_SPE_OUTBOX_9__POR,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_10] = TOMTOM_A_SVASS_SPE_OUTBOX_10__POR,
-	[TOMTOM_A_SVASS_SPE_OUTBOX_11] = TOMTOM_A_SVASS_SPE_OUTBOX_11__POR,
-	[TOMTOM_A_INTR_MODE] = TOMTOM_A_INTR_MODE__POR,
-	[TOMTOM_A_INTR1_MASK0] = TOMTOM_A_INTR1_MASK0__POR,
-	[TOMTOM_A_INTR1_MASK1] = TOMTOM_A_INTR1_MASK1__POR,
-	[TOMTOM_A_INTR1_MASK2] = TOMTOM_A_INTR1_MASK2__POR,
-	[TOMTOM_A_INTR1_MASK3] = TOMTOM_A_INTR1_MASK3__POR,
-	[TOMTOM_A_INTR1_STATUS0] = TOMTOM_A_INTR1_STATUS0__POR,
-	[TOMTOM_A_INTR1_STATUS1] = TOMTOM_A_INTR1_STATUS1__POR,
-	[TOMTOM_A_INTR1_STATUS2] = TOMTOM_A_INTR1_STATUS2__POR,
-	[TOMTOM_A_INTR1_STATUS3] = TOMTOM_A_INTR1_STATUS3__POR,
-	[TOMTOM_A_INTR1_CLEAR0] = TOMTOM_A_INTR1_CLEAR0__POR,
-	[TOMTOM_A_INTR1_CLEAR1] = TOMTOM_A_INTR1_CLEAR1__POR,
-	[TOMTOM_A_INTR1_CLEAR2] = TOMTOM_A_INTR1_CLEAR2__POR,
-	[TOMTOM_A_INTR1_CLEAR3] = TOMTOM_A_INTR1_CLEAR3__POR,
-	[TOMTOM_A_INTR1_LEVEL0] = TOMTOM_A_INTR1_LEVEL0__POR,
-	[TOMTOM_A_INTR1_LEVEL1] = TOMTOM_A_INTR1_LEVEL1__POR,
-	[TOMTOM_A_INTR1_LEVEL2] = TOMTOM_A_INTR1_LEVEL2__POR,
-	[TOMTOM_A_INTR1_LEVEL3] = TOMTOM_A_INTR1_LEVEL3__POR,
-	[TOMTOM_A_INTR1_TEST0] = TOMTOM_A_INTR1_TEST0__POR,
-	[TOMTOM_A_INTR1_TEST1] = TOMTOM_A_INTR1_TEST1__POR,
-	[TOMTOM_A_INTR1_TEST2] = TOMTOM_A_INTR1_TEST2__POR,
-	[TOMTOM_A_INTR1_TEST3] = TOMTOM_A_INTR1_TEST3__POR,
-	[TOMTOM_A_INTR1_SET0] = TOMTOM_A_INTR1_SET0__POR,
-	[TOMTOM_A_INTR1_SET1] = TOMTOM_A_INTR1_SET1__POR,
-	[TOMTOM_A_INTR1_SET2] = TOMTOM_A_INTR1_SET2__POR,
-	[TOMTOM_A_INTR1_SET3] = TOMTOM_A_INTR1_SET3__POR,
-	[TOMTOM_A_INTR2_MASK0] = TOMTOM_A_INTR2_MASK0__POR,
-	[TOMTOM_A_INTR2_STATUS0] = TOMTOM_A_INTR2_STATUS0__POR,
-	[TOMTOM_A_INTR2_CLEAR0] = TOMTOM_A_INTR2_CLEAR0__POR,
-	[TOMTOM_A_INTR2_LEVEL0] = TOMTOM_A_INTR2_LEVEL0__POR,
-	[TOMTOM_A_INTR2_TEST0] = TOMTOM_A_INTR2_TEST0__POR,
-	[TOMTOM_A_INTR2_SET0] = TOMTOM_A_INTR2_SET0__POR,
-	[TOMTOM_A_CDC_TX_I2S_SCK_MODE] = TOMTOM_A_CDC_TX_I2S_SCK_MODE__POR,
-	[TOMTOM_A_CDC_TX_I2S_WS_MODE] = TOMTOM_A_CDC_TX_I2S_WS_MODE__POR,
-	[TOMTOM_A_CDC_DMIC_DATA0_MODE] = TOMTOM_A_CDC_DMIC_DATA0_MODE__POR,
-	[TOMTOM_A_CDC_DMIC_CLK0_MODE] = TOMTOM_A_CDC_DMIC_CLK0_MODE__POR,
-	[TOMTOM_A_CDC_DMIC_DATA1_MODE] = TOMTOM_A_CDC_DMIC_DATA1_MODE__POR,
-	[TOMTOM_A_CDC_DMIC_CLK1_MODE] = TOMTOM_A_CDC_DMIC_CLK1_MODE__POR,
-	[TOMTOM_A_CDC_RX_I2S_SCK_MODE] = TOMTOM_A_CDC_RX_I2S_SCK_MODE__POR,
-	[TOMTOM_A_CDC_RX_I2S_WS_MODE] = TOMTOM_A_CDC_RX_I2S_WS_MODE__POR,
-	[TOMTOM_A_CDC_DMIC_DATA2_MODE] = TOMTOM_A_CDC_DMIC_DATA2_MODE__POR,
-	[TOMTOM_A_CDC_DMIC_CLK2_MODE] = TOMTOM_A_CDC_DMIC_CLK2_MODE__POR,
-	[TOMTOM_A_CDC_INTR1_MODE] = TOMTOM_A_CDC_INTR1_MODE__POR,
-	[TOMTOM_A_CDC_SB_NRZ_SEL_MODE] = TOMTOM_A_CDC_SB_NRZ_SEL_MODE__POR,
-	[TOMTOM_A_CDC_INTR2_MODE] = TOMTOM_A_CDC_INTR2_MODE__POR,
-	[TOMTOM_A_CDC_RF_PA_ON_MODE] = TOMTOM_A_CDC_RF_PA_ON_MODE__POR,
-	[TOMTOM_A_CDC_BOOST_MODE] = TOMTOM_A_CDC_BOOST_MODE__POR,
-	[TOMTOM_A_CDC_JTCK_MODE] = TOMTOM_A_CDC_JTCK_MODE__POR,
-	[TOMTOM_A_CDC_JTDI_MODE] = TOMTOM_A_CDC_JTDI_MODE__POR,
-	[TOMTOM_A_CDC_JTMS_MODE] = TOMTOM_A_CDC_JTMS_MODE__POR,
-	[TOMTOM_A_CDC_JTDO_MODE] = TOMTOM_A_CDC_JTDO_MODE__POR,
-	[TOMTOM_A_CDC_JTRST_MODE] = TOMTOM_A_CDC_JTRST_MODE__POR,
-	[TOMTOM_A_CDC_BIST_MODE_MODE] = TOMTOM_A_CDC_BIST_MODE_MODE__POR,
-	[TOMTOM_A_CDC_MAD_MAIN_CTL_1] = TOMTOM_A_CDC_MAD_MAIN_CTL_1__POR,
-	[TOMTOM_A_CDC_MAD_MAIN_CTL_2] = TOMTOM_A_CDC_MAD_MAIN_CTL_2__POR,
-	[TOMTOM_A_CDC_MAD_AUDIO_CTL_1] = TOMTOM_A_CDC_MAD_AUDIO_CTL_1__POR,
-	[TOMTOM_A_CDC_MAD_AUDIO_CTL_2] = TOMTOM_A_CDC_MAD_AUDIO_CTL_2__POR,
-	[TOMTOM_A_CDC_MAD_AUDIO_CTL_3] = TOMTOM_A_CDC_MAD_AUDIO_CTL_3__POR,
-	[TOMTOM_A_CDC_MAD_AUDIO_CTL_4] = TOMTOM_A_CDC_MAD_AUDIO_CTL_4__POR,
-	[TOMTOM_A_CDC_MAD_AUDIO_CTL_5] = TOMTOM_A_CDC_MAD_AUDIO_CTL_5__POR,
-	[TOMTOM_A_CDC_MAD_AUDIO_CTL_6] = TOMTOM_A_CDC_MAD_AUDIO_CTL_6__POR,
-	[TOMTOM_A_CDC_MAD_AUDIO_CTL_7] = TOMTOM_A_CDC_MAD_AUDIO_CTL_7__POR,
-	[TOMTOM_A_CDC_MAD_AUDIO_CTL_8] = TOMTOM_A_CDC_MAD_AUDIO_CTL_8__POR,
-	[TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR] =
-				TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR__POR,
-	[TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL] =
-				TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL__POR,
-	[TOMTOM_A_CDC_MAD_ULTR_CTL_1] = TOMTOM_A_CDC_MAD_ULTR_CTL_1__POR,
-	[TOMTOM_A_CDC_MAD_ULTR_CTL_2] = TOMTOM_A_CDC_MAD_ULTR_CTL_2__POR,
-	[TOMTOM_A_CDC_MAD_ULTR_CTL_3] = TOMTOM_A_CDC_MAD_ULTR_CTL_3__POR,
-	[TOMTOM_A_CDC_MAD_ULTR_CTL_4] = TOMTOM_A_CDC_MAD_ULTR_CTL_4__POR,
-	[TOMTOM_A_CDC_MAD_ULTR_CTL_5] = TOMTOM_A_CDC_MAD_ULTR_CTL_5__POR,
-	[TOMTOM_A_CDC_MAD_ULTR_CTL_6] = TOMTOM_A_CDC_MAD_ULTR_CTL_6__POR,
-	[TOMTOM_A_CDC_MAD_ULTR_CTL_7] = TOMTOM_A_CDC_MAD_ULTR_CTL_7__POR,
-	[TOMTOM_A_CDC_MAD_BEACON_CTL_1] = TOMTOM_A_CDC_MAD_BEACON_CTL_1__POR,
-	[TOMTOM_A_CDC_MAD_BEACON_CTL_2] = TOMTOM_A_CDC_MAD_BEACON_CTL_2__POR,
-	[TOMTOM_A_CDC_MAD_BEACON_CTL_3] = TOMTOM_A_CDC_MAD_BEACON_CTL_3__POR,
-	[TOMTOM_A_CDC_MAD_BEACON_CTL_4] = TOMTOM_A_CDC_MAD_BEACON_CTL_4__POR,
-	[TOMTOM_A_CDC_MAD_BEACON_CTL_5] = TOMTOM_A_CDC_MAD_BEACON_CTL_5__POR,
-	[TOMTOM_A_CDC_MAD_BEACON_CTL_6] = TOMTOM_A_CDC_MAD_BEACON_CTL_6__POR,
-	[TOMTOM_A_CDC_MAD_BEACON_CTL_7] = TOMTOM_A_CDC_MAD_BEACON_CTL_7__POR,
-	[TOMTOM_A_CDC_MAD_BEACON_CTL_8] = TOMTOM_A_CDC_MAD_BEACON_CTL_8__POR,
-	[TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR] =
-				TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR__POR,
-	[TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL] =
-				TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL__POR,
-	[TOMTOM_A_CDC_MAD_INP_SEL] = TOMTOM_A_CDC_MAD_INP_SEL__POR,
-	[TOMTOM_A_BIAS_REF_CTL] = TOMTOM_A_BIAS_REF_CTL__POR,
-	[TOMTOM_A_BIAS_CENTRAL_BG_CTL] = TOMTOM_A_BIAS_CENTRAL_BG_CTL__POR,
-	[TOMTOM_A_BIAS_PRECHRG_CTL] = TOMTOM_A_BIAS_PRECHRG_CTL__POR,
-	[TOMTOM_A_BIAS_CURR_CTL_1] = TOMTOM_A_BIAS_CURR_CTL_1__POR,
-	[TOMTOM_A_BIAS_CURR_CTL_2] = TOMTOM_A_BIAS_CURR_CTL_2__POR,
-	[TOMTOM_A_BIAS_OSC_BG_CTL] = TOMTOM_A_BIAS_OSC_BG_CTL__POR,
-	[TOMTOM_A_CLK_BUFF_EN1] = TOMTOM_A_CLK_BUFF_EN1__POR,
-	[TOMTOM_A_CLK_BUFF_EN2] = TOMTOM_A_CLK_BUFF_EN2__POR,
-	[TOMTOM_A_LDO_L_MODE_1] = TOMTOM_A_LDO_L_MODE_1__POR,
-	[TOMTOM_A_LDO_L_MODE_2] = TOMTOM_A_LDO_L_MODE_2__POR,
-	[TOMTOM_A_LDO_L_CTRL_1] = TOMTOM_A_LDO_L_CTRL_1__POR,
-	[TOMTOM_A_LDO_L_CTRL_2] = TOMTOM_A_LDO_L_CTRL_2__POR,
-	[TOMTOM_A_LDO_L_CTRL_3] = TOMTOM_A_LDO_L_CTRL_3__POR,
-	[TOMTOM_A_LDO_L_CTRL_4] = TOMTOM_A_LDO_L_CTRL_4__POR,
-	[TOMTOM_A_LDO_H_MODE_1] = TOMTOM_A_LDO_H_MODE_1__POR,
-	[TOMTOM_A_LDO_H_MODE_2] = TOMTOM_A_LDO_H_MODE_2__POR,
-	[TOMTOM_A_LDO_H_LOOP_CTL] = TOMTOM_A_LDO_H_LOOP_CTL__POR,
-	[TOMTOM_A_LDO_H_COMP_1] = TOMTOM_A_LDO_H_COMP_1__POR,
-	[TOMTOM_A_LDO_H_COMP_2] = TOMTOM_A_LDO_H_COMP_2__POR,
-	[TOMTOM_A_LDO_H_BIAS_1] = TOMTOM_A_LDO_H_BIAS_1__POR,
-	[TOMTOM_A_LDO_H_BIAS_2] = TOMTOM_A_LDO_H_BIAS_2__POR,
-	[TOMTOM_A_LDO_H_BIAS_3] = TOMTOM_A_LDO_H_BIAS_3__POR,
-	[TOMTOM_A_VBAT_CLK] = TOMTOM_A_VBAT_CLK__POR,
-	[TOMTOM_A_VBAT_LOOP] = TOMTOM_A_VBAT_LOOP__POR,
-	[TOMTOM_A_VBAT_REF] = TOMTOM_A_VBAT_REF__POR,
-	[TOMTOM_A_VBAT_ADC_TEST] = TOMTOM_A_VBAT_ADC_TEST__POR,
-	[TOMTOM_A_VBAT_FE] = TOMTOM_A_VBAT_FE__POR,
-	[TOMTOM_A_VBAT_BIAS_1] = TOMTOM_A_VBAT_BIAS_1__POR,
-	[TOMTOM_A_VBAT_BIAS_2] = TOMTOM_A_VBAT_BIAS_2__POR,
-	[TOMTOM_A_VBAT_ADC_DATA_MSB] = TOMTOM_A_VBAT_ADC_DATA_MSB__POR,
-	[TOMTOM_A_VBAT_ADC_DATA_LSB] = TOMTOM_A_VBAT_ADC_DATA_LSB__POR,
-	[TOMTOM_A_FLL_NREF] = TOMTOM_A_FLL_NREF__POR,
-	[TOMTOM_A_FLL_KDCO_TUNE] = TOMTOM_A_FLL_KDCO_TUNE__POR,
-	[TOMTOM_A_FLL_LOCK_THRESH] = TOMTOM_A_FLL_LOCK_THRESH__POR,
-	[TOMTOM_A_FLL_LOCK_DET_COUNT] = TOMTOM_A_FLL_LOCK_DET_COUNT__POR,
-	[TOMTOM_A_FLL_DAC_THRESHOLD] = TOMTOM_A_FLL_DAC_THRESHOLD__POR,
-	[TOMTOM_A_FLL_TEST_DCO_FREERUN] = TOMTOM_A_FLL_TEST_DCO_FREERUN__POR,
-	[TOMTOM_A_FLL_TEST_ENABLE] = TOMTOM_A_FLL_TEST_ENABLE__POR,
-	[TOMTOM_A_MICB_CFILT_1_CTL] = TOMTOM_A_MICB_CFILT_1_CTL__POR,
-	[TOMTOM_A_MICB_CFILT_1_VAL] = TOMTOM_A_MICB_CFILT_1_VAL__POR,
-	[TOMTOM_A_MICB_CFILT_1_PRECHRG] = TOMTOM_A_MICB_CFILT_1_PRECHRG__POR,
-	[TOMTOM_A_MICB_1_CTL] = TOMTOM_A_MICB_1_CTL__POR,
-	[TOMTOM_A_MICB_1_INT_RBIAS] = TOMTOM_A_MICB_1_INT_RBIAS__POR,
-	[TOMTOM_A_MICB_1_MBHC] = TOMTOM_A_MICB_1_MBHC__POR,
-	[TOMTOM_A_MICB_CFILT_2_CTL] = TOMTOM_A_MICB_CFILT_2_CTL__POR,
-	[TOMTOM_A_MICB_CFILT_2_VAL] = TOMTOM_A_MICB_CFILT_2_VAL__POR,
-	[TOMTOM_A_MICB_CFILT_2_PRECHRG] = TOMTOM_A_MICB_CFILT_2_PRECHRG__POR,
-	[TOMTOM_A_MICB_2_CTL] = TOMTOM_A_MICB_2_CTL__POR,
-	[TOMTOM_A_MICB_2_INT_RBIAS] = TOMTOM_A_MICB_2_INT_RBIAS__POR,
-	[TOMTOM_A_MICB_2_MBHC] = TOMTOM_A_MICB_2_MBHC__POR,
-	[TOMTOM_A_MICB_CFILT_3_CTL] = TOMTOM_A_MICB_CFILT_3_CTL__POR,
-	[TOMTOM_A_MICB_CFILT_3_VAL] = TOMTOM_A_MICB_CFILT_3_VAL__POR,
-	[TOMTOM_A_MICB_CFILT_3_PRECHRG] = TOMTOM_A_MICB_CFILT_3_PRECHRG__POR,
-	[TOMTOM_A_MICB_3_CTL] = TOMTOM_A_MICB_3_CTL__POR,
-	[TOMTOM_A_MICB_3_INT_RBIAS] = TOMTOM_A_MICB_3_INT_RBIAS__POR,
-	[TOMTOM_A_MICB_3_MBHC] = TOMTOM_A_MICB_3_MBHC__POR,
-	[TOMTOM_A_MICB_4_CTL] = TOMTOM_A_MICB_4_CTL__POR,
-	[TOMTOM_A_MICB_4_INT_RBIAS] = TOMTOM_A_MICB_4_INT_RBIAS__POR,
-	[TOMTOM_A_MICB_4_MBHC] = TOMTOM_A_MICB_4_MBHC__POR,
-	[TOMTOM_A_SPKR_DRV2_EN] = TOMTOM_A_SPKR_DRV2_EN__POR,
-	[TOMTOM_A_SPKR_DRV2_GAIN] = TOMTOM_A_SPKR_DRV2_GAIN__POR,
-	[TOMTOM_A_SPKR_DRV2_DAC_CTL] = TOMTOM_A_SPKR_DRV2_DAC_CTL__POR,
-	[TOMTOM_A_SPKR_DRV2_OCP_CTL] = TOMTOM_A_SPKR_DRV2_OCP_CTL__POR,
-	[TOMTOM_A_SPKR_DRV2_CLIP_DET] = TOMTOM_A_SPKR_DRV2_CLIP_DET__POR,
-	[TOMTOM_A_SPKR_DRV2_DBG_DAC] = TOMTOM_A_SPKR_DRV2_DBG_DAC__POR,
-	[TOMTOM_A_SPKR_DRV2_DBG_PA] = TOMTOM_A_SPKR_DRV2_DBG_PA__POR,
-	[TOMTOM_A_SPKR_DRV2_DBG_PWRSTG] = TOMTOM_A_SPKR_DRV2_DBG_PWRSTG__POR,
-	[TOMTOM_A_SPKR_DRV2_BIAS_LDO] = TOMTOM_A_SPKR_DRV2_BIAS_LDO__POR,
-	[TOMTOM_A_SPKR_DRV2_BIAS_INT] = TOMTOM_A_SPKR_DRV2_BIAS_INT__POR,
-	[TOMTOM_A_SPKR_DRV2_BIAS_PA] = TOMTOM_A_SPKR_DRV2_BIAS_PA__POR,
-	[TOMTOM_A_SPKR_DRV2_STATUS_OCP] = TOMTOM_A_SPKR_DRV2_STATUS_OCP__POR,
-	[TOMTOM_A_SPKR_DRV2_STATUS_PA] = TOMTOM_A_SPKR_DRV2_STATUS_PA__POR,
-	[TOMTOM_A_MBHC_INSERT_DETECT] = TOMTOM_A_MBHC_INSERT_DETECT__POR,
-	[TOMTOM_A_MBHC_INSERT_DET_STATUS] =
-				TOMTOM_A_MBHC_INSERT_DET_STATUS__POR,
-	[TOMTOM_A_TX_COM_BIAS] = TOMTOM_A_TX_COM_BIAS__POR,
-	[TOMTOM_A_MBHC_INSERT_DETECT2] = TOMTOM_A_MBHC_INSERT_DETECT2__POR,
-	[TOMTOM_A_MBHC_SCALING_MUX_1] = TOMTOM_A_MBHC_SCALING_MUX_1__POR,
-	[TOMTOM_A_MBHC_SCALING_MUX_2] = TOMTOM_A_MBHC_SCALING_MUX_2__POR,
-	[TOMTOM_A_MAD_ANA_CTRL] = TOMTOM_A_MAD_ANA_CTRL__POR,
-	[TOMTOM_A_TX_SUP_SWITCH_CTRL_1] = TOMTOM_A_TX_SUP_SWITCH_CTRL_1__POR,
-	[TOMTOM_A_TX_SUP_SWITCH_CTRL_2] = TOMTOM_A_TX_SUP_SWITCH_CTRL_2__POR,
-	[TOMTOM_A_TX_1_GAIN] = TOMTOM_A_TX_1_GAIN__POR,
-	[TOMTOM_A_TX_1_2_TEST_EN] = TOMTOM_A_TX_1_2_TEST_EN__POR,
-	[TOMTOM_A_TX_2_GAIN] = TOMTOM_A_TX_2_GAIN__POR,
-	[TOMTOM_A_TX_1_2_ADC_IB] = TOMTOM_A_TX_1_2_ADC_IB__POR,
-	[TOMTOM_A_TX_1_2_ATEST_REFCTRL] = TOMTOM_A_TX_1_2_ATEST_REFCTRL__POR,
-	[TOMTOM_A_TX_1_2_TEST_CTL] = TOMTOM_A_TX_1_2_TEST_CTL__POR,
-	[TOMTOM_A_TX_1_2_TEST_BLOCK_EN] = TOMTOM_A_TX_1_2_TEST_BLOCK_EN__POR,
-	[TOMTOM_A_TX_1_2_TXFE_CLKDIV] = TOMTOM_A_TX_1_2_TXFE_CLKDIV__POR,
-	[TOMTOM_A_TX_1_2_SAR_ERR_CH1] = TOMTOM_A_TX_1_2_SAR_ERR_CH1__POR,
-	[TOMTOM_A_TX_1_2_SAR_ERR_CH2] = TOMTOM_A_TX_1_2_SAR_ERR_CH2__POR,
-	[TOMTOM_A_TX_3_GAIN] = TOMTOM_A_TX_3_GAIN__POR,
-	[TOMTOM_A_TX_3_4_TEST_EN] = TOMTOM_A_TX_3_4_TEST_EN__POR,
-	[TOMTOM_A_TX_4_GAIN] = TOMTOM_A_TX_4_GAIN__POR,
-	[TOMTOM_A_TX_3_4_ADC_IB] = TOMTOM_A_TX_3_4_ADC_IB__POR,
-	[TOMTOM_A_TX_3_4_ATEST_REFCTRL] = TOMTOM_A_TX_3_4_ATEST_REFCTRL__POR,
-	[TOMTOM_A_TX_3_4_TEST_CTL] = TOMTOM_A_TX_3_4_TEST_CTL__POR,
-	[TOMTOM_A_TX_3_4_TEST_BLOCK_EN] = TOMTOM_A_TX_3_4_TEST_BLOCK_EN__POR,
-	[TOMTOM_A_TX_3_4_TXFE_CKDIV] = TOMTOM_A_TX_3_4_TXFE_CKDIV__POR,
-	[TOMTOM_A_TX_3_4_SAR_ERR_CH3] = TOMTOM_A_TX_3_4_SAR_ERR_CH3__POR,
-	[TOMTOM_A_TX_3_4_SAR_ERR_CH4] = TOMTOM_A_TX_3_4_SAR_ERR_CH4__POR,
-	[TOMTOM_A_TX_5_GAIN] = TOMTOM_A_TX_5_GAIN__POR,
-	[TOMTOM_A_TX_5_6_TEST_EN] = TOMTOM_A_TX_5_6_TEST_EN__POR,
-	[TOMTOM_A_TX_6_GAIN] = TOMTOM_A_TX_6_GAIN__POR,
-	[TOMTOM_A_TX_5_6_ADC_IB] = TOMTOM_A_TX_5_6_ADC_IB__POR,
-	[TOMTOM_A_TX_5_6_ATEST_REFCTRL] = TOMTOM_A_TX_5_6_ATEST_REFCTRL__POR,
-	[TOMTOM_A_TX_5_6_TEST_CTL] = TOMTOM_A_TX_5_6_TEST_CTL__POR,
-	[TOMTOM_A_TX_5_6_TEST_BLOCK_EN] = TOMTOM_A_TX_5_6_TEST_BLOCK_EN__POR,
-	[TOMTOM_A_TX_5_6_TXFE_CKDIV] = TOMTOM_A_TX_5_6_TXFE_CKDIV__POR,
-	[TOMTOM_A_TX_5_6_SAR_ERR_CH5] = TOMTOM_A_TX_5_6_SAR_ERR_CH5__POR,
-	[TOMTOM_A_TX_5_6_SAR_ERR_CH6] = TOMTOM_A_TX_5_6_SAR_ERR_CH6__POR,
-	[TOMTOM_A_TX_7_MBHC_EN] = TOMTOM_A_TX_7_MBHC_EN__POR,
-	[TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL] =
-				TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL__POR,
-	[TOMTOM_A_TX_7_MBHC_ADC] = TOMTOM_A_TX_7_MBHC_ADC__POR,
-	[TOMTOM_A_TX_7_MBHC_TEST_CTL] = TOMTOM_A_TX_7_MBHC_TEST_CTL__POR,
-	[TOMTOM_A_TX_7_MBHC_SAR_ERR] = TOMTOM_A_TX_7_MBHC_SAR_ERR__POR,
-	[TOMTOM_A_TX_7_TXFE_CLKDIV] = TOMTOM_A_TX_7_TXFE_CLKDIV__POR,
-	[TOMTOM_A_RCO_CTRL] = TOMTOM_A_RCO_CTRL__POR,
-	[TOMTOM_A_RCO_CALIBRATION_CTRL1] = TOMTOM_A_RCO_CALIBRATION_CTRL1__POR,
-	[TOMTOM_A_RCO_CALIBRATION_CTRL2] = TOMTOM_A_RCO_CALIBRATION_CTRL2__POR,
-	[TOMTOM_A_RCO_CALIBRATION_CTRL3] = TOMTOM_A_RCO_CALIBRATION_CTRL3__POR,
-	[TOMTOM_A_RCO_TEST_CTRL] = TOMTOM_A_RCO_TEST_CTRL__POR,
-	[TOMTOM_A_RCO_CALIBRATION_RESULT1] =
-				TOMTOM_A_RCO_CALIBRATION_RESULT1__POR,
-	[TOMTOM_A_RCO_CALIBRATION_RESULT2] =
-				TOMTOM_A_RCO_CALIBRATION_RESULT2__POR,
-	[TOMTOM_A_BUCK_MODE_1] = TOMTOM_A_BUCK_MODE_1__POR,
-	[TOMTOM_A_BUCK_MODE_2] = TOMTOM_A_BUCK_MODE_2__POR,
-	[TOMTOM_A_BUCK_MODE_3] = TOMTOM_A_BUCK_MODE_3__POR,
-	[TOMTOM_A_BUCK_MODE_4] = TOMTOM_A_BUCK_MODE_4__POR,
-	[TOMTOM_A_BUCK_MODE_5] = TOMTOM_A_BUCK_MODE_5__POR,
-	[TOMTOM_A_BUCK_CTRL_VCL_1] = TOMTOM_A_BUCK_CTRL_VCL_1__POR,
-	[TOMTOM_A_BUCK_CTRL_VCL_2] = TOMTOM_A_BUCK_CTRL_VCL_2__POR,
-	[TOMTOM_A_BUCK_CTRL_VCL_3] = TOMTOM_A_BUCK_CTRL_VCL_3__POR,
-	[TOMTOM_A_BUCK_CTRL_CCL_1] = TOMTOM_A_BUCK_CTRL_CCL_1__POR,
-	[TOMTOM_A_BUCK_CTRL_CCL_2] = TOMTOM_A_BUCK_CTRL_CCL_2__POR,
-	[TOMTOM_A_BUCK_CTRL_CCL_3] = TOMTOM_A_BUCK_CTRL_CCL_3__POR,
-	[TOMTOM_A_BUCK_CTRL_CCL_4] = TOMTOM_A_BUCK_CTRL_CCL_4__POR,
-	[TOMTOM_A_BUCK_CTRL_PWM_DRVR_1] = TOMTOM_A_BUCK_CTRL_PWM_DRVR_1__POR,
-	[TOMTOM_A_BUCK_CTRL_PWM_DRVR_2] = TOMTOM_A_BUCK_CTRL_PWM_DRVR_2__POR,
-	[TOMTOM_A_BUCK_CTRL_PWM_DRVR_3] = TOMTOM_A_BUCK_CTRL_PWM_DRVR_3__POR,
-	[TOMTOM_A_BUCK_TMUX_A_D] = TOMTOM_A_BUCK_TMUX_A_D__POR,
-	[TOMTOM_A_NCP_BUCKREF] = TOMTOM_A_NCP_BUCKREF__POR,
-	[TOMTOM_A_NCP_EN] = TOMTOM_A_NCP_EN__POR,
-	[TOMTOM_A_NCP_CLK] = TOMTOM_A_NCP_CLK__POR,
-	[TOMTOM_A_NCP_STATIC] = TOMTOM_A_NCP_STATIC__POR,
-	[TOMTOM_A_NCP_VTH_LOW] = TOMTOM_A_NCP_VTH_LOW__POR,
-	[TOMTOM_A_NCP_VTH_HIGH] = TOMTOM_A_NCP_VTH_HIGH__POR,
-	[TOMTOM_A_NCP_ATEST] = TOMTOM_A_NCP_ATEST__POR,
-	[TOMTOM_A_NCP_DTEST] = TOMTOM_A_NCP_DTEST__POR,
-	[TOMTOM_A_NCP_DLY1] = TOMTOM_A_NCP_DLY1__POR,
-	[TOMTOM_A_NCP_DLY2] = TOMTOM_A_NCP_DLY2__POR,
-	[TOMTOM_A_RX_AUX_SW_CTL] = TOMTOM_A_RX_AUX_SW_CTL__POR,
-	[TOMTOM_A_RX_PA_AUX_IN_CONN] = TOMTOM_A_RX_PA_AUX_IN_CONN__POR,
-	[TOMTOM_A_RX_COM_TIMER_DIV] = TOMTOM_A_RX_COM_TIMER_DIV__POR,
-	[TOMTOM_A_RX_COM_OCP_CTL] = TOMTOM_A_RX_COM_OCP_CTL__POR,
-	[TOMTOM_A_RX_COM_OCP_COUNT] = TOMTOM_A_RX_COM_OCP_COUNT__POR,
-	[TOMTOM_A_RX_COM_DAC_CTL] = TOMTOM_A_RX_COM_DAC_CTL__POR,
-	[TOMTOM_A_RX_COM_BIAS] = TOMTOM_A_RX_COM_BIAS__POR,
-	[TOMTOM_A_RX_HPH_AUTO_CHOP] = TOMTOM_A_RX_HPH_AUTO_CHOP__POR,
-	[TOMTOM_A_RX_HPH_CHOP_CTL] = TOMTOM_A_RX_HPH_CHOP_CTL__POR,
-	[TOMTOM_A_RX_HPH_BIAS_PA] = TOMTOM_A_RX_HPH_BIAS_PA__POR,
-	[TOMTOM_A_RX_HPH_BIAS_LDO] = TOMTOM_A_RX_HPH_BIAS_LDO__POR,
-	[TOMTOM_A_RX_HPH_BIAS_CNP] = TOMTOM_A_RX_HPH_BIAS_CNP__POR,
-	[TOMTOM_A_RX_HPH_BIAS_WG_OCP] = TOMTOM_A_RX_HPH_BIAS_WG_OCP__POR,
-	[TOMTOM_A_RX_HPH_OCP_CTL] = TOMTOM_A_RX_HPH_OCP_CTL__POR,
-	[TOMTOM_A_RX_HPH_CNP_EN] = TOMTOM_A_RX_HPH_CNP_EN__POR,
-	[TOMTOM_A_RX_HPH_CNP_WG_CTL] = TOMTOM_A_RX_HPH_CNP_WG_CTL__POR,
-	[TOMTOM_A_RX_HPH_CNP_WG_TIME] = TOMTOM_A_RX_HPH_CNP_WG_TIME__POR,
-	[TOMTOM_A_RX_HPH_L_GAIN] = TOMTOM_A_RX_HPH_L_GAIN__POR,
-	[TOMTOM_A_RX_HPH_L_TEST] = TOMTOM_A_RX_HPH_L_TEST__POR,
-	[TOMTOM_A_RX_HPH_L_PA_CTL] = TOMTOM_A_RX_HPH_L_PA_CTL__POR,
-	[TOMTOM_A_RX_HPH_L_DAC_CTL] = TOMTOM_A_RX_HPH_L_DAC_CTL__POR,
-	[TOMTOM_A_RX_HPH_L_ATEST] = TOMTOM_A_RX_HPH_L_ATEST__POR,
-	[TOMTOM_A_RX_HPH_L_STATUS] = TOMTOM_A_RX_HPH_L_STATUS__POR,
-	[TOMTOM_A_RX_HPH_R_GAIN] = TOMTOM_A_RX_HPH_R_GAIN__POR,
-	[TOMTOM_A_RX_HPH_R_TEST] = TOMTOM_A_RX_HPH_R_TEST__POR,
-	[TOMTOM_A_RX_HPH_R_PA_CTL] = TOMTOM_A_RX_HPH_R_PA_CTL__POR,
-	[TOMTOM_A_RX_HPH_R_DAC_CTL] = TOMTOM_A_RX_HPH_R_DAC_CTL__POR,
-	[TOMTOM_A_RX_HPH_R_ATEST] = TOMTOM_A_RX_HPH_R_ATEST__POR,
-	[TOMTOM_A_RX_HPH_R_STATUS] = TOMTOM_A_RX_HPH_R_STATUS__POR,
-	[TOMTOM_A_RX_EAR_BIAS_PA] = TOMTOM_A_RX_EAR_BIAS_PA__POR,
-	[TOMTOM_A_RX_EAR_BIAS_CMBUFF] = TOMTOM_A_RX_EAR_BIAS_CMBUFF__POR,
-	[TOMTOM_A_RX_EAR_EN] = TOMTOM_A_RX_EAR_EN__POR,
-	[TOMTOM_A_RX_EAR_GAIN] = TOMTOM_A_RX_EAR_GAIN__POR,
-	[TOMTOM_A_RX_EAR_CMBUFF] = TOMTOM_A_RX_EAR_CMBUFF__POR,
-	[TOMTOM_A_RX_EAR_ICTL] = TOMTOM_A_RX_EAR_ICTL__POR,
-	[TOMTOM_A_RX_EAR_CCOMP] = TOMTOM_A_RX_EAR_CCOMP__POR,
-	[TOMTOM_A_RX_EAR_VCM] = TOMTOM_A_RX_EAR_VCM__POR,
-	[TOMTOM_A_RX_EAR_CNP] = TOMTOM_A_RX_EAR_CNP__POR,
-	[TOMTOM_A_RX_EAR_DAC_CTL_ATEST] = TOMTOM_A_RX_EAR_DAC_CTL_ATEST__POR,
-	[TOMTOM_A_RX_EAR_STATUS] = TOMTOM_A_RX_EAR_STATUS__POR,
-	[TOMTOM_A_RX_LINE_BIAS_PA] = TOMTOM_A_RX_LINE_BIAS_PA__POR,
-	[TOMTOM_A_RX_BUCK_BIAS1] = TOMTOM_A_RX_BUCK_BIAS1__POR,
-	[TOMTOM_A_RX_BUCK_BIAS2] = TOMTOM_A_RX_BUCK_BIAS2__POR,
-	[TOMTOM_A_RX_LINE_COM] = TOMTOM_A_RX_LINE_COM__POR,
-	[TOMTOM_A_RX_LINE_CNP_EN] = TOMTOM_A_RX_LINE_CNP_EN__POR,
-	[TOMTOM_A_RX_LINE_CNP_WG_CTL] = TOMTOM_A_RX_LINE_CNP_WG_CTL__POR,
-	[TOMTOM_A_RX_LINE_CNP_WG_TIME] = TOMTOM_A_RX_LINE_CNP_WG_TIME__POR,
-	[TOMTOM_A_RX_LINE_1_GAIN] = TOMTOM_A_RX_LINE_1_GAIN__POR,
-	[TOMTOM_A_RX_LINE_1_TEST] = TOMTOM_A_RX_LINE_1_TEST__POR,
-	[TOMTOM_A_RX_LINE_1_DAC_CTL] = TOMTOM_A_RX_LINE_1_DAC_CTL__POR,
-	[TOMTOM_A_RX_LINE_1_STATUS] = TOMTOM_A_RX_LINE_1_STATUS__POR,
-	[TOMTOM_A_RX_LINE_2_GAIN] = TOMTOM_A_RX_LINE_2_GAIN__POR,
-	[TOMTOM_A_RX_LINE_2_TEST] = TOMTOM_A_RX_LINE_2_TEST__POR,
-	[TOMTOM_A_RX_LINE_2_DAC_CTL] = TOMTOM_A_RX_LINE_2_DAC_CTL__POR,
-	[TOMTOM_A_RX_LINE_2_STATUS] = TOMTOM_A_RX_LINE_2_STATUS__POR,
-	[TOMTOM_A_RX_LINE_3_GAIN] = TOMTOM_A_RX_LINE_3_GAIN__POR,
-	[TOMTOM_A_RX_LINE_3_TEST] = TOMTOM_A_RX_LINE_3_TEST__POR,
-	[TOMTOM_A_RX_LINE_3_DAC_CTL] = TOMTOM_A_RX_LINE_3_DAC_CTL__POR,
-	[TOMTOM_A_RX_LINE_3_STATUS] = TOMTOM_A_RX_LINE_3_STATUS__POR,
-	[TOMTOM_A_RX_LINE_4_GAIN] = TOMTOM_A_RX_LINE_4_GAIN__POR,
-	[TOMTOM_A_RX_LINE_4_TEST] = TOMTOM_A_RX_LINE_4_TEST__POR,
-	[TOMTOM_A_RX_LINE_4_DAC_CTL] = TOMTOM_A_RX_LINE_4_DAC_CTL__POR,
-	[TOMTOM_A_RX_LINE_4_STATUS] = TOMTOM_A_RX_LINE_4_STATUS__POR,
-	[TOMTOM_A_RX_LINE_CNP_DBG] = TOMTOM_A_RX_LINE_CNP_DBG__POR,
-	[TOMTOM_A_SPKR_DRV1_EN] = TOMTOM_A_SPKR_DRV1_EN__POR,
-	[TOMTOM_A_SPKR_DRV1_GAIN] = TOMTOM_A_SPKR_DRV1_GAIN__POR,
-	[TOMTOM_A_SPKR_DRV1_DAC_CTL] = TOMTOM_A_SPKR_DRV1_DAC_CTL__POR,
-	[TOMTOM_A_SPKR_DRV1_OCP_CTL] = TOMTOM_A_SPKR_DRV1_OCP_CTL__POR,
-	[TOMTOM_A_SPKR_DRV1_CLIP_DET] = TOMTOM_A_SPKR_DRV1_CLIP_DET__POR,
-	[TOMTOM_A_SPKR_DRV1_IEC] = TOMTOM_A_SPKR_DRV1_IEC__POR,
-	[TOMTOM_A_SPKR_DRV1_DBG_DAC] = TOMTOM_A_SPKR_DRV1_DBG_DAC__POR,
-	[TOMTOM_A_SPKR_DRV1_DBG_PA] = TOMTOM_A_SPKR_DRV1_DBG_PA__POR,
-	[TOMTOM_A_SPKR_DRV1_DBG_PWRSTG] = TOMTOM_A_SPKR_DRV1_DBG_PWRSTG__POR,
-	[TOMTOM_A_SPKR_DRV1_BIAS_LDO] = TOMTOM_A_SPKR_DRV1_BIAS_LDO__POR,
-	[TOMTOM_A_SPKR_DRV1_BIAS_INT] = TOMTOM_A_SPKR_DRV1_BIAS_INT__POR,
-	[TOMTOM_A_SPKR_DRV1_BIAS_PA] = TOMTOM_A_SPKR_DRV1_BIAS_PA__POR,
-	[TOMTOM_A_SPKR_DRV1_STATUS_OCP] = TOMTOM_A_SPKR_DRV1_STATUS_OCP__POR,
-	[TOMTOM_A_SPKR_DRV1_STATUS_PA] = TOMTOM_A_SPKR_DRV1_STATUS_PA__POR,
-	[TOMTOM_A_SPKR1_PROT_EN] = TOMTOM_A_SPKR1_PROT_EN__POR,
-	[TOMTOM_A_SPKR1_PROT_ADC_TEST_EN] =
-				TOMTOM_A_SPKR1_PROT_ADC_TEST_EN__POR,
-	[TOMTOM_A_SPKR1_PROT_ATEST] = TOMTOM_A_SPKR1_PROT_ATEST__POR,
-	[TOMTOM_A_SPKR1_PROT_LDO_CTRL] = TOMTOM_A_SPKR1_PROT_LDO_CTRL__POR,
-	[TOMTOM_A_SPKR1_PROT_ISENSE_CTRL] =
-				TOMTOM_A_SPKR1_PROT_ISENSE_CTRL__POR,
-	[TOMTOM_A_SPKR1_PROT_VSENSE_CTRL] =
-				TOMTOM_A_SPKR1_PROT_VSENSE_CTRL__POR,
-	[TOMTOM_A_SPKR2_PROT_EN] = TOMTOM_A_SPKR2_PROT_EN__POR,
-	[TOMTOM_A_SPKR2_PROT_ADC_TEST_EN] =
-				TOMTOM_A_SPKR2_PROT_ADC_TEST_EN__POR,
-	[TOMTOM_A_SPKR2_PROT_ATEST] = TOMTOM_A_SPKR2_PROT_ATEST__POR,
-	[TOMTOM_A_SPKR2_PROT_LDO_CTRL] = TOMTOM_A_SPKR2_PROT_LDO_CTRL__POR,
-	[TOMTOM_A_SPKR2_PROT_ISENSE_CTRL] =
-				TOMTOM_A_SPKR2_PROT_ISENSE_CTRL__POR,
-	[TOMTOM_A_SPKR2_PROT_VSENSE_CTRL] =
-				TOMTOM_A_SPKR2_PROT_VSENSE_CTRL__POR,
-	[TOMTOM_A_MBHC_HPH] = TOMTOM_A_MBHC_HPH__POR,
-	[TOMTOM_A_CDC_ANC1_B1_CTL] = TOMTOM_A_CDC_ANC1_B1_CTL__POR,
-	[TOMTOM_A_CDC_ANC2_B1_CTL] = TOMTOM_A_CDC_ANC2_B1_CTL__POR,
-	[TOMTOM_A_CDC_ANC1_SHIFT] = TOMTOM_A_CDC_ANC1_SHIFT__POR,
-	[TOMTOM_A_CDC_ANC2_SHIFT] = TOMTOM_A_CDC_ANC2_SHIFT__POR,
-	[TOMTOM_A_CDC_ANC1_IIR_B1_CTL] = TOMTOM_A_CDC_ANC1_IIR_B1_CTL__POR,
-	[TOMTOM_A_CDC_ANC2_IIR_B1_CTL] = TOMTOM_A_CDC_ANC2_IIR_B1_CTL__POR,
-	[TOMTOM_A_CDC_ANC1_IIR_B2_CTL] = TOMTOM_A_CDC_ANC1_IIR_B2_CTL__POR,
-	[TOMTOM_A_CDC_ANC2_IIR_B2_CTL] = TOMTOM_A_CDC_ANC2_IIR_B2_CTL__POR,
-	[TOMTOM_A_CDC_ANC1_IIR_B3_CTL] = TOMTOM_A_CDC_ANC1_IIR_B3_CTL__POR,
-	[TOMTOM_A_CDC_ANC2_IIR_B3_CTL] = TOMTOM_A_CDC_ANC2_IIR_B3_CTL__POR,
-	[TOMTOM_A_CDC_ANC1_LPF_B1_CTL] = TOMTOM_A_CDC_ANC1_LPF_B1_CTL__POR,
-	[TOMTOM_A_CDC_ANC2_LPF_B1_CTL] = TOMTOM_A_CDC_ANC2_LPF_B1_CTL__POR,
-	[TOMTOM_A_CDC_ANC1_LPF_B2_CTL] = TOMTOM_A_CDC_ANC1_LPF_B2_CTL__POR,
-	[TOMTOM_A_CDC_ANC2_LPF_B2_CTL] = TOMTOM_A_CDC_ANC2_LPF_B2_CTL__POR,
-	[TOMTOM_A_CDC_ANC1_SPARE] = TOMTOM_A_CDC_ANC1_SPARE__POR,
-	[TOMTOM_A_CDC_ANC2_SPARE] = TOMTOM_A_CDC_ANC2_SPARE__POR,
-	[TOMTOM_A_CDC_ANC1_SMLPF_CTL] = TOMTOM_A_CDC_ANC1_SMLPF_CTL__POR,
-	[TOMTOM_A_CDC_ANC2_SMLPF_CTL] = TOMTOM_A_CDC_ANC2_SMLPF_CTL__POR,
-	[TOMTOM_A_CDC_ANC1_DCFLT_CTL] = TOMTOM_A_CDC_ANC1_DCFLT_CTL__POR,
-	[TOMTOM_A_CDC_ANC2_DCFLT_CTL] = TOMTOM_A_CDC_ANC2_DCFLT_CTL__POR,
-	[TOMTOM_A_CDC_ANC1_GAIN_CTL] = TOMTOM_A_CDC_ANC1_GAIN_CTL__POR,
-	[TOMTOM_A_CDC_ANC2_GAIN_CTL] = TOMTOM_A_CDC_ANC2_GAIN_CTL__POR,
-	[TOMTOM_A_CDC_ANC1_B2_CTL] = TOMTOM_A_CDC_ANC1_B2_CTL__POR,
-	[TOMTOM_A_CDC_ANC2_B2_CTL] = TOMTOM_A_CDC_ANC2_B2_CTL__POR,
-	[TOMTOM_A_CDC_TX1_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX1_VOL_CTL_TIMER__POR,
-	[TOMTOM_A_CDC_TX2_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX2_VOL_CTL_TIMER__POR,
-	[TOMTOM_A_CDC_TX3_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX3_VOL_CTL_TIMER__POR,
-	[TOMTOM_A_CDC_TX4_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX4_VOL_CTL_TIMER__POR,
-	[TOMTOM_A_CDC_TX5_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX5_VOL_CTL_TIMER__POR,
-	[TOMTOM_A_CDC_TX6_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX6_VOL_CTL_TIMER__POR,
-	[TOMTOM_A_CDC_TX7_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX7_VOL_CTL_TIMER__POR,
-	[TOMTOM_A_CDC_TX8_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX8_VOL_CTL_TIMER__POR,
-	[TOMTOM_A_CDC_TX9_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX9_VOL_CTL_TIMER__POR,
-	[TOMTOM_A_CDC_TX10_VOL_CTL_TIMER] =
-				TOMTOM_A_CDC_TX10_VOL_CTL_TIMER__POR,
-	[TOMTOM_A_CDC_TX1_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX1_VOL_CTL_GAIN__POR,
-	[TOMTOM_A_CDC_TX2_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX2_VOL_CTL_GAIN__POR,
-	[TOMTOM_A_CDC_TX3_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX3_VOL_CTL_GAIN__POR,
-	[TOMTOM_A_CDC_TX4_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX4_VOL_CTL_GAIN__POR,
-	[TOMTOM_A_CDC_TX5_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX5_VOL_CTL_GAIN__POR,
-	[TOMTOM_A_CDC_TX6_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX6_VOL_CTL_GAIN__POR,
-	[TOMTOM_A_CDC_TX7_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX7_VOL_CTL_GAIN__POR,
-	[TOMTOM_A_CDC_TX8_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX8_VOL_CTL_GAIN__POR,
-	[TOMTOM_A_CDC_TX9_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX9_VOL_CTL_GAIN__POR,
-	[TOMTOM_A_CDC_TX10_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX10_VOL_CTL_GAIN__POR,
-	[TOMTOM_A_CDC_TX1_VOL_CTL_CFG] = TOMTOM_A_CDC_TX1_VOL_CTL_CFG__POR,
-	[TOMTOM_A_CDC_TX2_VOL_CTL_CFG] = TOMTOM_A_CDC_TX2_VOL_CTL_CFG__POR,
-	[TOMTOM_A_CDC_TX3_VOL_CTL_CFG] = TOMTOM_A_CDC_TX3_VOL_CTL_CFG__POR,
-	[TOMTOM_A_CDC_TX4_VOL_CTL_CFG] = TOMTOM_A_CDC_TX4_VOL_CTL_CFG__POR,
-	[TOMTOM_A_CDC_TX5_VOL_CTL_CFG] = TOMTOM_A_CDC_TX5_VOL_CTL_CFG__POR,
-	[TOMTOM_A_CDC_TX6_VOL_CTL_CFG] = TOMTOM_A_CDC_TX6_VOL_CTL_CFG__POR,
-	[TOMTOM_A_CDC_TX7_VOL_CTL_CFG] = TOMTOM_A_CDC_TX7_VOL_CTL_CFG__POR,
-	[TOMTOM_A_CDC_TX8_VOL_CTL_CFG] = TOMTOM_A_CDC_TX8_VOL_CTL_CFG__POR,
-	[TOMTOM_A_CDC_TX9_VOL_CTL_CFG] = TOMTOM_A_CDC_TX9_VOL_CTL_CFG__POR,
-	[TOMTOM_A_CDC_TX10_VOL_CTL_CFG] = TOMTOM_A_CDC_TX10_VOL_CTL_CFG__POR,
-	[TOMTOM_A_CDC_TX1_MUX_CTL] = TOMTOM_A_CDC_TX1_MUX_CTL__POR,
-	[TOMTOM_A_CDC_TX2_MUX_CTL] = TOMTOM_A_CDC_TX2_MUX_CTL__POR,
-	[TOMTOM_A_CDC_TX3_MUX_CTL] = TOMTOM_A_CDC_TX3_MUX_CTL__POR,
-	[TOMTOM_A_CDC_TX4_MUX_CTL] = TOMTOM_A_CDC_TX4_MUX_CTL__POR,
-	[TOMTOM_A_CDC_TX5_MUX_CTL] = TOMTOM_A_CDC_TX5_MUX_CTL__POR,
-	[TOMTOM_A_CDC_TX6_MUX_CTL] = TOMTOM_A_CDC_TX6_MUX_CTL__POR,
-	[TOMTOM_A_CDC_TX7_MUX_CTL] = TOMTOM_A_CDC_TX7_MUX_CTL__POR,
-	[TOMTOM_A_CDC_TX8_MUX_CTL] = TOMTOM_A_CDC_TX8_MUX_CTL__POR,
-	[TOMTOM_A_CDC_TX9_MUX_CTL] = TOMTOM_A_CDC_TX9_MUX_CTL__POR,
-	[TOMTOM_A_CDC_TX10_MUX_CTL] = TOMTOM_A_CDC_TX10_MUX_CTL__POR,
-	[TOMTOM_A_CDC_TX1_CLK_FS_CTL] = TOMTOM_A_CDC_TX1_CLK_FS_CTL__POR,
-	[TOMTOM_A_CDC_TX2_CLK_FS_CTL] = TOMTOM_A_CDC_TX2_CLK_FS_CTL__POR,
-	[TOMTOM_A_CDC_TX3_CLK_FS_CTL] = TOMTOM_A_CDC_TX3_CLK_FS_CTL__POR,
-	[TOMTOM_A_CDC_TX4_CLK_FS_CTL] = TOMTOM_A_CDC_TX4_CLK_FS_CTL__POR,
-	[TOMTOM_A_CDC_TX5_CLK_FS_CTL] = TOMTOM_A_CDC_TX5_CLK_FS_CTL__POR,
-	[TOMTOM_A_CDC_TX6_CLK_FS_CTL] = TOMTOM_A_CDC_TX6_CLK_FS_CTL__POR,
-	[TOMTOM_A_CDC_TX7_CLK_FS_CTL] = TOMTOM_A_CDC_TX7_CLK_FS_CTL__POR,
-	[TOMTOM_A_CDC_TX8_CLK_FS_CTL] = TOMTOM_A_CDC_TX8_CLK_FS_CTL__POR,
-	[TOMTOM_A_CDC_TX9_CLK_FS_CTL] = TOMTOM_A_CDC_TX9_CLK_FS_CTL__POR,
-	[TOMTOM_A_CDC_TX10_CLK_FS_CTL] = TOMTOM_A_CDC_TX10_CLK_FS_CTL__POR,
-	[TOMTOM_A_CDC_TX1_DMIC_CTL] = TOMTOM_A_CDC_TX1_DMIC_CTL__POR,
-	[TOMTOM_A_CDC_TX2_DMIC_CTL] = TOMTOM_A_CDC_TX2_DMIC_CTL__POR,
-	[TOMTOM_A_CDC_TX3_DMIC_CTL] = TOMTOM_A_CDC_TX3_DMIC_CTL__POR,
-	[TOMTOM_A_CDC_TX4_DMIC_CTL] = TOMTOM_A_CDC_TX4_DMIC_CTL__POR,
-	[TOMTOM_A_CDC_TX5_DMIC_CTL] = TOMTOM_A_CDC_TX5_DMIC_CTL__POR,
-	[TOMTOM_A_CDC_TX6_DMIC_CTL] = TOMTOM_A_CDC_TX6_DMIC_CTL__POR,
-	[TOMTOM_A_CDC_TX7_DMIC_CTL] = TOMTOM_A_CDC_TX7_DMIC_CTL__POR,
-	[TOMTOM_A_CDC_TX8_DMIC_CTL] = TOMTOM_A_CDC_TX8_DMIC_CTL__POR,
-	[TOMTOM_A_CDC_TX9_DMIC_CTL] = TOMTOM_A_CDC_TX9_DMIC_CTL__POR,
-	[TOMTOM_A_CDC_TX10_DMIC_CTL] = TOMTOM_A_CDC_TX10_DMIC_CTL__POR,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_VAL0] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL0__POR,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_VAL1] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL1__POR,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_VAL2] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL2__POR,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_VAL3] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL3__POR,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_VAL4] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL4__POR,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_VAL5] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL5__POR,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_VAL6] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL6__POR,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_VAL7] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL7__POR,
-	[TOMTOM_A_CDC_DEBUG_B1_CTL] = TOMTOM_A_CDC_DEBUG_B1_CTL__POR,
-	[TOMTOM_A_CDC_DEBUG_B2_CTL] = TOMTOM_A_CDC_DEBUG_B2_CTL__POR,
-	[TOMTOM_A_CDC_DEBUG_B3_CTL] = TOMTOM_A_CDC_DEBUG_B3_CTL__POR,
-	[TOMTOM_A_CDC_DEBUG_B4_CTL] = TOMTOM_A_CDC_DEBUG_B4_CTL__POR,
-	[TOMTOM_A_CDC_DEBUG_B5_CTL] = TOMTOM_A_CDC_DEBUG_B5_CTL__POR,
-	[TOMTOM_A_CDC_DEBUG_B6_CTL] = TOMTOM_A_CDC_DEBUG_B6_CTL__POR,
-	[TOMTOM_A_CDC_DEBUG_B7_CTL] = TOMTOM_A_CDC_DEBUG_B7_CTL__POR,
-	[TOMTOM_A_CDC_SRC1_PDA_CFG] = TOMTOM_A_CDC_SRC1_PDA_CFG__POR,
-	[TOMTOM_A_CDC_SRC2_PDA_CFG] = TOMTOM_A_CDC_SRC2_PDA_CFG__POR,
-	[TOMTOM_A_CDC_SRC1_FS_CTL] = TOMTOM_A_CDC_SRC1_FS_CTL__POR,
-	[TOMTOM_A_CDC_SRC2_FS_CTL] = TOMTOM_A_CDC_SRC2_FS_CTL__POR,
-	[TOMTOM_A_CDC_RX1_B1_CTL] = TOMTOM_A_CDC_RX1_B1_CTL__POR,
-	[TOMTOM_A_CDC_RX2_B1_CTL] = TOMTOM_A_CDC_RX2_B1_CTL__POR,
-	[TOMTOM_A_CDC_RX3_B1_CTL] = TOMTOM_A_CDC_RX3_B1_CTL__POR,
-	[TOMTOM_A_CDC_RX4_B1_CTL] = TOMTOM_A_CDC_RX4_B1_CTL__POR,
-	[TOMTOM_A_CDC_RX5_B1_CTL] = TOMTOM_A_CDC_RX5_B1_CTL__POR,
-	[TOMTOM_A_CDC_RX6_B1_CTL] = TOMTOM_A_CDC_RX6_B1_CTL__POR,
-	[TOMTOM_A_CDC_RX7_B1_CTL] = TOMTOM_A_CDC_RX7_B1_CTL__POR,
-	[TOMTOM_A_CDC_RX1_B2_CTL] = TOMTOM_A_CDC_RX1_B2_CTL__POR,
-	[TOMTOM_A_CDC_RX2_B2_CTL] = TOMTOM_A_CDC_RX2_B2_CTL__POR,
-	[TOMTOM_A_CDC_RX3_B2_CTL] = TOMTOM_A_CDC_RX3_B2_CTL__POR,
-	[TOMTOM_A_CDC_RX4_B2_CTL] = TOMTOM_A_CDC_RX4_B2_CTL__POR,
-	[TOMTOM_A_CDC_RX5_B2_CTL] = TOMTOM_A_CDC_RX5_B2_CTL__POR,
-	[TOMTOM_A_CDC_RX6_B2_CTL] = TOMTOM_A_CDC_RX6_B2_CTL__POR,
-	[TOMTOM_A_CDC_RX7_B2_CTL] = TOMTOM_A_CDC_RX7_B2_CTL__POR,
-	[TOMTOM_A_CDC_RX1_B3_CTL] = TOMTOM_A_CDC_RX1_B3_CTL__POR,
-	[TOMTOM_A_CDC_RX2_B3_CTL] = TOMTOM_A_CDC_RX2_B3_CTL__POR,
-	[TOMTOM_A_CDC_RX3_B3_CTL] = TOMTOM_A_CDC_RX3_B3_CTL__POR,
-	[TOMTOM_A_CDC_RX4_B3_CTL] = TOMTOM_A_CDC_RX4_B3_CTL__POR,
-	[TOMTOM_A_CDC_RX5_B3_CTL] = TOMTOM_A_CDC_RX5_B3_CTL__POR,
-	[TOMTOM_A_CDC_RX6_B3_CTL] = TOMTOM_A_CDC_RX6_B3_CTL__POR,
-	[TOMTOM_A_CDC_RX7_B3_CTL] = TOMTOM_A_CDC_RX7_B3_CTL__POR,
-	[TOMTOM_A_CDC_RX1_B4_CTL] = TOMTOM_A_CDC_RX1_B4_CTL__POR,
-	[TOMTOM_A_CDC_RX2_B4_CTL] = TOMTOM_A_CDC_RX2_B4_CTL__POR,
-	[TOMTOM_A_CDC_RX3_B4_CTL] = TOMTOM_A_CDC_RX3_B4_CTL__POR,
-	[TOMTOM_A_CDC_RX4_B4_CTL] = TOMTOM_A_CDC_RX4_B4_CTL__POR,
-	[TOMTOM_A_CDC_RX5_B4_CTL] = TOMTOM_A_CDC_RX5_B4_CTL__POR,
-	[TOMTOM_A_CDC_RX6_B4_CTL] = TOMTOM_A_CDC_RX6_B4_CTL__POR,
-	[TOMTOM_A_CDC_RX7_B4_CTL] = TOMTOM_A_CDC_RX7_B4_CTL__POR,
-	[TOMTOM_A_CDC_RX1_B5_CTL] = TOMTOM_A_CDC_RX1_B5_CTL__POR,
-	[TOMTOM_A_CDC_RX2_B5_CTL] = TOMTOM_A_CDC_RX2_B5_CTL__POR,
-	[TOMTOM_A_CDC_RX3_B5_CTL] = TOMTOM_A_CDC_RX3_B5_CTL__POR,
-	[TOMTOM_A_CDC_RX4_B5_CTL] = TOMTOM_A_CDC_RX4_B5_CTL__POR,
-	[TOMTOM_A_CDC_RX5_B5_CTL] = TOMTOM_A_CDC_RX5_B5_CTL__POR,
-	[TOMTOM_A_CDC_RX6_B5_CTL] = TOMTOM_A_CDC_RX6_B5_CTL__POR,
-	[TOMTOM_A_CDC_RX7_B5_CTL] = TOMTOM_A_CDC_RX7_B5_CTL__POR,
-	[TOMTOM_A_CDC_RX1_B6_CTL] = TOMTOM_A_CDC_RX1_B6_CTL__POR,
-	[TOMTOM_A_CDC_RX2_B6_CTL] = TOMTOM_A_CDC_RX2_B6_CTL__POR,
-	[TOMTOM_A_CDC_RX3_B6_CTL] = TOMTOM_A_CDC_RX3_B6_CTL__POR,
-	[TOMTOM_A_CDC_RX4_B6_CTL] = TOMTOM_A_CDC_RX4_B6_CTL__POR,
-	[TOMTOM_A_CDC_RX5_B6_CTL] = TOMTOM_A_CDC_RX5_B6_CTL__POR,
-	[TOMTOM_A_CDC_RX6_B6_CTL] = TOMTOM_A_CDC_RX6_B6_CTL__POR,
-	[TOMTOM_A_CDC_RX7_B6_CTL] = TOMTOM_A_CDC_RX7_B6_CTL__POR,
-	[TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL] =
-				TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL__POR,
-	[TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL] =
-				TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL__POR,
-	[TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL] =
-				TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL__POR,
-	[TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL] =
-				TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL__POR,
-	[TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL] =
-				TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL__POR,
-	[TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL] =
-				TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL__POR,
-	[TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL] =
-				TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL__POR,
-	[TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL] =
-				TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL__POR,
-	[TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL] =
-				TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL__POR,
-	[TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL] =
-				TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL__POR,
-	[TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL] =
-				TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL__POR,
-	[TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL] =
-				TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL__POR,
-	[TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL] =
-				TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL__POR,
-	[TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL] =
-				TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL__POR,
-	[TOMTOM_A_CDC_VBAT_CFG] = TOMTOM_A_CDC_VBAT_CFG__POR,
-	[TOMTOM_A_CDC_VBAT_ADC_CAL1] = TOMTOM_A_CDC_VBAT_ADC_CAL1__POR,
-	[TOMTOM_A_CDC_VBAT_ADC_CAL2] = TOMTOM_A_CDC_VBAT_ADC_CAL2__POR,
-	[TOMTOM_A_CDC_VBAT_ADC_CAL3] = TOMTOM_A_CDC_VBAT_ADC_CAL3__POR,
-	[TOMTOM_A_CDC_VBAT_PK_EST1] = TOMTOM_A_CDC_VBAT_PK_EST1__POR,
-	[TOMTOM_A_CDC_VBAT_PK_EST2] = TOMTOM_A_CDC_VBAT_PK_EST2__POR,
-	[TOMTOM_A_CDC_VBAT_PK_EST3] = TOMTOM_A_CDC_VBAT_PK_EST3__POR,
-	[TOMTOM_A_CDC_VBAT_RF_PROC1] = TOMTOM_A_CDC_VBAT_RF_PROC1__POR,
-	[TOMTOM_A_CDC_VBAT_RF_PROC2] = TOMTOM_A_CDC_VBAT_RF_PROC2__POR,
-	[TOMTOM_A_CDC_VBAT_TAC1] = TOMTOM_A_CDC_VBAT_TAC1__POR,
-	[TOMTOM_A_CDC_VBAT_TAC2] = TOMTOM_A_CDC_VBAT_TAC2__POR,
-	[TOMTOM_A_CDC_VBAT_TAC3] = TOMTOM_A_CDC_VBAT_TAC3__POR,
-	[TOMTOM_A_CDC_VBAT_TAC4] = TOMTOM_A_CDC_VBAT_TAC4__POR,
-	[TOMTOM_A_CDC_VBAT_GAIN_UPD1] = TOMTOM_A_CDC_VBAT_GAIN_UPD1__POR,
-	[TOMTOM_A_CDC_VBAT_GAIN_UPD2] = TOMTOM_A_CDC_VBAT_GAIN_UPD2__POR,
-	[TOMTOM_A_CDC_VBAT_GAIN_UPD3] = TOMTOM_A_CDC_VBAT_GAIN_UPD3__POR,
-	[TOMTOM_A_CDC_VBAT_GAIN_UPD4] = TOMTOM_A_CDC_VBAT_GAIN_UPD4__POR,
-	[TOMTOM_A_CDC_VBAT_DEBUG1] = TOMTOM_A_CDC_VBAT_DEBUG1__POR,
-	[TOMTOM_A_CDC_VBAT_GAIN_UPD_MON] = TOMTOM_A_CDC_VBAT_GAIN_UPD_MON__POR,
-	[TOMTOM_A_CDC_VBAT_GAIN_MON_VAL] = TOMTOM_A_CDC_VBAT_GAIN_MON_VAL__POR,
-	[TOMTOM_A_CDC_CLK_ANC_RESET_CTL] = TOMTOM_A_CDC_CLK_ANC_RESET_CTL__POR,
-	[TOMTOM_A_CDC_CLK_RX_RESET_CTL] = TOMTOM_A_CDC_CLK_RX_RESET_CTL__POR,
-	[TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL] =
-				TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL__POR,
-	[TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL] =
-				TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL__POR,
-	[TOMTOM_A_CDC_CLK_RX_I2S_CTL] = TOMTOM_A_CDC_CLK_RX_I2S_CTL__POR,
-	[TOMTOM_A_CDC_CLK_TX_I2S_CTL] = TOMTOM_A_CDC_CLK_TX_I2S_CTL__POR,
-	[TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL] =
-				TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL__POR,
-	[TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL] =
-				TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL__POR,
-	[TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL] =
-				TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL__POR,
-	[TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL] =
-				TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL__POR,
-	[TOMTOM_A_CDC_CLK_OTHR_CTL] = TOMTOM_A_CDC_CLK_OTHR_CTL__POR,
-	[TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL] =
-				TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL__POR,
-	[TOMTOM_A_CDC_CLK_RX_B1_CTL] = TOMTOM_A_CDC_CLK_RX_B1_CTL__POR,
-	[TOMTOM_A_CDC_CLK_RX_B2_CTL] = TOMTOM_A_CDC_CLK_RX_B2_CTL__POR,
-	[TOMTOM_A_CDC_CLK_MCLK_CTL] = TOMTOM_A_CDC_CLK_MCLK_CTL__POR,
-	[TOMTOM_A_CDC_CLK_PDM_CTL] = TOMTOM_A_CDC_CLK_PDM_CTL__POR,
-	[TOMTOM_A_CDC_CLK_SD_CTL] = TOMTOM_A_CDC_CLK_SD_CTL__POR,
-	[TOMTOM_A_CDC_CLSH_B1_CTL] = TOMTOM_A_CDC_CLSH_B1_CTL__POR,
-	[TOMTOM_A_CDC_CLSH_B2_CTL] = TOMTOM_A_CDC_CLSH_B2_CTL__POR,
-	[TOMTOM_A_CDC_CLSH_B3_CTL] = TOMTOM_A_CDC_CLSH_B3_CTL__POR,
-	[TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS] =
-				TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS__POR,
-	[TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD] =
-				TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD__POR,
-	[TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD] =
-				TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD__POR,
-	[TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD] =
-				TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD__POR,
-	[TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD] =
-				TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD__POR,
-	[TOMTOM_A_CDC_CLSH_K_ADDR] = TOMTOM_A_CDC_CLSH_K_ADDR__POR,
-	[TOMTOM_A_CDC_CLSH_K_DATA] = TOMTOM_A_CDC_CLSH_K_DATA__POR,
-	[TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L] =
-				TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L__POR,
-	[TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U] =
-				TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U__POR,
-	[TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L] =
-				TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L__POR,
-	[TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U] =
-				TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U__POR,
-	[TOMTOM_A_CDC_CLSH_V_PA_HD_EAR] = TOMTOM_A_CDC_CLSH_V_PA_HD_EAR__POR,
-	[TOMTOM_A_CDC_CLSH_V_PA_HD_HPH] = TOMTOM_A_CDC_CLSH_V_PA_HD_HPH__POR,
-	[TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR] = TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR__POR,
-	[TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH] = TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH__POR,
-	[TOMTOM_A_CDC_IIR1_GAIN_B1_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B1_CTL__POR,
-	[TOMTOM_A_CDC_IIR2_GAIN_B1_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B1_CTL__POR,
-	[TOMTOM_A_CDC_IIR1_GAIN_B2_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B2_CTL__POR,
-	[TOMTOM_A_CDC_IIR2_GAIN_B2_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B2_CTL__POR,
-	[TOMTOM_A_CDC_IIR1_GAIN_B3_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B3_CTL__POR,
-	[TOMTOM_A_CDC_IIR2_GAIN_B3_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B3_CTL__POR,
-	[TOMTOM_A_CDC_IIR1_GAIN_B4_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B4_CTL__POR,
-	[TOMTOM_A_CDC_IIR2_GAIN_B4_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B4_CTL__POR,
-	[TOMTOM_A_CDC_IIR1_GAIN_B5_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B5_CTL__POR,
-	[TOMTOM_A_CDC_IIR2_GAIN_B5_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B5_CTL__POR,
-	[TOMTOM_A_CDC_IIR1_GAIN_B6_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B6_CTL__POR,
-	[TOMTOM_A_CDC_IIR2_GAIN_B6_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B6_CTL__POR,
-	[TOMTOM_A_CDC_IIR1_GAIN_B7_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B7_CTL__POR,
-	[TOMTOM_A_CDC_IIR2_GAIN_B7_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B7_CTL__POR,
-	[TOMTOM_A_CDC_IIR1_GAIN_B8_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B8_CTL__POR,
-	[TOMTOM_A_CDC_IIR2_GAIN_B8_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B8_CTL__POR,
-	[TOMTOM_A_CDC_IIR1_CTL] = TOMTOM_A_CDC_IIR1_CTL__POR,
-	[TOMTOM_A_CDC_IIR2_CTL] = TOMTOM_A_CDC_IIR2_CTL__POR,
-	[TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL] =
-				TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL__POR,
-	[TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL] =
-				TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL__POR,
-	[TOMTOM_A_CDC_IIR1_COEF_B1_CTL] = TOMTOM_A_CDC_IIR1_COEF_B1_CTL__POR,
-	[TOMTOM_A_CDC_IIR2_COEF_B1_CTL] = TOMTOM_A_CDC_IIR2_COEF_B1_CTL__POR,
-	[TOMTOM_A_CDC_IIR1_COEF_B2_CTL] = TOMTOM_A_CDC_IIR1_COEF_B2_CTL__POR,
-	[TOMTOM_A_CDC_IIR2_COEF_B2_CTL] = TOMTOM_A_CDC_IIR2_COEF_B2_CTL__POR,
-	[TOMTOM_A_CDC_TOP_GAIN_UPDATE] = TOMTOM_A_CDC_TOP_GAIN_UPDATE__POR,
-	[TOMTOM_A_CDC_PA_RAMP_B1_CTL] = TOMTOM_A_CDC_PA_RAMP_B1_CTL__POR,
-	[TOMTOM_A_CDC_PA_RAMP_B2_CTL] = TOMTOM_A_CDC_PA_RAMP_B2_CTL__POR,
-	[TOMTOM_A_CDC_PA_RAMP_B3_CTL] = TOMTOM_A_CDC_PA_RAMP_B3_CTL__POR,
-	[TOMTOM_A_CDC_PA_RAMP_B4_CTL] = TOMTOM_A_CDC_PA_RAMP_B4_CTL__POR,
-	[TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL] =
-				TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL__POR,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL] =
-				TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL__POR,
-	[TOMTOM_A_CDC_COMP0_B1_CTL] = TOMTOM_A_CDC_COMP0_B1_CTL__POR,
-	[TOMTOM_A_CDC_COMP1_B1_CTL] = TOMTOM_A_CDC_COMP1_B1_CTL__POR,
-	[TOMTOM_A_CDC_COMP2_B1_CTL] = TOMTOM_A_CDC_COMP2_B1_CTL__POR,
-	[TOMTOM_A_CDC_COMP0_B2_CTL] = TOMTOM_A_CDC_COMP0_B2_CTL__POR,
-	[TOMTOM_A_CDC_COMP1_B2_CTL] = TOMTOM_A_CDC_COMP1_B2_CTL__POR,
-	[TOMTOM_A_CDC_COMP2_B2_CTL] = TOMTOM_A_CDC_COMP2_B2_CTL__POR,
-	[TOMTOM_A_CDC_COMP0_B3_CTL] = TOMTOM_A_CDC_COMP0_B3_CTL__POR,
-	[TOMTOM_A_CDC_COMP1_B3_CTL] = TOMTOM_A_CDC_COMP1_B3_CTL__POR,
-	[TOMTOM_A_CDC_COMP2_B3_CTL] = TOMTOM_A_CDC_COMP2_B3_CTL__POR,
-	[TOMTOM_A_CDC_COMP0_B4_CTL] = TOMTOM_A_CDC_COMP0_B4_CTL__POR,
-	[TOMTOM_A_CDC_COMP1_B4_CTL] = TOMTOM_A_CDC_COMP1_B4_CTL__POR,
-	[TOMTOM_A_CDC_COMP2_B4_CTL] = TOMTOM_A_CDC_COMP2_B4_CTL__POR,
-	[TOMTOM_A_CDC_COMP0_B5_CTL] = TOMTOM_A_CDC_COMP0_B5_CTL__POR,
-	[TOMTOM_A_CDC_COMP1_B5_CTL] = TOMTOM_A_CDC_COMP1_B5_CTL__POR,
-	[TOMTOM_A_CDC_COMP2_B5_CTL] = TOMTOM_A_CDC_COMP2_B5_CTL__POR,
-	[TOMTOM_A_CDC_COMP0_B6_CTL] = TOMTOM_A_CDC_COMP0_B6_CTL__POR,
-	[TOMTOM_A_CDC_COMP1_B6_CTL] = TOMTOM_A_CDC_COMP1_B6_CTL__POR,
-	[TOMTOM_A_CDC_COMP2_B6_CTL] = TOMTOM_A_CDC_COMP2_B6_CTL__POR,
-	[TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS] =
-				TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS__POR,
-	[TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS] =
-				TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS__POR,
-	[TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS] =
-				TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS__POR,
-	[TOMTOM_A_CDC_COMP0_FS_CFG] = TOMTOM_A_CDC_COMP0_FS_CFG__POR,
-	[TOMTOM_A_CDC_COMP1_FS_CFG] = TOMTOM_A_CDC_COMP1_FS_CFG__POR,
-	[TOMTOM_A_CDC_COMP2_FS_CFG] = TOMTOM_A_CDC_COMP2_FS_CFG__POR,
-	[TOMTOM_A_CDC_CONN_RX1_B1_CTL] = TOMTOM_A_CDC_CONN_RX1_B1_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX1_B2_CTL] = TOMTOM_A_CDC_CONN_RX1_B2_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX1_B3_CTL] = TOMTOM_A_CDC_CONN_RX1_B3_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX2_B1_CTL] = TOMTOM_A_CDC_CONN_RX2_B1_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX2_B2_CTL] = TOMTOM_A_CDC_CONN_RX2_B2_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX2_B3_CTL] = TOMTOM_A_CDC_CONN_RX2_B3_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX3_B1_CTL] = TOMTOM_A_CDC_CONN_RX3_B1_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX3_B2_CTL] = TOMTOM_A_CDC_CONN_RX3_B2_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX4_B1_CTL] = TOMTOM_A_CDC_CONN_RX4_B1_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX4_B2_CTL] = TOMTOM_A_CDC_CONN_RX4_B2_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX5_B1_CTL] = TOMTOM_A_CDC_CONN_RX5_B1_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX5_B2_CTL] = TOMTOM_A_CDC_CONN_RX5_B2_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX6_B1_CTL] = TOMTOM_A_CDC_CONN_RX6_B1_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX6_B2_CTL] = TOMTOM_A_CDC_CONN_RX6_B2_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX7_B1_CTL] = TOMTOM_A_CDC_CONN_RX7_B1_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX7_B2_CTL] = TOMTOM_A_CDC_CONN_RX7_B2_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX7_B3_CTL] = TOMTOM_A_CDC_CONN_RX7_B3_CTL__POR,
-	[TOMTOM_A_CDC_CONN_ANC_B1_CTL] = TOMTOM_A_CDC_CONN_ANC_B1_CTL__POR,
-	[TOMTOM_A_CDC_CONN_ANC_B2_CTL] = TOMTOM_A_CDC_CONN_ANC_B2_CTL__POR,
-	[TOMTOM_A_CDC_CONN_TX_B1_CTL] = TOMTOM_A_CDC_CONN_TX_B1_CTL__POR,
-	[TOMTOM_A_CDC_CONN_TX_B2_CTL] = TOMTOM_A_CDC_CONN_TX_B2_CTL__POR,
-	[TOMTOM_A_CDC_CONN_TX_B3_CTL] = TOMTOM_A_CDC_CONN_TX_B3_CTL__POR,
-	[TOMTOM_A_CDC_CONN_TX_B4_CTL] = TOMTOM_A_CDC_CONN_TX_B4_CTL__POR,
-	[TOMTOM_A_CDC_CONN_EQ1_B1_CTL] = TOMTOM_A_CDC_CONN_EQ1_B1_CTL__POR,
-	[TOMTOM_A_CDC_CONN_EQ1_B2_CTL] = TOMTOM_A_CDC_CONN_EQ1_B2_CTL__POR,
-	[TOMTOM_A_CDC_CONN_EQ1_B3_CTL] = TOMTOM_A_CDC_CONN_EQ1_B3_CTL__POR,
-	[TOMTOM_A_CDC_CONN_EQ1_B4_CTL] = TOMTOM_A_CDC_CONN_EQ1_B4_CTL__POR,
-	[TOMTOM_A_CDC_CONN_EQ2_B1_CTL] = TOMTOM_A_CDC_CONN_EQ2_B1_CTL__POR,
-	[TOMTOM_A_CDC_CONN_EQ2_B2_CTL] = TOMTOM_A_CDC_CONN_EQ2_B2_CTL__POR,
-	[TOMTOM_A_CDC_CONN_EQ2_B3_CTL] = TOMTOM_A_CDC_CONN_EQ2_B3_CTL__POR,
-	[TOMTOM_A_CDC_CONN_EQ2_B4_CTL] = TOMTOM_A_CDC_CONN_EQ2_B4_CTL__POR,
-	[TOMTOM_A_CDC_CONN_SRC1_B1_CTL] = TOMTOM_A_CDC_CONN_SRC1_B1_CTL__POR,
-	[TOMTOM_A_CDC_CONN_SRC1_B2_CTL] = TOMTOM_A_CDC_CONN_SRC1_B2_CTL__POR,
-	[TOMTOM_A_CDC_CONN_SRC2_B1_CTL] = TOMTOM_A_CDC_CONN_SRC2_B1_CTL__POR,
-	[TOMTOM_A_CDC_CONN_SRC2_B2_CTL] = TOMTOM_A_CDC_CONN_SRC2_B2_CTL__POR,
-	[TOMTOM_A_CDC_CONN_TX_SB_B1_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B1_CTL__POR,
-	[TOMTOM_A_CDC_CONN_TX_SB_B2_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B2_CTL__POR,
-	[TOMTOM_A_CDC_CONN_TX_SB_B3_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B3_CTL__POR,
-	[TOMTOM_A_CDC_CONN_TX_SB_B4_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B4_CTL__POR,
-	[TOMTOM_A_CDC_CONN_TX_SB_B5_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B5_CTL__POR,
-	[TOMTOM_A_CDC_CONN_TX_SB_B6_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B6_CTL__POR,
-	[TOMTOM_A_CDC_CONN_TX_SB_B7_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B7_CTL__POR,
-	[TOMTOM_A_CDC_CONN_TX_SB_B8_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B8_CTL__POR,
-	[TOMTOM_A_CDC_CONN_TX_SB_B9_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B9_CTL__POR,
-	[TOMTOM_A_CDC_CONN_TX_SB_B10_CTL] =
-				TOMTOM_A_CDC_CONN_TX_SB_B10_CTL__POR,
-	[TOMTOM_A_CDC_CONN_TX_SB_B11_CTL] =
-				TOMTOM_A_CDC_CONN_TX_SB_B11_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX_SB_B1_CTL] = TOMTOM_A_CDC_CONN_RX_SB_B1_CTL__POR,
-	[TOMTOM_A_CDC_CONN_RX_SB_B2_CTL] = TOMTOM_A_CDC_CONN_RX_SB_B2_CTL__POR,
-	[TOMTOM_A_CDC_CONN_CLSH_CTL] = TOMTOM_A_CDC_CONN_CLSH_CTL__POR,
-	[TOMTOM_A_CDC_CONN_MISC] = TOMTOM_A_CDC_CONN_MISC__POR,
-	[TOMTOM_A_CDC_CONN_RX8_B1_CTL] = TOMTOM_A_CDC_CONN_RX8_B1_CTL__POR,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL] =
-				TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL__POR,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST] =
-			TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST__POR,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD] =
-			TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD__POR,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS] =
-			TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS__POR,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK] =
-			TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK__POR,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING] =
-			TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING__POR,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL] =
-				TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL__POR,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST] =
-			TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST__POR,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD] =
-			TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD__POR,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS] =
-			TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS__POR,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK] =
-				TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK__POR,
-	[TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING] =
-				TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING__POR,
-	[TOMTOM_A_CDC_MBHC_EN_CTL] = TOMTOM_A_CDC_MBHC_EN_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_FIR_B1_CFG] = TOMTOM_A_CDC_MBHC_FIR_B1_CFG__POR,
-	[TOMTOM_A_CDC_MBHC_FIR_B2_CFG] = TOMTOM_A_CDC_MBHC_FIR_B2_CFG__POR,
-	[TOMTOM_A_CDC_MBHC_TIMER_B1_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B1_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_TIMER_B2_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B2_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_TIMER_B3_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B3_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_TIMER_B4_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B4_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_TIMER_B5_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B5_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_TIMER_B6_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B6_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_B1_STATUS] = TOMTOM_A_CDC_MBHC_B1_STATUS__POR,
-	[TOMTOM_A_CDC_MBHC_B2_STATUS] = TOMTOM_A_CDC_MBHC_B2_STATUS__POR,
-	[TOMTOM_A_CDC_MBHC_B3_STATUS] = TOMTOM_A_CDC_MBHC_B3_STATUS__POR,
-	[TOMTOM_A_CDC_MBHC_B4_STATUS] = TOMTOM_A_CDC_MBHC_B4_STATUS__POR,
-	[TOMTOM_A_CDC_MBHC_B5_STATUS] = TOMTOM_A_CDC_MBHC_B5_STATUS__POR,
-	[TOMTOM_A_CDC_MBHC_B1_CTL] = TOMTOM_A_CDC_MBHC_B1_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_B2_CTL] = TOMTOM_A_CDC_MBHC_B2_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_VOLT_B1_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B1_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_VOLT_B2_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B2_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_VOLT_B3_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B3_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_VOLT_B4_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B4_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_VOLT_B5_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B5_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_VOLT_B6_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B6_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_VOLT_B7_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B7_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_VOLT_B8_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B8_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_VOLT_B9_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B9_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_VOLT_B10_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B10_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_VOLT_B11_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B11_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_VOLT_B12_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B12_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_CLK_CTL] = TOMTOM_A_CDC_MBHC_CLK_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_INT_CTL] = TOMTOM_A_CDC_MBHC_INT_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_DEBUG_CTL] = TOMTOM_A_CDC_MBHC_DEBUG_CTL__POR,
-	[TOMTOM_A_CDC_MBHC_SPARE] = TOMTOM_A_CDC_MBHC_SPARE__POR,
-	[TOMTOM_A_CDC_RX8_B1_CTL] = TOMTOM_A_CDC_RX8_B1_CTL__POR,
-	[TOMTOM_A_CDC_RX8_B2_CTL] = TOMTOM_A_CDC_RX8_B2_CTL__POR,
-	[TOMTOM_A_CDC_RX8_B3_CTL] = TOMTOM_A_CDC_RX8_B3_CTL__POR,
-	[TOMTOM_A_CDC_RX8_B4_CTL] = TOMTOM_A_CDC_RX8_B4_CTL__POR,
-	[TOMTOM_A_CDC_RX8_B5_CTL] = TOMTOM_A_CDC_RX8_B5_CTL__POR,
-	[TOMTOM_A_CDC_RX8_B6_CTL] = TOMTOM_A_CDC_RX8_B6_CTL__POR,
-	[TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL] =
-				TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL__POR,
-	[TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL] =
-				TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL__POR,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0] =
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0__POR,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1] =
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1__POR,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2] =
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2__POR,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3] =
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3__POR,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4] =
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4__POR,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5] =
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5__POR,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6] =
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6__POR,
-	[TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7] =
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7__POR,
-	[TOMTOM_A_CDC_BOOST_MODE_CTL] = TOMTOM_A_CDC_BOOST_MODE_CTL__POR,
-	[TOMTOM_A_CDC_BOOST_THRESHOLD] = TOMTOM_A_CDC_BOOST_THRESHOLD__POR,
-	[TOMTOM_A_CDC_BOOST_TAP_SEL] = TOMTOM_A_CDC_BOOST_TAP_SEL__POR,
-	[TOMTOM_A_CDC_BOOST_HOLD_TIME] = TOMTOM_A_CDC_BOOST_HOLD_TIME__POR,
-	[TOMTOM_A_CDC_BOOST_TRGR_EN] = TOMTOM_A_CDC_BOOST_TRGR_EN__POR,
-};
diff --git a/sound/soc/codecs/wcd9330.c b/sound/soc/codecs/wcd9330.c
deleted file mode 100644
index 4278e36..0000000
--- a/sound/soc/codecs/wcd9330.c
+++ /dev/null
@@ -1,9113 +0,0 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/printk.h>
-#include <linux/ratelimit.h>
-#include <linux/debugfs.h>
-#include <linux/wait.h>
-#include <linux/bitops.h>
-#include <linux/regmap.h>
-#include <linux/mfd/wcd9xxx/core.h>
-#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
-#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
-#include <linux/mfd/wcd9xxx/wcd9330_registers.h>
-#include <linux/mfd/wcd9xxx/pdata.h>
-#include <linux/regulator/consumer.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/tlv.h>
-#include <linux/delay.h>
-#include <linux/pm_runtime.h>
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/clk.h>
-#include "wcd9330.h"
-#include "wcd9xxx-resmgr.h"
-#include "wcd9xxx-common.h"
-#include "wcdcal-hwdep.h"
-#include "wcd_cpe_core.h"
-
-enum {
-	BUS_DOWN,
-	ADC1_TXFE,
-	ADC2_TXFE,
-	ADC3_TXFE,
-	ADC4_TXFE,
-	ADC5_TXFE,
-	ADC6_TXFE,
-	HPH_DELAY,
-};
-
-#define TOMTOM_MAD_SLIMBUS_TX_PORT 12
-#define TOMTOM_MAD_AUDIO_FIRMWARE_PATH "wcd9320/wcd9320_mad_audio.bin"
-#define TOMTOM_VALIDATE_RX_SBPORT_RANGE(port) ((port >= 16) && (port <= 23))
-#define TOMTOM_VALIDATE_TX_SBPORT_RANGE(port) ((port >= 0) && (port <= 15))
-#define TOMTOM_CONVERT_RX_SBPORT_ID(port) (port - 16) /* RX1 port ID = 0 */
-#define TOMTOM_BIT_ADJ_SHIFT_PORT1_6 4
-#define TOMTOM_BIT_ADJ_SHIFT_PORT7_10 5
-
-#define TOMTOM_HPH_PA_SETTLE_COMP_ON 10000
-#define TOMTOM_HPH_PA_SETTLE_COMP_OFF 13000
-#define TOMTOM_HPH_PA_RAMP_DELAY 30000
-
-#define TOMTOM_SVASS_INT_STATUS_RCO_WDOG 0x20
-#define TOMTOM_SVASS_INT_STATUS_WDOG_BITE 0x02
-
-/* Add any SVA IRQs that are to be treated as FATAL */
-#define TOMTOM_CPE_FATAL_IRQS \
-	(TOMTOM_SVASS_INT_STATUS_RCO_WDOG | \
-	 TOMTOM_SVASS_INT_STATUS_WDOG_BITE)
-
-#define DAPM_MICBIAS2_EXTERNAL_STANDALONE "MIC BIAS2 External Standalone"
-
-/* RX_HPH_CNP_WG_TIME increases by 0.24ms */
-#define TOMTOM_WG_TIME_FACTOR_US	240
-
-#define RX8_PATH 8
-#define HPH_PA_ENABLE true
-#define HPH_PA_DISABLE false
-
-#define SLIM_BW_CLK_GEAR_9 6200000
-#define SLIM_BW_UNVOTE 0
-
-static int cpe_debug_mode;
-module_param(cpe_debug_mode, int, 0664);
-MODULE_PARM_DESC(cpe_debug_mode, "boot cpe in debug mode");
-
-static atomic_t kp_tomtom_priv;
-
-static int high_perf_mode;
-module_param(high_perf_mode, int, 0664);
-MODULE_PARM_DESC(high_perf_mode, "enable/disable class AB config for hph");
-
-static struct afe_param_slimbus_slave_port_cfg tomtom_slimbus_slave_port_cfg = {
-	.minor_version = 1,
-	.slimbus_dev_id = AFE_SLIMBUS_DEVICE_1,
-	.slave_dev_pgd_la = 0,
-	.slave_dev_intfdev_la = 0,
-	.bit_width = 16,
-	.data_format = 0,
-	.num_channels = 1
-};
-
-static struct afe_param_cdc_reg_cfg audio_reg_cfg[] = {
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_MAD_MAIN_CTL_1),
-		HW_MAD_AUDIO_ENABLE, 0x1, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_MAD_AUDIO_CTL_3),
-		HW_MAD_AUDIO_SLEEP_TIME, 0xF, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_MAD_AUDIO_CTL_4),
-		HW_MAD_TX_AUDIO_SWITCH_OFF, 0x1, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR_MODE),
-		MAD_AUDIO_INT_DEST_SELECT_REG, 0x4, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_MASK0),
-		MAD_AUDIO_INT_MASK_REG, 0x2, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_STATUS0),
-		MAD_AUDIO_INT_STATUS_REG, 0x2, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_CLEAR0),
-		MAD_AUDIO_INT_CLEAR_REG, 0x2, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_SB_PGD_PORT_TX_BASE),
-		SB_PGD_PORT_TX_WATERMARK_N, 0x1E, 8, 0x1
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_SB_PGD_PORT_TX_BASE),
-		SB_PGD_PORT_TX_ENABLE_N, 0x1, 8, 0x1
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_SB_PGD_PORT_RX_BASE),
-		SB_PGD_PORT_RX_WATERMARK_N, 0x1E, 8, 0x1
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_SB_PGD_PORT_RX_BASE),
-		SB_PGD_PORT_RX_ENABLE_N, 0x1, 8, 0x1
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_ANC1_IIR_B1_CTL),
-		AANC_FF_GAIN_ADAPTIVE, 0x4, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_ANC1_IIR_B1_CTL),
-		AANC_FFGAIN_ADAPTIVE_EN, 0x8, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_ANC1_GAIN_CTL),
-		AANC_GAIN_CONTROL, 0xFF, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_MASK0),
-		MAD_CLIP_INT_MASK_REG, 0x10, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_MASK0),
-		MAD2_CLIP_INT_MASK_REG, 0x20, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_STATUS0),
-		MAD_CLIP_INT_STATUS_REG, 0x10, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_STATUS0),
-		MAD2_CLIP_INT_STATUS_REG, 0x20, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_CLEAR0),
-		MAD_CLIP_INT_CLEAR_REG, 0x10, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_CLEAR0),
-		MAD2_CLIP_INT_CLEAR_REG, 0x20, 8, 0
-	},
-};
-
-static struct afe_param_cdc_reg_cfg clip_reg_cfg[] = {
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				 TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL),
-		SPKR_CLIP_PIPE_BANK_SEL, 0x3, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				TOMTOM_A_CDC_SPKR_CLIPDET_VAL0),
-		SPKR_CLIPDET_VAL0, 0xff, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				TOMTOM_A_CDC_SPKR_CLIPDET_VAL1),
-		SPKR_CLIPDET_VAL1, 0xff, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				TOMTOM_A_CDC_SPKR_CLIPDET_VAL2),
-		SPKR_CLIPDET_VAL2, 0xff, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				TOMTOM_A_CDC_SPKR_CLIPDET_VAL3),
-		SPKR_CLIPDET_VAL3, 0xff, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				TOMTOM_A_CDC_SPKR_CLIPDET_VAL4),
-		SPKR_CLIPDET_VAL4, 0xff, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				TOMTOM_A_CDC_SPKR_CLIPDET_VAL5),
-		SPKR_CLIPDET_VAL5, 0xff, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				TOMTOM_A_CDC_SPKR_CLIPDET_VAL6),
-		SPKR_CLIPDET_VAL6, 0xff, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				TOMTOM_A_CDC_SPKR_CLIPDET_VAL7),
-		SPKR_CLIPDET_VAL7, 0xff, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				 TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL),
-		SPKR2_CLIP_PIPE_BANK_SEL, 0x3, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0),
-		SPKR2_CLIPDET_VAL0, 0xff, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1),
-		SPKR2_CLIPDET_VAL1, 0xff, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2),
-		SPKR2_CLIPDET_VAL2, 0xff, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3),
-		SPKR2_CLIPDET_VAL3, 0xff, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4),
-		SPKR2_CLIPDET_VAL4, 0xff, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5),
-		SPKR2_CLIPDET_VAL5, 0xff, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6),
-		SPKR2_CLIPDET_VAL6, 0xff, 8, 0
-	},
-	{
-		1,
-		(TOMTOM_REGISTER_START_OFFSET +
-				TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7),
-		SPKR2_CLIPDET_VAL7, 0xff, 8, 0
-	},
-};
-
-static struct afe_param_cdc_reg_cfg_data tomtom_audio_reg_cfg = {
-	.num_registers = ARRAY_SIZE(audio_reg_cfg),
-	.reg_data = audio_reg_cfg,
-};
-
-static struct afe_param_cdc_reg_cfg_data tomtom_clip_reg_cfg = {
-	.num_registers = ARRAY_SIZE(clip_reg_cfg),
-	.reg_data = clip_reg_cfg,
-};
-
-static struct afe_param_id_cdc_aanc_version tomtom_cdc_aanc_version = {
-	.cdc_aanc_minor_version = AFE_API_VERSION_CDC_AANC_VERSION,
-	.aanc_hw_version        = AANC_HW_BLOCK_VERSION_2,
-};
-
-static struct afe_param_id_clip_bank_sel clip_bank_sel = {
-	.minor_version = AFE_API_VERSION_CLIP_BANK_SEL_CFG,
-	.num_banks = AFE_CLIP_MAX_BANKS,
-	.bank_map = {0, 1, 2, 3},
-};
-
-#define WCD9330_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
-			SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
-			SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000)
-
-#define NUM_DECIMATORS 10
-#define NUM_INTERPOLATORS 8
-#define BITS_PER_REG 8
-#define TOMTOM_TX_PORT_NUMBER	16
-#define TOMTOM_RX_PORT_START_NUMBER	16
-
-#define TOMTOM_I2S_MASTER_MODE_MASK 0x08
-
-#define TOMTOM_SLIM_CLOSE_TIMEOUT 1000
-#define TOMTOM_SLIM_IRQ_OVERFLOW (1 << 0)
-#define TOMTOM_SLIM_IRQ_UNDERFLOW (1 << 1)
-#define TOMTOM_SLIM_IRQ_PORT_CLOSED (1 << 2)
-#define TOMTOM_MCLK_CLK_12P288MHZ 12288000
-#define TOMTOM_MCLK_CLK_9P6MHZ 9600000
-
-#define TOMTOM_FORMATS_S16_S24_LE (SNDRV_PCM_FMTBIT_S16_LE | \
-			SNDRV_PCM_FORMAT_S24_LE)
-
-#define TOMTOM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE)
-
-#define TOMTOM_SLIM_PGD_PORT_INT_TX_EN0 (TOMTOM_SLIM_PGD_PORT_INT_EN0 + 2)
-#define TOMTOM_ZDET_BOX_CAR_AVG_LOOP_COUNT 1
-#define TOMTOM_ZDET_MUL_FACTOR_1X 7218
-#define TOMTOM_ZDET_MUL_FACTOR_10X (TOMTOM_ZDET_MUL_FACTOR_1X * 10)
-#define TOMTOM_ZDET_MUL_FACTOR_100X (TOMTOM_ZDET_MUL_FACTOR_1X * 100)
-#define TOMTOM_ZDET_ERROR_APPROX_MUL_FACTOR 655
-#define TOMTOM_ZDET_ERROR_APPROX_SHIFT 16
-#define TOMTOM_ZDET_ZONE_3_DEFAULT_VAL 1000000
-
-enum {
-	AIF1_PB = 0,
-	AIF1_CAP,
-	AIF2_PB,
-	AIF2_CAP,
-	AIF3_PB,
-	AIF3_CAP,
-	AIF4_VIFEED,
-	AIF4_MAD_TX,
-	NUM_CODEC_DAIS,
-};
-
-enum {
-	RX_MIX1_INP_SEL_ZERO = 0,
-	RX_MIX1_INP_SEL_SRC1,
-	RX_MIX1_INP_SEL_SRC2,
-	RX_MIX1_INP_SEL_IIR1,
-	RX_MIX1_INP_SEL_IIR2,
-	RX_MIX1_INP_SEL_RX1,
-	RX_MIX1_INP_SEL_RX2,
-	RX_MIX1_INP_SEL_RX3,
-	RX_MIX1_INP_SEL_RX4,
-	RX_MIX1_INP_SEL_RX5,
-	RX_MIX1_INP_SEL_RX6,
-	RX_MIX1_INP_SEL_RX7,
-	RX_MIX1_INP_SEL_AUXRX,
-};
-enum {
-	RX8_MIX1_INP_SEL_ZERO = 0,
-	RX8_MIX1_INP_SEL_IIR1,
-	RX8_MIX1_INP_SEL_IIR2,
-	RX8_MIX1_INP_SEL_RX1,
-	RX8_MIX1_INP_SEL_RX2,
-	RX8_MIX1_INP_SEL_RX3,
-	RX8_MIX1_INP_SEL_RX4,
-	RX8_MIX1_INP_SEL_RX5,
-	RX8_MIX1_INP_SEL_RX6,
-	RX8_MIX1_INP_SEL_RX7,
-	RX8_MIX1_INP_SEL_RX8,
-};
-
-#define TOMTOM_COMP_DIGITAL_GAIN_OFFSET 3
-
-static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
-static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1);
-static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
-static struct snd_soc_dai_driver tomtom_dai[];
-static const DECLARE_TLV_DB_SCALE(aux_pga_gain, 0, 2, 0);
-
-/* Codec supports 2 IIR filters */
-enum {
-	IIR1 = 0,
-	IIR2,
-	IIR_MAX,
-};
-/* Codec supports 5 bands */
-enum {
-	BAND1 = 0,
-	BAND2,
-	BAND3,
-	BAND4,
-	BAND5,
-	BAND_MAX,
-};
-
-enum {
-	COMPANDER_0,
-	COMPANDER_1,
-	COMPANDER_2,
-	COMPANDER_MAX,
-};
-
-enum {
-	COMPANDER_FS_8KHZ = 0,
-	COMPANDER_FS_16KHZ,
-	COMPANDER_FS_32KHZ,
-	COMPANDER_FS_48KHZ,
-	COMPANDER_FS_96KHZ,
-	COMPANDER_FS_192KHZ,
-	COMPANDER_FS_MAX,
-};
-
-struct comp_sample_dependent_params {
-	u32 peak_det_timeout;
-	u32 rms_meter_div_fact;
-	u32 rms_meter_resamp_fact;
-};
-
-struct hpf_work {
-	struct tomtom_priv *tomtom;
-	u32 decimator;
-	u8 tx_hpf_cut_of_freq;
-	bool tx_hpf_bypass;
-	struct delayed_work dwork;
-};
-
-static struct hpf_work tx_hpf_work[NUM_DECIMATORS];
-
-static const struct wcd9xxx_ch tomtom_rx_chs[TOMTOM_RX_MAX] = {
-	WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER, 0),
-	WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 1, 1),
-	WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 2, 2),
-	WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 3, 3),
-	WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 4, 4),
-	WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 5, 5),
-	WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 6, 6),
-	WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 7, 7),
-	WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 8, 8),
-	WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 9, 9),
-	WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 10, 10),
-	WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 11, 11),
-	WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 12, 12),
-};
-
-static const struct wcd9xxx_ch tomtom_tx_chs[TOMTOM_TX_MAX] = {
-	WCD9XXX_CH(0, 0),
-	WCD9XXX_CH(1, 1),
-	WCD9XXX_CH(2, 2),
-	WCD9XXX_CH(3, 3),
-	WCD9XXX_CH(4, 4),
-	WCD9XXX_CH(5, 5),
-	WCD9XXX_CH(6, 6),
-	WCD9XXX_CH(7, 7),
-	WCD9XXX_CH(8, 8),
-	WCD9XXX_CH(9, 9),
-	WCD9XXX_CH(10, 10),
-	WCD9XXX_CH(11, 11),
-	WCD9XXX_CH(12, 12),
-	WCD9XXX_CH(13, 13),
-	WCD9XXX_CH(14, 14),
-	WCD9XXX_CH(15, 15),
-};
-
-static const u32 vport_check_table[NUM_CODEC_DAIS] = {
-	0,					/* AIF1_PB */
-	(1 << AIF2_CAP) | (1 << AIF3_CAP),	/* AIF1_CAP */
-	0,					/* AIF2_PB */
-	(1 << AIF1_CAP) | (1 << AIF3_CAP),	/* AIF2_CAP */
-	0,					/* AIF3_PB */
-	(1 << AIF1_CAP) | (1 << AIF2_CAP),	/* AIF3_CAP */
-};
-
-static const u32 vport_i2s_check_table[NUM_CODEC_DAIS] = {
-	0,	/* AIF1_PB */
-	0,	/* AIF1_CAP */
-	0,	/* AIF2_PB */
-	0,	/* AIF2_CAP */
-};
-
-/*
- * Interrupt table for v3, which corresponds to the newer
- * generation of codecs (wcd9330)
- */
-static const struct intr_data wcd9330_intr_tbl[] = {
-	{WCD9XXX_IRQ_SLIMBUS, false},
-	{WCD9XXX_IRQ_MBHC_INSERTION, true},
-	{WCD9XXX_IRQ_MBHC_POTENTIAL, true},
-	{WCD9XXX_IRQ_MBHC_RELEASE, true},
-	{WCD9XXX_IRQ_MBHC_PRESS, true},
-	{WCD9XXX_IRQ_MBHC_SHORT_TERM, true},
-	{WCD9XXX_IRQ_MBHC_REMOVAL, true},
-	{WCD9330_IRQ_MBHC_JACK_SWITCH, true},
-	{WCD9XXX_IRQ_BG_PRECHARGE, false},
-	{WCD9XXX_IRQ_PA1_STARTUP, false},
-	{WCD9XXX_IRQ_PA2_STARTUP, false},
-	{WCD9XXX_IRQ_PA3_STARTUP, false},
-	{WCD9XXX_IRQ_PA4_STARTUP, false},
-	{WCD9XXX_IRQ_PA5_STARTUP, false},
-	{WCD9XXX_IRQ_MICBIAS1_PRECHARGE, false},
-	{WCD9XXX_IRQ_MICBIAS2_PRECHARGE, false},
-	{WCD9XXX_IRQ_MICBIAS3_PRECHARGE, false},
-	{WCD9XXX_IRQ_HPH_PA_OCPL_FAULT, false},
-	{WCD9XXX_IRQ_HPH_PA_OCPR_FAULT, false},
-	{WCD9XXX_IRQ_EAR_PA_OCPL_FAULT, false},
-	{WCD9XXX_IRQ_HPH_L_PA_STARTUP, false},
-	{WCD9XXX_IRQ_HPH_R_PA_STARTUP, false},
-	{WCD9320_IRQ_EAR_PA_STARTUP, false},
-	{WCD9330_IRQ_SVASS_ERR_EXCEPTION, false},
-	{WCD9330_IRQ_SVASS_ENGINE, true},
-	{WCD9330_IRQ_MAD_AUDIO, false},
-	{WCD9330_IRQ_MAD_BEACON, false},
-	{WCD9330_IRQ_MAD_ULTRASOUND, false},
-	{WCD9330_IRQ_SPEAKER1_CLIPPING, false},
-	{WCD9330_IRQ_SPEAKER2_CLIPPING, false},
-	{WCD9330_IRQ_VBAT_MONITOR_ATTACK, false},
-	{WCD9330_IRQ_VBAT_MONITOR_RELEASE, false},
-};
-
-struct tomtom_priv {
-	struct snd_soc_codec *codec;
-	u32 adc_count;
-	u32 rx_bias_count;
-	s32 dmic_1_2_clk_cnt;
-	s32 dmic_3_4_clk_cnt;
-	s32 dmic_5_6_clk_cnt;
-	s32 ldo_h_users;
-	s32 micb_2_users;
-
-	u32 anc_slot;
-	bool anc_func;
-
-	/* cal info for codec */
-	struct fw_info *fw_data;
-
-	/* track tomtom interface type */
-	u8 intf_type;
-
-	/* num of slim ports required */
-	struct wcd9xxx_codec_dai_data  dai[NUM_CODEC_DAIS];
-
-	/* compander */
-	int comp_enabled[COMPANDER_MAX];
-	u32 comp_fs[COMPANDER_MAX];
-
-	/* Maintain the status of AUX PGA */
-	int aux_pga_cnt;
-	u8 aux_l_gain;
-	u8 aux_r_gain;
-
-	bool spkr_pa_widget_on;
-	struct regulator *spkdrv_reg;
-	struct regulator *spkdrv2_reg;
-
-	bool mbhc_started;
-
-	struct afe_param_cdc_slimbus_slave_cfg slimbus_slave_cfg;
-
-	/* resmgr module */
-	struct wcd9xxx_resmgr resmgr;
-	/* mbhc module */
-	struct wcd9xxx_mbhc mbhc;
-
-	/* class h specific data */
-	struct wcd9xxx_clsh_cdc_data clsh_d;
-
-	int (*machine_codec_event_cb)(struct snd_soc_codec *codec,
-			enum wcd9xxx_codec_event);
-	int (*codec_ext_clk_en_cb)(struct snd_soc_codec *codec,
-			int enable, bool dapm);
-	int (*codec_get_ext_clk_cnt)(void);
-	/*
-	 * list used to save/restore registers at start and
-	 * end of impedance measurement
-	 */
-	struct list_head reg_save_restore;
-
-	/* handle to cpe core */
-	struct wcd_cpe_core *cpe_core;
-
-	/* UHQA (class AB) mode */
-	u8 uhqa_mode;
-
-	/* Multiplication factor used for impedance detection */
-	int zdet_gain_mul_fact;
-
-	/* to track the status */
-	unsigned long status_mask;
-
-	int ext_clk_users;
-	struct clk *wcd_ext_clk;
-
-	/* Port values for Rx and Tx codec_dai */
-	unsigned int rx_port_value;
-	unsigned int tx_port_value;
-
-	struct mutex codec_mutex;
-};
-
-static const u32 comp_shift[] = {
-	4, /* Compander 0's clock source is on interpolator 7 */
-	0,
-	2,
-};
-
-static const int comp_rx_path[] = {
-	COMPANDER_1,
-	COMPANDER_1,
-	COMPANDER_2,
-	COMPANDER_2,
-	COMPANDER_2,
-	COMPANDER_2,
-	COMPANDER_0,
-	COMPANDER_0,
-	COMPANDER_MAX,
-};
-
-static const struct comp_sample_dependent_params comp_samp_params[] = {
-	{
-		/* 8 kHz */
-		.peak_det_timeout = 0x06,
-		.rms_meter_div_fact = 0x09,
-		.rms_meter_resamp_fact = 0x06,
-	},
-	{
-		/* 16 kHz */
-		.peak_det_timeout = 0x07,
-		.rms_meter_div_fact = 0x0A,
-		.rms_meter_resamp_fact = 0x0C,
-	},
-	{
-		/* 32 kHz */
-		.peak_det_timeout = 0x08,
-		.rms_meter_div_fact = 0x0B,
-		.rms_meter_resamp_fact = 0x1E,
-	},
-	{
-		/* 48 kHz */
-		.peak_det_timeout = 0x09,
-		.rms_meter_div_fact = 0x0B,
-		.rms_meter_resamp_fact = 0x28,
-	},
-	{
-		/* 96 kHz */
-		.peak_det_timeout = 0x0A,
-		.rms_meter_div_fact = 0x0C,
-		.rms_meter_resamp_fact = 0x50,
-	},
-	{
-		/* 192 kHz */
-		.peak_det_timeout = 0x0B,
-		.rms_meter_div_fact = 0xC,
-		.rms_meter_resamp_fact = 0xA0,
-	},
-};
-
-static unsigned short rx_digital_gain_reg[] = {
-	TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL,
-	TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL,
-	TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL,
-	TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL,
-	TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL,
-	TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL,
-	TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL,
-	TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL,
-};
-
-
-static unsigned short tx_digital_gain_reg[] = {
-	TOMTOM_A_CDC_TX1_VOL_CTL_GAIN,
-	TOMTOM_A_CDC_TX2_VOL_CTL_GAIN,
-	TOMTOM_A_CDC_TX3_VOL_CTL_GAIN,
-	TOMTOM_A_CDC_TX4_VOL_CTL_GAIN,
-	TOMTOM_A_CDC_TX5_VOL_CTL_GAIN,
-	TOMTOM_A_CDC_TX6_VOL_CTL_GAIN,
-	TOMTOM_A_CDC_TX7_VOL_CTL_GAIN,
-	TOMTOM_A_CDC_TX8_VOL_CTL_GAIN,
-	TOMTOM_A_CDC_TX9_VOL_CTL_GAIN,
-	TOMTOM_A_CDC_TX10_VOL_CTL_GAIN,
-};
-
-/*
- * wcd9330_get_codec_info: Get codec specific information
- *
- * @wcd9xxx: pointer to wcd9xxx structure
- * @wcd_type: pointer to wcd9xxx_codec_type structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd9330_get_codec_info(struct wcd9xxx *wcd9xxx,
-			   struct wcd9xxx_codec_type *wcd_type)
-{
-	u16 id_minor, id_major;
-	struct regmap *wcd_regmap;
-	int rc, val, version = 0;
-
-	if (!wcd9xxx || !wcd_type)
-		return -EINVAL;
-
-	if (!wcd9xxx->regmap) {
-		dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
-			__func__);
-		return -EINVAL;
-	}
-	wcd_regmap = wcd9xxx->regmap;
-	rc = regmap_bulk_read(wcd_regmap, TOMTOM_A_CHIP_ID_BYTE_0,
-			      (u8 *)&id_minor, sizeof(u16));
-	if (rc)
-		return -EINVAL;
-
-	rc = regmap_bulk_read(wcd_regmap, TOMTOM_A_CHIP_ID_BYTE_2,
-			      (u8 *)&id_major, sizeof(u16));
-	if (rc)
-		return -EINVAL;
-
-	dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n",
-		 __func__, id_major, id_minor);
-
-	if (id_minor == cpu_to_le16(0x1))
-		version = 2;
-	else if (id_minor == cpu_to_le16(0x0))
-		version = 1;
-	else
-		dev_err(wcd9xxx->dev, "%s: wcd9330 version unknown (major 0x%x, minor 0x%x)\n",
-			__func__, id_major, id_minor);
-
-	/* Fill codec type info */
-	wcd_type->id_major = id_major;
-	wcd_type->id_minor = id_minor;
-	wcd_type->num_irqs = WCD9330_NUM_IRQS;
-	wcd_type->version = version;
-	wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1;
-	wcd_type->i2c_chip_status = 0x01;
-	wcd_type->intr_tbl = wcd9330_intr_tbl;
-	wcd_type->intr_tbl_size = ARRAY_SIZE(wcd9330_intr_tbl);
-
-	wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] =
-						TOMTOM_A_INTR1_STATUS0;
-	wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] =
-						TOMTOM_A_INTR1_CLEAR0;
-	wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] =
-						TOMTOM_A_INTR1_MASK0;
-	wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] =
-						TOMTOM_A_INTR1_LEVEL0;
-	wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] =
-						TOMTOM_A_INTR_MODE;
-
-	return rc;
-}
-EXPORT_SYMBOL(wcd9330_get_codec_info);
-
-/*
- * wcd9330_bringdown: Bring down WCD Codec
- *
- * @wcd9xxx: Pointer to wcd9xxx structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd9330_bringdown(struct wcd9xxx *wcd9xxx)
-{
-	if (!wcd9xxx || !wcd9xxx->regmap)
-		return -EINVAL;
-
-	regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0x7);
-	regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0x6);
-	regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0xe);
-	regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0x8);
-
-	return 0;
-}
-EXPORT_SYMBOL(wcd9330_bringdown);
-
-/*
- * wcd9330_bringup: Bring up WCD Codec
- *
- * @wcd9xxx: Pointer to wcd9xxx structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd9330_bringup(struct wcd9xxx *wcd9xxx)
-{
-	if (!wcd9xxx || !wcd9xxx->regmap)
-		return -EINVAL;
-
-	regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0x4);
-	regmap_write(wcd9xxx->regmap, TOMTOM_A_CDC_CTL, 0x0);
-	/* wait for 5ms after codec reset for it to complete */
-	usleep_range(5000, 5100);
-	regmap_write(wcd9xxx->regmap, TOMTOM_A_CDC_CTL, 0x1);
-	regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0x3);
-	regmap_write(wcd9xxx->regmap, TOMTOM_A_CDC_CTL, 0x3);
-
-	return 0;
-}
-EXPORT_SYMBOL(wcd9330_bringup);
-
-int tomtom_enable_qfuse_sensing(struct snd_soc_codec *codec)
-{
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	if (tomtom->wcd_ext_clk)
-		tomtom_codec_mclk_enable(codec, true, false);
-
-	snd_soc_write(codec, TOMTOM_A_QFUSE_CTL, 0x03);
-	/*
-	 * 5ms sleep required after enabling qfuse control
-	 * before checking the status.
-	 */
-	usleep_range(5000, 5500);
-	if ((snd_soc_read(codec, TOMTOM_A_QFUSE_STATUS) & (0x03)) != 0x03)
-		WARN(1, "%s: Qfuse sense is not complete\n", __func__);
-
-	if (tomtom->wcd_ext_clk)
-		tomtom_codec_mclk_enable(codec, false, false);
-	return 0;
-}
-EXPORT_SYMBOL(tomtom_enable_qfuse_sensing);
-
-static int tomtom_get_sample_rate(struct snd_soc_codec *codec, int path)
-{
-	if (path == RX8_PATH)
-		return snd_soc_read(codec, TOMTOM_A_CDC_RX8_B5_CTL);
-	else
-		return snd_soc_read(codec,
-			(TOMTOM_A_CDC_RX1_B5_CTL + 8 * (path - 1)));
-}
-
-static int tomtom_compare_bit_format(struct snd_soc_codec *codec,
-				int bit_format)
-{
-	int i = 0;
-	int ret = 0;
-	struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-
-	for (i = 0; i < NUM_CODEC_DAIS; i++) {
-		if (tomtom_p->dai[i].bit_width == bit_format) {
-			ret = 1;
-			break;
-		}
-	}
-	return ret;
-}
-
-static int tomtom_update_uhqa_mode(struct snd_soc_codec *codec, int path)
-{
-	int ret = 0;
-	struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-
-	/* UHQA path has fs = 192 kHz and a 24-bit width */
-	if (((tomtom_get_sample_rate(codec, path) & 0xE0) == 0xA0) &&
-		(tomtom_compare_bit_format(codec, 24))) {
-		tomtom_p->uhqa_mode = 1;
-	} else {
-		tomtom_p->uhqa_mode = 0;
-	}
-	dev_dbg(codec->dev, "%s: uhqa_mode=%d", __func__, tomtom_p->uhqa_mode);
-	return ret;
-}
-
-static int tomtom_get_anc_slot(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	ucontrol->value.integer.value[0] = tomtom->anc_slot;
-	return 0;
-}
-
-static int tomtom_put_anc_slot(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	tomtom->anc_slot = ucontrol->value.integer.value[0];
-	return 0;
-}
-
-static int tomtom_get_anc_func(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	ucontrol->value.integer.value[0] = (tomtom->anc_func == true ? 1 : 0);
-	return 0;
-}
-
-static int tomtom_put_anc_func(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	struct snd_soc_dapm_context *dapm =
-				snd_soc_codec_get_dapm(codec);
-
-	mutex_lock(&tomtom->codec_mutex);
-	tomtom->anc_func = (!ucontrol->value.integer.value[0] ? false : true);
-
-	dev_dbg(codec->dev, "%s: anc_func %x", __func__, tomtom->anc_func);
-
-	if (tomtom->anc_func == true) {
-		snd_soc_dapm_enable_pin(dapm, "ANC HPHR");
-		snd_soc_dapm_enable_pin(dapm, "ANC HPHL");
-		snd_soc_dapm_enable_pin(dapm, "ANC HEADPHONE");
-		snd_soc_dapm_enable_pin(dapm, "ANC EAR PA");
-		snd_soc_dapm_enable_pin(dapm, "ANC EAR");
-		snd_soc_dapm_disable_pin(dapm, "HPHR");
-		snd_soc_dapm_disable_pin(dapm, "HPHL");
-		snd_soc_dapm_disable_pin(dapm, "HEADPHONE");
-		snd_soc_dapm_disable_pin(dapm, "EAR PA");
-		snd_soc_dapm_disable_pin(dapm, "EAR");
-	} else {
-		snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
-		snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
-		snd_soc_dapm_disable_pin(dapm, "ANC HEADPHONE");
-		snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
-		snd_soc_dapm_disable_pin(dapm, "ANC EAR");
-		snd_soc_dapm_enable_pin(dapm, "HPHR");
-		snd_soc_dapm_enable_pin(dapm, "HPHL");
-		snd_soc_dapm_enable_pin(dapm, "HEADPHONE");
-		snd_soc_dapm_enable_pin(dapm, "EAR PA");
-		snd_soc_dapm_enable_pin(dapm, "EAR");
-	}
-	mutex_unlock(&tomtom->codec_mutex);
-	snd_soc_dapm_sync(dapm);
-	return 0;
-}
-
-static int tomtom_get_iir_enable_audio_mixer(
-					struct snd_kcontrol *kcontrol,
-					struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	int iir_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->reg;
-	int band_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->shift;
-
-	ucontrol->value.integer.value[0] =
-		(snd_soc_read(codec, (TOMTOM_A_CDC_IIR1_CTL + 16 * iir_idx)) &
-		(1 << band_idx)) != 0;
-
-	pr_debug("%s: IIR #%d band #%d enable %d\n", __func__,
-		iir_idx, band_idx,
-		(uint32_t)ucontrol->value.integer.value[0]);
-	return 0;
-}
-
-static int tomtom_put_iir_enable_audio_mixer(
-					struct snd_kcontrol *kcontrol,
-					struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	int iir_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->reg;
-	int band_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->shift;
-	int value = ucontrol->value.integer.value[0];
-
-	/* Mask first 5 bits, 6-8 are reserved */
-	snd_soc_update_bits(codec, (TOMTOM_A_CDC_IIR1_CTL + 16 * iir_idx),
-		(1 << band_idx), (value << band_idx));
-
-	pr_debug("%s: IIR #%d band #%d enable %d\n", __func__,
-		iir_idx, band_idx,
-		((snd_soc_read(codec, (TOMTOM_A_CDC_IIR1_CTL + 16 * iir_idx)) &
-		(1 << band_idx)) != 0));
-	return 0;
-}
-
-static uint32_t get_iir_band_coeff(struct snd_soc_codec *codec,
-				int iir_idx, int band_idx,
-				int coeff_idx)
-{
-	uint32_t value = 0;
-
-	/* Address does not automatically update if reading */
-	snd_soc_write(codec,
-		(TOMTOM_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx),
-		((band_idx * BAND_MAX + coeff_idx)
-		* sizeof(uint32_t)) & 0x7F);
-
-	value |= snd_soc_read(codec,
-		(TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx));
-
-	snd_soc_write(codec,
-		(TOMTOM_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx),
-		((band_idx * BAND_MAX + coeff_idx)
-		* sizeof(uint32_t) + 1) & 0x7F);
-
-	value |= (snd_soc_read(codec,
-		(TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx)) << 8);
-
-	snd_soc_write(codec,
-		(TOMTOM_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx),
-		((band_idx * BAND_MAX + coeff_idx)
-		* sizeof(uint32_t) + 2) & 0x7F);
-
-	value |= (snd_soc_read(codec,
-		(TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx)) << 16);
-
-	snd_soc_write(codec,
-		(TOMTOM_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx),
-		((band_idx * BAND_MAX + coeff_idx)
-		* sizeof(uint32_t) + 3) & 0x7F);
-
-	/* Mask off the top 2 bits since they are reserved */
-	value |= ((snd_soc_read(codec,
-		(TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx)) & 0x3F) << 24);
-
-	return value;
-}
-
-static int tomtom_get_iir_band_audio_mixer(
-					struct snd_kcontrol *kcontrol,
-					struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	int iir_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->reg;
-	int band_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->shift;
-
-	ucontrol->value.integer.value[0] =
-		get_iir_band_coeff(codec, iir_idx, band_idx, 0);
-	ucontrol->value.integer.value[1] =
-		get_iir_band_coeff(codec, iir_idx, band_idx, 1);
-	ucontrol->value.integer.value[2] =
-		get_iir_band_coeff(codec, iir_idx, band_idx, 2);
-	ucontrol->value.integer.value[3] =
-		get_iir_band_coeff(codec, iir_idx, band_idx, 3);
-	ucontrol->value.integer.value[4] =
-		get_iir_band_coeff(codec, iir_idx, band_idx, 4);
-
-	pr_debug("%s: IIR #%d band #%d b0 = 0x%x\n"
-		"%s: IIR #%d band #%d b1 = 0x%x\n"
-		"%s: IIR #%d band #%d b2 = 0x%x\n"
-		"%s: IIR #%d band #%d a1 = 0x%x\n"
-		"%s: IIR #%d band #%d a2 = 0x%x\n",
-		__func__, iir_idx, band_idx,
-		(uint32_t)ucontrol->value.integer.value[0],
-		__func__, iir_idx, band_idx,
-		(uint32_t)ucontrol->value.integer.value[1],
-		__func__, iir_idx, band_idx,
-		(uint32_t)ucontrol->value.integer.value[2],
-		__func__, iir_idx, band_idx,
-		(uint32_t)ucontrol->value.integer.value[3],
-		__func__, iir_idx, band_idx,
-		(uint32_t)ucontrol->value.integer.value[4]);
-	return 0;
-}
-
-static void set_iir_band_coeff(struct snd_soc_codec *codec,
-				int iir_idx, int band_idx,
-				uint32_t value)
-{
-	snd_soc_write(codec,
-		(TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx),
-		(value & 0xFF));
-
-	snd_soc_write(codec,
-		(TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx),
-		(value >> 8) & 0xFF);
-
-	snd_soc_write(codec,
-		(TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx),
-		(value >> 16) & 0xFF);
-
-	/* Mask top 2 bits, 7-8 are reserved */
-	snd_soc_write(codec,
-		(TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx),
-		(value >> 24) & 0x3F);
-}
-
-static int tomtom_put_iir_band_audio_mixer(
-					struct snd_kcontrol *kcontrol,
-					struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	int iir_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->reg;
-	int band_idx = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->shift;
-
-	/* Mask the top bit; it is reserved */
-	/* Updates addr automatically for each B2 write */
-	snd_soc_write(codec,
-		(TOMTOM_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx),
-		(band_idx * BAND_MAX * sizeof(uint32_t)) & 0x7F);
-
-	set_iir_band_coeff(codec, iir_idx, band_idx,
-				ucontrol->value.integer.value[0]);
-	set_iir_band_coeff(codec, iir_idx, band_idx,
-				ucontrol->value.integer.value[1]);
-	set_iir_band_coeff(codec, iir_idx, band_idx,
-				ucontrol->value.integer.value[2]);
-	set_iir_band_coeff(codec, iir_idx, band_idx,
-				ucontrol->value.integer.value[3]);
-	set_iir_band_coeff(codec, iir_idx, band_idx,
-				ucontrol->value.integer.value[4]);
-
-	pr_debug("%s: IIR #%d band #%d b0 = 0x%x\n"
-		"%s: IIR #%d band #%d b1 = 0x%x\n"
-		"%s: IIR #%d band #%d b2 = 0x%x\n"
-		"%s: IIR #%d band #%d a1 = 0x%x\n"
-		"%s: IIR #%d band #%d a2 = 0x%x\n",
-		__func__, iir_idx, band_idx,
-		get_iir_band_coeff(codec, iir_idx, band_idx, 0),
-		__func__, iir_idx, band_idx,
-		get_iir_band_coeff(codec, iir_idx, band_idx, 1),
-		__func__, iir_idx, band_idx,
-		get_iir_band_coeff(codec, iir_idx, band_idx, 2),
-		__func__, iir_idx, band_idx,
-		get_iir_band_coeff(codec, iir_idx, band_idx, 3),
-		__func__, iir_idx, band_idx,
-		get_iir_band_coeff(codec, iir_idx, band_idx, 4));
-	return 0;
-}
-
-static int tomtom_get_compander(struct snd_kcontrol *kcontrol,
-			       struct snd_ctl_elem_value *ucontrol)
-{
-
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	int comp = ((struct soc_multi_mixer_control *)
-		    kcontrol->private_value)->shift;
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	ucontrol->value.integer.value[0] = tomtom->comp_enabled[comp];
-	return 0;
-}
-
-static int tomtom_set_compander(struct snd_kcontrol *kcontrol,
-			       struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	int comp = ((struct soc_multi_mixer_control *)
-		    kcontrol->private_value)->shift;
-	int value = ucontrol->value.integer.value[0];
-
-	pr_debug("%s: Compander %d enable current %d, new %d\n",
-		 __func__, comp, tomtom->comp_enabled[comp], value);
-	tomtom->comp_enabled[comp] = value;
-
-	if (comp == COMPANDER_1 &&
-			tomtom->comp_enabled[comp] == 1) {
-		/* Wavegen to 5 msec */
-		snd_soc_write(codec, TOMTOM_A_RX_HPH_CNP_WG_CTL, 0xDB);
-		snd_soc_write(codec, TOMTOM_A_RX_HPH_CNP_WG_TIME, 0x2A);
-		snd_soc_write(codec, TOMTOM_A_RX_HPH_BIAS_WG_OCP, 0x2A);
-
-		/* Enable Chopper */
-		snd_soc_update_bits(codec,
-			TOMTOM_A_RX_HPH_CHOP_CTL, 0x80, 0x80);
-
-		snd_soc_write(codec, TOMTOM_A_NCP_DTEST, 0x20);
-		pr_debug("%s: Enabled Chopper and set wavegen to 5 msec\n",
-				__func__);
-	} else if (comp == COMPANDER_1 &&
-			tomtom->comp_enabled[comp] == 0) {
-		/* Wavegen to 20 msec */
-		snd_soc_write(codec, TOMTOM_A_RX_HPH_CNP_WG_CTL, 0xDB);
-		snd_soc_write(codec, TOMTOM_A_RX_HPH_CNP_WG_TIME, 0x58);
-		snd_soc_write(codec, TOMTOM_A_RX_HPH_BIAS_WG_OCP, 0x1A);
-
-		/* Disable CHOPPER block */
-		snd_soc_update_bits(codec,
-			TOMTOM_A_RX_HPH_CHOP_CTL, 0x80, 0x00);
-
-		snd_soc_write(codec, TOMTOM_A_NCP_DTEST, 0x10);
-		pr_debug("%s: Disabled Chopper and set wavegen to 20 msec\n",
-				__func__);
-	}
-	return 0;
-}
-
-static int tomtom_config_gain_compander(struct snd_soc_codec *codec,
-				       int comp, bool enable)
-{
-	int ret = 0;
-
-	switch (comp) {
-	case COMPANDER_0:
-		snd_soc_update_bits(codec, TOMTOM_A_SPKR_DRV1_GAIN,
-				    1 << 2, !enable << 2);
-		snd_soc_update_bits(codec, TOMTOM_A_SPKR_DRV2_GAIN,
-				    1 << 2, !enable << 2);
-		break;
-	case COMPANDER_1:
-		snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_L_GAIN,
-				    1 << 5, !enable << 5);
-		snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_R_GAIN,
-				    1 << 5, !enable << 5);
-		break;
-	case COMPANDER_2:
-		snd_soc_update_bits(codec, TOMTOM_A_RX_LINE_1_GAIN,
-				    1 << 5, !enable << 5);
-		snd_soc_update_bits(codec, TOMTOM_A_RX_LINE_3_GAIN,
-				    1 << 5, !enable << 5);
-		snd_soc_update_bits(codec, TOMTOM_A_RX_LINE_2_GAIN,
-				    1 << 5, !enable << 5);
-		snd_soc_update_bits(codec, TOMTOM_A_RX_LINE_4_GAIN,
-				    1 << 5, !enable << 5);
-		break;
-	default:
-		WARN_ON(1);
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-static void tomtom_discharge_comp(struct snd_soc_codec *codec, int comp)
-{
-	/* Level meter DIV factor to 5 */
-	snd_soc_update_bits(codec, TOMTOM_A_CDC_COMP0_B2_CTL + (comp * 8), 0xF0,
-			    0x05 << 4);
-	/* RMS meter Sampling to 0x01 */
-	snd_soc_write(codec, TOMTOM_A_CDC_COMP0_B3_CTL + (comp * 8), 0x01);
-
-	/* Wait out the worst case compander CnP sleep timeout */
-	usleep_range(3000, 3100);
-}
-
-static enum wcd9xxx_buck_volt tomtom_codec_get_buck_mv(
-	struct snd_soc_codec *codec)
-{
-	int buck_volt = WCD9XXX_CDC_BUCK_UNSUPPORTED;
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	struct wcd9xxx_pdata *pdata = tomtom->resmgr.pdata;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
-		if (!strcmp(pdata->regulator[i].name,
-					 WCD9XXX_SUPPLY_BUCK_NAME)) {
-			if ((pdata->regulator[i].min_uV ==
-					WCD9XXX_CDC_BUCK_MV_1P8) ||
-				(pdata->regulator[i].min_uV ==
-					WCD9XXX_CDC_BUCK_MV_2P15))
-				buck_volt = pdata->regulator[i].min_uV;
-			break;
-		}
-	}
-	return buck_volt;
-}
-
-static int tomtom_config_compander(struct snd_soc_dapm_widget *w,
-				  struct snd_kcontrol *kcontrol, int event)
-{
-	int mask, enable_mask;
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	const int comp = w->shift;
-	const u32 rate = tomtom->comp_fs[comp];
-	const struct comp_sample_dependent_params *comp_params =
-	    &comp_samp_params[rate];
-	enum wcd9xxx_buck_volt buck_mv;
-
-	pr_debug("%s: %s event %d compander %d, enabled %d", __func__,
-		 w->name, event, comp, tomtom->comp_enabled[comp]);
-
-	if (!tomtom->comp_enabled[comp])
-		return 0;
-
-	/* Compander 0 has two channels */
-	mask = enable_mask = 0x03;
-	buck_mv = tomtom_codec_get_buck_mv(codec);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		/* Set compander Sample rate */
-		snd_soc_update_bits(codec,
-				    TOMTOM_A_CDC_COMP0_FS_CFG + (comp * 8),
-				    0x07, rate);
-		/* Set the static gain offset for HPH Path */
-		if (comp == COMPANDER_1) {
-			if (buck_mv == WCD9XXX_CDC_BUCK_MV_2P15) {
-				snd_soc_update_bits(codec,
-					TOMTOM_A_CDC_COMP0_B4_CTL + (comp * 8),
-					0x80, 0x00);
-			} else {
-				snd_soc_update_bits(codec,
-					TOMTOM_A_CDC_COMP0_B4_CTL + (comp * 8),
-					0x80, 0x80);
-			}
-		}
-		/* Enable RX interpolation path compander clocks */
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_RX_B2_CTL,
-				    mask << comp_shift[comp],
-				    mask << comp_shift[comp]);
-		/* Toggle compander reset bits */
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL,
-				    mask << comp_shift[comp],
-				    mask << comp_shift[comp]);
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL,
-				    mask << comp_shift[comp], 0);
-
-		/* Set gain source to compander */
-		tomtom_config_gain_compander(codec, comp, true);
-
-		/* Compander enable */
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_COMP0_B1_CTL +
-				    (comp * 8), enable_mask, enable_mask);
-
-		tomtom_discharge_comp(codec, comp);
-
-		/* Set sample rate dependent parameter */
-		snd_soc_write(codec, TOMTOM_A_CDC_COMP0_B3_CTL + (comp * 8),
-			      comp_params->rms_meter_resamp_fact);
-		snd_soc_update_bits(codec,
-				    TOMTOM_A_CDC_COMP0_B2_CTL + (comp * 8),
-				    0xF0, comp_params->rms_meter_div_fact << 4);
-		snd_soc_update_bits(codec,
-					TOMTOM_A_CDC_COMP0_B2_CTL + (comp * 8),
-					0x0F, comp_params->peak_det_timeout);
-		break;
-	case SND_SOC_DAPM_PRE_PMD:
-		/* Disable compander */
-		snd_soc_update_bits(codec,
-				    TOMTOM_A_CDC_COMP0_B1_CTL + (comp * 8),
-				    enable_mask, 0x00);
-
-		/* Toggle compander reset bits */
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL,
-				    mask << comp_shift[comp],
-				    mask << comp_shift[comp]);
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL,
-				    mask << comp_shift[comp], 0);
-
-		/* Turn off the clock for compander in pair */
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_RX_B2_CTL,
-				    mask << comp_shift[comp], 0);
-
-		/* Set gain source to register */
-		tomtom_config_gain_compander(codec, comp, false);
-		break;
-	}
-	return 0;
-}
-
-
-
-static const char *const tomtom_anc_func_text[] = {"OFF", "ON"};
-static const struct soc_enum tomtom_anc_func_enum =
-		SOC_ENUM_SINGLE_EXT(2, tomtom_anc_func_text);
-
-static const char *const tabla_ear_pa_gain_text[] = {"POS_6_DB", "POS_2_DB"};
-static const struct soc_enum tabla_ear_pa_gain_enum[] = {
-		SOC_ENUM_SINGLE_EXT(2, tabla_ear_pa_gain_text),
-};
-
-/* cut-off frequency for high pass filter */
-static const char * const cf_text[] = {
-	"MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz"
-};
-
-static const char * const rx_cf_text[] = {
-	"MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz",
-	"MIN_3DB_0P48Hz"
-};
-
-static const struct soc_enum cf_dec1_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX1_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec2_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX2_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec3_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX3_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec4_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX4_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec5_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX5_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec6_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX6_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec7_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX7_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec8_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX8_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec9_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX9_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec10_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX10_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_rxmix1_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX1_B4_CTL, 0, 4, rx_cf_text);
-
-static const struct soc_enum cf_rxmix2_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX2_B4_CTL, 0, 4, rx_cf_text);
-
-static const struct soc_enum cf_rxmix3_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX3_B4_CTL, 0, 4, rx_cf_text);
-
-static const struct soc_enum cf_rxmix4_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX4_B4_CTL, 0, 4, rx_cf_text);
-
-static const struct soc_enum cf_rxmix5_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX5_B4_CTL, 0, 4, rx_cf_text);
-static const struct soc_enum cf_rxmix6_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX6_B4_CTL, 0, 4, rx_cf_text);
-
-static const struct soc_enum cf_rxmix7_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX7_B4_CTL, 0, 4, rx_cf_text);
-
-static const struct soc_enum cf_rxmix8_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX8_B4_CTL, 0, 4, rx_cf_text);
-
-static const char * const class_h_dsm_text[] = {
-	"ZERO", "DSM_HPHL_RX1", "DSM_SPKR_RX7"
-};
-
-static const struct soc_enum class_h_dsm_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_CLSH_CTL, 4, 3, class_h_dsm_text);
-
-static const struct snd_kcontrol_new class_h_dsm_mux =
-	SOC_DAPM_ENUM("CLASS_H_DSM MUX Mux", class_h_dsm_enum);
-
-static const char * const rx1_interp_text[] = {
-	"ZERO", "RX1 MIX2"
-};
-
-static const struct soc_enum rx1_interp_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CLK_RX_B1_CTL, 0, 2, rx1_interp_text);
-
-static const struct snd_kcontrol_new rx1_interp_mux =
-	SOC_DAPM_ENUM("RX1 INTERP MUX Mux", rx1_interp_enum);
-
-static const char * const rx2_interp_text[] = {
-	"ZERO", "RX2 MIX2"
-};
-
-static const struct soc_enum rx2_interp_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CLK_RX_B1_CTL, 1, 2, rx2_interp_text);
-
-static const struct snd_kcontrol_new rx2_interp_mux =
-	SOC_DAPM_ENUM("RX2 INTERP MUX Mux", rx2_interp_enum);
-
-static const char *const tomtom_conn_mad_text[] = {
-	"ADC_MB", "ADC1", "ADC2", "ADC3", "ADC4", "ADC5", "ADC6", "NOTUSED1",
-	"DMIC1", "DMIC2", "DMIC3", "DMIC4", "DMIC5", "DMIC6", "NOTUSED2",
-	"NOTUSED3"};
-
-static const struct soc_enum tomtom_conn_mad_enum =
-	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tomtom_conn_mad_text),
-			tomtom_conn_mad_text);
-
-
-static int tomtom_mad_input_get(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	u8 tomtom_mad_input;
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-
-	tomtom_mad_input = snd_soc_read(codec, TOMTOM_A_CDC_MAD_INP_SEL);
-
-	tomtom_mad_input = tomtom_mad_input & 0x0F;
-
-	ucontrol->value.integer.value[0] = tomtom_mad_input;
-
-	pr_debug("%s: tomtom_mad_input = %s\n", __func__,
-			tomtom_conn_mad_text[tomtom_mad_input]);
-
-	return 0;
-}
-
-static int tomtom_mad_input_put(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	u8 tomtom_mad_input;
-	u16 micb_int_reg, micb_4_int_reg;
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct snd_soc_card *card = codec->component.card;
-	char mad_amic_input_widget[6];
-	u32 adc;
-	const char *mad_input_widget;
-	const char *source_widget = NULL;
-	u32  mic_bias_found = 0;
-	u32 i;
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
-	char *mad_input;
-
-	tomtom_mad_input = ucontrol->value.integer.value[0];
-	micb_4_int_reg = tomtom->resmgr.reg_addr->micb_4_int_rbias;
-
-	if (tomtom_mad_input >= ARRAY_SIZE(tomtom_conn_mad_text)) {
-		dev_err(codec->dev,
-			"%s: tomtom_mad_input = %d out of bounds\n",
-			__func__, tomtom_mad_input);
-		return -EINVAL;
-	}
-
-	pr_debug("%s: tomtom_mad_input = %s\n", __func__,
-			tomtom_conn_mad_text[tomtom_mad_input]);
-
-	if (!strcmp(tomtom_conn_mad_text[tomtom_mad_input], "NOTUSED1") ||
-		!strcmp(tomtom_conn_mad_text[tomtom_mad_input], "NOTUSED2") ||
-		!strcmp(tomtom_conn_mad_text[tomtom_mad_input], "NOTUSED3") ||
-		!strcmp(tomtom_conn_mad_text[tomtom_mad_input], "ADC_MB")) {
-		pr_info("%s: tomtom mad input is set to unsupported input = %s\n",
-			__func__, tomtom_conn_mad_text[tomtom_mad_input]);
-		return -EINVAL;
-	}
-
-	if (strnstr(tomtom_conn_mad_text[tomtom_mad_input],
-				"ADC", sizeof("ADC"))) {
-		mad_input = strpbrk(tomtom_conn_mad_text[tomtom_mad_input],
-				"123456");
-		if (!mad_input) {
-			dev_err(codec->dev, "%s: Invalid MAD input %s\n",
-			__func__, tomtom_conn_mad_text[tomtom_mad_input]);
-			return -EINVAL;
-		}
-		ret = kstrtouint(mad_input, 10, &adc);
-		if ((ret < 0) || (adc > 6)) {
-			pr_err("%s: Invalid ADC = %s\n", __func__,
-				tomtom_conn_mad_text[tomtom_mad_input]);
-			ret =  -EINVAL;
-		}
-
-		snprintf(mad_amic_input_widget, 6, "%s%u", "AMIC", adc);
-
-		mad_input_widget = mad_amic_input_widget;
-		pr_debug("%s: tomtom amic input widget = %s\n", __func__,
-			  mad_amic_input_widget);
-	} else {
-		/* DMIC type input widget */
-		mad_input_widget = tomtom_conn_mad_text[tomtom_mad_input];
-	}
-
-	pr_debug("%s: tomtom input widget = %s\n", __func__, mad_input_widget);
-
-	for (i = 0; i < card->num_dapm_routes; i++) {
-
-		if (!strcmp(card->dapm_routes[i].sink, mad_input_widget)) {
-
-			source_widget = card->dapm_routes[i].source;
-			if (!source_widget) {
-				dev_err(codec->dev,
-					"%s: invalid source widget\n",
-					__func__);
-				return -EINVAL;
-			}
-
-			if (strnstr(source_widget,
-				"MIC BIAS1", sizeof("MIC BIAS1"))) {
-				mic_bias_found = 1;
-				micb_int_reg = TOMTOM_A_MICB_1_INT_RBIAS;
-				break;
-			} else if (strnstr(source_widget,
-				"MIC BIAS2", sizeof("MIC BIAS2"))) {
-				mic_bias_found = 2;
-				micb_int_reg = TOMTOM_A_MICB_2_INT_RBIAS;
-				break;
-			} else if (strnstr(source_widget,
-				"MIC BIAS3", sizeof("MIC BIAS3"))) {
-				mic_bias_found = 3;
-				micb_int_reg = TOMTOM_A_MICB_3_INT_RBIAS;
-				break;
-			} else if (strnstr(source_widget,
-				"MIC BIAS4", sizeof("MIC BIAS4"))) {
-				mic_bias_found = 4;
-				micb_int_reg = micb_4_int_reg;
-				break;
-			}
-		}
-	}
-
-	if (mic_bias_found) {
-		pr_debug("%s: source mic bias = %s. sink = %s\n", __func__,
-				card->dapm_routes[i].source,
-				card->dapm_routes[i].sink);
-
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_INP_SEL,
-					0x0F, tomtom_mad_input);
-		snd_soc_update_bits(codec, TOMTOM_A_MAD_ANA_CTRL,
-					0x07, mic_bias_found);
-
-		/* Setup internal micbias */
-
-		if (strnstr(source_widget, "Internal1", strlen(source_widget)))
-			snd_soc_update_bits(codec,
-					micb_int_reg,
-					0xE0, 0xE0);
-		else if (strnstr(source_widget, "Internal2",
-				strlen(source_widget)))
-			snd_soc_update_bits(codec,
-					micb_int_reg,
-					0x1C, 0x1C);
-		else if (strnstr(source_widget, "Internal3",
-				strlen(source_widget)))
-			snd_soc_update_bits(codec,
-					micb_int_reg,
-					0x3, 0x3);
-		else
-			/*
-			 *  If not internal, make sure to write the
-			 *  register to default value
-			 */
-			snd_soc_write(codec, micb_int_reg, 0x24);
-		return 0;
-	}
-
-	pr_err("%s: mic bias source not found for input = %s\n",
-		__func__, mad_input_widget);
-	return -EINVAL;
-}
-
-static int tomtom_tx_hpf_bypass_get(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	u32	tx_index;
-
-	tx_index = (u32)kcontrol->private_value;
-
-	if (tx_index > NUM_DECIMATORS) {
-		pr_err("%s: Invalid TX decimator %d\n", __func__,
-			   tx_index);
-		return -EINVAL;
-	}
-
-	ucontrol->value.integer.value[0] =
-		tx_hpf_work[tx_index-1].tx_hpf_bypass;
-
-	return 0;
-}
-
-static int tomtom_tx_hpf_bypass_put(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	bool tx_hpf_bypass_cfg;
-	u32	tx_index;
-
-	tx_hpf_bypass_cfg = (bool)ucontrol->value.integer.value[0];
-
-	pr_debug("%s: tx_hpf_bypass = %d\n", __func__,
-			tx_hpf_bypass_cfg);
-
-	tx_index = (u32)kcontrol->private_value;
-
-	if (tx_index > NUM_DECIMATORS) {
-		pr_err("%s: Invalid TX decimator %d\n", __func__,
-			   tx_index);
-		return -EINVAL;
-	}
-	if (tx_hpf_work[tx_index-1].tx_hpf_bypass != tx_hpf_bypass_cfg)
-		tx_hpf_work[tx_index-1].tx_hpf_bypass = tx_hpf_bypass_cfg;
-
-	pr_debug("%s: Set TX%d HPF bypass configuration %d",
-			 __func__, tx_index,
-			 tx_hpf_work[tx_index-1].tx_hpf_bypass);
-
-	return 0;
-}
-
-static const struct snd_kcontrol_new tomtom_snd_controls[] = {
-
-	SOC_SINGLE_SX_TLV("RX1 Digital Volume", TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL,
-		0, -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("RX2 Digital Volume", TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL,
-		0, -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("RX3 Digital Volume", TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL,
-		0, -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("RX4 Digital Volume", TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL,
-		0, -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("RX5 Digital Volume", TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL,
-		0, -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("RX6 Digital Volume", TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL,
-		0, -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("RX7 Digital Volume", TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL,
-		0, -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("RX8 Digital Volume", TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL,
-		0, -84, 40, digital_gain),
-
-	SOC_SINGLE_SX_TLV("DEC1 Volume", TOMTOM_A_CDC_TX1_VOL_CTL_GAIN, 0,
-					  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("DEC2 Volume", TOMTOM_A_CDC_TX2_VOL_CTL_GAIN, 0,
-					  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("DEC3 Volume", TOMTOM_A_CDC_TX3_VOL_CTL_GAIN, 0,
-					  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("DEC4 Volume", TOMTOM_A_CDC_TX4_VOL_CTL_GAIN, 0,
-					  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("DEC5 Volume", TOMTOM_A_CDC_TX5_VOL_CTL_GAIN, 0,
-					  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("DEC6 Volume", TOMTOM_A_CDC_TX6_VOL_CTL_GAIN, 0,
-					  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("DEC7 Volume", TOMTOM_A_CDC_TX7_VOL_CTL_GAIN, 0,
-					  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("DEC8 Volume", TOMTOM_A_CDC_TX8_VOL_CTL_GAIN, 0,
-					  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("DEC9 Volume", TOMTOM_A_CDC_TX9_VOL_CTL_GAIN, 0,
-					  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("DEC10 Volume", TOMTOM_A_CDC_TX10_VOL_CTL_GAIN, 0,
-					  -84, 40, digital_gain),
-
-	SOC_SINGLE_SX_TLV("IIR1 INP1 Volume", TOMTOM_A_CDC_IIR1_GAIN_B1_CTL, 0,
-					  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("IIR1 INP2 Volume", TOMTOM_A_CDC_IIR1_GAIN_B2_CTL, 0,
-					  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("IIR1 INP3 Volume", TOMTOM_A_CDC_IIR1_GAIN_B3_CTL, 0,
-					  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("IIR1 INP4 Volume", TOMTOM_A_CDC_IIR1_GAIN_B4_CTL, 0,
-					  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("IIR2 INP1 Volume", TOMTOM_A_CDC_IIR2_GAIN_B1_CTL, 0,
-					  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("IIR2 INP2 Volume", TOMTOM_A_CDC_IIR2_GAIN_B2_CTL, 0,
-					  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("IIR2 INP3 Volume", TOMTOM_A_CDC_IIR2_GAIN_B3_CTL, 0,
-					  -84, 40, digital_gain),
-	SOC_SINGLE_SX_TLV("IIR2 INP4 Volume", TOMTOM_A_CDC_IIR2_GAIN_B4_CTL, 0,
-					  -84, 40, digital_gain),
-
-	SOC_SINGLE_EXT("ANC Slot", SND_SOC_NOPM, 0, 100, 0, tomtom_get_anc_slot,
-		tomtom_put_anc_slot),
-	SOC_ENUM_EXT("ANC Function", tomtom_anc_func_enum, tomtom_get_anc_func,
-		tomtom_put_anc_func),
-
-	SOC_ENUM("TX1 HPF cut off", cf_dec1_enum),
-	SOC_ENUM("TX2 HPF cut off", cf_dec2_enum),
-	SOC_ENUM("TX3 HPF cut off", cf_dec3_enum),
-	SOC_ENUM("TX4 HPF cut off", cf_dec4_enum),
-	SOC_ENUM("TX5 HPF cut off", cf_dec5_enum),
-	SOC_ENUM("TX6 HPF cut off", cf_dec6_enum),
-	SOC_ENUM("TX7 HPF cut off", cf_dec7_enum),
-	SOC_ENUM("TX8 HPF cut off", cf_dec8_enum),
-	SOC_ENUM("TX9 HPF cut off", cf_dec9_enum),
-	SOC_ENUM("TX10 HPF cut off", cf_dec10_enum),
-
-	SOC_SINGLE_BOOL_EXT("TX1 HPF Switch", 1,
-				tomtom_tx_hpf_bypass_get,
-				tomtom_tx_hpf_bypass_put),
-	SOC_SINGLE_BOOL_EXT("TX2 HPF Switch", 2,
-				tomtom_tx_hpf_bypass_get,
-				tomtom_tx_hpf_bypass_put),
-	SOC_SINGLE_BOOL_EXT("TX3 HPF Switch", 3,
-				tomtom_tx_hpf_bypass_get,
-				tomtom_tx_hpf_bypass_put),
-	SOC_SINGLE_BOOL_EXT("TX4 HPF Switch", 4,
-				tomtom_tx_hpf_bypass_get,
-				tomtom_tx_hpf_bypass_put),
-	SOC_SINGLE_BOOL_EXT("TX5 HPF Switch", 5,
-				tomtom_tx_hpf_bypass_get,
-				tomtom_tx_hpf_bypass_put),
-	SOC_SINGLE_BOOL_EXT("TX6 HPF Switch", 6,
-				tomtom_tx_hpf_bypass_get,
-				tomtom_tx_hpf_bypass_put),
-	SOC_SINGLE_BOOL_EXT("TX7 HPF Switch", 7,
-				tomtom_tx_hpf_bypass_get,
-				tomtom_tx_hpf_bypass_put),
-	SOC_SINGLE_BOOL_EXT("TX8 HPF Switch", 8,
-				tomtom_tx_hpf_bypass_get,
-				tomtom_tx_hpf_bypass_put),
-	SOC_SINGLE_BOOL_EXT("TX9 HPF Switch", 9,
-				tomtom_tx_hpf_bypass_get,
-				tomtom_tx_hpf_bypass_put),
-	SOC_SINGLE_BOOL_EXT("TX10 HPF Switch", 10,
-				tomtom_tx_hpf_bypass_get,
-				tomtom_tx_hpf_bypass_put),
-
-	SOC_SINGLE("RX1 HPF Switch", TOMTOM_A_CDC_RX1_B5_CTL, 2, 1, 0),
-	SOC_SINGLE("RX2 HPF Switch", TOMTOM_A_CDC_RX2_B5_CTL, 2, 1, 0),
-	SOC_SINGLE("RX3 HPF Switch", TOMTOM_A_CDC_RX3_B5_CTL, 2, 1, 0),
-	SOC_SINGLE("RX4 HPF Switch", TOMTOM_A_CDC_RX4_B5_CTL, 2, 1, 0),
-	SOC_SINGLE("RX5 HPF Switch", TOMTOM_A_CDC_RX5_B5_CTL, 2, 1, 0),
-	SOC_SINGLE("RX6 HPF Switch", TOMTOM_A_CDC_RX6_B5_CTL, 2, 1, 0),
-	SOC_SINGLE("RX7 HPF Switch", TOMTOM_A_CDC_RX7_B5_CTL, 2, 1, 0),
-	SOC_SINGLE("RX8 HPF Switch", TOMTOM_A_CDC_RX8_B5_CTL, 2, 1, 0),
-
-	SOC_ENUM("RX1 HPF cut off", cf_rxmix1_enum),
-	SOC_ENUM("RX2 HPF cut off", cf_rxmix2_enum),
-	SOC_ENUM("RX3 HPF cut off", cf_rxmix3_enum),
-	SOC_ENUM("RX4 HPF cut off", cf_rxmix4_enum),
-	SOC_ENUM("RX5 HPF cut off", cf_rxmix5_enum),
-	SOC_ENUM("RX6 HPF cut off", cf_rxmix6_enum),
-	SOC_ENUM("RX7 HPF cut off", cf_rxmix7_enum),
-	SOC_ENUM("RX8 HPF cut off", cf_rxmix8_enum),
-
-	SOC_SINGLE_EXT("IIR1 Enable Band1", IIR1, BAND1, 1, 0,
-	tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR1 Enable Band2", IIR1, BAND2, 1, 0,
-	tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR1 Enable Band3", IIR1, BAND3, 1, 0,
-	tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR1 Enable Band4", IIR1, BAND4, 1, 0,
-	tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR1 Enable Band5", IIR1, BAND5, 1, 0,
-	tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR2 Enable Band1", IIR2, BAND1, 1, 0,
-	tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR2 Enable Band2", IIR2, BAND2, 1, 0,
-	tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR2 Enable Band3", IIR2, BAND3, 1, 0,
-	tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR2 Enable Band4", IIR2, BAND4, 1, 0,
-	tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
-	SOC_SINGLE_EXT("IIR2 Enable Band5", IIR2, BAND5, 1, 0,
-	tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
-
-	SOC_SINGLE_MULTI_EXT("IIR1 Band1", IIR1, BAND1, 255, 0, 5,
-	tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR1 Band2", IIR1, BAND2, 255, 0, 5,
-	tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR1 Band3", IIR1, BAND3, 255, 0, 5,
-	tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR1 Band4", IIR1, BAND4, 255, 0, 5,
-	tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR1 Band5", IIR1, BAND5, 255, 0, 5,
-	tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR2 Band1", IIR2, BAND1, 255, 0, 5,
-	tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR2 Band2", IIR2, BAND2, 255, 0, 5,
-	tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR2 Band3", IIR2, BAND3, 255, 0, 5,
-	tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR2 Band4", IIR2, BAND4, 255, 0, 5,
-	tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
-	SOC_SINGLE_MULTI_EXT("IIR2 Band5", IIR2, BAND5, 255, 0, 5,
-	tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
-
-	SOC_SINGLE_EXT("COMP0 Switch", SND_SOC_NOPM, COMPANDER_0, 1, 0,
-		       tomtom_get_compander, tomtom_set_compander),
-	SOC_SINGLE_EXT("COMP1 Switch", SND_SOC_NOPM, COMPANDER_1, 1, 0,
-		       tomtom_get_compander, tomtom_set_compander),
-	SOC_SINGLE_EXT("COMP2 Switch", SND_SOC_NOPM, COMPANDER_2, 1, 0,
-		       tomtom_get_compander, tomtom_set_compander),
-
-	SOC_ENUM_EXT("MAD Input", tomtom_conn_mad_enum,
-			tomtom_mad_input_get, tomtom_mad_input_put),
-
-};
-
-static int tomtom_pa_gain_get(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	u8 ear_pa_gain;
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-
-	ear_pa_gain = snd_soc_read(codec, TOMTOM_A_RX_EAR_GAIN);
-
-	ear_pa_gain = ear_pa_gain >> 5;
-
-	ucontrol->value.integer.value[0] = ear_pa_gain;
-
-	pr_debug("%s: ear_pa_gain = 0x%x\n", __func__, ear_pa_gain);
-
-	return 0;
-}
-
-static int tomtom_pa_gain_put(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	u8 ear_pa_gain;
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-
-	pr_debug("%s: ucontrol->value.integer.value[0]  = %ld\n", __func__,
-			ucontrol->value.integer.value[0]);
-
-	ear_pa_gain =  ucontrol->value.integer.value[0] << 5;
-
-	snd_soc_update_bits(codec, TOMTOM_A_RX_EAR_GAIN, 0xE0, ear_pa_gain);
-	return 0;
-}
-
-static const char * const tomtom_1_x_ear_pa_gain_text[] = {
-	"POS_6_DB", "POS_4P5_DB", "POS_3_DB", "POS_1P5_DB",
-	"POS_0_DB", "NEG_2P5_DB", "UNDEFINED", "NEG_12_DB"
-};
-
-static const struct soc_enum tomtom_1_x_ear_pa_gain_enum =
-	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tomtom_1_x_ear_pa_gain_text),
-			tomtom_1_x_ear_pa_gain_text);
-
-static const struct snd_kcontrol_new tomtom_1_x_analog_gain_controls[] = {
-
-	SOC_ENUM_EXT("EAR PA Gain", tomtom_1_x_ear_pa_gain_enum,
-		tomtom_pa_gain_get, tomtom_pa_gain_put),
-
-	SOC_SINGLE_TLV("HPHL Volume", TOMTOM_A_RX_HPH_L_GAIN, 0, 20, 1,
-		line_gain),
-	SOC_SINGLE_TLV("HPHR Volume", TOMTOM_A_RX_HPH_R_GAIN, 0, 20, 1,
-		line_gain),
-
-	SOC_SINGLE_TLV("LINEOUT1 Volume", TOMTOM_A_RX_LINE_1_GAIN, 0, 20, 1,
-		line_gain),
-	SOC_SINGLE_TLV("LINEOUT2 Volume", TOMTOM_A_RX_LINE_2_GAIN, 0, 20, 1,
-		line_gain),
-	SOC_SINGLE_TLV("LINEOUT3 Volume", TOMTOM_A_RX_LINE_3_GAIN, 0, 20, 1,
-		line_gain),
-	SOC_SINGLE_TLV("LINEOUT4 Volume", TOMTOM_A_RX_LINE_4_GAIN, 0, 20, 1,
-		line_gain),
-
-	SOC_SINGLE_TLV("SPK DRV Volume", TOMTOM_A_SPKR_DRV1_GAIN, 3, 8, 1,
-		line_gain),
-	SOC_SINGLE_TLV("SPK DRV2 Volume", TOMTOM_A_SPKR_DRV2_GAIN, 3, 8, 1,
-		line_gain),
-
-	SOC_SINGLE_TLV("ADC1 Volume", TOMTOM_A_TX_1_GAIN, 2, 19, 0,
-			analog_gain),
-	SOC_SINGLE_TLV("ADC2 Volume", TOMTOM_A_TX_2_GAIN, 2, 19, 0,
-			analog_gain),
-	SOC_SINGLE_TLV("ADC3 Volume", TOMTOM_A_TX_3_GAIN, 2, 19, 0,
-			analog_gain),
-	SOC_SINGLE_TLV("ADC4 Volume", TOMTOM_A_TX_4_GAIN, 2, 19, 0,
-			analog_gain),
-	SOC_SINGLE_TLV("ADC5 Volume", TOMTOM_A_TX_5_GAIN, 2, 19, 0,
-			analog_gain),
-	SOC_SINGLE_TLV("ADC6 Volume", TOMTOM_A_TX_6_GAIN, 2, 19, 0,
-			analog_gain),
-};
-
-static int tomtom_hph_impedance_get(struct snd_kcontrol *kcontrol,
-				   struct snd_ctl_elem_value *ucontrol)
-{
-	uint32_t zl, zr;
-	bool hphr;
-	struct soc_multi_mixer_control *mc;
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-
-	mc = (struct soc_multi_mixer_control *)(kcontrol->private_value);
-
-	hphr = mc->shift;
-	wcd9xxx_mbhc_get_impedance(&priv->mbhc, &zl, &zr);
-	pr_debug("%s: zl %u, zr %u\n", __func__, zl, zr);
-	ucontrol->value.integer.value[0] = hphr ? zr : zl;
-
-	return 0;
-}
-
-static const struct snd_kcontrol_new impedance_detect_controls[] = {
-	SOC_SINGLE_EXT("HPHL Impedance", 0, 0, UINT_MAX, 0,
-		       tomtom_hph_impedance_get, NULL),
-	SOC_SINGLE_EXT("HPHR Impedance", 0, 1, UINT_MAX, 0,
-		       tomtom_hph_impedance_get, NULL),
-};
-
-static int tomtom_get_hph_type(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-	struct wcd9xxx_mbhc *mbhc;
-
-	if (!priv) {
-		pr_debug("%s: wcd9330 private data is NULL\n", __func__);
-		return 0;
-	}
-
-	mbhc = &priv->mbhc;
-	if (!mbhc) {
-		pr_debug("%s: mbhc not initialized\n", __func__);
-		return 0;
-	}
-
-	ucontrol->value.integer.value[0] = (u32) mbhc->hph_type;
-	pr_debug("%s: hph_type = %u\n", __func__, mbhc->hph_type);
-
-	return 0;
-}
-
-static const struct snd_kcontrol_new hph_type_detect_controls[] = {
-	SOC_SINGLE_EXT("HPH Type", 0, 0, UINT_MAX, 0,
-		       tomtom_get_hph_type, NULL),
-};
-
-static const char * const rx_mix1_text[] = {
-	"ZERO", "SRC1", "SRC2", "IIR1", "IIR2", "RX1", "RX2", "RX3", "RX4",
-		"RX5", "RX6", "RX7"
-};
-
-static const char * const rx8_mix1_text[] = {
-	"ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3", "RX4",
-		"RX5", "RX6", "RX7", "RX8"
-};
-
-static const char * const rx_mix2_text[] = {
-	"ZERO", "SRC1", "SRC2", "IIR1", "IIR2"
-};
-
-static const char * const rx_rdac5_text[] = {
-	"DEM4", "DEM3_INV"
-};
-
-static const char * const rx_rdac7_text[] = {
-	"DEM6", "DEM5_INV"
-};
-
-static const char * const mad_sel_text[] = {
-	"SPE", "MSM"
-};
-
-static const char * const sb_tx1_mux_text[] = {
-	"ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7",
-		"DEC1", "RMIX8"
-};
-
-static const char * const sb_tx2_mux_text[] = {
-	"ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7",
-		"DEC2", "RMIX8"
-};
-
-static const char * const sb_tx3_mux_text[] = {
-	"ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7",
-		"DEC3", "RMIX8"
-};
-
-static const char * const sb_tx4_mux_text[] = {
-	"ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7",
-		"DEC4", "RMIX8"
-};
-
-static const char * const sb_tx5_mux_text[] = {
-	"ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7",
-		"DEC5", "RMIX8"
-};
-
-static const char * const sb_tx6_mux_text[] = {
-	"ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7",
-		"DEC6", "RMIX8"
-};
-
-static const char * const sb_tx7_to_tx10_mux_text[] = {
-	"ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7",
-		"DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
-		"DEC9", "DEC10"
-};
-
-static const char * const dec1_mux_text[] = {
-	"ZERO", "DMIC1", "ADC6",
-};
-
-static const char * const dec2_mux_text[] = {
-	"ZERO", "DMIC2", "ADC5",
-};
-
-static const char * const dec3_mux_text[] = {
-	"ZERO", "DMIC3", "ADC4",
-};
-
-static const char * const dec4_mux_text[] = {
-	"ZERO", "DMIC4", "ADC3",
-};
-
-static const char * const dec5_mux_text[] = {
-	"ZERO", "DMIC5", "ADC2",
-};
-
-static const char * const dec6_mux_text[] = {
-	"ZERO", "DMIC6", "ADC1",
-};
-
-static const char * const dec7_mux_text[] = {
-	"ZERO", "DMIC1", "DMIC6", "ADC1", "ADC6", "ANC1_FB", "ANC2_FB",
-};
-
-static const char * const dec8_mux_text[] = {
-	"ZERO", "DMIC2", "DMIC5", "ADC2", "ADC5", "ANC1_FB", "ANC2_FB",
-};
-
-static const char * const dec9_mux_text[] = {
-	"ZERO", "DMIC4", "DMIC5", "ADC2", "ADC3", "ADCMB", "ANC1_FB", "ANC2_FB",
-};
-
-static const char * const dec10_mux_text[] = {
-	"ZERO", "DMIC3", "DMIC6", "ADC1", "ADC4", "ADCMB", "ANC1_FB", "ANC2_FB",
-};
-
-static const char * const anc_mux_text[] = {
-	"ZERO", "ADC1", "ADC2", "ADC3", "ADC4", "ADC5", "ADC6", "ADC_MB",
-		"RSVD_1", "DMIC1", "DMIC2", "DMIC3", "DMIC4", "DMIC5", "DMIC6"
-};
-
-static const char * const anc1_fb_mux_text[] = {
-	"ZERO", "EAR_HPH_L", "EAR_LINE_1",
-};
-
-static const char * const iir_inp1_text[] = {
-	"ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
-	"DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
-};
-
-static const char * const iir_inp2_text[] = {
-	"ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
-	"DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
-};
-
-static const char * const iir_inp3_text[] = {
-	"ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
-	"DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
-};
-
-static const char * const iir_inp4_text[] = {
-	"ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
-	"DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
-};
-
-static const struct soc_enum rx_mix1_inp1_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX1_B1_CTL, 0, 12, rx_mix1_text);
-
-static const struct soc_enum rx_mix1_inp2_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX1_B1_CTL, 4, 12, rx_mix1_text);
-
-static const struct soc_enum rx_mix1_inp3_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX1_B2_CTL, 0, 12, rx_mix1_text);
-
-static const struct soc_enum rx2_mix1_inp1_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX2_B1_CTL, 0, 12, rx_mix1_text);
-
-static const struct soc_enum rx2_mix1_inp2_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX2_B1_CTL, 4, 12, rx_mix1_text);
-
-static const struct soc_enum rx3_mix1_inp1_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX3_B1_CTL, 0, 12, rx_mix1_text);
-
-static const struct soc_enum rx3_mix1_inp2_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX3_B1_CTL, 4, 12, rx_mix1_text);
-
-static const struct soc_enum rx4_mix1_inp1_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX4_B1_CTL, 0, 12, rx_mix1_text);
-
-static const struct soc_enum rx4_mix1_inp2_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX4_B1_CTL, 4, 12, rx_mix1_text);
-
-static const struct soc_enum rx5_mix1_inp1_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX5_B1_CTL, 0, 12, rx_mix1_text);
-
-static const struct soc_enum rx5_mix1_inp2_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX5_B1_CTL, 4, 12, rx_mix1_text);
-
-static const struct soc_enum rx6_mix1_inp1_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX6_B1_CTL, 0, 12, rx_mix1_text);
-
-static const struct soc_enum rx6_mix1_inp2_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX6_B1_CTL, 4, 12, rx_mix1_text);
-
-static const struct soc_enum rx7_mix1_inp1_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX7_B1_CTL, 0, 12, rx_mix1_text);
-
-static const struct soc_enum rx7_mix1_inp2_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX7_B1_CTL, 4, 12, rx_mix1_text);
-
-static const struct soc_enum rx8_mix1_inp1_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX8_B1_CTL, 0, 11, rx8_mix1_text);
-
-static const struct soc_enum rx8_mix1_inp2_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX8_B1_CTL, 4, 11, rx8_mix1_text);
-
-static const struct soc_enum rx1_mix2_inp1_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX1_B3_CTL, 0, 5, rx_mix2_text);
-
-static const struct soc_enum rx1_mix2_inp2_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX1_B3_CTL, 3, 5, rx_mix2_text);
-
-static const struct soc_enum rx2_mix2_inp1_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX2_B3_CTL, 0, 5, rx_mix2_text);
-
-static const struct soc_enum rx2_mix2_inp2_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX2_B3_CTL, 3, 5, rx_mix2_text);
-
-static const struct soc_enum rx7_mix2_inp1_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX7_B3_CTL, 0, 5, rx_mix2_text);
-
-static const struct soc_enum rx7_mix2_inp2_chain_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX7_B3_CTL, 3, 5, rx_mix2_text);
-
-static const struct soc_enum rx_rdac5_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_MISC, 2, 2, rx_rdac5_text);
-
-static const struct soc_enum rx_rdac7_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_MISC, 1, 2, rx_rdac7_text);
-
-static const struct soc_enum mad_sel_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_SVASS_CFG, 0, 2, mad_sel_text);
-
-static const struct soc_enum sb_tx1_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B1_CTL, 0, 10, sb_tx1_mux_text);
-
-static const struct soc_enum sb_tx2_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B2_CTL, 0, 10, sb_tx2_mux_text);
-
-static const struct soc_enum sb_tx3_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B3_CTL, 0, 10, sb_tx3_mux_text);
-
-static const struct soc_enum sb_tx4_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B4_CTL, 0, 10, sb_tx4_mux_text);
-
-static const struct soc_enum sb_tx5_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B5_CTL, 0, 10, sb_tx5_mux_text);
-
-static const struct soc_enum sb_tx6_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B6_CTL, 0, 10, sb_tx6_mux_text);
-
-static const struct soc_enum sb_tx7_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B7_CTL, 0, 18,
-			sb_tx7_to_tx10_mux_text);
-
-static const struct soc_enum sb_tx8_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B8_CTL, 0, 18,
-			sb_tx7_to_tx10_mux_text);
-
-static const struct soc_enum sb_tx9_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B9_CTL, 0, 18,
-			sb_tx7_to_tx10_mux_text);
-
-static const struct soc_enum sb_tx10_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B10_CTL, 0, 18,
-			sb_tx7_to_tx10_mux_text);
-
-static const struct soc_enum dec1_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B1_CTL, 0, 3, dec1_mux_text);
-
-static const struct soc_enum dec2_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B1_CTL, 2, 3, dec2_mux_text);
-
-static const struct soc_enum dec3_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B1_CTL, 4, 3, dec3_mux_text);
-
-static const struct soc_enum dec4_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B1_CTL, 6, 3, dec4_mux_text);
-
-static const struct soc_enum dec5_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B2_CTL, 0, 3, dec5_mux_text);
-
-static const struct soc_enum dec6_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B2_CTL, 2, 3, dec6_mux_text);
-
-static const struct soc_enum dec7_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B2_CTL, 4, 7, dec7_mux_text);
-
-static const struct soc_enum dec8_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B3_CTL, 0, 7, dec8_mux_text);
-
-static const struct soc_enum dec9_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B3_CTL, 3, 8, dec9_mux_text);
-
-static const struct soc_enum dec10_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B4_CTL, 0, 8, dec10_mux_text);
-
-static const struct soc_enum anc1_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_ANC_B1_CTL, 0, 15, anc_mux_text);
-
-static const struct soc_enum anc2_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_ANC_B1_CTL, 4, 15, anc_mux_text);
-
-static const struct soc_enum anc1_fb_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_ANC_B2_CTL, 0, 3, anc1_fb_mux_text);
-
-static const struct soc_enum iir1_inp1_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ1_B1_CTL, 0, 18, iir_inp1_text);
-
-static const struct soc_enum iir2_inp1_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ2_B1_CTL, 0, 18, iir_inp1_text);
-
-static const struct soc_enum iir1_inp2_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ1_B2_CTL, 0, 18, iir_inp2_text);
-
-static const struct soc_enum iir2_inp2_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ2_B2_CTL, 0, 18, iir_inp2_text);
-
-static const struct soc_enum iir1_inp3_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ1_B3_CTL, 0, 18, iir_inp3_text);
-
-static const struct soc_enum iir2_inp3_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ2_B3_CTL, 0, 18, iir_inp3_text);
-
-static const struct soc_enum iir1_inp4_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ1_B4_CTL, 0, 18, iir_inp4_text);
-
-static const struct soc_enum iir2_inp4_mux_enum =
-	SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ2_B4_CTL, 0, 18, iir_inp4_text);
-
-static const struct snd_kcontrol_new rx_mix1_inp1_mux =
-	SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx_mix1_inp2_mux =
-	SOC_DAPM_ENUM("RX1 MIX1 INP2 Mux", rx_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx_mix1_inp3_mux =
-	SOC_DAPM_ENUM("RX1 MIX1 INP3 Mux", rx_mix1_inp3_chain_enum);
-
-static const struct snd_kcontrol_new rx2_mix1_inp1_mux =
-	SOC_DAPM_ENUM("RX2 MIX1 INP1 Mux", rx2_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx2_mix1_inp2_mux =
-	SOC_DAPM_ENUM("RX2 MIX1 INP2 Mux", rx2_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx3_mix1_inp1_mux =
-	SOC_DAPM_ENUM("RX3 MIX1 INP1 Mux", rx3_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx3_mix1_inp2_mux =
-	SOC_DAPM_ENUM("RX3 MIX1 INP2 Mux", rx3_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx4_mix1_inp1_mux =
-	SOC_DAPM_ENUM("RX4 MIX1 INP1 Mux", rx4_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx4_mix1_inp2_mux =
-	SOC_DAPM_ENUM("RX4 MIX1 INP2 Mux", rx4_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx5_mix1_inp1_mux =
-	SOC_DAPM_ENUM("RX5 MIX1 INP1 Mux", rx5_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx5_mix1_inp2_mux =
-	SOC_DAPM_ENUM("RX5 MIX1 INP2 Mux", rx5_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx6_mix1_inp1_mux =
-	SOC_DAPM_ENUM("RX6 MIX1 INP1 Mux", rx6_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx6_mix1_inp2_mux =
-	SOC_DAPM_ENUM("RX6 MIX1 INP2 Mux", rx6_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx7_mix1_inp1_mux =
-	SOC_DAPM_ENUM("RX7 MIX1 INP1 Mux", rx7_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx7_mix1_inp2_mux =
-	SOC_DAPM_ENUM("RX7 MIX1 INP2 Mux", rx7_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx8_mix1_inp1_mux =
-	SOC_DAPM_ENUM("RX8 MIX1 INP1 Mux", rx8_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx8_mix1_inp2_mux =
-	SOC_DAPM_ENUM("RX8 MIX1 INP2 Mux", rx8_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx1_mix2_inp1_mux =
-	SOC_DAPM_ENUM("RX1 MIX2 INP1 Mux", rx1_mix2_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx1_mix2_inp2_mux =
-	SOC_DAPM_ENUM("RX1 MIX2 INP2 Mux", rx1_mix2_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx2_mix2_inp1_mux =
-	SOC_DAPM_ENUM("RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx2_mix2_inp2_mux =
-	SOC_DAPM_ENUM("RX2 MIX2 INP2 Mux", rx2_mix2_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx7_mix2_inp1_mux =
-	SOC_DAPM_ENUM("RX7 MIX2 INP1 Mux", rx7_mix2_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx7_mix2_inp2_mux =
-	SOC_DAPM_ENUM("RX7 MIX2 INP2 Mux", rx7_mix2_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx_dac5_mux =
-	SOC_DAPM_ENUM("RDAC5 MUX Mux", rx_rdac5_enum);
-
-static const struct snd_kcontrol_new rx_dac7_mux =
-	SOC_DAPM_ENUM("RDAC7 MUX Mux", rx_rdac7_enum);
-
-static const struct snd_kcontrol_new mad_sel_mux =
-	SOC_DAPM_ENUM("MAD_SEL MUX Mux", mad_sel_enum);
-
-static const struct snd_kcontrol_new sb_tx1_mux =
-	SOC_DAPM_ENUM("SLIM TX1 MUX Mux", sb_tx1_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx2_mux =
-	SOC_DAPM_ENUM("SLIM TX2 MUX Mux", sb_tx2_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx3_mux =
-	SOC_DAPM_ENUM("SLIM TX3 MUX Mux", sb_tx3_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx4_mux =
-	SOC_DAPM_ENUM("SLIM TX4 MUX Mux", sb_tx4_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx5_mux =
-	SOC_DAPM_ENUM("SLIM TX5 MUX Mux", sb_tx5_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx6_mux =
-	SOC_DAPM_ENUM("SLIM TX6 MUX Mux", sb_tx6_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx7_mux =
-	SOC_DAPM_ENUM("SLIM TX7 MUX Mux", sb_tx7_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx8_mux =
-	SOC_DAPM_ENUM("SLIM TX8 MUX Mux", sb_tx8_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx9_mux =
-	SOC_DAPM_ENUM("SLIM TX9 MUX Mux", sb_tx9_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx10_mux =
-	SOC_DAPM_ENUM("SLIM TX10 MUX Mux", sb_tx10_mux_enum);
-
-
-static int wcd9330_put_dec_enum(struct snd_kcontrol *kcontrol,
-			      struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_dapm_widget_list *wlist =
-					dapm_kcontrol_get_wlist(kcontrol);
-	struct snd_soc_dapm_widget *w = wlist->widgets[0];
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
-	unsigned int dec_mux, decimator;
-	char *dec_name = NULL;
-	char *widget_name = NULL;
-	char *temp;
-	u16 tx_mux_ctl_reg;
-	u8 adc_dmic_sel = 0x0;
-	int ret = 0;
-	char *dec;
-
-	if (ucontrol->value.enumerated.item[0] >= e->items)
-		return -EINVAL;
-
-	dec_mux = ucontrol->value.enumerated.item[0];
-
-	widget_name = kstrndup(w->name, 15, GFP_KERNEL);
-	if (!widget_name)
-		return -ENOMEM;
-	temp = widget_name;
-
-	dec_name = strsep(&widget_name, " ");
-	widget_name = temp;
-	if (!dec_name) {
-		pr_err("%s: Invalid decimator = %s\n", __func__, w->name);
-		ret =  -EINVAL;
-		goto out;
-	}
-	dec = strpbrk(dec_name, "123456789");
-	if (!dec) {
-		dev_err(w->dapm->dev, "%s: decimator index not found\n",
-			__func__);
-		ret =  -EINVAL;
-		goto out;
-	}
-	ret = kstrtouint(dec, 10, &decimator);
-	if (ret < 0) {
-		pr_err("%s: Invalid decimator = %s\n", __func__, dec_name);
-		ret =  -EINVAL;
-		goto out;
-	}
-
-	dev_dbg(w->dapm->dev, "%s(): widget = %s decimator = %u dec_mux = %u\n"
-		, __func__, w->name, decimator, dec_mux);
-
-
-	switch (decimator) {
-	case 1:
-	case 2:
-	case 3:
-	case 4:
-	case 5:
-	case 6:
-		if (dec_mux == 1)
-			adc_dmic_sel = 0x1;
-		else
-			adc_dmic_sel = 0x0;
-		break;
-	case 7:
-	case 8:
-	case 9:
-	case 10:
-		if ((dec_mux == 1) || (dec_mux == 2))
-			adc_dmic_sel = 0x1;
-		else
-			adc_dmic_sel = 0x0;
-		break;
-	default:
-		pr_err("%s: Invalid Decimator = %u\n", __func__, decimator);
-		ret = -EINVAL;
-		goto out;
-	}
-
-	tx_mux_ctl_reg = TOMTOM_A_CDC_TX1_MUX_CTL + 8 * (decimator - 1);
-
-	snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x1, adc_dmic_sel);
-
-	ret = snd_soc_dapm_put_enum_double(kcontrol, ucontrol);
-
-out:
-	kfree(widget_name);
-	return ret;
-}
-
-#define WCD9330_DEC_ENUM(xname, xenum) \
-{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
-	.info = snd_soc_info_enum_double, \
-	.get = snd_soc_dapm_get_enum_double, \
-	.put = wcd9330_put_dec_enum, \
-	.private_value = (unsigned long)&xenum }
-
-static const struct snd_kcontrol_new dec1_mux =
-	WCD9330_DEC_ENUM("DEC1 MUX Mux", dec1_mux_enum);
-
-static const struct snd_kcontrol_new dec2_mux =
-	WCD9330_DEC_ENUM("DEC2 MUX Mux", dec2_mux_enum);
-
-static const struct snd_kcontrol_new dec3_mux =
-	WCD9330_DEC_ENUM("DEC3 MUX Mux", dec3_mux_enum);
-
-static const struct snd_kcontrol_new dec4_mux =
-	WCD9330_DEC_ENUM("DEC4 MUX Mux", dec4_mux_enum);
-
-static const struct snd_kcontrol_new dec5_mux =
-	WCD9330_DEC_ENUM("DEC5 MUX Mux", dec5_mux_enum);
-
-static const struct snd_kcontrol_new dec6_mux =
-	WCD9330_DEC_ENUM("DEC6 MUX Mux", dec6_mux_enum);
-
-static const struct snd_kcontrol_new dec7_mux =
-	WCD9330_DEC_ENUM("DEC7 MUX Mux", dec7_mux_enum);
-
-static const struct snd_kcontrol_new dec8_mux =
-	WCD9330_DEC_ENUM("DEC8 MUX Mux", dec8_mux_enum);
-
-static const struct snd_kcontrol_new dec9_mux =
-	WCD9330_DEC_ENUM("DEC9 MUX Mux", dec9_mux_enum);
-
-static const struct snd_kcontrol_new dec10_mux =
-	WCD9330_DEC_ENUM("DEC10 MUX Mux", dec10_mux_enum);
-
-static const struct snd_kcontrol_new iir1_inp1_mux =
-	SOC_DAPM_ENUM("IIR1 INP1 Mux", iir1_inp1_mux_enum);
-
-static const struct snd_kcontrol_new iir2_inp1_mux =
-	SOC_DAPM_ENUM("IIR2 INP1 Mux", iir2_inp1_mux_enum);
-
-static const struct snd_kcontrol_new iir1_inp2_mux =
-	SOC_DAPM_ENUM("IIR1 INP2 Mux", iir1_inp2_mux_enum);
-
-static const struct snd_kcontrol_new iir2_inp2_mux =
-	SOC_DAPM_ENUM("IIR2 INP2 Mux", iir2_inp2_mux_enum);
-
-static const struct snd_kcontrol_new iir1_inp3_mux =
-	SOC_DAPM_ENUM("IIR1 INP3 Mux", iir1_inp3_mux_enum);
-
-static const struct snd_kcontrol_new iir2_inp3_mux =
-	SOC_DAPM_ENUM("IIR2 INP3 Mux", iir2_inp3_mux_enum);
-
-static const struct snd_kcontrol_new iir1_inp4_mux =
-	SOC_DAPM_ENUM("IIR1 INP4 Mux", iir1_inp4_mux_enum);
-
-static const struct snd_kcontrol_new iir2_inp4_mux =
-	SOC_DAPM_ENUM("IIR2 INP4 Mux", iir2_inp4_mux_enum);
-
-static const struct snd_kcontrol_new anc1_mux =
-	SOC_DAPM_ENUM("ANC1 MUX Mux", anc1_mux_enum);
-
-static const struct snd_kcontrol_new anc2_mux =
-	SOC_DAPM_ENUM("ANC2 MUX Mux", anc2_mux_enum);
-
-static const struct snd_kcontrol_new anc1_fb_mux =
-	SOC_DAPM_ENUM("ANC1 FB MUX Mux", anc1_fb_mux_enum);
-
-static const struct snd_kcontrol_new dac1_switch[] = {
-	SOC_DAPM_SINGLE("Switch", TOMTOM_A_RX_EAR_EN, 5, 1, 0)
-};
-static const struct snd_kcontrol_new hphl_switch[] = {
-	SOC_DAPM_SINGLE("Switch", TOMTOM_A_RX_HPH_L_DAC_CTL, 6, 1, 0)
-};
-
-static const struct snd_kcontrol_new hphl_pa_mix[] = {
-	SOC_DAPM_SINGLE("AUX_PGA_L Switch", TOMTOM_A_RX_PA_AUX_IN_CONN,
-					7, 1, 0),
-};
-
-static const struct snd_kcontrol_new hphr_pa_mix[] = {
-	SOC_DAPM_SINGLE("AUX_PGA_R Switch", TOMTOM_A_RX_PA_AUX_IN_CONN,
-					6, 1, 0),
-};
-
-static const struct snd_kcontrol_new ear_pa_mix[] = {
-	SOC_DAPM_SINGLE("AUX_PGA_L Switch", TOMTOM_A_RX_PA_AUX_IN_CONN,
-					5, 1, 0),
-};
-static const struct snd_kcontrol_new lineout1_pa_mix[] = {
-	SOC_DAPM_SINGLE("AUX_PGA_L Switch", TOMTOM_A_RX_PA_AUX_IN_CONN,
-					4, 1, 0),
-};
-
-static const struct snd_kcontrol_new lineout2_pa_mix[] = {
-	SOC_DAPM_SINGLE("AUX_PGA_R Switch", TOMTOM_A_RX_PA_AUX_IN_CONN,
-					3, 1, 0),
-};
-
-static const struct snd_kcontrol_new lineout3_pa_mix[] = {
-	SOC_DAPM_SINGLE("AUX_PGA_L Switch", TOMTOM_A_RX_PA_AUX_IN_CONN,
-					2, 1, 0),
-};
-
-static const struct snd_kcontrol_new lineout4_pa_mix[] = {
-	SOC_DAPM_SINGLE("AUX_PGA_R Switch", TOMTOM_A_RX_PA_AUX_IN_CONN,
-					1, 1, 0),
-};
-
-static const struct snd_kcontrol_new lineout3_ground_switch =
-	SOC_DAPM_SINGLE("Switch", TOMTOM_A_RX_LINE_3_DAC_CTL, 6, 1, 0);
-
-static const struct snd_kcontrol_new lineout4_ground_switch =
-	SOC_DAPM_SINGLE("Switch", TOMTOM_A_RX_LINE_4_DAC_CTL, 6, 1, 0);
-
-static const struct snd_kcontrol_new aif4_mad_switch =
-	SOC_DAPM_SINGLE("Switch", TOMTOM_A_SVASS_CLKRST_CTL, 0, 1, 0);
-
-static const struct snd_kcontrol_new aif4_vi_switch =
-	SOC_DAPM_SINGLE("Switch", TOMTOM_A_SPKR1_PROT_EN, 3, 1, 0);
-
-/* virtual port entries */
-static int slim_tx_mixer_get(struct snd_kcontrol *kcontrol,
-			     struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_dapm_widget_list *wlist =
-					dapm_kcontrol_get_wlist(kcontrol);
-	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
-	struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-
-	ucontrol->value.integer.value[0] = tomtom_p->tx_port_value;
-	return 0;
-}
-
-static int slim_tx_mixer_put(struct snd_kcontrol *kcontrol,
-			     struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_dapm_widget_list *wlist =
-					dapm_kcontrol_get_wlist(kcontrol);
-	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
-	struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-	struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
-	struct snd_soc_dapm_update *update = NULL;
-	struct soc_multi_mixer_control *mixer =
-		((struct soc_multi_mixer_control *)kcontrol->private_value);
-	u32 dai_id = widget->shift;
-	u32 port_id = mixer->shift;
-	u32 enable = ucontrol->value.integer.value[0];
-	u32 vtable = vport_check_table[dai_id];
-
-
-	pr_debug("%s: wname %s cname %s value %u shift %d item %ld\n", __func__,
-		widget->name, ucontrol->id.name, tomtom_p->tx_port_value,
-		widget->shift, ucontrol->value.integer.value[0]);
-
-	mutex_lock(&tomtom_p->codec_mutex);
-
-	if (tomtom_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
-		if (dai_id != AIF1_CAP) {
-			dev_err(codec->dev, "%s: invalid AIF for I2C mode\n",
-				__func__);
-			mutex_unlock(&tomtom_p->codec_mutex);
-			return -EINVAL;
-		}
-	}
-	switch (dai_id) {
-	case AIF1_CAP:
-	case AIF2_CAP:
-	case AIF3_CAP:
-		/* only add to the list if value not set */
-		if (enable && !(tomtom_p->tx_port_value & 1 << port_id)) {
-
-			if (tomtom_p->intf_type ==
-				WCD9XXX_INTERFACE_TYPE_SLIMBUS)
-				vtable = vport_check_table[dai_id];
-			if (tomtom_p->intf_type ==
-				WCD9XXX_INTERFACE_TYPE_I2C)
-				vtable = vport_i2s_check_table[dai_id];
-
-			if (wcd9xxx_tx_vport_validation(
-					vtable,
-					port_id,
-					tomtom_p->dai, NUM_CODEC_DAIS)) {
-				dev_dbg(codec->dev, "%s: TX%u is used by other virtual port\n",
-					__func__, port_id + 1);
-				mutex_unlock(&tomtom_p->codec_mutex);
-				return 0;
-			}
-			tomtom_p->tx_port_value |= 1 << port_id;
-			list_add_tail(&core->tx_chs[port_id].list,
-			      &tomtom_p->dai[dai_id].wcd9xxx_ch_list
-					      );
-		} else if (!enable && (tomtom_p->tx_port_value &
-					1 << port_id)) {
-			tomtom_p->tx_port_value &= ~(1 << port_id);
-			list_del_init(&core->tx_chs[port_id].list);
-		} else {
-			if (enable)
-				dev_dbg(codec->dev,
-					"%s: TX%u port is used by this virtual port\n",
-					__func__, port_id + 1);
-			else
-				dev_dbg(codec->dev,
-					"%s: TX%u port is not used by this virtual port\n",
-					__func__, port_id + 1);
-			/* avoid update power function */
-			mutex_unlock(&tomtom_p->codec_mutex);
-			return 0;
-		}
-		break;
-	default:
-		pr_err("Unknown AIF %d\n", dai_id);
-		mutex_unlock(&tomtom_p->codec_mutex);
-		return -EINVAL;
-	}
-	pr_debug("%s: name %s sname %s updated value %u shift %d\n", __func__,
-		widget->name, widget->sname, tomtom_p->tx_port_value,
-		widget->shift);
-
-	mutex_unlock(&tomtom_p->codec_mutex);
-	snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, enable, update);
-
-	return 0;
-}
-
-static int slim_rx_mux_get(struct snd_kcontrol *kcontrol,
-			   struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_dapm_widget_list *wlist =
-					dapm_kcontrol_get_wlist(kcontrol);
-	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
-	struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-
-	ucontrol->value.enumerated.item[0] = tomtom_p->rx_port_value;
-	return 0;
-}
-
-static const char *const slim_rx_mux_text[] = {
-	"ZERO", "AIF1_PB", "AIF2_PB", "AIF3_PB"
-};
-
-static int slim_rx_mux_put(struct snd_kcontrol *kcontrol,
-			   struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_dapm_widget_list *wlist =
-					dapm_kcontrol_get_wlist(kcontrol);
-	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
-	struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-	struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
-	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
-	struct snd_soc_dapm_update *update = NULL;
-	u32 port_id = widget->shift;
-
-	pr_debug("%s: wname %s cname %s value %u shift %d item %ld\n", __func__,
-		widget->name, ucontrol->id.name, tomtom_p->rx_port_value,
-		widget->shift, ucontrol->value.integer.value[0]);
-
-	tomtom_p->rx_port_value = ucontrol->value.enumerated.item[0];
-
-	mutex_lock(&tomtom_p->codec_mutex);
-
-	if (tomtom_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
-		if (tomtom_p->rx_port_value > 2) {
-			dev_err(codec->dev, "%s: invalid AIF for I2C mode\n",
-				__func__);
-			goto err;
-		}
-	}
-	/* value needs to match the virtual port and AIF number */
-	switch (tomtom_p->rx_port_value) {
-	case 0:
-		list_del_init(&core->rx_chs[port_id].list);
-		break;
-	case 1:
-		if (wcd9xxx_rx_vport_validation(port_id +
-			TOMTOM_RX_PORT_START_NUMBER,
-			&tomtom_p->dai[AIF1_PB].wcd9xxx_ch_list)) {
-			dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
-				__func__, port_id + 1);
-			goto rtn;
-		}
-		list_add_tail(&core->rx_chs[port_id].list,
-			      &tomtom_p->dai[AIF1_PB].wcd9xxx_ch_list);
-		break;
-	case 2:
-		if (wcd9xxx_rx_vport_validation(port_id +
-			TOMTOM_RX_PORT_START_NUMBER,
-			&tomtom_p->dai[AIF2_PB].wcd9xxx_ch_list)) {
-			dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
-				__func__, port_id + 1);
-			goto rtn;
-		}
-		list_add_tail(&core->rx_chs[port_id].list,
-			      &tomtom_p->dai[AIF2_PB].wcd9xxx_ch_list);
-		break;
-	case 3:
-		if (wcd9xxx_rx_vport_validation(port_id +
-			TOMTOM_RX_PORT_START_NUMBER,
-			&tomtom_p->dai[AIF3_PB].wcd9xxx_ch_list)) {
-			dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
-				__func__, port_id + 1);
-			goto rtn;
-		}
-		list_add_tail(&core->rx_chs[port_id].list,
-			      &tomtom_p->dai[AIF3_PB].wcd9xxx_ch_list);
-		break;
-	default:
-		pr_err("Unknown AIF %d\n", tomtom_p->rx_port_value);
-		goto err;
-	}
-rtn:
-	mutex_unlock(&tomtom_p->codec_mutex);
-	snd_soc_dapm_mux_update_power(widget->dapm, kcontrol,
-					tomtom_p->rx_port_value, e, update);
-
-	return 0;
-err:
-	mutex_unlock(&tomtom_p->codec_mutex);
-	return -EINVAL;
-}
-
-static const struct soc_enum slim_rx_mux_enum =
-	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(slim_rx_mux_text), slim_rx_mux_text);
-
-static const struct snd_kcontrol_new slim_rx_mux[TOMTOM_RX_MAX] = {
-	SOC_DAPM_ENUM_EXT("SLIM RX1 Mux", slim_rx_mux_enum,
-			  slim_rx_mux_get, slim_rx_mux_put),
-	SOC_DAPM_ENUM_EXT("SLIM RX2 Mux", slim_rx_mux_enum,
-			  slim_rx_mux_get, slim_rx_mux_put),
-	SOC_DAPM_ENUM_EXT("SLIM RX3 Mux", slim_rx_mux_enum,
-			  slim_rx_mux_get, slim_rx_mux_put),
-	SOC_DAPM_ENUM_EXT("SLIM RX4 Mux", slim_rx_mux_enum,
-			  slim_rx_mux_get, slim_rx_mux_put),
-	SOC_DAPM_ENUM_EXT("SLIM RX5 Mux", slim_rx_mux_enum,
-			  slim_rx_mux_get, slim_rx_mux_put),
-	SOC_DAPM_ENUM_EXT("SLIM RX6 Mux", slim_rx_mux_enum,
-			  slim_rx_mux_get, slim_rx_mux_put),
-	SOC_DAPM_ENUM_EXT("SLIM RX7 Mux", slim_rx_mux_enum,
-			  slim_rx_mux_get, slim_rx_mux_put),
-	SOC_DAPM_ENUM_EXT("SLIM RX8 Mux", slim_rx_mux_enum,
-			  slim_rx_mux_get, slim_rx_mux_put),
-};
-
-static const struct snd_kcontrol_new aif1_cap_mixer[] = {
-	SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, TOMTOM_TX1, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, TOMTOM_TX2, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, TOMTOM_TX3, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, TOMTOM_TX4, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, TOMTOM_TX5, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, TOMTOM_TX6, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, TOMTOM_TX7, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, TOMTOM_TX8, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, TOMTOM_TX9, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, TOMTOM_TX10, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-};
-
-static const struct snd_kcontrol_new aif2_cap_mixer[] = {
-	SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, TOMTOM_TX1, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, TOMTOM_TX2, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, TOMTOM_TX3, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, TOMTOM_TX4, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, TOMTOM_TX5, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, TOMTOM_TX6, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, TOMTOM_TX7, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, TOMTOM_TX8, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, TOMTOM_TX9, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, TOMTOM_TX10, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-};
-
-static const struct snd_kcontrol_new aif3_cap_mixer[] = {
-	SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, TOMTOM_TX1, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, TOMTOM_TX2, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, TOMTOM_TX3, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, TOMTOM_TX4, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, TOMTOM_TX5, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, TOMTOM_TX6, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, TOMTOM_TX7, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, TOMTOM_TX8, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, TOMTOM_TX9, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-	SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, TOMTOM_TX10, 1, 0,
-			slim_tx_mixer_get, slim_tx_mixer_put),
-};
-
-static void tomtom_codec_enable_adc_block(struct snd_soc_codec *codec,
-					 int enable)
-{
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	pr_debug("%s %d\n", __func__, enable);
-
-	if (enable) {
-		tomtom->adc_count++;
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL,
-						0x2, 0x2);
-	} else {
-		tomtom->adc_count--;
-		if (!tomtom->adc_count)
-			snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL,
-					    0x2, 0x0);
-	}
-}
-
-static int tomtom_codec_enable_adc(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-	u16 adc_reg;
-	u16 tx_fe_clkdiv_reg;
-	u8 tx_fe_clkdiv_mask;
-	u8 init_bit_shift;
-	u8 bit_pos;
-
-	pr_debug("%s %d\n", __func__, event);
-
-	switch (w->reg) {
-	case TOMTOM_A_TX_1_GAIN:
-		adc_reg = TOMTOM_A_TX_1_2_TEST_CTL;
-		tx_fe_clkdiv_reg = TOMTOM_A_TX_1_2_TXFE_CLKDIV;
-		tx_fe_clkdiv_mask = 0x0F;
-		init_bit_shift = 7;
-		bit_pos = ADC1_TXFE;
-		break;
-	case TOMTOM_A_TX_2_GAIN:
-		adc_reg = TOMTOM_A_TX_1_2_TEST_CTL;
-		tx_fe_clkdiv_reg = TOMTOM_A_TX_1_2_TXFE_CLKDIV;
-		tx_fe_clkdiv_mask = 0xF0;
-		init_bit_shift = 6;
-		bit_pos = ADC2_TXFE;
-		break;
-	case TOMTOM_A_TX_3_GAIN:
-		adc_reg = TOMTOM_A_TX_3_4_TEST_CTL;
-		init_bit_shift = 7;
-		tx_fe_clkdiv_reg = TOMTOM_A_TX_3_4_TXFE_CKDIV;
-		tx_fe_clkdiv_mask = 0x0F;
-		bit_pos = ADC3_TXFE;
-		break;
-	case TOMTOM_A_TX_4_GAIN:
-		adc_reg = TOMTOM_A_TX_3_4_TEST_CTL;
-		init_bit_shift = 6;
-		tx_fe_clkdiv_reg = TOMTOM_A_TX_3_4_TXFE_CKDIV;
-		tx_fe_clkdiv_mask = 0xF0;
-		bit_pos = ADC4_TXFE;
-		break;
-	case TOMTOM_A_TX_5_GAIN:
-		adc_reg = TOMTOM_A_TX_5_6_TEST_CTL;
-		init_bit_shift = 7;
-		tx_fe_clkdiv_reg = TOMTOM_A_TX_5_6_TXFE_CKDIV;
-		tx_fe_clkdiv_mask = 0x0F;
-		bit_pos = ADC5_TXFE;
-		break;
-	case TOMTOM_A_TX_6_GAIN:
-		adc_reg = TOMTOM_A_TX_5_6_TEST_CTL;
-		init_bit_shift = 6;
-		tx_fe_clkdiv_reg = TOMTOM_A_TX_5_6_TXFE_CKDIV;
-		tx_fe_clkdiv_mask = 0xF0;
-		bit_pos = ADC6_TXFE;
-		break;
-	default:
-		pr_err("%s: Error, invalid adc register\n", __func__);
-		return -EINVAL;
-	}
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		snd_soc_update_bits(codec, tx_fe_clkdiv_reg, tx_fe_clkdiv_mask,
-				    0x0);
-		set_bit(bit_pos, &priv->status_mask);
-		tomtom_codec_enable_adc_block(codec, 1);
-		snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift,
-				1 << init_bit_shift);
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift, 0x00);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		tomtom_codec_enable_adc_block(codec, 0);
-		break;
-	}
-	return 0;
-}
-
-static int tomtom_codec_ext_clk_en(struct snd_soc_codec *codec,
-		int enable, bool dapm)
-{
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	if (!tomtom->codec_ext_clk_en_cb) {
-		dev_err(codec->dev,
-			"%s: Invalid ext_clk_callback\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	return tomtom->codec_ext_clk_en_cb(codec, enable, dapm);
-}
-
-static int __tomtom_mclk_enable(struct tomtom_priv *tomtom, int mclk_enable)
-{
-	int ret = 0;
-
-	WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
-	if (mclk_enable) {
-		tomtom->ext_clk_users++;
-		if (tomtom->ext_clk_users > 1)
-			goto bg_clk_unlock;
-		ret = clk_prepare_enable(tomtom->wcd_ext_clk);
-		if (ret) {
-			pr_err("%s: ext clk enable failed\n",
-				__func__);
-			tomtom->ext_clk_users--;
-			goto bg_clk_unlock;
-		}
-		wcd9xxx_resmgr_get_bandgap(&tomtom->resmgr,
-					   WCD9XXX_BANDGAP_AUDIO_MODE);
-		wcd9xxx_resmgr_get_clk_block(&tomtom->resmgr, WCD9XXX_CLK_MCLK);
-	} else {
-		tomtom->ext_clk_users--;
-		if (tomtom->ext_clk_users == 0) {
-			/* Put clock and BG */
-			wcd9xxx_resmgr_put_clk_block(&tomtom->resmgr,
-						     WCD9XXX_CLK_MCLK);
-			wcd9xxx_resmgr_put_bandgap(&tomtom->resmgr,
-					WCD9XXX_BANDGAP_AUDIO_MODE);
-			clk_disable_unprepare(tomtom->wcd_ext_clk);
-		}
-	}
-bg_clk_unlock:
-	WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
-
-	return ret;
-}
-
-int tomtom_codec_mclk_enable(struct snd_soc_codec *codec,
-			     int enable, bool dapm)
-{
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	if (tomtom->wcd_ext_clk) {
-		dev_dbg(codec->dev, "%s: mclk_enable = %u, dapm = %d\n",
-			__func__, enable, dapm);
-		return __tomtom_mclk_enable(tomtom, enable);
-	} else if (tomtom->codec_ext_clk_en_cb)
-		return tomtom_codec_ext_clk_en(codec, enable, dapm);
-	else {
-		dev_err(codec->dev,
-			"%s: Cannot turn on MCLK\n",
-			__func__);
-		return -EINVAL;
-	}
-}
-EXPORT_SYMBOL(tomtom_codec_mclk_enable);
-
-static int tomtom_codec_get_ext_clk_users(struct tomtom_priv *tomtom)
-{
-	if (tomtom->wcd_ext_clk)
-		return tomtom->ext_clk_users;
-	else if (tomtom->codec_get_ext_clk_cnt)
-		return tomtom->codec_get_ext_clk_cnt();
-	else
-		return 0;
-}
-
-/* tomtom_codec_internal_rco_ctrl()
- * Make sure that BG_CLK_LOCK is not acquired. Exit if acquired to avoid
- * potential deadlock as ext_clk_en_cb() also tries to acquire the same
- * lock to enable MCLK for RCO calibration
- */
-static int tomtom_codec_internal_rco_ctrl(struct snd_soc_codec *codec,
-					  bool enable)
-{
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
-
-	if (enable) {
-		if (wcd9xxx_resmgr_get_clk_type(&tomtom->resmgr) ==
-		    WCD9XXX_CLK_RCO) {
-			WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
-			wcd9xxx_resmgr_get_clk_block(&tomtom->resmgr,
-						     WCD9XXX_CLK_RCO);
-			WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
-		} else {
-			tomtom_codec_mclk_enable(codec, true, false);
-			WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
-			tomtom->resmgr.ext_clk_users =
-					tomtom_codec_get_ext_clk_users(tomtom);
-			wcd9xxx_resmgr_get_clk_block(&tomtom->resmgr,
-						     WCD9XXX_CLK_RCO);
-			WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
-			tomtom_codec_mclk_enable(codec, false, false);
-		}
-
-	} else {
-		WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
-		wcd9xxx_resmgr_put_clk_block(&tomtom->resmgr,
-					     WCD9XXX_CLK_RCO);
-		WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
-	}
-
-	return ret;
-}
-
-static int tomtom_codec_enable_aux_pga(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	pr_debug("%s: %d\n", __func__, event);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
-		wcd9xxx_resmgr_get_bandgap(&tomtom->resmgr,
-					   WCD9XXX_BANDGAP_AUDIO_MODE);
-		WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
-		/* AUX PGA requires RCO or MCLK */
-		tomtom_codec_internal_rco_ctrl(codec, true);
-		WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
-		wcd9xxx_resmgr_enable_rx_bias(&tomtom->resmgr, 1);
-		WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
-		break;
-
-	case SND_SOC_DAPM_POST_PMD:
-		WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
-		wcd9xxx_resmgr_enable_rx_bias(&tomtom->resmgr, 0);
-		WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
-		tomtom_codec_internal_rco_ctrl(codec, false);
-		WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
-		wcd9xxx_resmgr_put_bandgap(&tomtom->resmgr,
-					   WCD9XXX_BANDGAP_AUDIO_MODE);
-		WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
-		break;
-	}
-	return 0;
-}
-
-static int tomtom_codec_enable_lineout(struct snd_soc_dapm_widget *w,
-		struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	u16 lineout_gain_reg;
-
-	pr_debug("%s %d %s\n", __func__, event, w->name);
-
-	switch (w->shift) {
-	case 0:
-		lineout_gain_reg = TOMTOM_A_RX_LINE_1_GAIN;
-		break;
-	case 1:
-		lineout_gain_reg = TOMTOM_A_RX_LINE_2_GAIN;
-		break;
-	case 2:
-		lineout_gain_reg = TOMTOM_A_RX_LINE_3_GAIN;
-		break;
-	case 3:
-		lineout_gain_reg = TOMTOM_A_RX_LINE_4_GAIN;
-		break;
-	default:
-		pr_err("%s: Error, incorrect lineout register value\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		snd_soc_update_bits(codec, lineout_gain_reg, 0x40, 0x40);
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		wcd9xxx_clsh_fsm(codec, &tomtom->clsh_d,
-						 WCD9XXX_CLSH_STATE_LO,
-						 WCD9XXX_CLSH_REQ_ENABLE,
-						 WCD9XXX_CLSH_EVENT_POST_PA);
-		pr_debug("%s: sleeping 5 ms after %s PA turn on\n",
-				__func__, w->name);
-		/* Wait for CnP time after PA enable */
-		usleep_range(5000, 5100);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		snd_soc_update_bits(codec, lineout_gain_reg, 0x40, 0x00);
-		pr_debug("%s: sleeping 5 ms after %s PA turn off\n",
-			 __func__, w->name);
-		/* Wait for CnP time after PA disable */
-		usleep_range(5000, 5100);
-		break;
-	}
-	return 0;
-}
-
-static int tomtom_codec_enable_spk_pa(struct snd_soc_dapm_widget *w,
-				     struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	u16 spk_drv_reg;
-
-	pr_debug("%s: %d %s\n", __func__, event, w->name);
-	if (strnstr(w->name, "SPK2 PA", sizeof("SPK2 PA")))
-		spk_drv_reg = TOMTOM_A_SPKR_DRV2_EN;
-	else
-		spk_drv_reg = TOMTOM_A_SPKR_DRV1_EN;
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		tomtom->spkr_pa_widget_on = true;
-		snd_soc_update_bits(codec, spk_drv_reg, 0x80, 0x80);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		tomtom->spkr_pa_widget_on = false;
-		snd_soc_update_bits(codec, spk_drv_reg, 0x80, 0x00);
-		break;
-	}
-	return 0;
-}
-
-static u8 tomtom_get_dmic_clk_val(struct snd_soc_codec *codec,
-	u32 mclk_rate, u32 dmic_clk_rate)
-{
-	u32 div_factor;
-	u8 dmic_ctl_val;
-
-	dev_dbg(codec->dev,
-		"%s: mclk_rate = %d, dmic_sample_rate = %d\n",
-		__func__, mclk_rate, dmic_clk_rate);
-
-	/* Default value to return in case of error */
-	if (mclk_rate == TOMTOM_MCLK_CLK_9P6MHZ)
-		dmic_ctl_val = WCD9330_DMIC_CLK_DIV_2;
-	else
-		dmic_ctl_val = WCD9330_DMIC_CLK_DIV_3;
-
-	if (dmic_clk_rate == 0) {
-		dev_err(codec->dev,
-			"%s: dmic_sample_rate cannot be 0\n",
-			__func__);
-		goto done;
-	}
-
-	div_factor = mclk_rate / dmic_clk_rate;
-	switch (div_factor) {
-	case 2:
-		dmic_ctl_val = WCD9330_DMIC_CLK_DIV_2;
-		break;
-	case 3:
-		dmic_ctl_val = WCD9330_DMIC_CLK_DIV_3;
-		break;
-	case 4:
-		dmic_ctl_val = WCD9330_DMIC_CLK_DIV_4;
-		break;
-	case 6:
-		dmic_ctl_val = WCD9330_DMIC_CLK_DIV_6;
-		break;
-	case 16:
-		dmic_ctl_val = WCD9330_DMIC_CLK_DIV_16;
-		break;
-	default:
-		dev_err(codec->dev,
-			"%s: Invalid div_factor %u, clk_rate(%u), dmic_rate(%u)\n",
-			__func__, div_factor, mclk_rate, dmic_clk_rate);
-		break;
-	}
-
-done:
-	return dmic_ctl_val;
-}
-
-static int tomtom_codec_enable_dmic(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	struct wcd9xxx_pdata *pdata = tomtom->resmgr.pdata;
-	u8  dmic_clk_en;
-	u16 dmic_clk_reg;
-	s32 *dmic_clk_cnt;
-	u8 dmic_rate_val, dmic_rate_shift;
-	unsigned int dmic;
-	int ret;
-	char *wname;
-
-	wname = strpbrk(w->name, "123456");
-	if (!wname) {
-		dev_err(codec->dev, "%s: widget not found\n", __func__);
-		return -EINVAL;
-	}
-
-	ret = kstrtouint(wname, 10, &dmic);
-	if (ret < 0) {
-		pr_err("%s: Invalid DMIC line on the codec\n", __func__);
-		return -EINVAL;
-	}
-
-	switch (dmic) {
-	case 1:
-	case 2:
-		dmic_clk_en = 0x01;
-		dmic_clk_cnt = &(tomtom->dmic_1_2_clk_cnt);
-		dmic_clk_reg = TOMTOM_A_DMIC_B1_CTL;
-		dmic_rate_shift = 5;
-		pr_debug("%s() event %d DMIC%d dmic_1_2_clk_cnt %d\n",
-			__func__, event,  dmic, *dmic_clk_cnt);
-
-		break;
-
-	case 3:
-	case 4:
-		dmic_clk_en = 0x02;
-		dmic_clk_cnt = &(tomtom->dmic_3_4_clk_cnt);
-		dmic_clk_reg = TOMTOM_A_DMIC_B2_CTL;
-		dmic_rate_shift = 1;
-		pr_debug("%s() event %d DMIC%d dmic_3_4_clk_cnt %d\n",
-			__func__, event,  dmic, *dmic_clk_cnt);
-		break;
-
-	case 5:
-	case 6:
-		dmic_clk_en = 0x04;
-		dmic_clk_cnt = &(tomtom->dmic_5_6_clk_cnt);
-		dmic_clk_reg = TOMTOM_A_DMIC_B2_CTL;
-		dmic_rate_shift = 4;
-		pr_debug("%s() event %d DMIC%d dmic_5_6_clk_cnt %d\n",
-			__func__, event,  dmic, *dmic_clk_cnt);
-
-		break;
-
-	default:
-		pr_err("%s: Invalid DMIC Selection\n", __func__);
-		return -EINVAL;
-	}
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-
-		dmic_rate_val =
-			tomtom_get_dmic_clk_val(codec,
-					pdata->mclk_rate,
-					pdata->dmic_sample_rate);
-
-		(*dmic_clk_cnt)++;
-		if (*dmic_clk_cnt == 1) {
-			snd_soc_update_bits(codec, dmic_clk_reg,
-				0x07 << dmic_rate_shift,
-				dmic_rate_val << dmic_rate_shift);
-			snd_soc_update_bits(codec, TOMTOM_A_DMIC_B1_CTL,
-					dmic_clk_en, dmic_clk_en);
-		}
-
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-
-		dmic_rate_val =
-			tomtom_get_dmic_clk_val(codec,
-					pdata->mclk_rate,
-					pdata->mad_dmic_sample_rate);
-		(*dmic_clk_cnt)--;
-		if (*dmic_clk_cnt  == 0) {
-			snd_soc_update_bits(codec, TOMTOM_A_DMIC_B1_CTL,
-					dmic_clk_en, 0);
-			snd_soc_update_bits(codec, dmic_clk_reg,
-				0x07 << dmic_rate_shift,
-				dmic_rate_val << dmic_rate_shift);
-		}
-		break;
-	}
-	return 0;
-}
-
-static int tomtom_codec_config_mad(struct snd_soc_codec *codec)
-{
-	int ret = 0;
-	const struct firmware *fw;
-	struct firmware_cal *hwdep_cal = NULL;
-	struct mad_audio_cal *mad_cal;
-	const void *data;
-	const char *filename = TOMTOM_MAD_AUDIO_FIRMWARE_PATH;
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	size_t cal_size;
-	int idx;
-
-	pr_debug("%s: enter\n", __func__);
-
-	if (!tomtom->fw_data) {
-		dev_err(codec->dev, "%s: invalid cal data\n",
-				__func__);
-		return -ENODEV;
-	}
-
-	hwdep_cal = wcdcal_get_fw_cal(tomtom->fw_data, WCD9XXX_MAD_CAL);
-	if (hwdep_cal) {
-		data = hwdep_cal->data;
-		cal_size = hwdep_cal->size;
-		dev_dbg(codec->dev, "%s: using hwdep calibration\n",
-				__func__);
-	} else {
-		ret = request_firmware(&fw, filename, codec->dev);
-		if (ret != 0) {
-			pr_err("Failed to acquire MAD firmware data %s: %d\n",
-				filename, ret);
-			return -ENODEV;
-		}
-		if (!fw) {
-			dev_err(codec->dev, "failed to get MAD firmware\n");
-			return -ENODEV;
-		}
-		data = fw->data;
-		cal_size = fw->size;
-		dev_dbg(codec->dev, "%s: using request_firmware calibration\n",
-				__func__);
-	}
-	if (cal_size < sizeof(struct mad_audio_cal)) {
-		pr_err("%s: incorrect hwdep cal size %zu\n",
-			__func__, cal_size);
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	mad_cal = (struct mad_audio_cal *)(data);
-	if (!mad_cal) {
-		dev_err(codec->dev, "%s: Invalid calibration data\n",
-				__func__);
-		ret =  -EINVAL;
-		goto err;
-	}
-
-	snd_soc_write(codec, TOMTOM_A_CDC_MAD_MAIN_CTL_2,
-		      mad_cal->microphone_info.cycle_time);
-	snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_MAIN_CTL_1, 0xFF << 3,
-			    ((uint16_t)mad_cal->microphone_info.settle_time)
-			    << 3);
-
-	/* Audio */
-	snd_soc_write(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_8,
-		      mad_cal->audio_info.rms_omit_samples);
-	snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_1,
-			    0x07 << 4, mad_cal->audio_info.rms_comp_time << 4);
-	snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_2, 0x03 << 2,
-			    mad_cal->audio_info.detection_mechanism << 2);
-	snd_soc_write(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_7,
-		      mad_cal->audio_info.rms_diff_threshold & 0x3F);
-	snd_soc_write(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_5,
-		      mad_cal->audio_info.rms_threshold_lsb);
-	snd_soc_write(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_6,
-		      mad_cal->audio_info.rms_threshold_msb);
-
-	for (idx = 0; idx < ARRAY_SIZE(mad_cal->audio_info.iir_coefficients);
-	     idx++) {
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR,
-				    0x3F, idx);
-		snd_soc_write(codec, TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL,
-			      mad_cal->audio_info.iir_coefficients[idx]);
-		dev_dbg(codec->dev, "%s: MAD Audio IIR Coef[%d] = 0x%x\n",
-			__func__, idx,
-			mad_cal->audio_info.iir_coefficients[idx]);
-	}
-
-	/* Beacon */
-	snd_soc_write(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_8,
-		      mad_cal->beacon_info.rms_omit_samples);
-	snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_1,
-			    0x07 << 4, mad_cal->beacon_info.rms_comp_time);
-	snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_2, 0x03 << 2,
-			    mad_cal->beacon_info.detection_mechanism << 2);
-	snd_soc_write(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_7,
-		      mad_cal->beacon_info.rms_diff_threshold & 0x1F);
-	snd_soc_write(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_5,
-		      mad_cal->beacon_info.rms_threshold_lsb);
-	snd_soc_write(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_6,
-		      mad_cal->beacon_info.rms_threshold_msb);
-
-	/* Ultrasound */
-	snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_1,
-			    0x07 << 4, mad_cal->beacon_info.rms_comp_time);
-	snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_ULTR_CTL_2, 0x03 << 2,
-			    mad_cal->ultrasound_info.detection_mechanism);
-	snd_soc_write(codec, TOMTOM_A_CDC_MAD_ULTR_CTL_7,
-		      mad_cal->ultrasound_info.rms_diff_threshold & 0x1F);
-	snd_soc_write(codec, TOMTOM_A_CDC_MAD_ULTR_CTL_5,
-		      mad_cal->ultrasound_info.rms_threshold_lsb);
-	snd_soc_write(codec, TOMTOM_A_CDC_MAD_ULTR_CTL_6,
-		      mad_cal->ultrasound_info.rms_threshold_msb);
-
-	/* Set MAD intr time to 20 msec */
-	snd_soc_update_bits(codec, 0x4E, 0x01F, 0x13);
-
-	pr_debug("%s: leave ret %d\n", __func__, ret);
-err:
-	if (!hwdep_cal)
-		release_firmware(fw);
-	return ret;
-}
-
-static int tomtom_codec_enable_mad(struct snd_soc_dapm_widget *w,
-				  struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
-	u8 mad_micb, mad_cfilt;
-	u16 mad_cfilt_reg;
-
-	mad_micb = snd_soc_read(codec, TOMTOM_A_MAD_ANA_CTRL) & 0x07;
-	switch (mad_micb) {
-	case 1:
-		mad_cfilt = tomtom->resmgr.pdata->micbias.bias1_cfilt_sel;
-		break;
-	case 2:
-		mad_cfilt = tomtom->resmgr.pdata->micbias.bias2_cfilt_sel;
-		break;
-	case 3:
-		mad_cfilt = tomtom->resmgr.pdata->micbias.bias3_cfilt_sel;
-		break;
-	case 4:
-		mad_cfilt = tomtom->resmgr.pdata->micbias.bias4_cfilt_sel;
-		break;
-	default:
-		dev_err(codec->dev,
-			"%s: Invalid micbias selection 0x%x\n",
-			__func__, mad_micb);
-		return -EINVAL;
-	}
-
-	switch (mad_cfilt) {
-	case WCD9XXX_CFILT1_SEL:
-		mad_cfilt_reg = TOMTOM_A_MICB_CFILT_1_VAL;
-		break;
-	case WCD9XXX_CFILT2_SEL:
-		mad_cfilt_reg = TOMTOM_A_MICB_CFILT_2_VAL;
-		break;
-	case WCD9XXX_CFILT3_SEL:
-		mad_cfilt_reg = TOMTOM_A_MICB_CFILT_3_VAL;
-		break;
-	default:
-		dev_err(codec->dev,
-			"%s: invalid cfilt 0x%x for micb 0x%x\n",
-			__func__, mad_cfilt, mad_micb);
-		return -EINVAL;
-	}
-
-	dev_dbg(codec->dev,
-		"%s event = %d, mad_cfilt_reg = 0x%x\n",
-		__func__, event, mad_cfilt_reg);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		/* Undo reset for MAD */
-		snd_soc_update_bits(codec, TOMTOM_A_SVASS_CLKRST_CTL,
-				    0x02, 0x00);
-
-		ret = tomtom_codec_config_mad(codec);
-		if (ret) {
-			pr_err("%s: Failed to config MAD\n", __func__);
-			break;
-		}
-
-		/* setup MAD micbias to VDDIO */
-		snd_soc_update_bits(codec, mad_cfilt_reg,
-				    0x02, 0x02);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		/* Reset the MAD block */
-		snd_soc_update_bits(codec, TOMTOM_A_SVASS_CLKRST_CTL,
-				    0x02, 0x02);
-
-		/* Undo setup of MAD micbias to VDDIO */
-		snd_soc_update_bits(codec, mad_cfilt_reg,
-				    0x02, 0x00);
-	}
-	return ret;
-}
-
-static int tomtom_codec_enable_micbias(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	u16 micb_int_reg = 0, micb_ctl_reg = 0;
-	u8 cfilt_sel_val = 0;
-	char *internal1_text = "Internal1";
-	char *internal2_text = "Internal2";
-	char *internal3_text = "Internal3";
-	enum wcd9xxx_notify_event e_post_off, e_pre_on, e_post_on;
-
-	pr_debug("%s: w->name %s event %d\n", __func__, w->name, event);
-	if (strnstr(w->name, "MIC BIAS1", sizeof("MIC BIAS1"))) {
-		micb_ctl_reg = TOMTOM_A_MICB_1_CTL;
-		micb_int_reg = TOMTOM_A_MICB_1_INT_RBIAS;
-		cfilt_sel_val = tomtom->resmgr.pdata->micbias.bias1_cfilt_sel;
-		e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_1_ON;
-		e_post_on = WCD9XXX_EVENT_POST_MICBIAS_1_ON;
-		e_post_off = WCD9XXX_EVENT_POST_MICBIAS_1_OFF;
-	} else if (strnstr(w->name, "MIC BIAS2", sizeof("MIC BIAS2"))) {
-		micb_ctl_reg = TOMTOM_A_MICB_2_CTL;
-		micb_int_reg = TOMTOM_A_MICB_2_INT_RBIAS;
-		cfilt_sel_val = tomtom->resmgr.pdata->micbias.bias2_cfilt_sel;
-		e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_2_ON;
-		e_post_on = WCD9XXX_EVENT_POST_MICBIAS_2_ON;
-		e_post_off = WCD9XXX_EVENT_POST_MICBIAS_2_OFF;
-	} else if (strnstr(w->name, "MIC BIAS3", sizeof("MIC BIAS3"))) {
-		micb_ctl_reg = TOMTOM_A_MICB_3_CTL;
-		micb_int_reg = TOMTOM_A_MICB_3_INT_RBIAS;
-		cfilt_sel_val = tomtom->resmgr.pdata->micbias.bias3_cfilt_sel;
-		e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_3_ON;
-		e_post_on = WCD9XXX_EVENT_POST_MICBIAS_3_ON;
-		e_post_off = WCD9XXX_EVENT_POST_MICBIAS_3_OFF;
-	} else if (strnstr(w->name, "MIC BIAS4", sizeof("MIC BIAS4"))) {
-		micb_ctl_reg = TOMTOM_A_MICB_4_CTL;
-		micb_int_reg = tomtom->resmgr.reg_addr->micb_4_int_rbias;
-		cfilt_sel_val = tomtom->resmgr.pdata->micbias.bias4_cfilt_sel;
-		e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_4_ON;
-		e_post_on = WCD9XXX_EVENT_POST_MICBIAS_4_ON;
-		e_post_off = WCD9XXX_EVENT_POST_MICBIAS_4_OFF;
-	} else {
-		pr_err("%s: Error, invalid micbias %s\n", __func__, w->name);
-		return -EINVAL;
-	}
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		/* Let the MBHC module know so it can switch micbias off */
-		wcd9xxx_resmgr_notifier_call(&tomtom->resmgr, e_pre_on);
-
-		/* Get cfilt */
-		wcd9xxx_resmgr_cfilt_get(&tomtom->resmgr, cfilt_sel_val);
-
-		if (strnstr(w->name, internal1_text, 30))
-			snd_soc_update_bits(codec, micb_int_reg, 0xE0, 0xE0);
-		else if (strnstr(w->name, internal2_text, 30))
-			snd_soc_update_bits(codec, micb_int_reg, 0x1C, 0x1C);
-		else if (strnstr(w->name, internal3_text, 30))
-			snd_soc_update_bits(codec, micb_int_reg, 0x3, 0x3);
-		else
-			/*
-			 *  If not internal, make sure to write the
-			 *  register to default value
-			 */
-			snd_soc_write(codec, micb_int_reg, 0x24);
-		if (tomtom->mbhc_started && micb_ctl_reg ==
-		    TOMTOM_A_MICB_2_CTL) {
-			if (++tomtom->micb_2_users == 1) {
-				if (tomtom->resmgr.pdata->
-				    micbias.bias2_is_headset_only)
-					wcd9xxx_resmgr_add_cond_update_bits(
-							 &tomtom->resmgr,
-							 WCD9XXX_COND_HPH_MIC,
-							 micb_ctl_reg, w->shift,
-							 false);
-				else
-					snd_soc_update_bits(codec, micb_ctl_reg,
-							    1 << w->shift,
-							    1 << w->shift);
-			}
-			pr_debug("%s: micb_2_users %d\n", __func__,
-				 tomtom->micb_2_users);
-		} else {
-			snd_soc_update_bits(codec, micb_ctl_reg, 1 << w->shift,
-					    1 << w->shift);
-		}
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		usleep_range(5000, 5100);
-		/* Let the MBHC module know that micbias is now on */
-		wcd9xxx_resmgr_notifier_call(&tomtom->resmgr, e_post_on);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		if (tomtom->mbhc_started && micb_ctl_reg ==
-		    TOMTOM_A_MICB_2_CTL) {
-			if (--tomtom->micb_2_users == 0) {
-				if (tomtom->resmgr.pdata->
-				    micbias.bias2_is_headset_only)
-					wcd9xxx_resmgr_rm_cond_update_bits(
-							&tomtom->resmgr,
-							WCD9XXX_COND_HPH_MIC,
-							micb_ctl_reg, 7, false);
-				else
-					snd_soc_update_bits(codec, micb_ctl_reg,
-							    1 << w->shift, 0);
-			}
-			pr_debug("%s: micb_2_users %d\n", __func__,
-				 tomtom->micb_2_users);
-			WARN(tomtom->micb_2_users < 0,
-			     "Unexpected micbias users %d\n",
-			     tomtom->micb_2_users);
-		} else {
-			snd_soc_update_bits(codec, micb_ctl_reg, 1 << w->shift,
-					    0);
-		}
-
-		/* Let the MBHC module know the micbias switch is now off */
-		wcd9xxx_resmgr_notifier_call(&tomtom->resmgr, e_post_off);
-
-		if (strnstr(w->name, internal1_text, 30))
-			snd_soc_update_bits(codec, micb_int_reg, 0x80, 0x00);
-		else if (strnstr(w->name, internal2_text, 30))
-			snd_soc_update_bits(codec, micb_int_reg, 0x10, 0x00);
-		else if (strnstr(w->name, internal3_text, 30))
-			snd_soc_update_bits(codec, micb_int_reg, 0x2, 0x0);
-
-		/* Put cfilt */
-		wcd9xxx_resmgr_cfilt_put(&tomtom->resmgr, cfilt_sel_val);
-		break;
-	}
-
-	return 0;
-}
-
-/* called under codec_resource_lock acquisition */
-static int tomtom_enable_mbhc_micbias(struct snd_soc_codec *codec, bool enable,
-				enum wcd9xxx_micbias_num micb_num)
-{
-	int rc;
-
-	if (micb_num != MBHC_MICBIAS2) {
-		dev_err(codec->dev, "%s: Unsupported micbias, micb_num=%d\n",
-			__func__, micb_num);
-		return -EINVAL;
-	}
-
-	if (enable)
-		rc = snd_soc_dapm_force_enable_pin(
-					     snd_soc_codec_get_dapm(codec),
-					     DAPM_MICBIAS2_EXTERNAL_STANDALONE);
-	else
-		rc = snd_soc_dapm_disable_pin(snd_soc_codec_get_dapm(codec),
-					     DAPM_MICBIAS2_EXTERNAL_STANDALONE);
-	if (!rc)
-		snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec));
-	pr_debug("%s: leave ret %d\n", __func__, rc);
-	return rc;
-}
-
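-
-/*
- * Restore the TXFE clock divider fields to their defaults (0x05 in the
- * low nibble, 0x50 in the high nibble) for every ADC whose TXFE bit is
- * set in status_mask, clearing each bit once its field is restored.
- */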
-static void txfe_clkdiv_update(struct snd_soc_codec *codec)
-{
-	struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-
-	if (test_bit(ADC1_TXFE, &priv->status_mask)) {
-		snd_soc_update_bits(codec, TOMTOM_A_TX_1_2_TXFE_CLKDIV,
-				    0x0F, 0x05);
-		clear_bit(ADC1_TXFE, &priv->status_mask);
-	}
-	if (test_bit(ADC2_TXFE, &priv->status_mask)) {
-		snd_soc_update_bits(codec, TOMTOM_A_TX_1_2_TXFE_CLKDIV,
-				    0xF0, 0x50);
-		clear_bit(ADC2_TXFE, &priv->status_mask);
-	}
-	if (test_bit(ADC3_TXFE, &priv->status_mask)) {
-		snd_soc_update_bits(codec, TOMTOM_A_TX_3_4_TXFE_CKDIV,
-				    0x0F, 0x05);
-		clear_bit(ADC3_TXFE, &priv->status_mask);
-	}
-	if (test_bit(ADC4_TXFE, &priv->status_mask)) {
-		snd_soc_update_bits(codec, TOMTOM_A_TX_3_4_TXFE_CKDIV,
-				    0xF0, 0x50);
-		clear_bit(ADC4_TXFE, &priv->status_mask);
-	}
-	if (test_bit(ADC5_TXFE, &priv->status_mask)) {
-		snd_soc_update_bits(codec, TOMTOM_A_TX_5_6_TXFE_CKDIV,
-				    0x0F, 0x05);
-		clear_bit(ADC5_TXFE, &priv->status_mask);
-	}
-	if (test_bit(ADC6_TXFE, &priv->status_mask)) {
-		snd_soc_update_bits(codec, TOMTOM_A_TX_5_6_TXFE_CKDIV,
-				    0xF0, 0x50);
-		clear_bit(ADC6_TXFE, &priv->status_mask);
-	}
-}
-
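-
-/*
- * Delayed-work handler that restores a decimator's original HPF cut-off
- * frequency. The cut-off is saved and temporarily forced to
- * CF_MIN_3DB_150HZ when the decimator powers up, and is written back
- * here about 300 ms later (see tomtom_codec_enable_dec()). The TXFE
- * clock dividers are restored as well via txfe_clkdiv_update().
- */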
-static void tx_hpf_corner_freq_callback(struct work_struct *work)
-{
-	struct delayed_work *hpf_delayed_work;
-	struct hpf_work *hpf_work;
-	struct tomtom_priv *tomtom;
-	struct snd_soc_codec *codec;
-	u16 tx_mux_ctl_reg;
-	u8 hpf_cut_of_freq;
-
-	hpf_delayed_work = to_delayed_work(work);
-	hpf_work = container_of(hpf_delayed_work, struct hpf_work, dwork);
-	tomtom = hpf_work->tomtom;
-	codec = hpf_work->tomtom->codec;
-	hpf_cut_of_freq = hpf_work->tx_hpf_cut_of_freq;
-
-	tx_mux_ctl_reg = TOMTOM_A_CDC_TX1_MUX_CTL +
-			(hpf_work->decimator - 1) * 8;
-
-	pr_debug("%s(): decimator %u hpf_cut_of_freq 0x%x\n", __func__,
-		hpf_work->decimator, (unsigned int)hpf_cut_of_freq);
-
-	/*
-	 * Restore the TXFE ClkDiv registers to their defaults.
-	 * Any of these registers modified during analog front-end
-	 * enablement are restored to their default values here.
-	 */
-	txfe_clkdiv_update(codec);
-
-	snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30, hpf_cut_of_freq << 4);
-}
-
-#define  TX_MUX_CTL_CUT_OFF_FREQ_MASK	0x30
-#define  CF_MIN_3DB_4HZ			0x0
-#define  CF_MIN_3DB_75HZ		0x1
-#define  CF_MIN_3DB_150HZ		0x2
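-/* The cut-off selection occupies bits [5:4] of the TXn MUX CTL register */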
-
-static int tomtom_codec_enable_dec(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	unsigned int decimator;
-	char *dec_name = NULL;
-	char *widget_name = NULL;
-	char *temp;
-	int ret = 0;
-	u16 dec_reset_reg, tx_vol_ctl_reg, tx_mux_ctl_reg;
-	u8 dec_hpf_cut_of_freq;
-	int offset;
-	char *dec;
-
-	pr_debug("%s %d\n", __func__, event);
-
-	widget_name = kstrndup(w->name, 15, GFP_KERNEL);
-	if (!widget_name)
-		return -ENOMEM;
-	temp = widget_name;
-
-	dec_name = strsep(&widget_name, " ");
-	widget_name = temp;
-	if (!dec_name) {
-		pr_err("%s: Invalid decimator = %s\n", __func__, w->name);
-		ret =  -EINVAL;
-		goto out;
-	}
-
-	dec = strpbrk(dec_name, "123456789");
-	if (!dec) {
-		dev_err(codec->dev, "%s: decimator index not found\n",
-			__func__);
-		ret =  -EINVAL;
-		goto out;
-	}
-
-	ret = kstrtouint(dec, 10, &decimator);
-	if (ret < 0) {
-		pr_err("%s: Invalid decimator = %s\n", __func__, dec_name);
-		ret =  -EINVAL;
-		goto out;
-	}
-
-	pr_debug("%s(): widget = %s dec_name = %s decimator = %u\n", __func__,
-			w->name, dec_name, decimator);
-
-	if (w->reg == TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL) {
-		dec_reset_reg = TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL;
-		offset = 0;
-	} else if (w->reg == TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL) {
-		dec_reset_reg = TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL;
-		offset = 8;
-	} else {
-		pr_err("%s: Error, incorrect dec\n", __func__);
-		ret = -EINVAL;
-		goto out;
-	}
-
-	tx_vol_ctl_reg = TOMTOM_A_CDC_TX1_VOL_CTL_CFG + 8 * (decimator - 1);
-	tx_mux_ctl_reg = TOMTOM_A_CDC_TX1_MUX_CTL + 8 * (decimator - 1);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-
-		/* Enable TX digital mute */
-		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
-
-		snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift,
-			1 << w->shift);
-		snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift, 0x0);
-
-		pr_debug("%s: decimator = %u, bypass = %d\n", __func__,
-			decimator, tx_hpf_work[decimator - 1].tx_hpf_bypass);
-		if (tx_hpf_work[decimator - 1].tx_hpf_bypass != true) {
-			dec_hpf_cut_of_freq = snd_soc_read(codec,
-							tx_mux_ctl_reg);
-
-			dec_hpf_cut_of_freq = (dec_hpf_cut_of_freq & 0x30) >> 4;
-
-			tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq =
-				dec_hpf_cut_of_freq;
-
-			if (dec_hpf_cut_of_freq != CF_MIN_3DB_150HZ) {
-
-				/* Set cut-off freq to CF_MIN_3DB_150HZ (0x2) */
-				snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30,
-							CF_MIN_3DB_150HZ << 4);
-			}
-
-			/* enable HPF */
-			snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x00);
-		} else
-			/* bypass HPF */
-			snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x08);
-
-		break;
-
-	case SND_SOC_DAPM_POST_PMU:
-
-		/* Disable TX digital mute */
-		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x00);
-
-		if ((tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq !=
-				CF_MIN_3DB_150HZ) &&
-			(tx_hpf_work[decimator - 1].tx_hpf_bypass != true)) {
-
-			schedule_delayed_work(&tx_hpf_work[decimator - 1].dwork,
-					msecs_to_jiffies(300));
-		}
-		/* Apply the digital gain after the decimator is enabled */
-		if ((w->shift + offset) < ARRAY_SIZE(tx_digital_gain_reg))
-			snd_soc_write(codec,
-				  tx_digital_gain_reg[w->shift + offset],
-				  snd_soc_read(codec,
-				  tx_digital_gain_reg[w->shift + offset])
-				  );
-
-		break;
-
-	case SND_SOC_DAPM_PRE_PMD:
-
-		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
-		cancel_delayed_work_sync(&tx_hpf_work[decimator - 1].dwork);
-		break;
-
-	case SND_SOC_DAPM_POST_PMD:
-
-		snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x08);
-		snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30,
-			(tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq) << 4);
-
-		break;
-	}
-out:
-	kfree(widget_name);
-	return ret;
-}
-
-static int tomtom_codec_enable_vdd_spkr(struct snd_soc_dapm_widget *w,
-				       struct snd_kcontrol *kcontrol, int event)
-{
-	int ret = 0;
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-
-	pr_debug("%s: %d %s\n", __func__, event, w->name);
-
-	WARN_ONCE(!priv->spkdrv_reg, "SPKDRV supply %s isn't defined\n",
-		  WCD9XXX_VDD_SPKDRV_NAME);
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		if (priv->spkdrv_reg) {
-			ret = regulator_enable(priv->spkdrv_reg);
-			if (ret)
-				pr_err("%s: Failed to enable spkdrv_reg %s\n",
-				       __func__, WCD9XXX_VDD_SPKDRV_NAME);
-		}
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		if (priv->spkdrv_reg) {
-			ret = regulator_disable(priv->spkdrv_reg);
-			if (ret)
-				pr_err("%s: Failed to disable spkdrv_reg %s\n",
-				       __func__, WCD9XXX_VDD_SPKDRV_NAME);
-		}
-		break;
-	}
-
-	return ret;
-}
-
-static int tomtom_codec_enable_vdd_spkr2(struct snd_soc_dapm_widget *w,
-				       struct snd_kcontrol *kcontrol, int event)
-{
-	int ret = 0;
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-
-	pr_debug("%s: %d %s\n", __func__, event, w->name);
-
-	/*
-	 * If the on-demand voltage regulators for spkr1 and spkr2 are derived
-	 * from the same power rail, the same on-demand regulator can be shared
-	 * by spkr1 and spkr2, provided a separate device tree entry has not
-	 * been defined for the spkr2 on-demand regulator.
-	 */
-	if (!priv->spkdrv2_reg) {
-		if (priv->spkdrv_reg) {
-			priv->spkdrv2_reg = priv->spkdrv_reg;
-		} else {
-			WARN_ONCE(!priv->spkdrv2_reg,
-					"SPKDRV2 supply %s isn't defined\n",
-					WCD9XXX_VDD_SPKDRV2_NAME);
-			return 0;
-		}
-	}
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		if (priv->spkdrv2_reg) {
-			ret = regulator_enable(priv->spkdrv2_reg);
-			if (ret)
-				pr_err("%s: Failed to enable spkdrv2_reg %s ret:%d\n",
-				       __func__, WCD9XXX_VDD_SPKDRV2_NAME, ret);
-		}
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		if (priv->spkdrv2_reg) {
-			ret = regulator_disable(priv->spkdrv2_reg);
-			if (ret)
-				pr_err("%s: Failed to disable spkdrv2_reg %s ret:%d\n",
-				       __func__, WCD9XXX_VDD_SPKDRV2_NAME, ret);
-		}
-		break;
-	}
-
-	return ret;
-}
-
-static int tomtom_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
-	pr_debug("%s %d %s\n", __func__, event, w->name);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_RX_RESET_CTL,
-			1 << w->shift, 1 << w->shift);
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_RX_RESET_CTL,
-			1 << w->shift, 0x0);
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		/* Apply the digital gain after the interpolator is enabled */
-		if ((w->shift) < ARRAY_SIZE(rx_digital_gain_reg))
-			snd_soc_write(codec,
-				  rx_digital_gain_reg[w->shift],
-				  snd_soc_read(codec,
-				  rx_digital_gain_reg[w->shift])
-				  );
-		/* Check for Rx1 and Rx2 paths for uhqa mode update */
-		if (w->shift == 0 || w->shift == 1)
-			tomtom_update_uhqa_mode(codec, (1 << w->shift));
-
-		break;
-	}
-	return 0;
-}
-
-/* called under codec_resource_lock acquisition */
-static int __tomtom_codec_enable_ldo_h(struct snd_soc_dapm_widget *w,
-				      struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-
-	pr_debug("%s: enter\n", __func__);
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		/*
-		 * ldo_h_users is protected by tomtom->codec_mutex, so no
-		 * additional locking is needed here.
-		 */
-		if (++priv->ldo_h_users == 1) {
-			WCD9XXX_BG_CLK_LOCK(&priv->resmgr);
-			wcd9xxx_resmgr_get_bandgap(&priv->resmgr,
-						   WCD9XXX_BANDGAP_AUDIO_MODE);
-			WCD9XXX_BG_CLK_UNLOCK(&priv->resmgr);
-			tomtom_codec_internal_rco_ctrl(codec, true);
-			snd_soc_update_bits(codec, TOMTOM_A_LDO_H_MODE_1,
-					    1 << 7, 1 << 7);
-			tomtom_codec_internal_rco_ctrl(codec, false);
-			pr_debug("%s: ldo_h_users %d\n", __func__,
-				 priv->ldo_h_users);
-			/* LDO enable requires 1ms to settle down */
-			usleep_range(1000, 1100);
-		}
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		if (--priv->ldo_h_users == 0) {
-			tomtom_codec_internal_rco_ctrl(codec, true);
-			snd_soc_update_bits(codec, TOMTOM_A_LDO_H_MODE_1,
-					    1 << 7, 0);
-			tomtom_codec_internal_rco_ctrl(codec, false);
-			WCD9XXX_BG_CLK_LOCK(&priv->resmgr);
-			wcd9xxx_resmgr_put_bandgap(&priv->resmgr,
-						   WCD9XXX_BANDGAP_AUDIO_MODE);
-			WCD9XXX_BG_CLK_UNLOCK(&priv->resmgr);
-			pr_debug("%s: ldo_h_users %d\n", __func__,
-				 priv->ldo_h_users);
-		}
-		WARN(priv->ldo_h_users < 0, "Unexpected ldo_h users %d\n",
-		     priv->ldo_h_users);
-		break;
-	}
-	pr_debug("%s: leave\n", __func__);
-	return 0;
-}
-
-static int tomtom_codec_enable_ldo_h(struct snd_soc_dapm_widget *w,
-				    struct snd_kcontrol *kcontrol, int event)
-{
-	int rc;
-
-	rc = __tomtom_codec_enable_ldo_h(w, kcontrol, event);
-	return rc;
-}
-
-static int tomtom_codec_enable_rx_bias(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	pr_debug("%s %d\n", __func__, event);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		wcd9xxx_resmgr_enable_rx_bias(&tomtom->resmgr, 1);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		wcd9xxx_resmgr_enable_rx_bias(&tomtom->resmgr, 0);
-		break;
-	}
-	return 0;
-}
-
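-
-/*
- * Loads the ANC calibration blob (from the hwdep calibration if
- * available, otherwise via request_firmware()) and applies the register
- * writes for the selected ANC slot. The blob starts with a
- * wcd9xxx_anc_header giving the number of slots; each slot holds a u32
- * write count followed by that many TOMTOM_PACKED_REG_SIZE entries of
- * packed reg/mask/value data.
- */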
-static int tomtom_codec_enable_anc(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	const char *filename;
-	const struct firmware *fw;
-	int i;
-	int ret = 0;
-	int num_anc_slots;
-	struct wcd9xxx_anc_header *anc_head;
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	struct firmware_cal *hwdep_cal = NULL;
-	u32 anc_writes_size = 0;
-	u32 anc_cal_size = 0;
-	int anc_size_remaining;
-	u32 *anc_ptr;
-	u16 reg;
-	u8 mask, val, old_val;
-	size_t cal_size;
-	const void *data;
-
-	if (tomtom->anc_func == 0)
-		return 0;
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		filename = "wcd9320/wcd9320_anc.bin";
-
-		hwdep_cal = wcdcal_get_fw_cal(tomtom->fw_data, WCD9XXX_ANC_CAL);
-		if (hwdep_cal) {
-			data = hwdep_cal->data;
-			cal_size = hwdep_cal->size;
-			dev_dbg(codec->dev, "%s: using hwdep calibration\n",
-				__func__);
-		} else {
-			ret = request_firmware(&fw, filename, codec->dev);
-			if (ret != 0) {
-				dev_err(codec->dev, "Failed to acquire ANC data: %d\n",
-					ret);
-				return -ENODEV;
-			}
-			if (!fw) {
-				dev_err(codec->dev, "failed to get anc fw");
-				return -ENODEV;
-			}
-			data = fw->data;
-			cal_size = fw->size;
-			dev_dbg(codec->dev, "%s: using request_firmware calibration\n",
-				__func__);
-		}
-		if (cal_size < sizeof(struct wcd9xxx_anc_header)) {
-			dev_err(codec->dev, "Not enough data\n");
-			ret = -ENOMEM;
-			goto err;
-		}
-		/* First number is the number of register writes */
-		anc_head = (struct wcd9xxx_anc_header *)(data);
-		anc_ptr = (u32 *)(data +
-				  sizeof(struct wcd9xxx_anc_header));
-		anc_size_remaining = cal_size -
-				     sizeof(struct wcd9xxx_anc_header);
-		num_anc_slots = anc_head->num_anc_slots;
-
-		if (tomtom->anc_slot >= num_anc_slots) {
-			dev_err(codec->dev, "Invalid ANC slot selected\n");
-			ret = -EINVAL;
-			goto err;
-		}
-		for (i = 0; i < num_anc_slots; i++) {
-			if (anc_size_remaining < TOMTOM_PACKED_REG_SIZE) {
-				dev_err(codec->dev, "Invalid register format\n");
-				ret = -EINVAL;
-				goto err;
-			}
-			anc_writes_size = (u32)(*anc_ptr);
-			anc_size_remaining -= sizeof(u32);
-			anc_ptr += 1;
-
-			if (anc_writes_size * TOMTOM_PACKED_REG_SIZE
-				> anc_size_remaining) {
-				dev_err(codec->dev, "Invalid register format\n");
-				ret = -EINVAL;
-				goto err;
-			}
-
-			if (tomtom->anc_slot == i)
-				break;
-
-			anc_size_remaining -= (anc_writes_size *
-				TOMTOM_PACKED_REG_SIZE);
-			anc_ptr += anc_writes_size;
-		}
-		if (i == num_anc_slots) {
-			dev_err(codec->dev, "Selected ANC slot not present\n");
-			ret = -EINVAL;
-			goto err;
-		}
-
-		i = 0;
-		anc_cal_size = anc_writes_size;
-		if (w->reg == TOMTOM_A_RX_HPH_L_DAC_CTL) {
-			snd_soc_update_bits(codec,
-				TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x03, 0x03);
-			anc_writes_size = (anc_cal_size/2);
-		}
-
-		if (w->reg == TOMTOM_A_RX_HPH_R_DAC_CTL) {
-			snd_soc_update_bits(codec,
-				TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x0C, 0x0C);
-			i = (anc_cal_size/2);
-			anc_writes_size = anc_cal_size;
-		}
-
-		for (; i < anc_writes_size; i++) {
-			TOMTOM_CODEC_UNPACK_ENTRY(anc_ptr[i], reg,
-				mask, val);
-			/*
-			 * The ANC soft reset register from ACDB is skipped
-			 * here because the ANC left soft reset bits are
-			 * handled while enabling the ANC HPH Right DAC.
-			 */
-			if ((reg == TOMTOM_A_CDC_CLK_ANC_RESET_CTL) &&
-				((w->reg == TOMTOM_A_RX_HPH_L_DAC_CTL) ||
-				(w->reg == TOMTOM_A_RX_HPH_R_DAC_CTL))) {
-				continue;
-			}
-			old_val = snd_soc_read(codec, reg);
-			snd_soc_write(codec, reg, (old_val & ~mask) |
-				(val & mask));
-		}
-		if (w->reg == TOMTOM_A_RX_HPH_L_DAC_CTL)
-			snd_soc_update_bits(codec,
-				TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x03, 0x00);
-
-		if (w->reg == TOMTOM_A_RX_HPH_R_DAC_CTL)
-			snd_soc_update_bits(codec,
-				TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x0C, 0x00);
-		if (!hwdep_cal)
-			release_firmware(fw);
-		txfe_clkdiv_update(codec);
-		break;
-	case SND_SOC_DAPM_PRE_PMD:
-		msleep(40);
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_ANC1_B1_CTL, 0x01,
-				    0x00);
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_ANC2_B1_CTL, 0x02,
-				    0x00);
-		msleep(20);
-		snd_soc_write(codec, TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x0F);
-		snd_soc_write(codec, TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL, 0);
-		snd_soc_write(codec, TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x00);
-		break;
-	}
-	return 0;
-err:
-	if (!hwdep_cal)
-		release_firmware(fw);
-	return ret;
-}
-
-static int tomtom_hphl_dac_event(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-	uint32_t impedl, impedr;
-	int ret = 0;
-
-	pr_debug("%s %s %d\n", __func__, w->name, event);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		if (tomtom_p->anc_func) {
-			tomtom_codec_enable_anc(w, kcontrol, event);
-			msleep(50);
-		}
-
-		if (!high_perf_mode && !tomtom_p->uhqa_mode) {
-			wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d,
-						 WCD9XXX_CLSH_STATE_HPHL,
-						 WCD9XXX_CLSH_REQ_ENABLE,
-						 WCD9XXX_CLSH_EVENT_PRE_DAC);
-		} else {
-			wcd9xxx_enable_high_perf_mode(codec, &tomtom_p->clsh_d,
-						tomtom_p->uhqa_mode,
-						WCD9XXX_CLSAB_STATE_HPHL,
-						WCD9XXX_CLSAB_REQ_ENABLE);
-		}
-		ret = wcd9xxx_mbhc_get_impedance(&tomtom_p->mbhc,
-					&impedl, &impedr);
-		if (!ret)
-			wcd9xxx_clsh_imped_config(codec, impedl);
-		else
-			dev_dbg(codec->dev, "%s: Failed to get mbhc impedance %d\n",
-						__func__, ret);
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_RX1_B3_CTL, 0xBC, 0x94);
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_RX1_B4_CTL, 0x30, 0x10);
-		break;
-	case SND_SOC_DAPM_PRE_PMD:
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_RX1_B3_CTL, 0xBC, 0x00);
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_RX1_B4_CTL, 0x30, 0x00);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		if (!high_perf_mode && !tomtom_p->uhqa_mode) {
-			wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d,
-						 WCD9XXX_CLSH_STATE_HPHL,
-						 WCD9XXX_CLSH_REQ_DISABLE,
-						 WCD9XXX_CLSH_EVENT_POST_PA);
-		} else {
-			wcd9xxx_enable_high_perf_mode(codec, &tomtom_p->clsh_d,
-						tomtom_p->uhqa_mode,
-						WCD9XXX_CLSAB_STATE_HPHL,
-						WCD9XXX_CLSAB_REQ_DISABLE);
-		}
-		break;
-	}
-	return 0;
-}
-
-static int tomtom_hphr_dac_event(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-
-	pr_debug("%s %s %d\n", __func__, w->name, event);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		if (tomtom_p->anc_func) {
-			tomtom_codec_enable_anc(w, kcontrol, event);
-			msleep(50);
-		}
-
-		snd_soc_update_bits(codec, w->reg, 0x40, 0x40);
-		if (!high_perf_mode && !tomtom_p->uhqa_mode) {
-			wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d,
-						 WCD9XXX_CLSH_STATE_HPHR,
-						 WCD9XXX_CLSH_REQ_ENABLE,
-						 WCD9XXX_CLSH_EVENT_PRE_DAC);
-		} else {
-			wcd9xxx_enable_high_perf_mode(codec, &tomtom_p->clsh_d,
-						tomtom_p->uhqa_mode,
-						WCD9XXX_CLSAB_STATE_HPHR,
-						WCD9XXX_CLSAB_REQ_ENABLE);
-		}
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_RX2_B3_CTL, 0xBC, 0x94);
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_RX2_B4_CTL, 0x30, 0x10);
-		break;
-	case SND_SOC_DAPM_PRE_PMD:
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_RX2_B3_CTL, 0xBC, 0x00);
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_RX2_B4_CTL, 0x30, 0x00);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		snd_soc_update_bits(codec, w->reg, 0x40, 0x00);
-		if (!high_perf_mode && !tomtom_p->uhqa_mode) {
-			wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d,
-						 WCD9XXX_CLSH_STATE_HPHR,
-						 WCD9XXX_CLSH_REQ_DISABLE,
-						 WCD9XXX_CLSH_EVENT_POST_PA);
-		} else {
-			wcd9xxx_enable_high_perf_mode(codec, &tomtom_p->clsh_d,
-						tomtom_p->uhqa_mode,
-						WCD9XXX_CLSAB_STATE_HPHR,
-						WCD9XXX_CLSAB_REQ_DISABLE);
-		}
-		break;
-	}
-	return 0;
-}
-
-static int tomtom_hph_pa_event(struct snd_soc_dapm_widget *w,
-			      struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	enum wcd9xxx_notify_event e_pre_on, e_post_off;
-	u8 req_clsh_state;
-	u32 pa_settle_time = TOMTOM_HPH_PA_SETTLE_COMP_OFF;
-
-	pr_debug("%s: %s event = %d\n", __func__, w->name, event);
-	if (w->shift == 5) {
-		e_pre_on = WCD9XXX_EVENT_PRE_HPHL_PA_ON;
-		e_post_off = WCD9XXX_EVENT_POST_HPHL_PA_OFF;
-		req_clsh_state = WCD9XXX_CLSH_STATE_HPHL;
-	} else if (w->shift == 4) {
-		e_pre_on = WCD9XXX_EVENT_PRE_HPHR_PA_ON;
-		e_post_off = WCD9XXX_EVENT_POST_HPHR_PA_OFF;
-		req_clsh_state = WCD9XXX_CLSH_STATE_HPHR;
-	} else {
-		pr_err("%s: Invalid w->shift %d\n", __func__, w->shift);
-		return -EINVAL;
-	}
-
-	if (tomtom->comp_enabled[COMPANDER_1])
-		pa_settle_time = TOMTOM_HPH_PA_SETTLE_COMP_ON;
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		set_bit(HPH_DELAY, &tomtom->status_mask);
-		/* Let MBHC module know PA is turning on */
-		wcd9xxx_resmgr_notifier_call(&tomtom->resmgr, e_pre_on);
-		break;
-
-	case SND_SOC_DAPM_POST_PMU:
-		if (test_bit(HPH_DELAY, &tomtom->status_mask)) {
-			/*
-			 * Wait for the PA settle time after enabling HPHL/HPHR
-			 * in register 0x1AB
-			 */
-			usleep_range(pa_settle_time, pa_settle_time + 1000);
-			clear_bit(HPH_DELAY, &tomtom->status_mask);
-			pr_debug("%s: sleep %d us after %s PA enable\n",
-				__func__, pa_settle_time, w->name);
-		}
-		if (!high_perf_mode && !tomtom->uhqa_mode) {
-			wcd9xxx_clsh_fsm(codec, &tomtom->clsh_d,
-						 req_clsh_state,
-						 WCD9XXX_CLSH_REQ_ENABLE,
-						 WCD9XXX_CLSH_EVENT_POST_PA);
-		}
-		break;
-
-	case SND_SOC_DAPM_PRE_PMD:
-		set_bit(HPH_DELAY, &tomtom->status_mask);
-		break;
-
-	case SND_SOC_DAPM_POST_PMD:
-		/* Let MBHC module know PA turned off */
-		wcd9xxx_resmgr_notifier_call(&tomtom->resmgr, e_post_off);
-		if (test_bit(HPH_DELAY, &tomtom->status_mask)) {
-			/*
-			 * Wait for the PA settle time after disabling HPHL/HPHR
-			 * in register 0x1AB
-			 */
-			usleep_range(pa_settle_time, pa_settle_time + 1000);
-			clear_bit(HPH_DELAY, &tomtom->status_mask);
-			pr_debug("%s: sleep %d us after %s PA disable\n",
-				__func__, pa_settle_time, w->name);
-		}
-
-		break;
-	}
-	return 0;
-}
-
-static int tomtom_codec_enable_anc_hph(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	int ret = 0;
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		ret = tomtom_hph_pa_event(w, kcontrol, event);
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		if ((snd_soc_read(codec, TOMTOM_A_RX_HPH_L_DAC_CTL) & 0x80) &&
-			(snd_soc_read(codec, TOMTOM_A_RX_HPH_R_DAC_CTL)
-								& 0x80)) {
-			snd_soc_update_bits(codec,
-					TOMTOM_A_RX_HPH_CNP_EN, 0x30, 0x30);
-			msleep(30);
-		}
-		ret = tomtom_hph_pa_event(w, kcontrol, event);
-		break;
-	case SND_SOC_DAPM_PRE_PMD:
-		if (w->shift == 5) {
-			snd_soc_update_bits(codec,
-					TOMTOM_A_RX_HPH_CNP_EN, 0x30, 0x00);
-			msleep(40);
-			snd_soc_update_bits(codec,
-					TOMTOM_A_TX_7_MBHC_EN, 0x80, 0x00);
-			ret |= tomtom_codec_enable_anc(w, kcontrol, event);
-		}
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		ret = tomtom_hph_pa_event(w, kcontrol, event);
-		break;
-	}
-	return ret;
-}
-
-static const struct snd_soc_dapm_widget tomtom_dapm_i2s_widgets[] = {
-	SND_SOC_DAPM_SUPPLY("RX_I2S_CLK", TOMTOM_A_CDC_CLK_RX_I2S_CTL,
-	4, 0, NULL, 0),
-	SND_SOC_DAPM_SUPPLY("TX_I2S_CLK", TOMTOM_A_CDC_CLK_TX_I2S_CTL, 4,
-	0, NULL, 0),
-};
-
-static int tomtom_lineout_dac_event(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	pr_debug("%s %s %d\n", __func__, w->name, event);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		wcd9xxx_clsh_fsm(codec, &tomtom->clsh_d,
-						 WCD9XXX_CLSH_STATE_LO,
-						 WCD9XXX_CLSH_REQ_ENABLE,
-						 WCD9XXX_CLSH_EVENT_PRE_DAC);
-		snd_soc_update_bits(codec, w->reg, 0x40, 0x40);
-		break;
-
-	case SND_SOC_DAPM_POST_PMD:
-		snd_soc_update_bits(codec, w->reg, 0x40, 0x00);
-		wcd9xxx_clsh_fsm(codec, &tomtom->clsh_d,
-						 WCD9XXX_CLSH_STATE_LO,
-						 WCD9XXX_CLSH_REQ_DISABLE,
-						 WCD9XXX_CLSH_EVENT_POST_PA);
-		break;
-	}
-	return 0;
-}
-
-static int tomtom_spk_dac_event(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
-	pr_debug("%s %s %d\n", __func__, w->name, event);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL,
-							0x80, 0x80);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		if ((snd_soc_read(codec, w->reg) & 0x03) == 0)
-			snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL,
-							0x80, 0x00);
-		break;
-	}
-	return 0;
-}
-
-static const struct snd_soc_dapm_route audio_i2s_map[] = {
-	{"SLIM RX1", NULL, "RX_I2S_CLK"},
-	{"SLIM RX2", NULL, "RX_I2S_CLK"},
-	{"SLIM RX3", NULL, "RX_I2S_CLK"},
-	{"SLIM RX4", NULL, "RX_I2S_CLK"},
-
-	{"SLIM TX7 MUX", NULL, "TX_I2S_CLK"},
-	{"SLIM TX8 MUX", NULL, "TX_I2S_CLK"},
-	{"SLIM TX9 MUX", NULL, "TX_I2S_CLK"},
-	{"SLIM TX10 MUX", NULL, "TX_I2S_CLK"},
-
-	{"RX_I2S_CLK", NULL, "CDC_I2S_RX_CONN"},
-};
-
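-
-/*
- * DAPM routing table: each entry is {sink, control, source}. A NULL
- * control means the path is always connected; otherwise the named
- * kcontrol (mux selection or mixer switch) gates the route.
- */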
-static const struct snd_soc_dapm_route audio_map[] = {
-	/* SLIMBUS Connections */
-	{"AIF1 CAP", NULL, "AIF1_CAP Mixer"},
-	{"AIF2 CAP", NULL, "AIF2_CAP Mixer"},
-	{"AIF3 CAP", NULL, "AIF3_CAP Mixer"},
-
-	/* VI Feedback */
-	{"AIF4 VI", NULL, "VIONOFF"},
-	{"VIONOFF", "Switch", "VIINPUT"},
-
-	/* MAD */
-	{"MAD_SEL MUX", "SPE", "MAD_CPE_INPUT"},
-	{"MAD_SEL MUX", "MSM", "MADINPUT"},
-	{"MADONOFF", "Switch", "MAD_SEL MUX"},
-	{"AIF4 MAD", NULL, "MADONOFF"},
-
-	/* SLIM_MIXER("AIF1_CAP Mixer"),*/
-	{"AIF1_CAP Mixer", "SLIM TX1", "SLIM TX1 MUX"},
-	{"AIF1_CAP Mixer", "SLIM TX2", "SLIM TX2 MUX"},
-	{"AIF1_CAP Mixer", "SLIM TX3", "SLIM TX3 MUX"},
-	{"AIF1_CAP Mixer", "SLIM TX4", "SLIM TX4 MUX"},
-	{"AIF1_CAP Mixer", "SLIM TX5", "SLIM TX5 MUX"},
-	{"AIF1_CAP Mixer", "SLIM TX6", "SLIM TX6 MUX"},
-	{"AIF1_CAP Mixer", "SLIM TX7", "SLIM TX7 MUX"},
-	{"AIF1_CAP Mixer", "SLIM TX8", "SLIM TX8 MUX"},
-	{"AIF1_CAP Mixer", "SLIM TX9", "SLIM TX9 MUX"},
-	{"AIF1_CAP Mixer", "SLIM TX10", "SLIM TX10 MUX"},
-	/* SLIM_MIXER("AIF2_CAP Mixer"),*/
-	{"AIF2_CAP Mixer", "SLIM TX1", "SLIM TX1 MUX"},
-	{"AIF2_CAP Mixer", "SLIM TX2", "SLIM TX2 MUX"},
-	{"AIF2_CAP Mixer", "SLIM TX3", "SLIM TX3 MUX"},
-	{"AIF2_CAP Mixer", "SLIM TX4", "SLIM TX4 MUX"},
-	{"AIF2_CAP Mixer", "SLIM TX5", "SLIM TX5 MUX"},
-	{"AIF2_CAP Mixer", "SLIM TX6", "SLIM TX6 MUX"},
-	{"AIF2_CAP Mixer", "SLIM TX7", "SLIM TX7 MUX"},
-	{"AIF2_CAP Mixer", "SLIM TX8", "SLIM TX8 MUX"},
-	{"AIF2_CAP Mixer", "SLIM TX9", "SLIM TX9 MUX"},
-	{"AIF2_CAP Mixer", "SLIM TX10", "SLIM TX10 MUX"},
-	/* SLIM_MIXER("AIF3_CAP Mixer"),*/
-	{"AIF3_CAP Mixer", "SLIM TX1", "SLIM TX1 MUX"},
-	{"AIF3_CAP Mixer", "SLIM TX2", "SLIM TX2 MUX"},
-	{"AIF3_CAP Mixer", "SLIM TX3", "SLIM TX3 MUX"},
-	{"AIF3_CAP Mixer", "SLIM TX4", "SLIM TX4 MUX"},
-	{"AIF3_CAP Mixer", "SLIM TX5", "SLIM TX5 MUX"},
-	{"AIF3_CAP Mixer", "SLIM TX6", "SLIM TX6 MUX"},
-	{"AIF3_CAP Mixer", "SLIM TX7", "SLIM TX7 MUX"},
-	{"AIF3_CAP Mixer", "SLIM TX8", "SLIM TX8 MUX"},
-	{"AIF3_CAP Mixer", "SLIM TX9", "SLIM TX9 MUX"},
-	{"AIF3_CAP Mixer", "SLIM TX10", "SLIM TX10 MUX"},
-
-	{"SLIM TX1 MUX", "DEC1", "DEC1 MUX"},
-	{"SLIM TX1 MUX", "RMIX1", "RX1 MIX1"},
-	{"SLIM TX1 MUX", "RMIX2", "RX2 MIX1"},
-	{"SLIM TX1 MUX", "RMIX3", "RX3 MIX1"},
-	{"SLIM TX1 MUX", "RMIX4", "RX4 MIX1"},
-	{"SLIM TX1 MUX", "RMIX5", "RX5 MIX1"},
-	{"SLIM TX1 MUX", "RMIX6", "RX6 MIX1"},
-	{"SLIM TX1 MUX", "RMIX7", "RX7 MIX1"},
-	{"SLIM TX1 MUX", "RMIX8", "RX8 MIX1"},
-
-	{"SLIM TX2 MUX", "DEC2", "DEC2 MUX"},
-	{"SLIM TX2 MUX", "RMIX1", "RX1 MIX1"},
-	{"SLIM TX2 MUX", "RMIX2", "RX2 MIX1"},
-	{"SLIM TX2 MUX", "RMIX3", "RX3 MIX1"},
-	{"SLIM TX2 MUX", "RMIX4", "RX4 MIX1"},
-	{"SLIM TX2 MUX", "RMIX5", "RX5 MIX1"},
-	{"SLIM TX2 MUX", "RMIX6", "RX6 MIX1"},
-	{"SLIM TX2 MUX", "RMIX7", "RX7 MIX1"},
-	{"SLIM TX2 MUX", "RMIX8", "RX8 MIX1"},
-
-	{"SLIM TX3 MUX", "DEC3", "DEC3 MUX"},
-	{"SLIM TX3 MUX", "RMIX1", "RX1 MIX1"},
-	{"SLIM TX3 MUX", "RMIX2", "RX2 MIX1"},
-	{"SLIM TX3 MUX", "RMIX3", "RX3 MIX1"},
-	{"SLIM TX3 MUX", "RMIX4", "RX4 MIX1"},
-	{"SLIM TX3 MUX", "RMIX5", "RX5 MIX1"},
-	{"SLIM TX3 MUX", "RMIX6", "RX6 MIX1"},
-	{"SLIM TX3 MUX", "RMIX7", "RX7 MIX1"},
-	{"SLIM TX3 MUX", "RMIX8", "RX8 MIX1"},
-
-	{"SLIM TX4 MUX", "DEC4", "DEC4 MUX"},
-	{"SLIM TX4 MUX", "RMIX1", "RX1 MIX1"},
-	{"SLIM TX4 MUX", "RMIX2", "RX2 MIX1"},
-	{"SLIM TX4 MUX", "RMIX3", "RX3 MIX1"},
-	{"SLIM TX4 MUX", "RMIX4", "RX4 MIX1"},
-	{"SLIM TX4 MUX", "RMIX5", "RX5 MIX1"},
-	{"SLIM TX4 MUX", "RMIX6", "RX6 MIX1"},
-	{"SLIM TX4 MUX", "RMIX7", "RX7 MIX1"},
-	{"SLIM TX4 MUX", "RMIX8", "RX8 MIX1"},
-
-	{"SLIM TX5 MUX", "DEC5", "DEC5 MUX"},
-	{"SLIM TX5 MUX", "RMIX1", "RX1 MIX1"},
-	{"SLIM TX5 MUX", "RMIX2", "RX2 MIX1"},
-	{"SLIM TX5 MUX", "RMIX3", "RX3 MIX1"},
-	{"SLIM TX5 MUX", "RMIX4", "RX4 MIX1"},
-	{"SLIM TX5 MUX", "RMIX5", "RX5 MIX1"},
-	{"SLIM TX5 MUX", "RMIX6", "RX6 MIX1"},
-	{"SLIM TX5 MUX", "RMIX7", "RX7 MIX1"},
-	{"SLIM TX5 MUX", "RMIX8", "RX8 MIX1"},
-
-	{"SLIM TX6 MUX", "DEC6", "DEC6 MUX"},
-
-	{"SLIM TX7 MUX", "DEC1", "DEC1 MUX"},
-	{"SLIM TX7 MUX", "DEC2", "DEC2 MUX"},
-	{"SLIM TX7 MUX", "DEC3", "DEC3 MUX"},
-	{"SLIM TX7 MUX", "DEC4", "DEC4 MUX"},
-	{"SLIM TX7 MUX", "DEC5", "DEC5 MUX"},
-	{"SLIM TX7 MUX", "DEC6", "DEC6 MUX"},
-	{"SLIM TX7 MUX", "DEC7", "DEC7 MUX"},
-	{"SLIM TX7 MUX", "DEC8", "DEC8 MUX"},
-	{"SLIM TX7 MUX", "DEC9", "DEC9 MUX"},
-	{"SLIM TX7 MUX", "DEC10", "DEC10 MUX"},
-	{"SLIM TX7 MUX", "RMIX1", "RX1 MIX1"},
-	{"SLIM TX7 MUX", "RMIX2", "RX2 MIX1"},
-	{"SLIM TX7 MUX", "RMIX3", "RX3 MIX1"},
-	{"SLIM TX7 MUX", "RMIX4", "RX4 MIX1"},
-	{"SLIM TX7 MUX", "RMIX5", "RX5 MIX1"},
-	{"SLIM TX7 MUX", "RMIX6", "RX6 MIX1"},
-	{"SLIM TX7 MUX", "RMIX7", "RX7 MIX1"},
-
-	{"SLIM TX8 MUX", "DEC1", "DEC1 MUX"},
-	{"SLIM TX8 MUX", "DEC2", "DEC2 MUX"},
-	{"SLIM TX8 MUX", "DEC3", "DEC3 MUX"},
-	{"SLIM TX8 MUX", "DEC4", "DEC4 MUX"},
-	{"SLIM TX8 MUX", "DEC5", "DEC5 MUX"},
-	{"SLIM TX8 MUX", "DEC6", "DEC6 MUX"},
-	{"SLIM TX8 MUX", "DEC7", "DEC7 MUX"},
-	{"SLIM TX8 MUX", "DEC8", "DEC8 MUX"},
-	{"SLIM TX8 MUX", "DEC9", "DEC9 MUX"},
-	{"SLIM TX8 MUX", "DEC10", "DEC10 MUX"},
-
-	{"SLIM TX9 MUX", "DEC1", "DEC1 MUX"},
-	{"SLIM TX9 MUX", "DEC2", "DEC2 MUX"},
-	{"SLIM TX9 MUX", "DEC3", "DEC3 MUX"},
-	{"SLIM TX9 MUX", "DEC4", "DEC4 MUX"},
-	{"SLIM TX9 MUX", "DEC5", "DEC5 MUX"},
-	{"SLIM TX9 MUX", "DEC6", "DEC6 MUX"},
-	{"SLIM TX9 MUX", "DEC7", "DEC7 MUX"},
-	{"SLIM TX9 MUX", "DEC8", "DEC8 MUX"},
-	{"SLIM TX9 MUX", "DEC9", "DEC9 MUX"},
-	{"SLIM TX9 MUX", "DEC10", "DEC10 MUX"},
-
-	{"SLIM TX10 MUX", "DEC1", "DEC1 MUX"},
-	{"SLIM TX10 MUX", "DEC2", "DEC2 MUX"},
-	{"SLIM TX10 MUX", "DEC3", "DEC3 MUX"},
-	{"SLIM TX10 MUX", "DEC4", "DEC4 MUX"},
-	{"SLIM TX10 MUX", "DEC5", "DEC5 MUX"},
-	{"SLIM TX10 MUX", "DEC6", "DEC6 MUX"},
-	{"SLIM TX10 MUX", "DEC7", "DEC7 MUX"},
-	{"SLIM TX10 MUX", "DEC8", "DEC8 MUX"},
-	{"SLIM TX10 MUX", "DEC9", "DEC9 MUX"},
-	{"SLIM TX10 MUX", "DEC10", "DEC10 MUX"},
-
-	/* Earpiece (RX MIX1) */
-	{"EAR", NULL, "EAR PA"},
-	{"EAR PA", NULL, "EAR_PA_MIXER"},
-	{"EAR_PA_MIXER", NULL, "DAC1"},
-	{"DAC1", NULL, "RX_BIAS"},
-
-	{"ANC EAR", NULL, "ANC EAR PA"},
-	{"ANC EAR PA", NULL, "EAR_PA_MIXER"},
-	{"ANC1 FB MUX", "EAR_HPH_L", "RX1 MIX2"},
-	{"ANC1 FB MUX", "EAR_LINE_1", "RX2 MIX2"},
-
-	/* Headset (RX MIX1 and RX MIX2) */
-	{"HEADPHONE", NULL, "HPHL"},
-	{"HEADPHONE", NULL, "HPHR"},
-
-	{"HPHL", NULL, "HPHL_PA_MIXER"},
-	{"HPHL_PA_MIXER", NULL, "HPHL DAC"},
-	{"HPHL DAC", NULL, "RX_BIAS"},
-
-	{"HPHR", NULL, "HPHR_PA_MIXER"},
-	{"HPHR_PA_MIXER", NULL, "HPHR DAC"},
-	{"HPHR DAC", NULL, "RX_BIAS"},
-
-	{"ANC HEADPHONE", NULL, "ANC HPHL"},
-	{"ANC HEADPHONE", NULL, "ANC HPHR"},
-
-	{"ANC HPHL", NULL, "HPHL_PA_MIXER"},
-	{"ANC HPHR", NULL, "HPHR_PA_MIXER"},
-
-	{"ANC1 MUX", "ADC1", "ADC1"},
-	{"ANC1 MUX", "ADC2", "ADC2"},
-	{"ANC1 MUX", "ADC3", "ADC3"},
-	{"ANC1 MUX", "ADC4", "ADC4"},
-	{"ANC1 MUX", "ADC5", "ADC5"},
-	{"ANC1 MUX", "ADC6", "ADC6"},
-	{"ANC1 MUX", "DMIC1", "DMIC1"},
-	{"ANC1 MUX", "DMIC2", "DMIC2"},
-	{"ANC1 MUX", "DMIC3", "DMIC3"},
-	{"ANC1 MUX", "DMIC4", "DMIC4"},
-	{"ANC1 MUX", "DMIC5", "DMIC5"},
-	{"ANC1 MUX", "DMIC6", "DMIC6"},
-	{"ANC2 MUX", "ADC1", "ADC1"},
-	{"ANC2 MUX", "ADC2", "ADC2"},
-	{"ANC2 MUX", "ADC3", "ADC3"},
-	{"ANC2 MUX", "ADC4", "ADC4"},
-	{"ANC2 MUX", "ADC5", "ADC5"},
-	{"ANC2 MUX", "ADC6", "ADC6"},
-	{"ANC2 MUX", "DMIC1", "DMIC1"},
-	{"ANC2 MUX", "DMIC2", "DMIC2"},
-	{"ANC2 MUX", "DMIC3", "DMIC3"},
-	{"ANC2 MUX", "DMIC4", "DMIC4"},
-	{"ANC2 MUX", "DMIC5", "DMIC5"},
-	{"ANC2 MUX", "DMIC6", "DMIC6"},
-
-	{"ANC HPHR", NULL, "CDC_CONN"},
-
-	{"DAC1", "Switch", "CLASS_H_DSM MUX"},
-	{"HPHL DAC", "Switch", "CLASS_H_DSM MUX"},
-	{"HPHR DAC", NULL, "RX2 CHAIN"},
-
-	{"LINEOUT1", NULL, "LINEOUT1 PA"},
-	{"LINEOUT2", NULL, "LINEOUT2 PA"},
-	{"LINEOUT3", NULL, "LINEOUT3 PA"},
-	{"LINEOUT4", NULL, "LINEOUT4 PA"},
-	{"SPK_OUT", NULL, "SPK PA"},
-	{"SPK_OUT", NULL, "SPK2 PA"},
-
-	{"LINEOUT1 PA", NULL, "LINEOUT1_PA_MIXER"},
-	{"LINEOUT1_PA_MIXER", NULL, "LINEOUT1 DAC"},
-
-	{"LINEOUT2 PA", NULL, "LINEOUT2_PA_MIXER"},
-	{"LINEOUT2_PA_MIXER", NULL, "LINEOUT2 DAC"},
-
-	{"LINEOUT3 PA", NULL, "LINEOUT3_PA_MIXER"},
-	{"LINEOUT3_PA_MIXER", NULL, "LINEOUT3 DAC"},
-
-	{"LINEOUT4 PA", NULL, "LINEOUT4_PA_MIXER"},
-	{"LINEOUT4_PA_MIXER", NULL, "LINEOUT4 DAC"},
-
-	{"LINEOUT1 DAC", NULL, "RX3 MIX1"},
-
-	{"RDAC5 MUX", "DEM3_INV", "RX3 MIX1"},
-	{"RDAC5 MUX", "DEM4", "RX4 MIX1"},
-
-	{"LINEOUT3 DAC", NULL, "RDAC5 MUX"},
-
-	{"LINEOUT2 DAC", NULL, "RX5 MIX1"},
-
-	{"RDAC7 MUX", "DEM5_INV", "RX5 MIX1"},
-	{"RDAC7 MUX", "DEM6", "RX6 MIX1"},
-
-	{"LINEOUT4 DAC", NULL, "RDAC7 MUX"},
-
-	{"SPK PA", NULL, "SPK DAC"},
-	{"SPK DAC", NULL, "RX7 MIX2"},
-	{"SPK DAC", NULL, "VDD_SPKDRV"},
-
-	{"SPK2 PA", NULL, "SPK2 DAC"},
-	{"SPK2 DAC", NULL, "RX8 MIX1"},
-	{"SPK2 DAC", NULL, "VDD_SPKDRV2"},
-
-	{"CLASS_H_DSM MUX", "DSM_HPHL_RX1", "RX1 CHAIN"},
-
-	{"RX1 INTERP", NULL, "RX1 MIX2"},
-	{"RX1 CHAIN", NULL, "RX1 INTERP"},
-	{"RX2 INTERP", NULL, "RX2 MIX2"},
-	{"RX2 CHAIN", NULL, "RX2 INTERP"},
-	{"RX1 MIX2", NULL, "ANC1 MUX"},
-	{"RX2 MIX2", NULL, "ANC2 MUX"},
-
-	{"LINEOUT1 DAC", NULL, "RX_BIAS"},
-	{"LINEOUT2 DAC", NULL, "RX_BIAS"},
-	{"LINEOUT3 DAC", NULL, "RX_BIAS"},
-	{"LINEOUT4 DAC", NULL, "RX_BIAS"},
-	{"SPK DAC", NULL, "RX_BIAS"},
-	{"SPK2 DAC", NULL, "RX_BIAS"},
-
-	{"RX7 MIX1", NULL, "COMP0_CLK"},
-	{"RX8 MIX1", NULL, "COMP0_CLK"},
-	{"RX1 MIX1", NULL, "COMP1_CLK"},
-	{"RX2 MIX1", NULL, "COMP1_CLK"},
-	{"RX3 MIX1", NULL, "COMP2_CLK"},
-	{"RX5 MIX1", NULL, "COMP2_CLK"},
-
-	{"RX1 MIX1", NULL, "RX1 MIX1 INP1"},
-	{"RX1 MIX1", NULL, "RX1 MIX1 INP2"},
-	{"RX1 MIX1", NULL, "RX1 MIX1 INP3"},
-	{"RX2 MIX1", NULL, "RX2 MIX1 INP1"},
-	{"RX2 MIX1", NULL, "RX2 MIX1 INP2"},
-	{"RX3 MIX1", NULL, "RX3 MIX1 INP1"},
-	{"RX3 MIX1", NULL, "RX3 MIX1 INP2"},
-	{"RX4 MIX1", NULL, "RX4 MIX1 INP1"},
-	{"RX4 MIX1", NULL, "RX4 MIX1 INP2"},
-	{"RX5 MIX1", NULL, "RX5 MIX1 INP1"},
-	{"RX5 MIX1", NULL, "RX5 MIX1 INP2"},
-	{"RX6 MIX1", NULL, "RX6 MIX1 INP1"},
-	{"RX6 MIX1", NULL, "RX6 MIX1 INP2"},
-	{"RX7 MIX1", NULL, "RX7 MIX1 INP1"},
-	{"RX7 MIX1", NULL, "RX7 MIX1 INP2"},
-	{"RX8 MIX1", NULL, "RX8 MIX1 INP1"},
-	{"RX8 MIX1", NULL, "RX8 MIX1 INP2"},
-	{"RX1 MIX2", NULL, "RX1 MIX1"},
-	{"RX1 MIX2", NULL, "RX1 MIX2 INP1"},
-	{"RX1 MIX2", NULL, "RX1 MIX2 INP2"},
-	{"RX2 MIX2", NULL, "RX2 MIX1"},
-	{"RX2 MIX2", NULL, "RX2 MIX2 INP1"},
-	{"RX2 MIX2", NULL, "RX2 MIX2 INP2"},
-	{"RX7 MIX2", NULL, "RX7 MIX1"},
-	{"RX7 MIX2", NULL, "RX7 MIX2 INP1"},
-	{"RX7 MIX2", NULL, "RX7 MIX2 INP2"},
-
-	/* SLIM_MUX("AIF1_PB", "AIF1 PB"),*/
-	{"SLIM RX1 MUX", "AIF1_PB", "AIF1 PB"},
-	{"SLIM RX2 MUX", "AIF1_PB", "AIF1 PB"},
-	{"SLIM RX3 MUX", "AIF1_PB", "AIF1 PB"},
-	{"SLIM RX4 MUX", "AIF1_PB", "AIF1 PB"},
-	{"SLIM RX5 MUX", "AIF1_PB", "AIF1 PB"},
-	{"SLIM RX6 MUX", "AIF1_PB", "AIF1 PB"},
-	{"SLIM RX7 MUX", "AIF1_PB", "AIF1 PB"},
-	{"SLIM RX8 MUX", "AIF1_PB", "AIF1 PB"},
-	/* SLIM_MUX("AIF2_PB", "AIF2 PB"),*/
-	{"SLIM RX1 MUX", "AIF2_PB", "AIF2 PB"},
-	{"SLIM RX2 MUX", "AIF2_PB", "AIF2 PB"},
-	{"SLIM RX3 MUX", "AIF2_PB", "AIF2 PB"},
-	{"SLIM RX4 MUX", "AIF2_PB", "AIF2 PB"},
-	{"SLIM RX5 MUX", "AIF2_PB", "AIF2 PB"},
-	{"SLIM RX6 MUX", "AIF2_PB", "AIF2 PB"},
-	{"SLIM RX7 MUX", "AIF2_PB", "AIF2 PB"},
-	{"SLIM RX8 MUX", "AIF2_PB", "AIF2 PB"},
-	/* SLIM_MUX("AIF3_PB", "AIF3 PB"),*/
-	{"SLIM RX1 MUX", "AIF3_PB", "AIF3 PB"},
-	{"SLIM RX2 MUX", "AIF3_PB", "AIF3 PB"},
-	{"SLIM RX3 MUX", "AIF3_PB", "AIF3 PB"},
-	{"SLIM RX4 MUX", "AIF3_PB", "AIF3 PB"},
-	{"SLIM RX5 MUX", "AIF3_PB", "AIF3 PB"},
-	{"SLIM RX6 MUX", "AIF3_PB", "AIF3 PB"},
-	{"SLIM RX7 MUX", "AIF3_PB", "AIF3 PB"},
-	{"SLIM RX8 MUX", "AIF3_PB", "AIF3 PB"},
-
-	{"SLIM RX1", NULL, "SLIM RX1 MUX"},
-	{"SLIM RX2", NULL, "SLIM RX2 MUX"},
-	{"SLIM RX3", NULL, "SLIM RX3 MUX"},
-	{"SLIM RX4", NULL, "SLIM RX4 MUX"},
-	{"SLIM RX5", NULL, "SLIM RX5 MUX"},
-	{"SLIM RX6", NULL, "SLIM RX6 MUX"},
-	{"SLIM RX7", NULL, "SLIM RX7 MUX"},
-	{"SLIM RX8", NULL, "SLIM RX8 MUX"},
-
-	{"RX1 MIX1 INP1", "RX1", "SLIM RX1"},
-	{"RX1 MIX1 INP1", "RX2", "SLIM RX2"},
-	{"RX1 MIX1 INP1", "RX3", "SLIM RX3"},
-	{"RX1 MIX1 INP1", "RX4", "SLIM RX4"},
-	{"RX1 MIX1 INP1", "RX5", "SLIM RX5"},
-	{"RX1 MIX1 INP1", "RX6", "SLIM RX6"},
-	{"RX1 MIX1 INP1", "RX7", "SLIM RX7"},
-	{"RX1 MIX1 INP1", "IIR1", "IIR1"},
-	{"RX1 MIX1 INP1", "IIR2", "IIR2"},
-	{"RX1 MIX1 INP2", "RX1", "SLIM RX1"},
-	{"RX1 MIX1 INP2", "RX2", "SLIM RX2"},
-	{"RX1 MIX1 INP2", "RX3", "SLIM RX3"},
-	{"RX1 MIX1 INP2", "RX4", "SLIM RX4"},
-	{"RX1 MIX1 INP2", "RX5", "SLIM RX5"},
-	{"RX1 MIX1 INP2", "RX6", "SLIM RX6"},
-	{"RX1 MIX1 INP2", "RX7", "SLIM RX7"},
-	{"RX1 MIX1 INP2", "IIR1", "IIR1"},
-	{"RX1 MIX1 INP2", "IIR2", "IIR2"},
-	{"RX1 MIX1 INP3", "RX1", "SLIM RX1"},
-	{"RX1 MIX1 INP3", "RX2", "SLIM RX2"},
-	{"RX1 MIX1 INP3", "RX3", "SLIM RX3"},
-	{"RX1 MIX1 INP3", "RX4", "SLIM RX4"},
-	{"RX1 MIX1 INP3", "RX5", "SLIM RX5"},
-	{"RX1 MIX1 INP3", "RX6", "SLIM RX6"},
-	{"RX1 MIX1 INP3", "RX7", "SLIM RX7"},
-	{"RX2 MIX1 INP1", "RX1", "SLIM RX1"},
-	{"RX2 MIX1 INP1", "RX2", "SLIM RX2"},
-	{"RX2 MIX1 INP1", "RX3", "SLIM RX3"},
-	{"RX2 MIX1 INP1", "RX4", "SLIM RX4"},
-	{"RX2 MIX1 INP1", "RX5", "SLIM RX5"},
-	{"RX2 MIX1 INP1", "RX6", "SLIM RX6"},
-	{"RX2 MIX1 INP1", "RX7", "SLIM RX7"},
-	{"RX2 MIX1 INP1", "IIR1", "IIR1"},
-	{"RX2 MIX1 INP1", "IIR2", "IIR2"},
-	{"RX2 MIX1 INP2", "RX1", "SLIM RX1"},
-	{"RX2 MIX1 INP2", "RX2", "SLIM RX2"},
-	{"RX2 MIX1 INP2", "RX3", "SLIM RX3"},
-	{"RX2 MIX1 INP2", "RX4", "SLIM RX4"},
-	{"RX2 MIX1 INP2", "RX5", "SLIM RX5"},
-	{"RX2 MIX1 INP2", "RX6", "SLIM RX6"},
-	{"RX2 MIX1 INP2", "RX7", "SLIM RX7"},
-	{"RX2 MIX1 INP2", "IIR1", "IIR1"},
-	{"RX2 MIX1 INP2", "IIR2", "IIR2"},
-	{"RX3 MIX1 INP1", "RX1", "SLIM RX1"},
-	{"RX3 MIX1 INP1", "RX2", "SLIM RX2"},
-	{"RX3 MIX1 INP1", "RX3", "SLIM RX3"},
-	{"RX3 MIX1 INP1", "RX4", "SLIM RX4"},
-	{"RX3 MIX1 INP1", "RX5", "SLIM RX5"},
-	{"RX3 MIX1 INP1", "RX6", "SLIM RX6"},
-	{"RX3 MIX1 INP1", "RX7", "SLIM RX7"},
-	{"RX3 MIX1 INP1", "IIR1", "IIR1"},
-	{"RX3 MIX1 INP1", "IIR2", "IIR2"},
-	{"RX3 MIX1 INP2", "RX1", "SLIM RX1"},
-	{"RX3 MIX1 INP2", "RX2", "SLIM RX2"},
-	{"RX3 MIX1 INP2", "RX3", "SLIM RX3"},
-	{"RX3 MIX1 INP2", "RX4", "SLIM RX4"},
-	{"RX3 MIX1 INP2", "RX5", "SLIM RX5"},
-	{"RX3 MIX1 INP2", "RX6", "SLIM RX6"},
-	{"RX3 MIX1 INP2", "RX7", "SLIM RX7"},
-	{"RX3 MIX1 INP2", "IIR1", "IIR1"},
-	{"RX3 MIX1 INP2", "IIR2", "IIR2"},
-	{"RX4 MIX1 INP1", "RX1", "SLIM RX1"},
-	{"RX4 MIX1 INP1", "RX2", "SLIM RX2"},
-	{"RX4 MIX1 INP1", "RX3", "SLIM RX3"},
-	{"RX4 MIX1 INP1", "RX4", "SLIM RX4"},
-	{"RX4 MIX1 INP1", "RX5", "SLIM RX5"},
-	{"RX4 MIX1 INP1", "RX6", "SLIM RX6"},
-	{"RX4 MIX1 INP1", "RX7", "SLIM RX7"},
-	{"RX4 MIX1 INP1", "IIR1", "IIR1"},
-	{"RX4 MIX1 INP1", "IIR2", "IIR2"},
-	{"RX4 MIX1 INP2", "RX1", "SLIM RX1"},
-	{"RX4 MIX1 INP2", "RX2", "SLIM RX2"},
-	{"RX4 MIX1 INP2", "RX3", "SLIM RX3"},
-	{"RX4 MIX1 INP2", "RX5", "SLIM RX5"},
-	{"RX4 MIX1 INP2", "RX4", "SLIM RX4"},
-	{"RX4 MIX1 INP2", "RX6", "SLIM RX6"},
-	{"RX4 MIX1 INP2", "RX7", "SLIM RX7"},
-	{"RX4 MIX1 INP2", "IIR1", "IIR1"},
-	{"RX4 MIX1 INP2", "IIR2", "IIR2"},
-	{"RX5 MIX1 INP1", "RX1", "SLIM RX1"},
-	{"RX5 MIX1 INP1", "RX2", "SLIM RX2"},
-	{"RX5 MIX1 INP1", "RX3", "SLIM RX3"},
-	{"RX5 MIX1 INP1", "RX4", "SLIM RX4"},
-	{"RX5 MIX1 INP1", "RX5", "SLIM RX5"},
-	{"RX5 MIX1 INP1", "RX6", "SLIM RX6"},
-	{"RX5 MIX1 INP1", "RX7", "SLIM RX7"},
-	{"RX5 MIX1 INP1", "IIR1", "IIR1"},
-	{"RX5 MIX1 INP1", "IIR2", "IIR2"},
-	{"RX5 MIX1 INP2", "RX1", "SLIM RX1"},
-	{"RX5 MIX1 INP2", "RX2", "SLIM RX2"},
-	{"RX5 MIX1 INP2", "RX3", "SLIM RX3"},
-	{"RX5 MIX1 INP2", "RX4", "SLIM RX4"},
-	{"RX5 MIX1 INP2", "RX5", "SLIM RX5"},
-	{"RX5 MIX1 INP2", "RX6", "SLIM RX6"},
-	{"RX5 MIX1 INP2", "RX7", "SLIM RX7"},
-	{"RX5 MIX1 INP2", "IIR1", "IIR1"},
-	{"RX5 MIX1 INP2", "IIR2", "IIR2"},
-	{"RX6 MIX1 INP1", "RX1", "SLIM RX1"},
-	{"RX6 MIX1 INP1", "RX2", "SLIM RX2"},
-	{"RX6 MIX1 INP1", "RX3", "SLIM RX3"},
-	{"RX6 MIX1 INP1", "RX4", "SLIM RX4"},
-	{"RX6 MIX1 INP1", "RX5", "SLIM RX5"},
-	{"RX6 MIX1 INP1", "RX6", "SLIM RX6"},
-	{"RX6 MIX1 INP1", "RX7", "SLIM RX7"},
-	{"RX6 MIX1 INP1", "IIR1", "IIR1"},
-	{"RX6 MIX1 INP1", "IIR2", "IIR2"},
-	{"RX6 MIX1 INP2", "RX1", "SLIM RX1"},
-	{"RX6 MIX1 INP2", "RX2", "SLIM RX2"},
-	{"RX6 MIX1 INP2", "RX3", "SLIM RX3"},
-	{"RX6 MIX1 INP2", "RX4", "SLIM RX4"},
-	{"RX6 MIX1 INP2", "RX5", "SLIM RX5"},
-	{"RX6 MIX1 INP2", "RX6", "SLIM RX6"},
-	{"RX6 MIX1 INP2", "RX7", "SLIM RX7"},
-	{"RX6 MIX1 INP2", "IIR1", "IIR1"},
-	{"RX6 MIX1 INP2", "IIR2", "IIR2"},
-	{"RX7 MIX1 INP1", "RX1", "SLIM RX1"},
-	{"RX7 MIX1 INP1", "RX2", "SLIM RX2"},
-	{"RX7 MIX1 INP1", "RX3", "SLIM RX3"},
-	{"RX7 MIX1 INP1", "RX4", "SLIM RX4"},
-	{"RX7 MIX1 INP1", "RX5", "SLIM RX5"},
-	{"RX7 MIX1 INP1", "RX6", "SLIM RX6"},
-	{"RX7 MIX1 INP1", "RX7", "SLIM RX7"},
-	{"RX7 MIX1 INP1", "IIR1", "IIR1"},
-	{"RX7 MIX1 INP1", "IIR2", "IIR2"},
-	{"RX7 MIX1 INP2", "RX1", "SLIM RX1"},
-	{"RX7 MIX1 INP2", "RX2", "SLIM RX2"},
-	{"RX7 MIX1 INP2", "RX3", "SLIM RX3"},
-	{"RX7 MIX1 INP2", "RX4", "SLIM RX4"},
-	{"RX7 MIX1 INP2", "RX5", "SLIM RX5"},
-	{"RX7 MIX1 INP2", "RX6", "SLIM RX6"},
-	{"RX7 MIX1 INP2", "RX7", "SLIM RX7"},
-	{"RX7 MIX1 INP2", "IIR1", "IIR1"},
-	{"RX7 MIX1 INP2", "IIR2", "IIR2"},
-	{"RX8 MIX1 INP1", "RX1", "SLIM RX1"},
-	{"RX8 MIX1 INP1", "RX2", "SLIM RX2"},
-	{"RX8 MIX1 INP1", "RX3", "SLIM RX3"},
-	{"RX8 MIX1 INP1", "RX4", "SLIM RX4"},
-	{"RX8 MIX1 INP1", "RX5", "SLIM RX5"},
-	{"RX8 MIX1 INP1", "RX6", "SLIM RX6"},
-	{"RX8 MIX1 INP1", "RX7", "SLIM RX7"},
-	{"RX8 MIX1 INP1", "RX8", "SLIM RX8"},
-	{"RX8 MIX1 INP1", "IIR1", "IIR1"},
-	{"RX8 MIX1 INP1", "IIR2", "IIR2"},
-	{"RX8 MIX1 INP2", "RX1", "SLIM RX1"},
-	{"RX8 MIX1 INP2", "RX2", "SLIM RX2"},
-	{"RX8 MIX1 INP2", "RX3", "SLIM RX3"},
-	{"RX8 MIX1 INP2", "RX4", "SLIM RX4"},
-	{"RX8 MIX1 INP2", "RX5", "SLIM RX5"},
-	{"RX8 MIX1 INP2", "RX6", "SLIM RX6"},
-	{"RX8 MIX1 INP2", "RX7", "SLIM RX7"},
-	{"RX8 MIX1 INP2", "RX8", "SLIM RX8"},
-	{"RX8 MIX1 INP2", "IIR1", "IIR1"},
-	{"RX8 MIX1 INP2", "IIR2", "IIR2"},
-
-	/* IIR1, IIR2 inputs to Second RX Mixer on RX1, RX2 and RX7 chains. */
-	{"RX1 MIX2 INP1", "IIR1", "IIR1"},
-	{"RX1 MIX2 INP2", "IIR1", "IIR1"},
-	{"RX2 MIX2 INP1", "IIR1", "IIR1"},
-	{"RX2 MIX2 INP2", "IIR1", "IIR1"},
-	{"RX7 MIX2 INP1", "IIR1", "IIR1"},
-	{"RX7 MIX2 INP2", "IIR1", "IIR1"},
-	{"RX1 MIX2 INP1", "IIR2", "IIR2"},
-	{"RX1 MIX2 INP2", "IIR2", "IIR2"},
-	{"RX2 MIX2 INP1", "IIR2", "IIR2"},
-	{"RX2 MIX2 INP2", "IIR2", "IIR2"},
-	{"RX7 MIX2 INP1", "IIR2", "IIR2"},
-	{"RX7 MIX2 INP2", "IIR2", "IIR2"},
-
-	/* Decimator Inputs */
-	{"DEC1 MUX", "DMIC1", "DMIC1"},
-	{"DEC1 MUX", "ADC6", "ADC6"},
-	{"DEC1 MUX", NULL, "CDC_CONN"},
-	{"DEC2 MUX", "DMIC2", "DMIC2"},
-	{"DEC2 MUX", "ADC5", "ADC5"},
-	{"DEC2 MUX", NULL, "CDC_CONN"},
-	{"DEC3 MUX", "DMIC3", "DMIC3"},
-	{"DEC3 MUX", "ADC4", "ADC4"},
-	{"DEC3 MUX", NULL, "CDC_CONN"},
-	{"DEC4 MUX", "DMIC4", "DMIC4"},
-	{"DEC4 MUX", "ADC3", "ADC3"},
-	{"DEC4 MUX", NULL, "CDC_CONN"},
-	{"DEC5 MUX", "DMIC5", "DMIC5"},
-	{"DEC5 MUX", "ADC2", "ADC2"},
-	{"DEC5 MUX", NULL, "CDC_CONN"},
-	{"DEC6 MUX", "DMIC6", "DMIC6"},
-	{"DEC6 MUX", "ADC1", "ADC1"},
-	{"DEC6 MUX", NULL, "CDC_CONN"},
-	{"DEC7 MUX", "DMIC1", "DMIC1"},
-	{"DEC7 MUX", "DMIC6", "DMIC6"},
-	{"DEC7 MUX", "ADC1", "ADC1"},
-	{"DEC7 MUX", "ADC6", "ADC6"},
-	{"DEC7 MUX", "ANC1_FB", "ANC1 MUX"},
-	{"DEC7 MUX", "ANC2_FB", "ANC2 MUX"},
-	{"DEC7 MUX", NULL, "CDC_CONN"},
-	{"DEC8 MUX", "DMIC2", "DMIC2"},
-	{"DEC8 MUX", "DMIC5", "DMIC5"},
-	{"DEC8 MUX", "ADC2", "ADC2"},
-	{"DEC8 MUX", "ADC5", "ADC5"},
-	{"DEC8 MUX", "ANC1_FB", "ANC1 MUX"},
-	{"DEC8 MUX", "ANC2_FB", "ANC2 MUX"},
-	{"DEC8 MUX", NULL, "CDC_CONN"},
-	{"DEC9 MUX", "DMIC4", "DMIC4"},
-	{"DEC9 MUX", "DMIC5", "DMIC5"},
-	{"DEC9 MUX", "ADC2", "ADC2"},
-	{"DEC9 MUX", "ADC3", "ADC3"},
-	{"DEC9 MUX", "ANC1_FB", "ANC1 MUX"},
-	{"DEC9 MUX", "ANC2_FB", "ANC2 MUX"},
-	{"DEC9 MUX", NULL, "CDC_CONN"},
-	{"DEC10 MUX", "DMIC3", "DMIC3"},
-	{"DEC10 MUX", "DMIC6", "DMIC6"},
-	{"DEC10 MUX", "ADC1", "ADC1"},
-	{"DEC10 MUX", "ADC4", "ADC4"},
-	{"DEC10 MUX", "ANC1_FB", "ANC1 MUX"},
-	{"DEC10 MUX", "ANC2_FB", "ANC2 MUX"},
-	{"DEC10 MUX", NULL, "CDC_CONN"},
-
-	/* ADC Connections */
-	{"ADC1", NULL, "AMIC1"},
-	{"ADC2", NULL, "AMIC2"},
-	{"ADC3", NULL, "AMIC3"},
-	{"ADC4", NULL, "AMIC4"},
-	{"ADC5", NULL, "AMIC5"},
-	{"ADC6", NULL, "AMIC6"},
-
-	/* AUX PGA Connections */
-	{"EAR_PA_MIXER", "AUX_PGA_L Switch", "AUX_PGA_Left"},
-	{"HPHL_PA_MIXER", "AUX_PGA_L Switch", "AUX_PGA_Left"},
-	{"HPHR_PA_MIXER", "AUX_PGA_R Switch", "AUX_PGA_Right"},
-	{"LINEOUT1_PA_MIXER", "AUX_PGA_L Switch", "AUX_PGA_Left"},
-	{"LINEOUT2_PA_MIXER", "AUX_PGA_R Switch", "AUX_PGA_Right"},
-	{"LINEOUT3_PA_MIXER", "AUX_PGA_L Switch", "AUX_PGA_Left"},
-	{"LINEOUT4_PA_MIXER", "AUX_PGA_R Switch", "AUX_PGA_Right"},
-	{"AUX_PGA_Left", NULL, "AMIC5"},
-	{"AUX_PGA_Right", NULL, "AMIC6"},
-
-	{"IIR1", NULL, "IIR1 INP1 MUX"},
-	{"IIR1 INP1 MUX", "DEC1", "DEC1 MUX"},
-	{"IIR1 INP1 MUX", "DEC2", "DEC2 MUX"},
-	{"IIR1 INP1 MUX", "DEC3", "DEC3 MUX"},
-	{"IIR1 INP1 MUX", "DEC4", "DEC4 MUX"},
-	{"IIR1 INP1 MUX", "DEC5", "DEC5 MUX"},
-	{"IIR1 INP1 MUX", "DEC6", "DEC6 MUX"},
-	{"IIR1 INP1 MUX", "DEC7", "DEC7 MUX"},
-	{"IIR1 INP1 MUX", "DEC8", "DEC8 MUX"},
-	{"IIR1 INP1 MUX", "DEC9", "DEC9 MUX"},
-	{"IIR1 INP1 MUX", "DEC10", "DEC10 MUX"},
-	{"IIR1 INP1 MUX", "RX1", "SLIM RX1"},
-	{"IIR1 INP1 MUX", "RX2", "SLIM RX2"},
-	{"IIR1 INP1 MUX", "RX3", "SLIM RX3"},
-	{"IIR1 INP1 MUX", "RX4", "SLIM RX4"},
-	{"IIR1 INP1 MUX", "RX5", "SLIM RX5"},
-	{"IIR1 INP1 MUX", "RX6", "SLIM RX6"},
-	{"IIR1 INP1 MUX", "RX7", "SLIM RX7"},
-
-	{"IIR2", NULL, "IIR2 INP1 MUX"},
-	{"IIR2 INP1 MUX", "DEC1", "DEC1 MUX"},
-	{"IIR2 INP1 MUX", "DEC2", "DEC2 MUX"},
-	{"IIR2 INP1 MUX", "DEC3", "DEC3 MUX"},
-	{"IIR2 INP1 MUX", "DEC4", "DEC4 MUX"},
-	{"IIR2 INP1 MUX", "DEC5", "DEC5 MUX"},
-	{"IIR2 INP1 MUX", "DEC6", "DEC6 MUX"},
-	{"IIR2 INP1 MUX", "DEC7", "DEC7 MUX"},
-	{"IIR2 INP1 MUX", "DEC8", "DEC8 MUX"},
-	{"IIR2 INP1 MUX", "DEC9", "DEC9 MUX"},
-	{"IIR2 INP1 MUX", "DEC10", "DEC10 MUX"},
-	{"IIR2 INP1 MUX", "RX1", "SLIM RX1"},
-	{"IIR2 INP1 MUX", "RX2", "SLIM RX2"},
-	{"IIR2 INP1 MUX", "RX3", "SLIM RX3"},
-	{"IIR2 INP1 MUX", "RX4", "SLIM RX4"},
-	{"IIR2 INP1 MUX", "RX5", "SLIM RX5"},
-	{"IIR2 INP1 MUX", "RX6", "SLIM RX6"},
-	{"IIR2 INP1 MUX", "RX7", "SLIM RX7"},
-
-	{"IIR1", NULL, "IIR1 INP2 MUX"},
-	{"IIR1 INP2 MUX", "DEC1", "DEC1 MUX"},
-	{"IIR1 INP2 MUX", "DEC2", "DEC2 MUX"},
-	{"IIR1 INP2 MUX", "DEC3", "DEC3 MUX"},
-	{"IIR1 INP2 MUX", "DEC4", "DEC4 MUX"},
-	{"IIR1 INP2 MUX", "DEC5", "DEC5 MUX"},
-	{"IIR1 INP2 MUX", "DEC6", "DEC6 MUX"},
-	{"IIR1 INP2 MUX", "DEC7", "DEC7 MUX"},
-	{"IIR1 INP2 MUX", "DEC8", "DEC8 MUX"},
-	{"IIR1 INP2 MUX", "DEC9", "DEC9 MUX"},
-	{"IIR1 INP2 MUX", "DEC10", "DEC10 MUX"},
-	{"IIR1 INP2 MUX", "RX1", "SLIM RX1"},
-	{"IIR1 INP2 MUX", "RX2", "SLIM RX2"},
-	{"IIR1 INP2 MUX", "RX3", "SLIM RX3"},
-	{"IIR1 INP2 MUX", "RX4", "SLIM RX4"},
-	{"IIR1 INP2 MUX", "RX5", "SLIM RX5"},
-	{"IIR1 INP2 MUX", "RX6", "SLIM RX6"},
-	{"IIR1 INP2 MUX", "RX7", "SLIM RX7"},
-
-	{"IIR2", NULL, "IIR2 INP2 MUX"},
-	{"IIR2 INP2 MUX", "DEC1", "DEC1 MUX"},
-	{"IIR2 INP2 MUX", "DEC2", "DEC2 MUX"},
-	{"IIR2 INP2 MUX", "DEC3", "DEC3 MUX"},
-	{"IIR2 INP2 MUX", "DEC4", "DEC4 MUX"},
-	{"IIR2 INP2 MUX", "DEC5", "DEC5 MUX"},
-	{"IIR2 INP2 MUX", "DEC6", "DEC6 MUX"},
-	{"IIR2 INP2 MUX", "DEC7", "DEC7 MUX"},
-	{"IIR2 INP2 MUX", "DEC8", "DEC8 MUX"},
-	{"IIR2 INP2 MUX", "DEC9", "DEC9 MUX"},
-	{"IIR2 INP2 MUX", "DEC10", "DEC10 MUX"},
-	{"IIR2 INP2 MUX", "RX1", "SLIM RX1"},
-	{"IIR2 INP2 MUX", "RX2", "SLIM RX2"},
-	{"IIR2 INP2 MUX", "RX3", "SLIM RX3"},
-	{"IIR2 INP2 MUX", "RX4", "SLIM RX4"},
-	{"IIR2 INP2 MUX", "RX5", "SLIM RX5"},
-	{"IIR2 INP2 MUX", "RX6", "SLIM RX6"},
-	{"IIR2 INP2 MUX", "RX7", "SLIM RX7"},
-
-	{"IIR1", NULL, "IIR1 INP3 MUX"},
-	{"IIR1 INP3 MUX", "DEC1", "DEC1 MUX"},
-	{"IIR1 INP3 MUX", "DEC2", "DEC2 MUX"},
-	{"IIR1 INP3 MUX", "DEC3", "DEC3 MUX"},
-	{"IIR1 INP3 MUX", "DEC4", "DEC4 MUX"},
-	{"IIR1 INP3 MUX", "DEC5", "DEC5 MUX"},
-	{"IIR1 INP3 MUX", "DEC6", "DEC6 MUX"},
-	{"IIR1 INP3 MUX", "DEC7", "DEC7 MUX"},
-	{"IIR1 INP3 MUX", "DEC8", "DEC8 MUX"},
-	{"IIR1 INP3 MUX", "DEC9", "DEC9 MUX"},
-	{"IIR1 INP3 MUX", "DEC10", "DEC10 MUX"},
-	{"IIR1 INP3 MUX", "RX1", "SLIM RX1"},
-	{"IIR1 INP3 MUX", "RX2", "SLIM RX2"},
-	{"IIR1 INP3 MUX", "RX3", "SLIM RX3"},
-	{"IIR1 INP3 MUX", "RX4", "SLIM RX4"},
-	{"IIR1 INP3 MUX", "RX5", "SLIM RX5"},
-	{"IIR1 INP3 MUX", "RX6", "SLIM RX6"},
-	{"IIR1 INP3 MUX", "RX7", "SLIM RX7"},
-
-	{"IIR2", NULL, "IIR2 INP3 MUX"},
-	{"IIR2 INP3 MUX", "DEC1", "DEC1 MUX"},
-	{"IIR2 INP3 MUX", "DEC2", "DEC2 MUX"},
-	{"IIR2 INP3 MUX", "DEC3", "DEC3 MUX"},
-	{"IIR2 INP3 MUX", "DEC4", "DEC4 MUX"},
-	{"IIR2 INP3 MUX", "DEC5", "DEC5 MUX"},
-	{"IIR2 INP3 MUX", "DEC6", "DEC6 MUX"},
-	{"IIR2 INP3 MUX", "DEC7", "DEC7 MUX"},
-	{"IIR2 INP3 MUX", "DEC8", "DEC8 MUX"},
-	{"IIR2 INP3 MUX", "DEC9", "DEC9 MUX"},
-	{"IIR2 INP3 MUX", "DEC10", "DEC10 MUX"},
-	{"IIR2 INP3 MUX", "RX1", "SLIM RX1"},
-	{"IIR2 INP3 MUX", "RX2", "SLIM RX2"},
-	{"IIR2 INP3 MUX", "RX3", "SLIM RX3"},
-	{"IIR2 INP3 MUX", "RX4", "SLIM RX4"},
-	{"IIR2 INP3 MUX", "RX5", "SLIM RX5"},
-	{"IIR2 INP3 MUX", "RX6", "SLIM RX6"},
-	{"IIR2 INP3 MUX", "RX7", "SLIM RX7"},
-
-	{"IIR1", NULL, "IIR1 INP4 MUX"},
-	{"IIR1 INP4 MUX", "DEC1", "DEC1 MUX"},
-	{"IIR1 INP4 MUX", "DEC2", "DEC2 MUX"},
-	{"IIR1 INP4 MUX", "DEC3", "DEC3 MUX"},
-	{"IIR1 INP4 MUX", "DEC4", "DEC4 MUX"},
-	{"IIR1 INP4 MUX", "DEC5", "DEC5 MUX"},
-	{"IIR1 INP4 MUX", "DEC6", "DEC6 MUX"},
-	{"IIR1 INP4 MUX", "DEC7", "DEC7 MUX"},
-	{"IIR1 INP4 MUX", "DEC8", "DEC8 MUX"},
-	{"IIR1 INP4 MUX", "DEC9", "DEC9 MUX"},
-	{"IIR1 INP4 MUX", "DEC10", "DEC10 MUX"},
-	{"IIR1 INP4 MUX", "RX1", "SLIM RX1"},
-	{"IIR1 INP4 MUX", "RX2", "SLIM RX2"},
-	{"IIR1 INP4 MUX", "RX3", "SLIM RX3"},
-	{"IIR1 INP4 MUX", "RX4", "SLIM RX4"},
-	{"IIR1 INP4 MUX", "RX5", "SLIM RX5"},
-	{"IIR1 INP4 MUX", "RX6", "SLIM RX6"},
-	{"IIR1 INP4 MUX", "RX7", "SLIM RX7"},
-
-	{"IIR2", NULL, "IIR2 INP4 MUX"},
-	{"IIR2 INP4 MUX", "DEC1", "DEC1 MUX"},
-	{"IIR2 INP4 MUX", "DEC2", "DEC2 MUX"},
-	{"IIR2 INP4 MUX", "DEC3", "DEC3 MUX"},
-	{"IIR2 INP4 MUX", "DEC4", "DEC4 MUX"},
-	{"IIR2 INP4 MUX", "DEC5", "DEC5 MUX"},
-	{"IIR2 INP4 MUX", "DEC6", "DEC6 MUX"},
-	{"IIR2 INP4 MUX", "DEC7", "DEC7 MUX"},
-	{"IIR2 INP4 MUX", "DEC8", "DEC8 MUX"},
-	{"IIR2 INP4 MUX", "DEC9", "DEC9 MUX"},
-	{"IIR2 INP4 MUX", "DEC10", "DEC10 MUX"},
-	{"IIR2 INP4 MUX", "RX1", "SLIM RX1"},
-	{"IIR2 INP4 MUX", "RX2", "SLIM RX2"},
-	{"IIR2 INP4 MUX", "RX3", "SLIM RX3"},
-	{"IIR2 INP4 MUX", "RX4", "SLIM RX4"},
-	{"IIR2 INP4 MUX", "RX5", "SLIM RX5"},
-	{"IIR2 INP4 MUX", "RX6", "SLIM RX6"},
-	{"IIR2 INP4 MUX", "RX7", "SLIM RX7"},
-
-	{"MIC BIAS1 Internal1", NULL, "LDO_H"},
-	{"MIC BIAS1 Internal2", NULL, "LDO_H"},
-	{"MIC BIAS1 External", NULL, "LDO_H"},
-	{"MIC BIAS2 Internal1", NULL, "LDO_H"},
-	{"MIC BIAS2 Internal2", NULL, "LDO_H"},
-	{"MIC BIAS2 Internal3", NULL, "LDO_H"},
-	{"MIC BIAS2 External", NULL, "LDO_H"},
-	{"MIC BIAS3 Internal1", NULL, "LDO_H"},
-	{"MIC BIAS3 Internal2", NULL, "LDO_H"},
-	{"MIC BIAS3 External", NULL, "LDO_H"},
-	{"MIC BIAS4 External", NULL, "LDO_H"},
-	{DAPM_MICBIAS2_EXTERNAL_STANDALONE, NULL, "LDO_H Standalone"},
-};
-
-static int tomtom_startup(struct snd_pcm_substream *substream,
-		struct snd_soc_dai *dai)
-{
-	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
-		 substream->name, substream->stream);
-
-	return 0;
-}
-
-static void tomtom_shutdown(struct snd_pcm_substream *substream,
-		struct snd_soc_dai *dai)
-{
-	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
-		 substream->name, substream->stream);
-}
-
-int tomtom_mclk_enable(struct snd_soc_codec *codec, int mclk_enable, bool dapm)
-{
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	pr_debug("%s: mclk_enable = %u, dapm = %d\n", __func__, mclk_enable,
-		 dapm);
-
-	WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
-	if (mclk_enable) {
-		wcd9xxx_resmgr_get_bandgap(&tomtom->resmgr,
-					   WCD9XXX_BANDGAP_AUDIO_MODE);
-		wcd9xxx_resmgr_get_clk_block(&tomtom->resmgr, WCD9XXX_CLK_MCLK);
-	} else {
-		/* Put clock and BG */
-		wcd9xxx_resmgr_put_clk_block(&tomtom->resmgr, WCD9XXX_CLK_MCLK);
-		wcd9xxx_resmgr_put_bandgap(&tomtom->resmgr,
-					   WCD9XXX_BANDGAP_AUDIO_MODE);
-	}
-	WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
-
-	return 0;
-}
-
-static int tomtom_set_dai_sysclk(struct snd_soc_dai *dai,
-		int clk_id, unsigned int freq, int dir)
-{
-	pr_debug("%s\n", __func__);
-	return 0;
-}
-
-static int tomtom_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
-{
-	u8 val = 0;
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(dai->codec);
-
-	pr_debug("%s\n", __func__);
-	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
-	case SND_SOC_DAIFMT_CBS_CFS:
-		/* CPU is master */
-		if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
-			if (dai->id == AIF1_CAP)
-				snd_soc_update_bits(dai->codec,
-					TOMTOM_A_CDC_CLK_TX_I2S_CTL,
-					TOMTOM_I2S_MASTER_MODE_MASK, 0);
-			else if (dai->id == AIF1_PB)
-				snd_soc_update_bits(dai->codec,
-					TOMTOM_A_CDC_CLK_RX_I2S_CTL,
-					TOMTOM_I2S_MASTER_MODE_MASK, 0);
-		}
-		break;
-	case SND_SOC_DAIFMT_CBM_CFM:
-	/* CPU is slave */
-		if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
-			val = TOMTOM_I2S_MASTER_MODE_MASK;
-			if (dai->id == AIF1_CAP)
-				snd_soc_update_bits(dai->codec,
-					TOMTOM_A_CDC_CLK_TX_I2S_CTL, val, val);
-			else if (dai->id == AIF1_PB)
-				snd_soc_update_bits(dai->codec,
-					TOMTOM_A_CDC_CLK_RX_I2S_CTL, val, val);
-		}
-		break;
-	default:
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static int tomtom_set_channel_map(struct snd_soc_dai *dai,
-				unsigned int tx_num, unsigned int *tx_slot,
-				unsigned int rx_num, unsigned int *rx_slot)
-
-{
-	struct wcd9xxx_codec_dai_data *dai_data = NULL;
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(dai->codec);
-	struct wcd9xxx *core = dev_get_drvdata(dai->codec->dev->parent);
-
-	if (!tx_slot || !rx_slot) {
-		pr_err("%s: Invalid tx_slot=%pK, rx_slot=%pK\n",
-			__func__, tx_slot, rx_slot);
-		return -EINVAL;
-	}
-	pr_debug("%s(): dai_name = %s DAI-ID %x tx_ch %d rx_ch %d\n"
-		 "tomtom->intf_type %d\n",
-		 __func__, dai->name, dai->id, tx_num, rx_num,
-		 tomtom->intf_type);
-
-	if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
-		wcd9xxx_init_slimslave(core, core->slim->laddr,
-					   tx_num, tx_slot, rx_num, rx_slot);
-		/* Reserve TX11 and TX12 for VI feedback path */
-		dai_data = &tomtom->dai[AIF4_VIFEED];
-		if (dai_data) {
-			list_add_tail(&core->tx_chs[TOMTOM_TX11].list,
-			&dai_data->wcd9xxx_ch_list);
-			list_add_tail(&core->tx_chs[TOMTOM_TX12].list,
-			&dai_data->wcd9xxx_ch_list);
-		}
-
-		/* Reserve TX13 for MAD data channel */
-		dai_data = &tomtom->dai[AIF4_MAD_TX];
-		if (dai_data)
-			list_add_tail(&core->tx_chs[TOMTOM_TX13].list,
-				      &dai_data->wcd9xxx_ch_list);
-	}
-
-	return 0;
-}
-
-static int tomtom_get_channel_map(struct snd_soc_dai *dai,
-				 unsigned int *tx_num, unsigned int *tx_slot,
-				 unsigned int *rx_num, unsigned int *rx_slot)
-
-{
-	struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(dai->codec);
-	u32 i = 0;
-	struct wcd9xxx_ch *ch;
-
-	switch (dai->id) {
-	case AIF1_PB:
-	case AIF2_PB:
-	case AIF3_PB:
-		if (!rx_slot || !rx_num) {
-			pr_err("%s: Invalid rx_slot %pK or rx_num %pK\n",
-				 __func__, rx_slot, rx_num);
-			return -EINVAL;
-		}
-		list_for_each_entry(ch, &tomtom_p->dai[dai->id].wcd9xxx_ch_list,
-				    list) {
-			pr_debug("%s: slot_num %u ch->ch_num %d\n",
-				 __func__, i, ch->ch_num);
-			rx_slot[i++] = ch->ch_num;
-		}
-		pr_debug("%s: rx_num %d\n", __func__, i);
-		*rx_num = i;
-		break;
-	case AIF1_CAP:
-	case AIF2_CAP:
-	case AIF3_CAP:
-	case AIF4_VIFEED:
-	case AIF4_MAD_TX:
-		if (!tx_slot || !tx_num) {
-			pr_err("%s: Invalid tx_slot %pK or tx_num %pK\n",
-				 __func__, tx_slot, tx_num);
-			return -EINVAL;
-		}
-		list_for_each_entry(ch, &tomtom_p->dai[dai->id].wcd9xxx_ch_list,
-				    list) {
-			pr_debug("%s: slot_num %u ch->ch_num %d\n",
-				 __func__, i,  ch->ch_num);
-			tx_slot[i++] = ch->ch_num;
-		}
-		pr_debug("%s: tx_num %d\n", __func__, i);
-		*tx_num = i;
-		break;
-
-	default:
-		pr_err("%s: Invalid DAI ID %x\n", __func__, dai->id);
-		break;
-	}
-
-	return 0;
-}
-
-static int tomtom_set_interpolator_rate(struct snd_soc_dai *dai,
-	u8 rx_fs_rate_reg_val, u32 compander_fs, u32 sample_rate)
-{
-	u32 j;
-	u8 rx_mix1_inp, rx8_mix1_inp;
-	u16 rx_mix_1_reg_1, rx_mix_1_reg_2;
-	u16 rx_fs_reg;
-	u8 rx_mix_1_reg_1_val, rx_mix_1_reg_2_val;
-	struct snd_soc_codec *codec = dai->codec;
-	struct wcd9xxx_ch *ch;
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	int port_rx_8 = TOMTOM_RX_PORT_START_NUMBER + NUM_INTERPOLATORS - 1;
-
-	list_for_each_entry(ch, &tomtom->dai[dai->id].wcd9xxx_ch_list, list) {
-		/* for RX port starting from 16 instead of 10 like tabla */
-		rx_mix1_inp = ch->port + RX_MIX1_INP_SEL_RX1 -
-			      TOMTOM_TX_PORT_NUMBER;
-		rx8_mix1_inp = ch->port + RX8_MIX1_INP_SEL_RX1 -
-			       TOMTOM_RX_PORT_START_NUMBER;
-		if (((ch->port < port_rx_8) &&
-		     ((rx_mix1_inp < RX_MIX1_INP_SEL_RX1) ||
-		      (rx_mix1_inp > RX_MIX1_INP_SEL_RX7))) ||
-		    ((rx8_mix1_inp < RX8_MIX1_INP_SEL_RX1) ||
-		     (rx8_mix1_inp > RX8_MIX1_INP_SEL_RX8))) {
-			pr_err("%s: Invalid TOMTOM_RX%u port. Dai ID is %d\n",
-					__func__,  rx8_mix1_inp - 2,
-					dai->id);
-			return -EINVAL;
-		}
-
-		rx_mix_1_reg_1 = TOMTOM_A_CDC_CONN_RX1_B1_CTL;
-
-		for (j = 0; j < NUM_INTERPOLATORS - 1; j++) {
-			rx_mix_1_reg_2 = rx_mix_1_reg_1 + 1;
-
-			rx_mix_1_reg_1_val = snd_soc_read(codec,
-							  rx_mix_1_reg_1);
-			rx_mix_1_reg_2_val = snd_soc_read(codec,
-							  rx_mix_1_reg_2);
-
-			if (((rx_mix_1_reg_1_val & 0x0F) == rx_mix1_inp) ||
-			    (((rx_mix_1_reg_1_val >> 4) & 0x0F)
-				== rx_mix1_inp) ||
-			    ((rx_mix_1_reg_2_val & 0x0F) == rx_mix1_inp)) {
-
-				rx_fs_reg = TOMTOM_A_CDC_RX1_B5_CTL + 8 * j;
-
-				pr_debug("%s: AIF_PB DAI(%d) connected to RX%u\n",
-					__func__, dai->id, j + 1);
-
-				pr_debug("%s: set RX%u sample rate to %u\n",
-					__func__, j + 1, sample_rate);
-
-				snd_soc_update_bits(codec, rx_fs_reg,
-						0xE0, rx_fs_rate_reg_val);
-
-				if (comp_rx_path[j] < COMPANDER_MAX)
-					tomtom->comp_fs[comp_rx_path[j]]
-					= compander_fs;
-			}
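-			/* RX1 and RX2 have an extra mixer connection
-			 * register, hence the larger register stride for
-			 * the first two interpolator paths.
-			 */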
-			if (j < 2)
-				rx_mix_1_reg_1 += 3;
-			else
-				rx_mix_1_reg_1 += 2;
-		}
-
-		/* RX8 interpolator path */
-		rx_mix_1_reg_1_val = snd_soc_read(codec,
-						TOMTOM_A_CDC_CONN_RX8_B1_CTL);
-		if (((rx_mix_1_reg_1_val & 0x0F) == rx8_mix1_inp) ||
-		    (((rx_mix_1_reg_1_val >> 4) & 0x0F) == rx8_mix1_inp)) {
-			snd_soc_update_bits(codec, TOMTOM_A_CDC_RX8_B5_CTL,
-					    0xE0, rx_fs_rate_reg_val);
-			pr_debug("%s: AIF_PB DAI(%d) connected to RX%u\n",
-					__func__, dai->id, NUM_INTERPOLATORS);
-
-			pr_debug("%s: set RX%u sample rate to %u\n",
-					__func__, NUM_INTERPOLATORS,
-					sample_rate);
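-			/* j == NUM_INTERPOLATORS - 1 after the loop above */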
-			if (comp_rx_path[NUM_INTERPOLATORS - 1] < COMPANDER_MAX)
-				tomtom->comp_fs[comp_rx_path[j]] =
-							compander_fs;
-		}
-	}
-	return 0;
-}
-
-static int tomtom_set_decimator_rate(struct snd_soc_dai *dai,
-	u8 tx_fs_rate_reg_val, u32 sample_rate)
-{
-	struct snd_soc_codec *codec = dai->codec;
-	struct wcd9xxx_ch *ch;
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	u32 tx_port;
-	u16 tx_port_reg, tx_fs_reg;
-	u8 tx_port_reg_val;
-	s8 decimator;
-
-	list_for_each_entry(ch, &tomtom->dai[dai->id].wcd9xxx_ch_list, list) {
-
-		tx_port = ch->port + 1;
-		pr_debug("%s: dai->id = %d, tx_port = %u\n",
-			__func__, dai->id, tx_port);
-
-		if ((tx_port < 1) || (tx_port > NUM_DECIMATORS)) {
-			pr_err("%s: Invalid SLIM TX%u port. DAI ID is %d\n",
-				__func__, tx_port, dai->id);
-			return -EINVAL;
-		}
-
-		tx_port_reg = TOMTOM_A_CDC_CONN_TX_SB_B1_CTL + (tx_port - 1);
-		tx_port_reg_val =  snd_soc_read(codec, tx_port_reg);
-
-		decimator = 0;
-
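-		/* SLIM TX1-TX6: a 4-bit mux value of 0x8 routes the matching
-		 * decimator to the port. SLIM TX7 and above: a 5-bit mux
-		 * value of 0x8-0x11 selects DEC1-DEC10.
-		 */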
-		if ((tx_port >= 1) && (tx_port <= 6)) {
-
-			tx_port_reg_val =  tx_port_reg_val & 0x0F;
-			if (tx_port_reg_val == 0x8)
-				decimator = tx_port;
-
-		} else if ((tx_port >= 7) && (tx_port <= NUM_DECIMATORS)) {
-
-			tx_port_reg_val =  tx_port_reg_val & 0x1F;
-
-			if ((tx_port_reg_val >= 0x8) &&
-			    (tx_port_reg_val <= 0x11)) {
-
-				decimator = (tx_port_reg_val - 0x8) + 1;
-			}
-		}
-
-		if (decimator) { /* SLIM_TX port has a DEC as input */
-
-			tx_fs_reg = TOMTOM_A_CDC_TX1_CLK_FS_CTL +
-				    8 * (decimator - 1);
-
-			pr_debug("%s: set DEC%u (-> SLIM_TX%u) rate to %u\n",
-				__func__, decimator, tx_port, sample_rate);
-
-			snd_soc_update_bits(codec, tx_fs_reg, 0x07,
-					    tx_fs_rate_reg_val);
-
-		} else {
-			if ((tx_port_reg_val >= 0x1) &&
-			    (tx_port_reg_val <= 0x7)) {
-
-				pr_debug("%s: RMIX%u going to SLIM TX%u\n",
-					__func__, tx_port_reg_val, tx_port);
-
-			} else if  ((tx_port_reg_val >= 0x8) &&
-				    (tx_port_reg_val <= 0x11)) {
-
-				pr_err("%s: ERROR: Should not be here\n",
-				       __func__);
-				pr_err("%s: ERROR: DEC connected to SLIM TX%u\n",
-					__func__, tx_port);
-				return -EINVAL;
-
-			} else if (tx_port_reg_val == 0) {
-				pr_debug("%s: no signal to SLIM TX%u\n",
-					__func__, tx_port);
-			} else {
-				pr_err("%s: ERROR: wrong signal to SLIM TX%u\n",
-					__func__, tx_port);
-				pr_err("%s: ERROR: wrong signal = %u\n",
-					__func__, tx_port_reg_val);
-				return -EINVAL;
-			}
-		}
-	}
-	return 0;
-}
-
-static void tomtom_set_rxsb_port_format(struct snd_pcm_hw_params *params,
-				       struct snd_soc_dai *dai)
-{
-	struct snd_soc_codec *codec = dai->codec;
-	struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-	struct wcd9xxx_codec_dai_data *cdc_dai;
-	struct wcd9xxx_ch *ch;
-	int port;
-	u8 bit_sel;
-	u16 sb_ctl_reg, field_shift;
-
-	switch (params_width(params)) {
-	case 16:
-		bit_sel = 0x2;
-		tomtom_p->dai[dai->id].bit_width = 16;
-		break;
-	case 24:
-		bit_sel = 0x0;
-		tomtom_p->dai[dai->id].bit_width = 24;
-		break;
-	default:
-		dev_err(codec->dev, "Invalid format\n");
-		return;
-	}
-
-	cdc_dai = &tomtom_p->dai[dai->id];
-
-	list_for_each_entry(ch, &cdc_dai->wcd9xxx_ch_list, list) {
-		port = wcd9xxx_get_slave_port(ch->ch_num);
-		if (port < 0 ||
-		    !TOMTOM_VALIDATE_RX_SBPORT_RANGE(port)) {
-			dev_warn(codec->dev,
-				 "%s: invalid port ID %d returned for RX DAI\n",
-				 __func__, port);
-			return;
-		}
-
-		port = TOMTOM_CONVERT_RX_SBPORT_ID(port);
-
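-		/* Each RX port has a 2-bit bit-width field: ports 0-3 live in
-		 * RX_SB_B1_CTL, ports 4-7 in RX_SB_B2_CTL.
-		 */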
-		if (port <= 3) {
-			sb_ctl_reg = TOMTOM_A_CDC_CONN_RX_SB_B1_CTL;
-			field_shift = port << 1;
-		} else if (port <= 7) {
-			sb_ctl_reg = TOMTOM_A_CDC_CONN_RX_SB_B2_CTL;
-			field_shift = (port - 4) << 1;
-		} else { /* should not happen */
-			dev_warn(codec->dev,
-				 "%s: bad port ID %d\n", __func__, port);
-			return;
-		}
-
-		dev_dbg(codec->dev, "%s: sb_ctl_reg %x field_shift %x\n",
-			__func__, sb_ctl_reg, field_shift);
-		snd_soc_update_bits(codec, sb_ctl_reg, 0x3 << field_shift,
-				    bit_sel << field_shift);
-	}
-}
-
-static void tomtom_set_tx_sb_port_format(struct snd_pcm_hw_params *params,
-					 struct snd_soc_dai *dai)
-{
-	struct snd_soc_codec *codec = dai->codec;
-	struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-	struct wcd9xxx_codec_dai_data *cdc_dai;
-	struct wcd9xxx_ch *ch;
-	int port;
-	u8 bit_sel, bit_shift;
-	u16 sb_ctl_reg;
-
-	switch (params_width(params)) {
-	case 16:
-		bit_sel = 0x2;
-		tomtom_p->dai[dai->id].bit_width = 16;
-		break;
-	case 24:
-		bit_sel = 0x0;
-		tomtom_p->dai[dai->id].bit_width = 24;
-		break;
-	default:
-		dev_err(codec->dev, "%s: Invalid format %d\n", __func__,
-			params_width(params));
-		return;
-	}
-
-	cdc_dai = &tomtom_p->dai[dai->id];
-
-	list_for_each_entry(ch, &cdc_dai->wcd9xxx_ch_list, list) {
-		port = wcd9xxx_get_slave_port(ch->ch_num);
-		if (port < 0 ||
-		    !TOMTOM_VALIDATE_TX_SBPORT_RANGE(port)) {
-			dev_warn(codec->dev,
-				 "%s: invalid port ID %d returned for TX DAI\n",
-				 __func__, port);
-			return;
-		}
-
-		if (port < 6) /* 6 = SLIMBUS TX7 */
-			bit_shift = TOMTOM_BIT_ADJ_SHIFT_PORT1_6;
-		else if (port < 10)
-			bit_shift = TOMTOM_BIT_ADJ_SHIFT_PORT7_10;
-		else {
-			dev_warn(codec->dev,
-				 "%s: port ID %d bitwidth is fixed\n",
-				 __func__, port);
-			return;
-		}
-
-		sb_ctl_reg = (TOMTOM_A_CDC_CONN_TX_SB_B1_CTL + port);
-
-		dev_dbg(codec->dev, "%s: reg %x bit_sel %x bit_shift %x\n",
-			__func__, sb_ctl_reg, bit_sel, bit_shift);
-		snd_soc_update_bits(codec, sb_ctl_reg, 0x3 <<
-				    bit_shift, bit_sel << bit_shift);
-	}
-}
-
-static int tomtom_hw_params(struct snd_pcm_substream *substream,
-			    struct snd_pcm_hw_params *params,
-			    struct snd_soc_dai *dai)
-{
-	struct snd_soc_codec *codec = dai->codec;
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(dai->codec);
-	u8 tx_fs_rate, rx_fs_rate, i2s_bit_mode;
-	u32 compander_fs;
-	int ret;
-
-	pr_debug("%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__,
-		 dai->name, dai->id, params_rate(params),
-		 params_channels(params));
-
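-	/* tx_fs_rate programs the decimator sample-rate field (bits 2:0),
-	 * rx_fs_rate the interpolator sample-rate field (bits 7:5), and
-	 * compander_fs the matching compander sample-rate index.
-	 */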
-	switch (params_rate(params)) {
-	case 8000:
-		tx_fs_rate = 0x00;
-		rx_fs_rate = 0x00;
-		compander_fs = COMPANDER_FS_8KHZ;
-		break;
-	case 16000:
-		tx_fs_rate = 0x01;
-		rx_fs_rate = 0x20;
-		compander_fs = COMPANDER_FS_16KHZ;
-		break;
-	case 32000:
-		tx_fs_rate = 0x02;
-		rx_fs_rate = 0x40;
-		compander_fs = COMPANDER_FS_32KHZ;
-		break;
-	case 48000:
-		tx_fs_rate = 0x03;
-		rx_fs_rate = 0x60;
-		compander_fs = COMPANDER_FS_48KHZ;
-		break;
-	case 96000:
-		tx_fs_rate = 0x04;
-		rx_fs_rate = 0x80;
-		compander_fs = COMPANDER_FS_96KHZ;
-		break;
-	case 192000:
-		tx_fs_rate = 0x05;
-		rx_fs_rate = 0xA0;
-		compander_fs = COMPANDER_FS_192KHZ;
-		break;
-	default:
-		pr_err("%s: Invalid sampling rate %d\n", __func__,
-			params_rate(params));
-		return -EINVAL;
-	}
-
-	switch (substream->stream) {
-	case SNDRV_PCM_STREAM_CAPTURE:
-		if (dai->id != AIF4_VIFEED &&
-		    dai->id != AIF4_MAD_TX) {
-			ret = tomtom_set_decimator_rate(dai, tx_fs_rate,
-							   params_rate(params));
-			if (ret < 0) {
-				pr_err("%s: set decimator rate failed %d\n",
-					__func__, ret);
-				return ret;
-			}
-		}
-
-		tomtom->dai[dai->id].rate   = params_rate(params);
-
-		switch (params_format(params)) {
-		case SNDRV_PCM_FORMAT_S16_LE:
-			i2s_bit_mode = 0x01;
-			tomtom->dai[dai->id].bit_width = 16;
-			break;
-		case SNDRV_PCM_FORMAT_S24_LE:
-			tomtom->dai[dai->id].bit_width = 24;
-			i2s_bit_mode = 0x00;
-			break;
-		case SNDRV_PCM_FORMAT_S32_LE:
-			tomtom->dai[dai->id].bit_width = 32;
-			i2s_bit_mode = 0x00;
-			break;
-		default:
-			dev_err(codec->dev,
-				"%s: Invalid format 0x%x\n",
-				 __func__, params_format(params));
-			return -EINVAL;
-		}
-
-		if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
-			snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_TX_I2S_CTL,
-					    0x20, i2s_bit_mode << 5);
-			snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_TX_I2S_CTL,
-					    0x07, tx_fs_rate);
-		} else {
-			/* only generic ports can have sample bit adjustment */
-			if (dai->id != AIF4_VIFEED &&
-			    dai->id != AIF4_MAD_TX)
-				tomtom_set_tx_sb_port_format(params, dai);
-		}
-
-		break;
-
-	case SNDRV_PCM_STREAM_PLAYBACK:
-		ret = tomtom_set_interpolator_rate(dai, rx_fs_rate,
-						  compander_fs,
-						  params_rate(params));
-		if (ret < 0) {
-			pr_err("%s: set interpolator rate failed %d\n",
-				__func__, ret);
-			return ret;
-		}
-		if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
-			switch (params_format(params)) {
-			case SNDRV_PCM_FORMAT_S16_LE:
-				snd_soc_update_bits(codec,
-					TOMTOM_A_CDC_CLK_RX_I2S_CTL,
-					0x20, 0x20);
-				break;
-			case SNDRV_PCM_FORMAT_S32_LE:
-				snd_soc_update_bits(codec,
-					TOMTOM_A_CDC_CLK_RX_I2S_CTL,
-					0x20, 0x00);
-				break;
-			default:
-				pr_err("invalid format\n");
-				break;
-			}
-			snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_RX_I2S_CTL,
-					    0x03, (rx_fs_rate >> 0x05));
-		} else {
-			tomtom_set_rxsb_port_format(params, dai);
-			tomtom->dai[dai->id].rate   = params_rate(params);
-		}
-		break;
-	default:
-		pr_err("%s: Invalid stream type %d\n", __func__,
-			substream->stream);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static struct snd_soc_dai_ops tomtom_dai_ops = {
-	.startup = tomtom_startup,
-	.shutdown = tomtom_shutdown,
-	.hw_params = tomtom_hw_params,
-	.set_sysclk = tomtom_set_dai_sysclk,
-	.set_fmt = tomtom_set_dai_fmt,
-	.set_channel_map = tomtom_set_channel_map,
-	.get_channel_map = tomtom_get_channel_map,
-};
-
-static struct snd_soc_dai_driver tomtom_dai[] = {
-	{
-		.name = "tomtom_rx1",
-		.id = AIF1_PB,
-		.playback = {
-			.stream_name = "AIF1 Playback",
-			.rates = WCD9330_RATES,
-			.formats = TOMTOM_FORMATS_S16_S24_LE,
-			.rate_max = 192000,
-			.rate_min = 8000,
-			.channels_min = 1,
-			.channels_max = 2,
-		},
-		.ops = &tomtom_dai_ops,
-	},
-	{
-		.name = "tomtom_tx1",
-		.id = AIF1_CAP,
-		.capture = {
-			.stream_name = "AIF1 Capture",
-			.rates = WCD9330_RATES,
-			.formats = TOMTOM_FORMATS,
-			.rate_max = 192000,
-			.rate_min = 8000,
-			.channels_min = 1,
-			.channels_max = 4,
-		},
-		.ops = &tomtom_dai_ops,
-	},
-	{
-		.name = "tomtom_rx2",
-		.id = AIF2_PB,
-		.playback = {
-			.stream_name = "AIF2 Playback",
-			.rates = WCD9330_RATES,
-			.formats = TOMTOM_FORMATS_S16_S24_LE,
-			.rate_min = 8000,
-			.rate_max = 192000,
-			.channels_min = 1,
-			.channels_max = 2,
-		},
-		.ops = &tomtom_dai_ops,
-	},
-	{
-		.name = "tomtom_tx2",
-		.id = AIF2_CAP,
-		.capture = {
-			.stream_name = "AIF2 Capture",
-			.rates = WCD9330_RATES,
-			.formats = TOMTOM_FORMATS,
-			.rate_max = 192000,
-			.rate_min = 8000,
-			.channels_min = 1,
-			.channels_max = 8,
-		},
-		.ops = &tomtom_dai_ops,
-	},
-	{
-		.name = "tomtom_rx3",
-		.id = AIF3_PB,
-		.playback = {
-			.stream_name = "AIF3 Playback",
-			.rates = WCD9330_RATES,
-			.formats = TOMTOM_FORMATS_S16_S24_LE,
-			.rate_min = 8000,
-			.rate_max = 192000,
-			.channels_min = 1,
-			.channels_max = 2,
-		},
-		.ops = &tomtom_dai_ops,
-	},
-	{
-		.name = "tomtom_tx3",
-		.id = AIF3_CAP,
-		.capture = {
-			.stream_name = "AIF3 Capture",
-			.rates = WCD9330_RATES,
-			.formats = TOMTOM_FORMATS,
-			.rate_max = 48000,
-			.rate_min = 8000,
-			.channels_min = 1,
-			.channels_max = 2,
-		},
-		.ops = &tomtom_dai_ops,
-	},
-	{
-		.name = "tomtom_vifeedback",
-		.id = AIF4_VIFEED,
-		.capture = {
-			.stream_name = "VIfeed",
-			.rates = SNDRV_PCM_RATE_48000,
-			.formats = TOMTOM_FORMATS,
-			.rate_max = 48000,
-			.rate_min = 48000,
-			.channels_min = 2,
-			.channels_max = 2,
-		},
-		.ops = &tomtom_dai_ops,
-	},
-	{
-		.name = "tomtom_mad1",
-		.id = AIF4_MAD_TX,
-		.capture = {
-			.stream_name = "AIF4 MAD TX",
-			.rates = SNDRV_PCM_RATE_16000,
-			.formats = TOMTOM_FORMATS_S16_S24_LE,
-			.rate_min = 16000,
-			.rate_max = 16000,
-			.channels_min = 1,
-			.channels_max = 1,
-		},
-		.ops = &tomtom_dai_ops,
-	},
-};
-
-static struct snd_soc_dai_driver tomtom_i2s_dai[] = {
-	{
-		.name = "tomtom_i2s_rx1",
-		.id = AIF1_PB,
-		.playback = {
-			.stream_name = "AIF1 Playback",
-			.rates = WCD9330_RATES,
-			.formats = TOMTOM_FORMATS,
-			.rate_max = 192000,
-			.rate_min = 8000,
-			.channels_min = 1,
-			.channels_max = 4,
-		},
-		.ops = &tomtom_dai_ops,
-	},
-	{
-		.name = "tomtom_i2s_tx1",
-		.id = AIF1_CAP,
-		.capture = {
-			.stream_name = "AIF1 Capture",
-			.rates = WCD9330_RATES,
-			.formats = TOMTOM_FORMATS,
-			.rate_max = 192000,
-			.rate_min = 8000,
-			.channels_min = 1,
-			.channels_max = 4,
-		},
-		.ops = &tomtom_dai_ops,
-	},
-	{
-		.name = "tomtom_i2s_rx2",
-		.id = AIF1_PB,
-		.playback = {
-			.stream_name = "AIF2 Playback",
-			.rates = WCD9330_RATES,
-			.formats = TOMTOM_FORMATS,
-			.rate_max = 192000,
-			.rate_min = 8000,
-			.channels_min = 1,
-			.channels_max = 4,
-		},
-		.ops = &tomtom_dai_ops,
-	},
-	{
-		.name = "tomtom_i2s_tx2",
-		.id = AIF1_CAP,
-		.capture = {
-			.stream_name = "AIF2 Capture",
-			.rates = WCD9330_RATES,
-			.formats = TOMTOM_FORMATS,
-			.rate_max = 192000,
-			.rate_min = 8000,
-			.channels_min = 1,
-			.channels_max = 4,
-		},
-		.ops = &tomtom_dai_ops,
-	},
-};
-
-static int tomtom_codec_enable_slim_chmask(struct wcd9xxx_codec_dai_data *dai,
-					  bool up)
-{
-	int ret = 0;
-	struct wcd9xxx_ch *ch;
-
-	if (up) {
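-		/* Mark each slave port in ch_mask; the power-down path waits
-		 * for the SLIM port-closed interrupt handler to clear these
-		 * bits before returning.
-		 */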
-		list_for_each_entry(ch, &dai->wcd9xxx_ch_list, list) {
-			ret = wcd9xxx_get_slave_port(ch->ch_num);
-			if (ret < 0) {
-				pr_err("%s: Invalid slave port ID: %d\n",
-				       __func__, ret);
-				ret = -EINVAL;
-			} else {
-				set_bit(ret, &dai->ch_mask);
-			}
-		}
-	} else {
-		ret = wait_event_timeout(dai->dai_wait, (dai->ch_mask == 0),
-					 msecs_to_jiffies(
-						TOMTOM_SLIM_CLOSE_TIMEOUT));
-		if (!ret) {
-			pr_err("%s: Slim close tx/rx wait timeout\n", __func__);
-			ret = -ETIMEDOUT;
-		} else {
-			ret = 0;
-		}
-	}
-	return ret;
-}
-
-static void tomtom_codec_enable_int_port(struct wcd9xxx_codec_dai_data *dai,
-					  struct snd_soc_codec *codec)
-{
-	struct wcd9xxx_ch *ch;
-	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
-	int port_num = 0;
-	unsigned short reg = 0;
-	u8 val = 0;
-
-	if (!dai || !codec) {
-		pr_err("%s: Invalid params\n", __func__);
-		return;
-	}
-	list_for_each_entry(ch, &dai->wcd9xxx_ch_list, list) {
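-		/* RX and TX ports use separate interrupt-enable register
-		 * banks; each 8-bit register covers eight ports.
-		 */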
-		if (ch->port >= TOMTOM_RX_PORT_START_NUMBER) {
-			port_num = ch->port - TOMTOM_RX_PORT_START_NUMBER;
-			reg = TOMTOM_SLIM_PGD_PORT_INT_EN0 + (port_num / 8);
-			val = wcd9xxx_interface_reg_read(wcd9xxx,
-				reg);
-			if (!(val & (1 << (port_num % 8)))) {
-				val |= (1 << (port_num % 8));
-				wcd9xxx_interface_reg_write(
-					wcd9xxx, reg, val);
-				val = wcd9xxx_interface_reg_read(
-					wcd9xxx, reg);
-			}
-		} else {
-			port_num = ch->port;
-			reg = TOMTOM_SLIM_PGD_PORT_INT_TX_EN0 + (port_num / 8);
-			val = wcd9xxx_interface_reg_read(wcd9xxx,
-				reg);
-			if (!(val & (1 << (port_num % 8)))) {
-				val |= (1 << (port_num % 8));
-				wcd9xxx_interface_reg_write(wcd9xxx,
-					reg, val);
-				val = wcd9xxx_interface_reg_read(
-					wcd9xxx, reg);
-			}
-		}
-	}
-}
-
-static int tomtom_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
-				     struct snd_kcontrol *kcontrol,
-				     int event)
-{
-	struct wcd9xxx *core;
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
-	struct wcd9xxx_codec_dai_data *dai;
-
-	core = dev_get_drvdata(codec->dev->parent);
-
-	pr_debug("%s: event called! codec name %s num_dai %d\n"
-		"stream name %s event %d\n",
-		__func__, codec->component.name,
-		codec->component.num_dai, w->sname, event);
-
-	/* Execute the callback only if interface type is slimbus */
-	if (tomtom_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS)
-		return 0;
-
-	dai = &tomtom_p->dai[w->shift];
-	pr_debug("%s: w->name %s w->shift %d event %d\n",
-		 __func__, w->name, w->shift, event);
-
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		dai->bus_down_in_recovery = false;
-		tomtom_codec_enable_int_port(dai, codec);
-		(void) tomtom_codec_enable_slim_chmask(dai, true);
-		ret = wcd9xxx_cfg_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
-					      dai->rate, dai->bit_width,
-					      &dai->grph);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		ret = wcd9xxx_close_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
-						dai->grph);
-		if (!dai->bus_down_in_recovery)
-			ret = tomtom_codec_enable_slim_chmask(dai, false);
-		else
-			pr_debug("%s: bus in recovery, skip enable slim_chmask\n",
-				__func__);
-		if (ret < 0) {
-			ret = wcd9xxx_disconnect_port(core,
-						      &dai->wcd9xxx_ch_list,
-						      dai->grph);
-			pr_debug("%s: Disconnect RX port, ret = %d\n",
-				 __func__, ret);
-		}
-		break;
-	}
-	return ret;
-}
-
-static int tomtom_codec_enable_slimvi_feedback(struct snd_soc_dapm_widget *w,
-				struct snd_kcontrol *kcontrol,
-				int event)
-{
-	struct wcd9xxx *core = NULL;
-	struct snd_soc_codec *codec = NULL;
-	struct tomtom_priv *tomtom_p = NULL;
-	int ret = 0;
-	struct wcd9xxx_codec_dai_data *dai = NULL;
-
-	if (!w) {
-		pr_err("%s invalid params\n", __func__);
-		return -EINVAL;
-	}
-	codec = snd_soc_dapm_to_codec(w->dapm);
-	tomtom_p = snd_soc_codec_get_drvdata(codec);
-	core = dev_get_drvdata(codec->dev->parent);
-
-	pr_debug("%s: event called! codec name %s num_dai %d stream name %s\n",
-		__func__, codec->component.name,
-		codec->component.num_dai, w->sname);
-
-	/* Execute the callback only if interface type is slimbus */
-	if (tomtom_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
-		pr_err("%s: Interface type is not slimbus\n", __func__);
-		return 0;
-	}
-
-	pr_debug("%s(): w->name %s event %d w->shift %d\n",
-		__func__, w->name, event, w->shift);
-	if (w->shift != AIF4_VIFEED) {
-		pr_err("%s Error in enabling the tx path\n", __func__);
-		ret = -EINVAL;
-		goto out_vi;
-	}
-	dai = &tomtom_p->dai[w->shift];
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		/* Enable V&I sensing */
-		snd_soc_update_bits(codec, TOMTOM_A_SPKR1_PROT_EN,
-				0x88, 0x88);
-		/* Enable spkr VI clocks */
-		snd_soc_update_bits(codec,
-		TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL, 0xC, 0xC);
-		dai->bus_down_in_recovery = false;
-		tomtom_codec_enable_int_port(dai, codec);
-		(void) tomtom_codec_enable_slim_chmask(dai, true);
-		ret = wcd9xxx_cfg_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
-					dai->rate, dai->bit_width,
-					&dai->grph);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		ret = wcd9xxx_close_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
-						dai->grph);
-		if (ret)
-			pr_err("%s error in close_slim_sch_tx %d\n",
-				__func__, ret);
-		if (!dai->bus_down_in_recovery)
-			ret = tomtom_codec_enable_slim_chmask(dai, false);
-		if (ret < 0) {
-			ret = wcd9xxx_disconnect_port(core,
-				&dai->wcd9xxx_ch_list,
-				dai->grph);
-			pr_debug("%s: Disconnect TX port, ret = %d\n",
-				__func__, ret);
-		}
-
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL,
-				0xC, 0x0);
-		/* Disable V&I sensing */
-		snd_soc_update_bits(codec, TOMTOM_A_SPKR1_PROT_EN,
-				0x88, 0x00);
-		break;
-	}
-out_vi:
-	return ret;
-}
-
-/* __tomtom_codec_enable_slimtx: Enable the slimbus slave port
- *				 for the TX path
- * @codec: Handle to the codec for which the slave port is to be
- *	   enabled.
- * @event: DAPM event indicating whether to enable or disable the port.
- * @dai_data: The DAI-specific data for the DAI being enabled.
- */
-static int __tomtom_codec_enable_slimtx(struct snd_soc_codec *codec,
-		int event, struct wcd9xxx_codec_dai_data *dai_data)
-{
-	struct wcd9xxx *core;
-	int ret = 0;
-
-	core = dev_get_drvdata(codec->dev->parent);
-
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		dai_data->bus_down_in_recovery = false;
-		tomtom_codec_enable_int_port(dai_data, codec);
-		(void) tomtom_codec_enable_slim_chmask(dai_data, true);
-		ret = wcd9xxx_cfg_slim_sch_tx(core, &dai_data->wcd9xxx_ch_list,
-					      dai_data->rate,
-					      dai_data->bit_width,
-					      &dai_data->grph);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		ret = wcd9xxx_close_slim_sch_tx(core,
-						&dai_data->wcd9xxx_ch_list,
-						dai_data->grph);
-		if (!dai_data->bus_down_in_recovery)
-			ret = tomtom_codec_enable_slim_chmask(dai_data, false);
-		if (ret < 0) {
-			ret = wcd9xxx_disconnect_port(core,
-					&dai_data->wcd9xxx_ch_list,
-					dai_data->grph);
-			dev_dbg(codec->dev,
-				"%s: Disconnect TX port, ret = %d\n",
-				 __func__, ret);
-		}
-		break;
-	}
-
-	return ret;
-}
-
-/*
- * tomtom_codec_enable_slimtx_mad: Callback function that will be invoked
- *	to setup the slave port for MAD.
- * @codec: Handle to the codec
- * @event: Indicates whether to enable or disable the slave port
- */
-static int tomtom_codec_enable_slimtx_mad(struct snd_soc_codec *codec,
-					  u8 event)
-{
-	struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-	struct wcd9xxx_codec_dai_data *dai;
-	int dapm_event = SND_SOC_DAPM_POST_PMU;
-
-	dai = &tomtom_p->dai[AIF4_MAD_TX];
-
-	if (event == 0)
-		dapm_event = SND_SOC_DAPM_POST_PMD;
-
-	dev_dbg(codec->dev,
-		"%s: mad_channel, event = 0x%x\n",
-		 __func__, event);
-	return __tomtom_codec_enable_slimtx(codec, dapm_event, dai);
-}
-
-/*
- * tomtom_codec_enable_slimtx: DAPM widget callback for TX widgets
- * @w: widget for which this callback is invoked
- * @kcontrol: kcontrol associated with this widget
- * @event: DAPM supplied event indicating enable/disable
- */
-static int tomtom_codec_enable_slimtx(struct snd_soc_dapm_widget *w,
-				     struct snd_kcontrol *kcontrol,
-				     int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-	struct wcd9xxx_codec_dai_data *dai;
-
-	dev_dbg(codec->dev, "%s: event called! codec name %s num_dai %d stream name %s\n",
-		__func__, codec->component.name,
-		codec->component.num_dai, w->sname);
-
-	/* Execute the callback only if interface type is slimbus */
-	if (tomtom_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS)
-		return 0;
-
-	dev_dbg(codec->dev,
-		"%s(): w->name %s event %d w->shift %d\n",
-		__func__, w->name, event, w->shift);
-
-	dai = &tomtom_p->dai[w->shift];
-	return __tomtom_codec_enable_slimtx(codec, event, dai);
-}
-
-static int tomtom_codec_enable_ear_pa(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-
-	pr_debug("%s %s %d\n", __func__, w->name, event);
-
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d,
-						 WCD9XXX_CLSH_STATE_EAR,
-						 WCD9XXX_CLSH_REQ_ENABLE,
-						 WCD9XXX_CLSH_EVENT_POST_PA);
-
-		usleep_range(5000, 5100);
-		break;
-	}
-	return 0;
-}
-
-static int tomtom_codec_ear_dac_event(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-
-	pr_debug("%s %s %d\n", __func__, w->name, event);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d,
-						 WCD9XXX_CLSH_STATE_EAR,
-						 WCD9XXX_CLSH_REQ_ENABLE,
-						 WCD9XXX_CLSH_EVENT_PRE_DAC);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d,
-						 WCD9XXX_CLSH_STATE_EAR,
-						 WCD9XXX_CLSH_REQ_DISABLE,
-						 WCD9XXX_CLSH_EVENT_POST_PA);
-		usleep_range(5000, 5100);
-		break;
-	default:
-		break;
-	}
-	return 0;
-}
-
-static int tomtom_codec_set_iir_gain(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
-	pr_debug("%s: event = %d\n", __func__, event);
-
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU: /* fall through */
-	case SND_SOC_DAPM_PRE_PMD:
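-		/* Write the IIR gain registers back with their current
-		 * values so the programmed gain takes effect across the
-		 * power transition.
-		 */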
-		if (strnstr(w->name, "IIR1", sizeof("IIR1"))) {
-			snd_soc_write(codec, TOMTOM_A_CDC_IIR1_GAIN_B1_CTL,
-				      snd_soc_read(codec,
-					      TOMTOM_A_CDC_IIR1_GAIN_B1_CTL));
-			snd_soc_write(codec, TOMTOM_A_CDC_IIR1_GAIN_B2_CTL,
-				      snd_soc_read(codec,
-					      TOMTOM_A_CDC_IIR1_GAIN_B2_CTL));
-			snd_soc_write(codec, TOMTOM_A_CDC_IIR1_GAIN_B3_CTL,
-				      snd_soc_read(codec,
-					      TOMTOM_A_CDC_IIR1_GAIN_B3_CTL));
-			snd_soc_write(codec, TOMTOM_A_CDC_IIR1_GAIN_B4_CTL,
-				      snd_soc_read(codec,
-					      TOMTOM_A_CDC_IIR1_GAIN_B4_CTL));
-		} else {
-			snd_soc_write(codec, TOMTOM_A_CDC_IIR2_GAIN_B1_CTL,
-				      snd_soc_read(codec,
-					      TOMTOM_A_CDC_IIR2_GAIN_B1_CTL));
-			snd_soc_write(codec, TOMTOM_A_CDC_IIR2_GAIN_B2_CTL,
-				      snd_soc_read(codec,
-					      TOMTOM_A_CDC_IIR2_GAIN_B2_CTL));
-			snd_soc_write(codec, TOMTOM_A_CDC_IIR2_GAIN_B3_CTL,
-				      snd_soc_read(codec,
-					      TOMTOM_A_CDC_IIR2_GAIN_B3_CTL));
-			snd_soc_write(codec, TOMTOM_A_CDC_IIR2_GAIN_B4_CTL,
-				      snd_soc_read(codec,
-					      TOMTOM_A_CDC_IIR2_GAIN_B4_CTL));
-		}
-		break;
-	}
-	return 0;
-}
-
-static int tomtom_codec_dsm_mux_event(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	u8 reg_val, zoh_mux_val = 0x00;
-
-	pr_debug("%s: event = %d\n", __func__, event);
-
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		reg_val = snd_soc_read(codec, TOMTOM_A_CDC_CONN_CLSH_CTL);
-
-		if ((reg_val & 0x30) == 0x10)
-			zoh_mux_val = 0x04;
-		else if ((reg_val & 0x30) == 0x20)
-			zoh_mux_val = 0x08;
-
-		if (zoh_mux_val != 0x00)
-			snd_soc_update_bits(codec,
-					TOMTOM_A_CDC_CONN_CLSH_CTL,
-					0x0C, zoh_mux_val);
-		break;
-
-	case SND_SOC_DAPM_POST_PMD:
-		snd_soc_update_bits(codec, TOMTOM_A_CDC_CONN_CLSH_CTL,
-							0x0C, 0x00);
-		break;
-	}
-	return 0;
-}
-
-static int tomtom_codec_enable_anc_ear(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	int ret = 0;
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		ret = tomtom_codec_enable_anc(w, kcontrol, event);
-		msleep(50);
-		snd_soc_update_bits(codec, TOMTOM_A_RX_EAR_EN, 0x10, 0x10);
-		break;
-	case SND_SOC_DAPM_POST_PMU:
-		ret = tomtom_codec_enable_ear_pa(w, kcontrol, event);
-		break;
-	case SND_SOC_DAPM_PRE_PMD:
-		snd_soc_update_bits(codec, TOMTOM_A_RX_EAR_EN, 0x10, 0x00);
-		msleep(40);
-		ret |= tomtom_codec_enable_anc(w, kcontrol, event);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		ret = tomtom_codec_enable_ear_pa(w, kcontrol, event);
-		break;
-	}
-	return ret;
-}
-
-/* TODO: Have separate DAPM widgets for I2S and slimbus.
- * Might need to have callbacks registered only for slimbus.
- */
-static const struct snd_soc_dapm_widget tomtom_dapm_widgets[] = {
-	/*RX stuff */
-	SND_SOC_DAPM_OUTPUT("EAR"),
-
-	SND_SOC_DAPM_PGA_E("EAR PA", TOMTOM_A_RX_EAR_EN, 4, 0, NULL, 0,
-			tomtom_codec_enable_ear_pa, SND_SOC_DAPM_POST_PMU),
-
-	SND_SOC_DAPM_MIXER_E("DAC1", TOMTOM_A_RX_EAR_EN, 6, 0, dac1_switch,
-		ARRAY_SIZE(dac1_switch), tomtom_codec_ear_dac_event,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_AIF_IN_E("AIF1 PB", "AIF1 Playback", 0, SND_SOC_NOPM,
-				AIF1_PB, 0, tomtom_codec_enable_slimrx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_AIF_IN_E("AIF2 PB", "AIF2 Playback", 0, SND_SOC_NOPM,
-				AIF2_PB, 0, tomtom_codec_enable_slimrx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_AIF_IN_E("AIF3 PB", "AIF3 Playback", 0, SND_SOC_NOPM,
-				AIF3_PB, 0, tomtom_codec_enable_slimrx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX("SLIM RX1 MUX", SND_SOC_NOPM, TOMTOM_RX1, 0,
-				&slim_rx_mux[TOMTOM_RX1]),
-	SND_SOC_DAPM_MUX("SLIM RX2 MUX", SND_SOC_NOPM, TOMTOM_RX2, 0,
-				&slim_rx_mux[TOMTOM_RX2]),
-	SND_SOC_DAPM_MUX("SLIM RX3 MUX", SND_SOC_NOPM, TOMTOM_RX3, 0,
-				&slim_rx_mux[TOMTOM_RX3]),
-	SND_SOC_DAPM_MUX("SLIM RX4 MUX", SND_SOC_NOPM, TOMTOM_RX4, 0,
-				&slim_rx_mux[TOMTOM_RX4]),
-	SND_SOC_DAPM_MUX("SLIM RX5 MUX", SND_SOC_NOPM, TOMTOM_RX5, 0,
-				&slim_rx_mux[TOMTOM_RX5]),
-	SND_SOC_DAPM_MUX("SLIM RX6 MUX", SND_SOC_NOPM, TOMTOM_RX6, 0,
-				&slim_rx_mux[TOMTOM_RX6]),
-	SND_SOC_DAPM_MUX("SLIM RX7 MUX", SND_SOC_NOPM, TOMTOM_RX7, 0,
-				&slim_rx_mux[TOMTOM_RX7]),
-	SND_SOC_DAPM_MUX("SLIM RX8 MUX", SND_SOC_NOPM, TOMTOM_RX8, 0,
-				&slim_rx_mux[TOMTOM_RX8]),
-
-	SND_SOC_DAPM_MIXER("SLIM RX1", SND_SOC_NOPM, 0, 0, NULL, 0),
-	SND_SOC_DAPM_MIXER("SLIM RX2", SND_SOC_NOPM, 0, 0, NULL, 0),
-	SND_SOC_DAPM_MIXER("SLIM RX3", SND_SOC_NOPM, 0, 0, NULL, 0),
-	SND_SOC_DAPM_MIXER("SLIM RX4", SND_SOC_NOPM, 0, 0, NULL, 0),
-	SND_SOC_DAPM_MIXER("SLIM RX5", SND_SOC_NOPM, 0, 0, NULL, 0),
-	SND_SOC_DAPM_MIXER("SLIM RX6", SND_SOC_NOPM, 0, 0, NULL, 0),
-	SND_SOC_DAPM_MIXER("SLIM RX7", SND_SOC_NOPM, 0, 0, NULL, 0),
-	SND_SOC_DAPM_MIXER("SLIM RX8", SND_SOC_NOPM, 0, 0, NULL, 0),
-
-	/* Headphone */
-	SND_SOC_DAPM_OUTPUT("HEADPHONE"),
-	SND_SOC_DAPM_PGA_E("HPHL", TOMTOM_A_RX_HPH_CNP_EN, 5, 0, NULL, 0,
-		tomtom_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
-		SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MIXER_E("HPHL DAC", TOMTOM_A_RX_HPH_L_DAC_CTL, 7, 0,
-		hphl_switch, ARRAY_SIZE(hphl_switch), tomtom_hphl_dac_event,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_PGA_E("HPHR", TOMTOM_A_RX_HPH_CNP_EN, 4, 0, NULL, 0,
-		tomtom_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
-		SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_DAC_E("HPHR DAC", NULL, TOMTOM_A_RX_HPH_R_DAC_CTL, 7, 0,
-		tomtom_hphr_dac_event,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	/* Speaker */
-	SND_SOC_DAPM_OUTPUT("LINEOUT1"),
-	SND_SOC_DAPM_OUTPUT("LINEOUT2"),
-	SND_SOC_DAPM_OUTPUT("LINEOUT3"),
-	SND_SOC_DAPM_OUTPUT("LINEOUT4"),
-	SND_SOC_DAPM_OUTPUT("SPK_OUT"),
-
-	SND_SOC_DAPM_PGA_E("LINEOUT1 PA", TOMTOM_A_RX_LINE_CNP_EN, 0, 0, NULL,
-			0, tomtom_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU |
-			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_PGA_E("LINEOUT2 PA", TOMTOM_A_RX_LINE_CNP_EN, 1, 0, NULL,
-			0, tomtom_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU |
-			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_PGA_E("LINEOUT3 PA", TOMTOM_A_RX_LINE_CNP_EN, 2, 0, NULL,
-			0, tomtom_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU |
-			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_PGA_E("LINEOUT4 PA", TOMTOM_A_RX_LINE_CNP_EN, 3, 0, NULL,
-			0, tomtom_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU |
-			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_PGA_E("SPK PA", SND_SOC_NOPM, 0, 0, NULL,
-			   0, tomtom_codec_enable_spk_pa,
-			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_PGA_E("SPK2 PA", SND_SOC_NOPM, 0, 0, NULL,
-			   0, tomtom_codec_enable_spk_pa,
-			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_DAC_E("LINEOUT1 DAC", NULL, TOMTOM_A_RX_LINE_1_DAC_CTL, 7,
-		0, tomtom_lineout_dac_event,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_DAC_E("LINEOUT2 DAC", NULL, TOMTOM_A_RX_LINE_2_DAC_CTL, 7,
-		0, tomtom_lineout_dac_event,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_DAC_E("LINEOUT3 DAC", NULL, TOMTOM_A_RX_LINE_3_DAC_CTL, 7,
-		0, tomtom_lineout_dac_event,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_SWITCH("LINEOUT3 DAC GROUND", SND_SOC_NOPM, 0, 0,
-				&lineout3_ground_switch),
-	SND_SOC_DAPM_DAC_E("LINEOUT4 DAC", NULL, TOMTOM_A_RX_LINE_4_DAC_CTL, 7,
-		0, tomtom_lineout_dac_event,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_SWITCH("LINEOUT4 DAC GROUND", SND_SOC_NOPM, 0, 0,
-				&lineout4_ground_switch),
-
-	SND_SOC_DAPM_DAC_E("SPK DAC", NULL, TOMTOM_A_CDC_BOOST_TRGR_EN, 0, 0,
-			   tomtom_spk_dac_event,
-			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_DAC_E("SPK2 DAC", NULL, TOMTOM_A_CDC_BOOST_TRGR_EN, 1, 0,
-			   tomtom_spk_dac_event,
-			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_SUPPLY("VDD_SPKDRV", SND_SOC_NOPM, 0, 0,
-			    tomtom_codec_enable_vdd_spkr,
-			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_SUPPLY("VDD_SPKDRV2", SND_SOC_NOPM, 0, 0,
-			    tomtom_codec_enable_vdd_spkr2,
-			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MIXER("RX1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
-	SND_SOC_DAPM_MIXER("RX2 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
-	SND_SOC_DAPM_MIXER("RX7 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
-
-	SND_SOC_DAPM_MIXER("RX1 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
-	SND_SOC_DAPM_MIXER("RX2 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
-
-	SND_SOC_DAPM_MIXER_E("RX3 MIX1", TOMTOM_A_CDC_CLK_RX_B1_CTL, 2, 0, NULL,
-		0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU),
-	SND_SOC_DAPM_MIXER_E("RX4 MIX1", TOMTOM_A_CDC_CLK_RX_B1_CTL, 3, 0, NULL,
-		0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU),
-	SND_SOC_DAPM_MIXER_E("RX5 MIX1", TOMTOM_A_CDC_CLK_RX_B1_CTL, 4, 0, NULL,
-		0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU),
-	SND_SOC_DAPM_MIXER_E("RX6 MIX1", TOMTOM_A_CDC_CLK_RX_B1_CTL, 5, 0, NULL,
-		0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU),
-	SND_SOC_DAPM_MIXER_E("RX7 MIX2", TOMTOM_A_CDC_CLK_RX_B1_CTL, 6, 0, NULL,
-		0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU),
-	SND_SOC_DAPM_MIXER_E("RX8 MIX1", TOMTOM_A_CDC_CLK_RX_B1_CTL, 7, 0, NULL,
-		0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMU),
-
-	SND_SOC_DAPM_MUX_E("RX1 INTERP", TOMTOM_A_CDC_CLK_RX_B1_CTL, 0, 0,
-			&rx1_interp_mux, tomtom_codec_enable_interpolator,
-			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
-	SND_SOC_DAPM_MUX_E("RX2 INTERP", TOMTOM_A_CDC_CLK_RX_B1_CTL, 1, 0,
-			&rx2_interp_mux, tomtom_codec_enable_interpolator,
-			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
-
-
-	SND_SOC_DAPM_MIXER("RX1 CHAIN", TOMTOM_A_CDC_RX1_B6_CTL, 5, 0, NULL, 0),
-	SND_SOC_DAPM_MIXER("RX2 CHAIN", TOMTOM_A_CDC_RX2_B6_CTL, 5, 0, NULL, 0),
-
-	SND_SOC_DAPM_MUX("RX1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
-		&rx_mix1_inp1_mux),
-	SND_SOC_DAPM_MUX("RX1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
-		&rx_mix1_inp2_mux),
-	SND_SOC_DAPM_MUX("RX1 MIX1 INP3", SND_SOC_NOPM, 0, 0,
-		&rx_mix1_inp3_mux),
-	SND_SOC_DAPM_MUX("RX2 MIX1 INP1", SND_SOC_NOPM, 0, 0,
-		&rx2_mix1_inp1_mux),
-	SND_SOC_DAPM_MUX("RX2 MIX1 INP2", SND_SOC_NOPM, 0, 0,
-		&rx2_mix1_inp2_mux),
-	SND_SOC_DAPM_MUX("RX3 MIX1 INP1", SND_SOC_NOPM, 0, 0,
-		&rx3_mix1_inp1_mux),
-	SND_SOC_DAPM_MUX("RX3 MIX1 INP2", SND_SOC_NOPM, 0, 0,
-		&rx3_mix1_inp2_mux),
-	SND_SOC_DAPM_MUX("RX4 MIX1 INP1", SND_SOC_NOPM, 0, 0,
-		&rx4_mix1_inp1_mux),
-	SND_SOC_DAPM_MUX("RX4 MIX1 INP2", SND_SOC_NOPM, 0, 0,
-		&rx4_mix1_inp2_mux),
-	SND_SOC_DAPM_MUX("RX5 MIX1 INP1", SND_SOC_NOPM, 0, 0,
-		&rx5_mix1_inp1_mux),
-	SND_SOC_DAPM_MUX("RX5 MIX1 INP2", SND_SOC_NOPM, 0, 0,
-		&rx5_mix1_inp2_mux),
-	SND_SOC_DAPM_MUX("RX6 MIX1 INP1", SND_SOC_NOPM, 0, 0,
-		&rx6_mix1_inp1_mux),
-	SND_SOC_DAPM_MUX("RX6 MIX1 INP2", SND_SOC_NOPM, 0, 0,
-		&rx6_mix1_inp2_mux),
-	SND_SOC_DAPM_MUX("RX7 MIX1 INP1", SND_SOC_NOPM, 0, 0,
-		&rx7_mix1_inp1_mux),
-	SND_SOC_DAPM_MUX("RX7 MIX1 INP2", SND_SOC_NOPM, 0, 0,
-		&rx7_mix1_inp2_mux),
-	SND_SOC_DAPM_MUX("RX8 MIX1 INP1", SND_SOC_NOPM, 0, 0,
-		&rx8_mix1_inp1_mux),
-	SND_SOC_DAPM_MUX("RX8 MIX1 INP2", SND_SOC_NOPM, 0, 0,
-		&rx8_mix1_inp2_mux),
-	SND_SOC_DAPM_MUX("RX1 MIX2 INP1", SND_SOC_NOPM, 0, 0,
-		&rx1_mix2_inp1_mux),
-	SND_SOC_DAPM_MUX("RX1 MIX2 INP2", SND_SOC_NOPM, 0, 0,
-		&rx1_mix2_inp2_mux),
-	SND_SOC_DAPM_MUX("RX2 MIX2 INP1", SND_SOC_NOPM, 0, 0,
-		&rx2_mix2_inp1_mux),
-	SND_SOC_DAPM_MUX("RX2 MIX2 INP2", SND_SOC_NOPM, 0, 0,
-		&rx2_mix2_inp2_mux),
-	SND_SOC_DAPM_MUX("RX7 MIX2 INP1", SND_SOC_NOPM, 0, 0,
-		&rx7_mix2_inp1_mux),
-	SND_SOC_DAPM_MUX("RX7 MIX2 INP2", SND_SOC_NOPM, 0, 0,
-		&rx7_mix2_inp2_mux),
-
-	SND_SOC_DAPM_MUX("RDAC5 MUX", SND_SOC_NOPM, 0, 0,
-		&rx_dac5_mux),
-	SND_SOC_DAPM_MUX("RDAC7 MUX", SND_SOC_NOPM, 0, 0,
-		&rx_dac7_mux),
-
-	SND_SOC_DAPM_MUX("MAD_SEL MUX", SND_SOC_NOPM, 0, 0,
-		&mad_sel_mux),
-
-	SND_SOC_DAPM_MUX_E("CLASS_H_DSM MUX", SND_SOC_NOPM, 0, 0,
-		&class_h_dsm_mux, tomtom_codec_dsm_mux_event,
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_SUPPLY("RX_BIAS", SND_SOC_NOPM, 0, 0,
-		tomtom_codec_enable_rx_bias, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_SUPPLY("CDC_I2S_RX_CONN", WCD9XXX_A_CDC_CLK_OTHR_CTL, 5, 0,
-			    NULL, 0),
-
-	/* TX */
-
-	SND_SOC_DAPM_SUPPLY("CDC_CONN", WCD9XXX_A_CDC_CLK_OTHR_CTL, 2, 0, NULL,
-		0),
-
-	SND_SOC_DAPM_SUPPLY("LDO_H", SND_SOC_NOPM, 7, 0,
-			    tomtom_codec_enable_ldo_h,
-			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-	/*
-	 * DAPM 'LDO_H Standalone' is to be powered by the MBHC driver after
-	 * acquiring the codec_resource lock.
-	 * So call __tomtom_codec_enable_ldo_h instead and avoid a deadlock.
-	 */
-	SND_SOC_DAPM_SUPPLY("LDO_H Standalone", SND_SOC_NOPM, 7, 0,
-			    __tomtom_codec_enable_ldo_h,
-			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_SUPPLY("COMP0_CLK", SND_SOC_NOPM, 0, 0,
-		tomtom_config_compander, SND_SOC_DAPM_PRE_PMU |
-			SND_SOC_DAPM_PRE_PMD),
-	SND_SOC_DAPM_SUPPLY("COMP1_CLK", SND_SOC_NOPM, 1, 0,
-		tomtom_config_compander, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_PRE_PMD),
-	SND_SOC_DAPM_SUPPLY("COMP2_CLK", SND_SOC_NOPM, 2, 0,
-		tomtom_config_compander, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_PRE_PMD),
-
-
-	SND_SOC_DAPM_INPUT("AMIC1"),
-	SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 External", SND_SOC_NOPM, 7, 0,
-			       tomtom_codec_enable_micbias,
-			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-			       SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 Internal1", SND_SOC_NOPM, 7, 0,
-			       tomtom_codec_enable_micbias,
-			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-			       SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 Internal2", SND_SOC_NOPM, 7, 0,
-			       tomtom_codec_enable_micbias,
-			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-			       SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_INPUT("AMIC3"),
-
-	SND_SOC_DAPM_INPUT("AMIC4"),
-
-	SND_SOC_DAPM_INPUT("AMIC5"),
-
-	SND_SOC_DAPM_INPUT("AMIC6"),
-
-	SND_SOC_DAPM_MUX_E("DEC1 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 0, 0,
-		&dec1_mux, tomtom_codec_enable_dec,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX_E("DEC2 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 1, 0,
-		&dec2_mux, tomtom_codec_enable_dec,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX_E("DEC3 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 2, 0,
-		&dec3_mux, tomtom_codec_enable_dec,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX_E("DEC4 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 3, 0,
-		&dec4_mux, tomtom_codec_enable_dec,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX_E("DEC5 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 4, 0,
-		&dec5_mux, tomtom_codec_enable_dec,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX_E("DEC6 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 5, 0,
-		&dec6_mux, tomtom_codec_enable_dec,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX_E("DEC7 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 6, 0,
-		&dec7_mux, tomtom_codec_enable_dec,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX_E("DEC8 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 7, 0,
-		&dec8_mux, tomtom_codec_enable_dec,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX_E("DEC9 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL, 0, 0,
-		&dec9_mux, tomtom_codec_enable_dec,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX_E("DEC10 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL, 1, 0,
-		&dec10_mux, tomtom_codec_enable_dec,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_MUX("ANC1 MUX", SND_SOC_NOPM, 0, 0, &anc1_mux),
-	SND_SOC_DAPM_MUX("ANC2 MUX", SND_SOC_NOPM, 0, 0, &anc2_mux),
-
-	SND_SOC_DAPM_OUTPUT("ANC HEADPHONE"),
-	SND_SOC_DAPM_PGA_E("ANC HPHL", SND_SOC_NOPM, 5, 0, NULL, 0,
-		tomtom_codec_enable_anc_hph,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD |
-		SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU),
-	SND_SOC_DAPM_PGA_E("ANC HPHR", SND_SOC_NOPM, 4, 0, NULL, 0,
-		tomtom_codec_enable_anc_hph, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
-		SND_SOC_DAPM_POST_PMU),
-	SND_SOC_DAPM_OUTPUT("ANC EAR"),
-	SND_SOC_DAPM_PGA_E("ANC EAR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
-		tomtom_codec_enable_anc_ear,
-		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD |
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MUX("ANC1 FB MUX", SND_SOC_NOPM, 0, 0, &anc1_fb_mux),
-
-	SND_SOC_DAPM_INPUT("AMIC2"),
-	SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS2_EXTERNAL_STANDALONE, SND_SOC_NOPM,
-			       7, 0, tomtom_codec_enable_micbias,
-			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-			       SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 External", SND_SOC_NOPM, 7, 0,
-			       tomtom_codec_enable_micbias,
-			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-			       SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal1", SND_SOC_NOPM, 7, 0,
-			       tomtom_codec_enable_micbias,
-			       SND_SOC_DAPM_PRE_PMU |
-			       SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal2", SND_SOC_NOPM, 7, 0,
-			       tomtom_codec_enable_micbias,
-			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-			       SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal3", SND_SOC_NOPM, 7, 0,
-			       tomtom_codec_enable_micbias,
-			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-			       SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 External", SND_SOC_NOPM, 7, 0,
-			       tomtom_codec_enable_micbias,
-			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-			       SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 Internal1", SND_SOC_NOPM, 7, 0,
-			       tomtom_codec_enable_micbias,
-			       SND_SOC_DAPM_PRE_PMU |
-			       SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 Internal2", SND_SOC_NOPM, 7, 0,
-			       tomtom_codec_enable_micbias,
-			       SND_SOC_DAPM_PRE_PMU |
-			       SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_MICBIAS_E("MIC BIAS4 External", SND_SOC_NOPM, 7,
-			       0, tomtom_codec_enable_micbias,
-			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-			       SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_AIF_OUT_E("AIF1 CAP", "AIF1 Capture", 0, SND_SOC_NOPM,
-		AIF1_CAP, 0, tomtom_codec_enable_slimtx,
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_AIF_OUT_E("AIF2 CAP", "AIF2 Capture", 0, SND_SOC_NOPM,
-		AIF2_CAP, 0, tomtom_codec_enable_slimtx,
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_AIF_OUT_E("AIF3 CAP", "AIF3 Capture", 0, SND_SOC_NOPM,
-		AIF3_CAP, 0, tomtom_codec_enable_slimtx,
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_AIF_OUT_E("AIF4 VI", "VIfeed", 0, SND_SOC_NOPM,
-		AIF4_VIFEED, 0, tomtom_codec_enable_slimvi_feedback,
-		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_AIF_OUT_E("AIF4 MAD", "AIF4 MAD TX", 0,
-			       SND_SOC_NOPM, 0, 0,
-			       tomtom_codec_enable_mad,
-			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_SWITCH("MADONOFF", SND_SOC_NOPM, 0, 0,
-			    &aif4_mad_switch),
-	SND_SOC_DAPM_INPUT("MADINPUT"),
-	SND_SOC_DAPM_INPUT("MAD_CPE_INPUT"),
-
-	SND_SOC_DAPM_MIXER("AIF1_CAP Mixer", SND_SOC_NOPM, AIF1_CAP, 0,
-		aif1_cap_mixer, ARRAY_SIZE(aif1_cap_mixer)),
-
-	SND_SOC_DAPM_MIXER("AIF2_CAP Mixer", SND_SOC_NOPM, AIF2_CAP, 0,
-		aif2_cap_mixer, ARRAY_SIZE(aif2_cap_mixer)),
-
-	SND_SOC_DAPM_MIXER("AIF3_CAP Mixer", SND_SOC_NOPM, AIF3_CAP, 0,
-		aif3_cap_mixer, ARRAY_SIZE(aif3_cap_mixer)),
-
-	SND_SOC_DAPM_MUX("SLIM TX1 MUX", SND_SOC_NOPM, TOMTOM_TX1, 0,
-		&sb_tx1_mux),
-	SND_SOC_DAPM_MUX("SLIM TX2 MUX", SND_SOC_NOPM, TOMTOM_TX2, 0,
-		&sb_tx2_mux),
-	SND_SOC_DAPM_MUX("SLIM TX3 MUX", SND_SOC_NOPM, TOMTOM_TX3, 0,
-		&sb_tx3_mux),
-	SND_SOC_DAPM_MUX("SLIM TX4 MUX", SND_SOC_NOPM, TOMTOM_TX4, 0,
-		&sb_tx4_mux),
-	SND_SOC_DAPM_MUX("SLIM TX5 MUX", SND_SOC_NOPM, TOMTOM_TX5, 0,
-		&sb_tx5_mux),
-	SND_SOC_DAPM_MUX("SLIM TX6 MUX", SND_SOC_NOPM, TOMTOM_TX6, 0,
-		&sb_tx6_mux),
-	SND_SOC_DAPM_MUX("SLIM TX7 MUX", SND_SOC_NOPM, TOMTOM_TX7, 0,
-		&sb_tx7_mux),
-	SND_SOC_DAPM_MUX("SLIM TX8 MUX", SND_SOC_NOPM, TOMTOM_TX8, 0,
-		&sb_tx8_mux),
-	SND_SOC_DAPM_MUX("SLIM TX9 MUX", SND_SOC_NOPM, TOMTOM_TX9, 0,
-		&sb_tx9_mux),
-	SND_SOC_DAPM_MUX("SLIM TX10 MUX", SND_SOC_NOPM, TOMTOM_TX10, 0,
-		&sb_tx10_mux),
-
-	/* Digital Mic Inputs */
-	SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
-		tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 0, 0,
-		tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_ADC_E("DMIC3", NULL, SND_SOC_NOPM, 0, 0,
-		tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_ADC_E("DMIC4", NULL, SND_SOC_NOPM, 0, 0,
-		tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_ADC_E("DMIC5", NULL, SND_SOC_NOPM, 0, 0,
-		tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_ADC_E("DMIC6", NULL, SND_SOC_NOPM, 0, 0,
-		tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-	/* Sidetone */
-	SND_SOC_DAPM_MUX("IIR1 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp1_mux),
-
-	SND_SOC_DAPM_MUX("IIR1 INP2 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp2_mux),
-
-	SND_SOC_DAPM_MUX("IIR1 INP3 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp3_mux),
-
-	SND_SOC_DAPM_MUX("IIR1 INP4 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp4_mux),
-
-	SND_SOC_DAPM_MIXER_E("IIR1", TOMTOM_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0,
-		tomtom_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD),
-
-	SND_SOC_DAPM_MUX("IIR2 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp1_mux),
-
-	SND_SOC_DAPM_MUX("IIR2 INP2 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp2_mux),
-
-	SND_SOC_DAPM_MUX("IIR2 INP3 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp3_mux),
-
-	SND_SOC_DAPM_MUX("IIR2 INP4 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp4_mux),
-
-	SND_SOC_DAPM_MIXER_E("IIR2", TOMTOM_A_CDC_CLK_SD_CTL, 1, 0, NULL, 0,
-		tomtom_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU |
-		SND_SOC_DAPM_PRE_PMD),
-
-	/* AUX PGA */
-	SND_SOC_DAPM_ADC_E("AUX_PGA_Left", NULL, TOMTOM_A_RX_AUX_SW_CTL, 7, 0,
-		tomtom_codec_enable_aux_pga, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_ADC_E("AUX_PGA_Right", NULL, TOMTOM_A_RX_AUX_SW_CTL, 6, 0,
-		tomtom_codec_enable_aux_pga, SND_SOC_DAPM_PRE_PMU |
-		SND_SOC_DAPM_POST_PMD),
-
-	/* Lineout, ear and HPH PA Mixers */
-
-	SND_SOC_DAPM_MIXER("EAR_PA_MIXER", SND_SOC_NOPM, 0, 0,
-		ear_pa_mix, ARRAY_SIZE(ear_pa_mix)),
-
-	SND_SOC_DAPM_MIXER("HPHL_PA_MIXER", SND_SOC_NOPM, 0, 0,
-		hphl_pa_mix, ARRAY_SIZE(hphl_pa_mix)),
-
-	SND_SOC_DAPM_MIXER("HPHR_PA_MIXER", SND_SOC_NOPM, 0, 0,
-		hphr_pa_mix, ARRAY_SIZE(hphr_pa_mix)),
-
-	SND_SOC_DAPM_MIXER("LINEOUT1_PA_MIXER", SND_SOC_NOPM, 0, 0,
-		lineout1_pa_mix, ARRAY_SIZE(lineout1_pa_mix)),
-
-	SND_SOC_DAPM_MIXER("LINEOUT2_PA_MIXER", SND_SOC_NOPM, 0, 0,
-		lineout2_pa_mix, ARRAY_SIZE(lineout2_pa_mix)),
-
-	SND_SOC_DAPM_MIXER("LINEOUT3_PA_MIXER", SND_SOC_NOPM, 0, 0,
-		lineout3_pa_mix, ARRAY_SIZE(lineout3_pa_mix)),
-
-	SND_SOC_DAPM_MIXER("LINEOUT4_PA_MIXER", SND_SOC_NOPM, 0, 0,
-		lineout4_pa_mix, ARRAY_SIZE(lineout4_pa_mix)),
-
-	SND_SOC_DAPM_SWITCH("VIONOFF", SND_SOC_NOPM, 0, 0,
-			    &aif4_vi_switch),
-
-	SND_SOC_DAPM_INPUT("VIINPUT"),
-};
-
-static irqreturn_t tomtom_slimbus_irq(int irq, void *data)
-{
-	struct tomtom_priv *priv = data;
-	struct snd_soc_codec *codec = priv->codec;
-	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
-	unsigned long status = 0;
-	int i, j, port_id, k;
-	u32 bit;
-	u8 val, int_val = 0;
-	bool tx, cleared;
-	unsigned short reg = 0;
-
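-	/*
-	 * Assemble a 32-bit status word from the consecutive 8-bit port
-	 * interrupt status registers: RX ports land in the low 16 bits,
-	 * TX ports in the high 16 bits.
-	 */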
-	for (i = TOMTOM_SLIM_PGD_PORT_INT_STATUS_RX_0, j = 0;
-	     i <= TOMTOM_SLIM_PGD_PORT_INT_STATUS_TX_1; i++, j++) {
-		val = wcd9xxx_interface_reg_read(wcd9xxx, i);
-		status |= ((u32)val << (8 * j));
-	}
-
-	for_each_set_bit(j, &status, 32) {
-		tx = (j >= 16);
-		port_id = (tx ? j - 16 : j);
-		val = wcd9xxx_interface_reg_read(wcd9xxx,
-				TOMTOM_SLIM_PGD_PORT_INT_RX_SOURCE0 + j);
-		if (val) {
-			if (!tx)
-				reg = TOMTOM_SLIM_PGD_PORT_INT_EN0 +
-					(port_id / 8);
-			else
-				reg = TOMTOM_SLIM_PGD_PORT_INT_TX_EN0 +
-					(port_id / 8);
-			int_val = wcd9xxx_interface_reg_read(
-				wcd9xxx, reg);
-			/*
-			 * Ignore interrupts for ports for which the
-			 * interrupts are not specifically enabled.
-			 */
-			if (!(int_val & (1 << (port_id % 8))))
-				continue;
-		}
-		if (val & TOMTOM_SLIM_IRQ_OVERFLOW)
-			pr_err_ratelimited(
-			   "%s: overflow error on %s port %d, value %x\n",
-			   __func__, (tx ? "TX" : "RX"), port_id, val);
-		if (val & TOMTOM_SLIM_IRQ_UNDERFLOW)
-			pr_err_ratelimited(
-			   "%s: underflow error on %s port %d, value %x\n",
-			   __func__, (tx ? "TX" : "RX"), port_id, val);
-		if ((val & TOMTOM_SLIM_IRQ_OVERFLOW) ||
-			(val & TOMTOM_SLIM_IRQ_UNDERFLOW)) {
-			if (!tx)
-				reg = TOMTOM_SLIM_PGD_PORT_INT_EN0 +
-					(port_id / 8);
-			else
-				reg = TOMTOM_SLIM_PGD_PORT_INT_TX_EN0 +
-					(port_id / 8);
-			int_val = wcd9xxx_interface_reg_read(wcd9xxx, reg);
-			if (int_val & (1 << (port_id % 8))) {
-				int_val = int_val ^ (1 << (port_id % 8));
-				wcd9xxx_interface_reg_write(wcd9xxx, reg,
-							    int_val);
-			}
-		}
-		if (val & TOMTOM_SLIM_IRQ_PORT_CLOSED) {
-			/*
-			 * The INT SOURCE registers are ordered RX then TX,
-			 * but the port numbering in ch_mask is the opposite way
-			 */
-			bit = (tx ? j - 16 : j + 16);
-			pr_debug("%s: %s port %d closed value %x, bit %u\n",
-				 __func__, (tx ? "TX" : "RX"), port_id, val,
-				 bit);
-			for (k = 0, cleared = false; k < NUM_CODEC_DAIS; k++) {
-				pr_debug("%s: priv->dai[%d].ch_mask = 0x%lx\n",
-					 __func__, k, priv->dai[k].ch_mask);
-				if (test_and_clear_bit(bit,
-						       &priv->dai[k].ch_mask)) {
-					cleared = true;
-					if (!priv->dai[k].ch_mask)
-						wake_up(&priv->dai[k].dai_wait);
-					/*
-					 * There are cases when multiple DAIs
-					 * might be using the same slimbus
-					 * channel. Hence don't break here.
-					 */
-				}
-			}
-			WARN(!cleared,
-			     "Couldn't find slimbus %s port %d for closing\n",
-			     (tx ? "TX" : "RX"), port_id);
-		}
-		wcd9xxx_interface_reg_write(wcd9xxx,
-					    TOMTOM_SLIM_PGD_PORT_INT_CLR_RX_0 +
-					    (j / 8),
-					    1 << (j % 8));
-	}
-
-	return IRQ_HANDLED;
-}
-
-static int tomtom_handle_pdata(struct tomtom_priv *tomtom)
-{
-	struct snd_soc_codec *codec = tomtom->codec;
-	struct wcd9xxx_pdata *pdata = tomtom->resmgr.pdata;
-	int k1, k2, k3, dec, rc = 0;
-	u8 leg_mode, txfe_bypass, txfe_buff, flag;
-	u8 i = 0, j = 0;
-	u8 val_txfe = 0, value = 0;
-	u8 dmic_ctl_val, mad_dmic_ctl_val;
-	u8 anc_ctl_value = 0;
-	u32 def_dmic_rate;
-	u16 tx_dmic_ctl_reg;
-
-	if (!pdata) {
-		pr_err("%s: NULL pdata\n", __func__);
-		rc = -ENODEV;
-		goto done;
-	}
-
-	leg_mode = pdata->amic_settings.legacy_mode;
-	txfe_bypass = pdata->amic_settings.txfe_enable;
-	txfe_buff = pdata->amic_settings.txfe_buff;
-	flag = pdata->amic_settings.use_pdata;
-
-	/* Make sure settings are correct */
-	if ((pdata->micbias.ldoh_v > WCD9XXX_LDOH_3P0_V) ||
-	    (pdata->micbias.bias1_cfilt_sel > WCD9XXX_CFILT3_SEL) ||
-	    (pdata->micbias.bias2_cfilt_sel > WCD9XXX_CFILT3_SEL) ||
-	    (pdata->micbias.bias3_cfilt_sel > WCD9XXX_CFILT3_SEL) ||
-	    (pdata->micbias.bias4_cfilt_sel > WCD9XXX_CFILT3_SEL)) {
-		rc = -EINVAL;
-		goto done;
-	}
-	/* figure out k value */
-	k1 = wcd9xxx_resmgr_get_k_val(&tomtom->resmgr,
-				      pdata->micbias.cfilt1_mv);
-	k2 = wcd9xxx_resmgr_get_k_val(&tomtom->resmgr,
-				      pdata->micbias.cfilt2_mv);
-	k3 = wcd9xxx_resmgr_get_k_val(&tomtom->resmgr,
-				      pdata->micbias.cfilt3_mv);
-	if (k1 < 0 || k2 < 0 || k3 < 0) {
-		rc = -EINVAL;
-		goto done;
-	}
-	/* Set voltage level and always use LDO */
-	snd_soc_update_bits(codec, TOMTOM_A_LDO_H_MODE_1, 0x0C,
-			    (pdata->micbias.ldoh_v << 2));
-
-	snd_soc_update_bits(codec, TOMTOM_A_MICB_CFILT_1_VAL, 0xFC, (k1 << 2));
-	snd_soc_update_bits(codec, TOMTOM_A_MICB_CFILT_2_VAL, 0xFC, (k2 << 2));
-	snd_soc_update_bits(codec, TOMTOM_A_MICB_CFILT_3_VAL, 0xFC, (k3 << 2));
-
-	snd_soc_update_bits(codec, TOMTOM_A_MICB_1_CTL, 0x60,
-			    (pdata->micbias.bias1_cfilt_sel << 5));
-	snd_soc_update_bits(codec, TOMTOM_A_MICB_2_CTL, 0x60,
-			    (pdata->micbias.bias2_cfilt_sel << 5));
-	snd_soc_update_bits(codec, TOMTOM_A_MICB_3_CTL, 0x60,
-			    (pdata->micbias.bias3_cfilt_sel << 5));
-	snd_soc_update_bits(codec, tomtom->resmgr.reg_addr->micb_4_ctl, 0x60,
-			    (pdata->micbias.bias4_cfilt_sel << 5));
-
-	for (i = 0; i < 6; j++, i += 2) {
-		if (flag & (0x01 << i)) {
-			val_txfe = (txfe_bypass & (0x01 << i)) ? 0x20 : 0x00;
-			val_txfe = val_txfe |
-				((txfe_buff & (0x01 << i)) ? 0x10 : 0x00);
-			snd_soc_update_bits(codec,
-				TOMTOM_A_TX_1_2_TEST_EN + j * 10,
-				0x30, val_txfe);
-		}
-		if (flag & (0x01 << (i + 1))) {
-			val_txfe = (txfe_bypass &
-					(0x01 << (i + 1))) ? 0x02 : 0x00;
-			val_txfe |= (txfe_buff &
-					(0x01 << (i + 1))) ? 0x01 : 0x00;
-			snd_soc_update_bits(codec,
-				TOMTOM_A_TX_1_2_TEST_EN + j * 10,
-				0x03, val_txfe);
-		}
-	}
-	if (flag & 0x40) {
-		value = (leg_mode & 0x40) ? 0x10 : 0x00;
-		value = value | ((txfe_bypass & 0x40) ? 0x02 : 0x00);
-		value = value | ((txfe_buff & 0x40) ? 0x01 : 0x00);
-		snd_soc_update_bits(codec, TOMTOM_A_TX_7_MBHC_EN,
-			0x13, value);
-	}
-
-	if (pdata->ocp.use_pdata) {
-		/* not defined in CODEC specification */
-		if (pdata->ocp.hph_ocp_limit == 1 ||
-			pdata->ocp.hph_ocp_limit == 5) {
-			rc = -EINVAL;
-			goto done;
-		}
-		snd_soc_update_bits(codec, TOMTOM_A_RX_COM_OCP_CTL,
-			0x0F, pdata->ocp.num_attempts);
-		snd_soc_write(codec, TOMTOM_A_RX_COM_OCP_COUNT,
-			((pdata->ocp.run_time << 4) | pdata->ocp.wait_time));
-		snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_OCP_CTL,
-			0xE0, (pdata->ocp.hph_ocp_limit << 5));
-	}
-
-	for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
-		if (pdata->regulator[i].name &&
-		    !strcmp(pdata->regulator[i].name, "CDC_VDDA_RX")) {
-			if (pdata->regulator[i].min_uV == 1800000 &&
-			    pdata->regulator[i].max_uV == 1800000) {
-				snd_soc_write(codec, TOMTOM_A_BIAS_REF_CTL,
-					      0x1C);
-			} else if (pdata->regulator[i].min_uV == 2200000 &&
-				   pdata->regulator[i].max_uV == 2200000) {
-				snd_soc_write(codec, TOMTOM_A_BIAS_REF_CTL,
-					      0x1E);
-			} else {
-				pr_err("%s: unsupported CDC_VDDA_RX voltage\n"
-				       "min %d, max %d\n", __func__,
-				       pdata->regulator[i].min_uV,
-				       pdata->regulator[i].max_uV);
-				rc = -EINVAL;
-			}
-			break;
-		}
-	}
-
-	/* Set micbias capless mode with tail current */
-	value = (pdata->micbias.bias1_cap_mode == MICBIAS_EXT_BYP_CAP ?
-		 0x00 : 0x16);
-	snd_soc_update_bits(codec, TOMTOM_A_MICB_1_CTL, 0x1E, value);
-	value = (pdata->micbias.bias2_cap_mode == MICBIAS_EXT_BYP_CAP ?
-		 0x00 : 0x16);
-	snd_soc_update_bits(codec, TOMTOM_A_MICB_2_CTL, 0x1E, value);
-	value = (pdata->micbias.bias3_cap_mode == MICBIAS_EXT_BYP_CAP ?
-		 0x00 : 0x16);
-	snd_soc_update_bits(codec, TOMTOM_A_MICB_3_CTL, 0x1E, value);
-	value = (pdata->micbias.bias4_cap_mode == MICBIAS_EXT_BYP_CAP ?
-		 0x00 : 0x16);
-	snd_soc_update_bits(codec, TOMTOM_A_MICB_4_CTL, 0x1E, value);
-
-	/* Set the DMIC sample rate */
-	switch (pdata->mclk_rate) {
-	case TOMTOM_MCLK_CLK_9P6MHZ:
-		def_dmic_rate =
-			WCD9XXX_DMIC_SAMPLE_RATE_4P8MHZ;
-		break;
-	case TOMTOM_MCLK_CLK_12P288MHZ:
-		def_dmic_rate =
-			WCD9XXX_DMIC_SAMPLE_RATE_4P096MHZ;
-		break;
-	default:
-		/* should never happen */
-		pr_err("%s: Invalid mclk_rate %d\n",
-			__func__, pdata->mclk_rate);
-		rc = -EINVAL;
-		goto done;
-	}
-
-	if (pdata->dmic_sample_rate ==
-	    WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED) {
-		pr_info("%s: dmic_rate invalid default = %d\n",
-			__func__, def_dmic_rate);
-		pdata->dmic_sample_rate = def_dmic_rate;
-	}
-
-	if (pdata->mad_dmic_sample_rate ==
-	    WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED) {
-		pr_info("%s: mad_dmic_rate invalid default = %d\n",
-			__func__, def_dmic_rate);
-		/*
-		 * use dmic_sample_rate as the default for MAD
-		 * if mad dmic sample rate is undefined
-		 */
-		pdata->mad_dmic_sample_rate = pdata->dmic_sample_rate;
-	}
-
-	/*
-	 * Default the DMIC clock rates to mad_dmic_sample_rate and the
-	 * ANC/TXFE DMIC rates to dmic_sample_rate, since the ANC/TXFE
-	 * paths are independent of the MAD block.
-	 */
-	mad_dmic_ctl_val = tomtom_get_dmic_clk_val(tomtom->codec,
-				pdata->mclk_rate,
-				pdata->mad_dmic_sample_rate);
-	snd_soc_update_bits(codec, TOMTOM_A_DMIC_B1_CTL,
-		0xE0, mad_dmic_ctl_val << 5);
-	snd_soc_update_bits(codec, TOMTOM_A_DMIC_B2_CTL,
-		0x70, mad_dmic_ctl_val << 4);
-	snd_soc_update_bits(codec, TOMTOM_A_DMIC_B2_CTL,
-		0x0E, mad_dmic_ctl_val << 1);
-
-	dmic_ctl_val = tomtom_get_dmic_clk_val(tomtom->codec,
-				pdata->mclk_rate,
-				pdata->dmic_sample_rate);
-
-	if (dmic_ctl_val == WCD9330_DMIC_CLK_DIV_2)
-		anc_ctl_value = WCD9XXX_ANC_DMIC_X2_ON;
-	else
-		anc_ctl_value = WCD9XXX_ANC_DMIC_X2_OFF;
-
-	for (dec = 0; dec < NUM_DECIMATORS; dec++) {
-		tx_dmic_ctl_reg =
-			TOMTOM_A_CDC_TX1_DMIC_CTL + (8 * dec);
-		snd_soc_update_bits(codec, tx_dmic_ctl_reg,
-				    0x07, dmic_ctl_val);
-	}
-	snd_soc_update_bits(codec, TOMTOM_A_CDC_ANC1_B2_CTL,
-		0x1, anc_ctl_value);
-	snd_soc_update_bits(codec, TOMTOM_A_CDC_ANC2_B2_CTL,
-		0x1, anc_ctl_value);
-done:
-	return rc;
-}
-
-static const struct wcd9xxx_reg_mask_val tomtom_reg_defaults[] = {
-
-	/* set MCLk to 9.6 */
-	TOMTOM_REG_VAL(TOMTOM_A_CHIP_CTL, 0x02),
-
-	/* EAR PA defaults */
-	TOMTOM_REG_VAL(TOMTOM_A_RX_EAR_CMBUFF, 0x05),
-
-	/* RX defaults */
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX1_B5_CTL, 0x79),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX2_B5_CTL, 0x79),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX3_B5_CTL, 0x79),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX4_B5_CTL, 0x79),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX5_B5_CTL, 0x79),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX6_B5_CTL, 0x79),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX7_B5_CTL, 0x79),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX8_B5_CTL, 0x79),
-
-	/* RX1 and RX2 defaults */
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX1_B6_CTL, 0xA0),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX2_B6_CTL, 0xA0),
-
-	/* RX3 to RX7 defaults */
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX3_B6_CTL, 0x80),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX4_B6_CTL, 0x80),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX5_B6_CTL, 0x80),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX6_B6_CTL, 0x80),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX7_B6_CTL, 0x80),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX8_B6_CTL, 0x80),
-
-	/* MAD registers */
-	TOMTOM_REG_VAL(TOMTOM_A_MAD_ANA_CTRL, 0xF1),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_MAIN_CTL_1, 0x00),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_MAIN_CTL_2, 0x00),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_1, 0x00),
-	/* Set SAMPLE_TX_EN bit */
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_2, 0x03),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_3, 0x00),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_4, 0x00),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_5, 0x00),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_6, 0x00),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_7, 0x00),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_8, 0x00),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR, 0x00),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL, 0x40),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_DEBUG_B7_CTL, 0x00),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL, 0x00),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_CLK_OTHR_CTL, 0x00),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_INP_SEL, 0x01),
-
-	/* Set HPH Path to low power mode */
-	TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_BIAS_PA, 0x57),
-
-	/* BUCK default */
-	TOMTOM_REG_VAL(TOMTOM_A_BUCK_CTRL_CCL_4, 0x51),
-	TOMTOM_REG_VAL(TOMTOM_A_BUCK_CTRL_CCL_1, 0x5B),
-};
-
-/*
- * Don't update TOMTOM_A_CHIP_CTL, TOMTOM_A_BUCK_CTRL_CCL_1 and
- * TOMTOM_A_RX_EAR_CMBUFF as those are updated in tomtom_reg_defaults
- */
-static const struct wcd9xxx_reg_mask_val tomtom_1_0_reg_defaults[] = {
-	TOMTOM_REG_VAL(TOMTOM_A_TX_1_GAIN, 0x2),
-	TOMTOM_REG_VAL(TOMTOM_A_TX_2_GAIN, 0x2),
-	TOMTOM_REG_VAL(TOMTOM_A_TX_1_2_ADC_IB, 0x44),
-	TOMTOM_REG_VAL(TOMTOM_A_TX_3_GAIN, 0x2),
-	TOMTOM_REG_VAL(TOMTOM_A_TX_4_GAIN, 0x2),
-	TOMTOM_REG_VAL(TOMTOM_A_TX_3_4_ADC_IB, 0x44),
-	TOMTOM_REG_VAL(TOMTOM_A_TX_5_GAIN, 0x2),
-	TOMTOM_REG_VAL(TOMTOM_A_TX_6_GAIN, 0x2),
-	TOMTOM_REG_VAL(TOMTOM_A_TX_5_6_ADC_IB, 0x44),
-	TOMTOM_REG_VAL(WCD9XXX_A_BUCK_MODE_3, 0xCE),
-	TOMTOM_REG_VAL(WCD9XXX_A_BUCK_CTRL_VCL_1, 0x8),
-	TOMTOM_REG_VAL(TOMTOM_A_BUCK_CTRL_CCL_4, 0x51),
-	TOMTOM_REG_VAL(TOMTOM_A_NCP_DTEST, 0x10),
-	TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_CHOP_CTL, 0xA4),
-	TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_OCP_CTL, 0x69),
-	TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_CNP_WG_CTL, 0xDA),
-	TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_CNP_WG_TIME, 0x15),
-	TOMTOM_REG_VAL(TOMTOM_A_RX_EAR_BIAS_PA, 0x76),
-	TOMTOM_REG_VAL(TOMTOM_A_RX_EAR_CNP, 0xC0),
-	TOMTOM_REG_VAL(TOMTOM_A_RX_LINE_BIAS_PA, 0x78),
-	TOMTOM_REG_VAL(TOMTOM_A_RX_LINE_1_TEST, 0x2),
-	TOMTOM_REG_VAL(TOMTOM_A_RX_LINE_2_TEST, 0x2),
-	TOMTOM_REG_VAL(TOMTOM_A_RX_LINE_3_TEST, 0x2),
-	TOMTOM_REG_VAL(TOMTOM_A_RX_LINE_4_TEST, 0x2),
-	TOMTOM_REG_VAL(TOMTOM_A_SPKR_DRV1_OCP_CTL, 0x97),
-	TOMTOM_REG_VAL(TOMTOM_A_SPKR_DRV1_CLIP_DET, 0x1),
-	TOMTOM_REG_VAL(TOMTOM_A_SPKR_DRV1_IEC, 0x0),
-	TOMTOM_REG_VAL(TOMTOM_A_SPKR_DRV2_OCP_CTL, 0x97),
-	TOMTOM_REG_VAL(TOMTOM_A_SPKR_DRV2_CLIP_DET, 0x1),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_TX1_MUX_CTL, 0x4A),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_TX2_MUX_CTL, 0x4A),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_TX3_MUX_CTL, 0x4A),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_TX4_MUX_CTL, 0x4A),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_TX5_MUX_CTL, 0x4A),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_TX6_MUX_CTL, 0x4A),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_TX7_MUX_CTL, 0x4A),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_TX8_MUX_CTL, 0x4A),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_TX9_MUX_CTL, 0x4A),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_TX10_MUX_CTL, 0x4A),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX1_B4_CTL, 0xB),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX2_B4_CTL, 0xB),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX3_B4_CTL, 0xB),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX4_B4_CTL, 0xB),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX5_B4_CTL, 0xB),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX6_B4_CTL, 0xB),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX7_B4_CTL, 0xB),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX8_B4_CTL, 0xB),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_VBAT_GAIN_UPD_MON, 0x0),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_PA_RAMP_B1_CTL, 0x0),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_PA_RAMP_B2_CTL, 0x0),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_PA_RAMP_B3_CTL, 0x0),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_PA_RAMP_B4_CTL, 0x0),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL, 0x0),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL, 0x0),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_COMP0_B4_CTL, 0x37),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_COMP0_B5_CTL, 0x7f),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_COMP0_B5_CTL, 0x7f),
-};
-
-static const struct wcd9xxx_reg_mask_val tomtom_2_0_reg_defaults[] = {
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_MAIN_CTL_2, 0x32),
-	TOMTOM_REG_VAL(TOMTOM_A_RCO_CTRL, 0x10),
-	TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_L_TEST, 0x0A),
-	TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_R_TEST, 0x0A),
-	TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE0, 0xC3),
-	TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_DATA0, 0x00),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_TX_I2S_SCK_MODE, 0x04),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_TX_I2S_WS_MODE, 0x04),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX_I2S_SCK_MODE, 0x04),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX_I2S_WS_MODE, 0x04),
-	TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE1, 0xE0),
-	TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE2, 0x03),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_JTCK_MODE, 0x04),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_JTDI_MODE, 0x04),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_JTMS_MODE, 0x04),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_JTDO_MODE, 0x04),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_JTRST_MODE, 0x04),
-};
-
-static const struct wcd9xxx_reg_mask_val tomtom_2_0_reg_i2c_defaults[] = {
-	TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE0, 0x00),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_TX_I2S_SCK_MODE, 0x0),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_TX_I2S_WS_MODE, 0x0),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX_I2S_SCK_MODE, 0x0),
-	TOMTOM_REG_VAL(TOMTOM_A_CDC_RX_I2S_WS_MODE, 0x0),
-	TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE1, 0x0),
-	TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE2, 0x0),
-};
-
-static void tomtom_update_reg_defaults(struct snd_soc_codec *codec)
-{
-	u32 i;
-	struct wcd9xxx *tomtom_core = dev_get_drvdata(codec->dev->parent);
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	for (i = 0; i < ARRAY_SIZE(tomtom_reg_defaults); i++)
-		snd_soc_write(codec, tomtom_reg_defaults[i].reg,
-			      tomtom_reg_defaults[i].val);
-
-	for (i = 0; i < ARRAY_SIZE(tomtom_1_0_reg_defaults); i++)
-		snd_soc_write(codec, tomtom_1_0_reg_defaults[i].reg,
-				tomtom_1_0_reg_defaults[i].val);
-
-	if (!TOMTOM_IS_1_0(tomtom_core->version)) {
-		for (i = 0; i < ARRAY_SIZE(tomtom_2_0_reg_defaults); i++)
-			snd_soc_write(codec, tomtom_2_0_reg_defaults[i].reg,
-				      tomtom_2_0_reg_defaults[i].val);
-
-		if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
-			for (i = 0; i < ARRAY_SIZE(tomtom_2_0_reg_i2c_defaults);
-			     i++)
-				snd_soc_write(codec,
-					tomtom_2_0_reg_i2c_defaults[i].reg,
-					tomtom_2_0_reg_i2c_defaults[i].val);
-		}
-	}
-}
-
-static const struct wcd9xxx_reg_mask_val tomtom_codec_reg_init_val[] = {
-	/* Initialize the current threshold to 350 mA
-	 * and the number of wait and run cycles to 4096
-	 */
-	{TOMTOM_A_RX_HPH_OCP_CTL, 0xE1, 0x61},
-	{TOMTOM_A_RX_COM_OCP_COUNT, 0xFF, 0xFF},
-	{TOMTOM_A_RX_HPH_L_TEST, 0x01, 0x01},
-	{TOMTOM_A_RX_HPH_R_TEST, 0x01, 0x01},
-
-	/* Initialize gain registers to use register gain */
-	{TOMTOM_A_RX_HPH_L_GAIN, 0x20, 0x20},
-	{TOMTOM_A_RX_HPH_R_GAIN, 0x20, 0x20},
-	{TOMTOM_A_RX_LINE_1_GAIN, 0x20, 0x20},
-	{TOMTOM_A_RX_LINE_2_GAIN, 0x20, 0x20},
-	{TOMTOM_A_RX_LINE_3_GAIN, 0x20, 0x20},
-	{TOMTOM_A_RX_LINE_4_GAIN, 0x20, 0x20},
-	{TOMTOM_A_SPKR_DRV1_GAIN, 0x04, 0x04},
-	{TOMTOM_A_SPKR_DRV2_GAIN, 0x04, 0x04},
-
-	/* Use 16 bit sample size for TX1 to TX6 */
-	{TOMTOM_A_CDC_CONN_TX_SB_B1_CTL, 0x30, 0x20},
-	{TOMTOM_A_CDC_CONN_TX_SB_B2_CTL, 0x30, 0x20},
-	{TOMTOM_A_CDC_CONN_TX_SB_B3_CTL, 0x30, 0x20},
-	{TOMTOM_A_CDC_CONN_TX_SB_B4_CTL, 0x30, 0x20},
-	{TOMTOM_A_CDC_CONN_TX_SB_B5_CTL, 0x30, 0x20},
-	{TOMTOM_A_CDC_CONN_TX_SB_B6_CTL, 0x30, 0x20},
-
-	/* Use 16 bit sample size for TX7 to TX10 */
-	{TOMTOM_A_CDC_CONN_TX_SB_B7_CTL, 0x60, 0x40},
-	{TOMTOM_A_CDC_CONN_TX_SB_B8_CTL, 0x60, 0x40},
-	{TOMTOM_A_CDC_CONN_TX_SB_B9_CTL, 0x60, 0x40},
-	{TOMTOM_A_CDC_CONN_TX_SB_B10_CTL, 0x60, 0x40},
-
-	/* Enable HPF filter for TX paths */
-	{TOMTOM_A_CDC_TX1_MUX_CTL, 0x8, 0x0},
-	{TOMTOM_A_CDC_TX2_MUX_CTL, 0x8, 0x0},
-	{TOMTOM_A_CDC_TX3_MUX_CTL, 0x8, 0x0},
-	{TOMTOM_A_CDC_TX4_MUX_CTL, 0x8, 0x0},
-	{TOMTOM_A_CDC_TX5_MUX_CTL, 0x8, 0x0},
-	{TOMTOM_A_CDC_TX6_MUX_CTL, 0x8, 0x0},
-	{TOMTOM_A_CDC_TX7_MUX_CTL, 0x8, 0x0},
-	{TOMTOM_A_CDC_TX8_MUX_CTL, 0x8, 0x0},
-	{TOMTOM_A_CDC_TX9_MUX_CTL, 0x8, 0x0},
-	{TOMTOM_A_CDC_TX10_MUX_CTL, 0x8, 0x0},
-
-	/* Compander zone selection */
-	{TOMTOM_A_CDC_COMP0_B4_CTL, 0x3F, 0x37},
-	{TOMTOM_A_CDC_COMP1_B4_CTL, 0x3F, 0x37},
-	{TOMTOM_A_CDC_COMP2_B4_CTL, 0x3F, 0x37},
-	{TOMTOM_A_CDC_COMP0_B5_CTL, 0x7F, 0x7F},
-	{TOMTOM_A_CDC_COMP1_B5_CTL, 0x7F, 0x7F},
-	{TOMTOM_A_CDC_COMP2_B5_CTL, 0x7F, 0x7F},
-
-	/*
-	 * Set up the wavegen timer to 20 ms and disable the chopper
-	 * by default. This corresponds to compander OFF.
-	 */
-	{TOMTOM_A_RX_HPH_CNP_WG_CTL, 0xFF, 0xDB},
-	{TOMTOM_A_RX_HPH_CNP_WG_TIME, 0xFF, 0x58},
-	{TOMTOM_A_RX_HPH_BIAS_WG_OCP, 0xFF, 0x1A},
-	{TOMTOM_A_RX_HPH_CHOP_CTL, 0xFF, 0x24},
-
-	/* Choose max non-overlap time for NCP */
-	{TOMTOM_A_NCP_CLK, 0xFF, 0xFC},
-
-	/* Program the 0.85 volt VBG_REFERENCE */
-	{TOMTOM_A_BIAS_CURR_CTL_2, 0xFF, 0x04},
-
-	/* set MAD input MIC to DMIC1 */
-	{TOMTOM_A_CDC_MAD_INP_SEL, 0x0F, 0x08},
-
-	{TOMTOM_A_INTR_MODE, 0x04, 0x04},
-};
-
-static const struct wcd9xxx_reg_mask_val tomtom_codec_2_0_reg_init_val[] = {
-	{TOMTOM_A_RX_HPH_L_TEST, 0x08, 0x00},
-	{TOMTOM_A_RX_HPH_R_TEST, 0x08, 0x00},
-	{TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD, 0xFF, 0x00},
-	{TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD, 0xFF, 0x00},
-	{TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING, 0x01, 0x01},
-	{TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING, 0x01, 0x01},
-	{TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL, 0x01, 0x00},
-	{TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL, 0x01, 0x00},
-};
-
-static void tomtom_codec_init_reg(struct snd_soc_codec *codec)
-{
-	u32 i;
-	struct wcd9xxx *tomtom_core = dev_get_drvdata(codec->dev->parent);
-
-	for (i = 0; i < ARRAY_SIZE(tomtom_codec_reg_init_val); i++)
-		snd_soc_update_bits(codec, tomtom_codec_reg_init_val[i].reg,
-				tomtom_codec_reg_init_val[i].mask,
-				tomtom_codec_reg_init_val[i].val);
-
-	if (!TOMTOM_IS_1_0(tomtom_core->version)) {
-		for (i = 0; i < ARRAY_SIZE(tomtom_codec_2_0_reg_init_val); i++)
-			snd_soc_update_bits(codec,
-				tomtom_codec_2_0_reg_init_val[i].reg,
-				tomtom_codec_2_0_reg_init_val[i].mask,
-				tomtom_codec_2_0_reg_init_val[i].val);
-	}
-
-}
-
-static void tomtom_slim_interface_init_reg(struct snd_soc_codec *codec)
-{
-	int i;
-	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
-
-	for (i = 0; i < WCD9XXX_SLIM_NUM_PORT_REG; i++)
-		wcd9xxx_interface_reg_write(wcd9xxx,
-					    TOMTOM_SLIM_PGD_PORT_INT_EN0 + i,
-					    0xFF);
-}
-
-static int tomtom_setup_irqs(struct tomtom_priv *tomtom)
-{
-	int ret = 0;
-	struct snd_soc_codec *codec = tomtom->codec;
-	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
-	struct wcd9xxx_core_resource *core_res =
-				&wcd9xxx->core_res;
-
-	ret = wcd9xxx_request_irq(core_res, WCD9XXX_IRQ_SLIMBUS,
-				  tomtom_slimbus_irq, "SLIMBUS Slave", tomtom);
-	if (ret)
-		pr_err("%s: Failed to request irq %d\n", __func__,
-		       WCD9XXX_IRQ_SLIMBUS);
-	else
-		tomtom_slim_interface_init_reg(codec);
-
-	return ret;
-}
-
-static void tomtom_cleanup_irqs(struct tomtom_priv *tomtom)
-{
-	struct snd_soc_codec *codec = tomtom->codec;
-	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
-	struct wcd9xxx_core_resource *core_res =
-				&wcd9xxx->core_res;
-
-	wcd9xxx_free_irq(core_res, WCD9XXX_IRQ_SLIMBUS, tomtom);
-}
-
-static
-struct firmware_cal *tomtom_get_hwdep_fw_cal(struct snd_soc_codec *codec,
-			enum wcd_cal_type type)
-{
-	struct tomtom_priv *tomtom;
-	struct firmware_cal *hwdep_cal;
-
-	if (!codec) {
-		pr_err("%s: NULL codec pointer\n", __func__);
-		return NULL;
-	}
-	tomtom = snd_soc_codec_get_drvdata(codec);
-	hwdep_cal = wcdcal_get_fw_cal(tomtom->fw_data, type);
-	if (!hwdep_cal) {
-		dev_err(codec->dev, "%s: cal not sent by %d\n",
-				__func__, type);
-		return NULL;
-	} else {
-		return hwdep_cal;
-	}
-}
-
-int tomtom_hs_detect(struct snd_soc_codec *codec,
-		    struct wcd9xxx_mbhc_config *mbhc_cfg)
-{
-	int rc;
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	if (mbhc_cfg->insert_detect) {
-		rc = wcd9xxx_mbhc_start(&tomtom->mbhc, mbhc_cfg);
-		if (!rc)
-			tomtom->mbhc_started = true;
-	} else {
-		/* MBHC is disabled, so disable Auto pulldown */
-		snd_soc_update_bits(codec, TOMTOM_A_MBHC_INSERT_DETECT2, 0xC0,
-				    0x00);
-		snd_soc_update_bits(codec, TOMTOM_A_MICB_CFILT_2_CTL, 0x01,
-				    0x00);
-		tomtom->mbhc.mbhc_cfg = NULL;
-		rc = 0;
-	}
-	return rc;
-}
-EXPORT_SYMBOL(tomtom_hs_detect);
-
-void tomtom_hs_detect_exit(struct snd_soc_codec *codec)
-{
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	wcd9xxx_mbhc_stop(&tomtom->mbhc);
-	tomtom->mbhc_started = false;
-}
-EXPORT_SYMBOL(tomtom_hs_detect_exit);
-
-void tomtom_event_register(
-	int (*machine_event_cb)(struct snd_soc_codec *codec,
-				enum wcd9xxx_codec_event),
-	struct snd_soc_codec *codec)
-{
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	tomtom->machine_codec_event_cb = machine_event_cb;
-}
-EXPORT_SYMBOL(tomtom_event_register);
-
-void tomtom_register_ext_clk_cb(
-	int (*codec_ext_clk_en)(struct snd_soc_codec *codec,
-				int enable, bool dapm),
-	int (*get_ext_clk_cnt)(void),
-	struct snd_soc_codec *codec)
-{
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-	tomtom->codec_ext_clk_en_cb =  codec_ext_clk_en;
-	tomtom->codec_get_ext_clk_cnt = get_ext_clk_cnt;
-}
-EXPORT_SYMBOL(tomtom_register_ext_clk_cb);
-
-static void tomtom_init_slim_slave_cfg(struct snd_soc_codec *codec)
-{
-	struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-	struct afe_param_cdc_slimbus_slave_cfg *cfg;
-	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
-	uint64_t eaddr = 0;
-
-	cfg = &priv->slimbus_slave_cfg;
-	cfg->minor_version = 1;
-	cfg->tx_slave_port_offset = 0;
-	cfg->rx_slave_port_offset = 16;
-
-	memcpy(&eaddr, &wcd9xxx->slim->e_addr, sizeof(wcd9xxx->slim->e_addr));
-	WARN_ON(sizeof(wcd9xxx->slim->e_addr) != 6);
-	cfg->device_enum_addr_lsw = eaddr & 0xFFFFFFFF;
-	cfg->device_enum_addr_msw = eaddr >> 32;
-
-	pr_debug("%s: slimbus logical address 0x%llx\n", __func__, eaddr);
-}
-
-static int tomtom_device_down(struct wcd9xxx *wcd9xxx)
-{
-	int count;
-	struct snd_soc_codec *codec;
-	struct tomtom_priv *priv;
-
-	codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
-	priv = snd_soc_codec_get_drvdata(codec);
-	wcd_cpe_ssr_event(priv->cpe_core, WCD_CPE_BUS_DOWN_EVENT);
-	snd_soc_card_change_online_state(codec->component.card, 0);
-	set_bit(BUS_DOWN, &priv->status_mask);
-
-	for (count = 0; count < NUM_CODEC_DAIS; count++)
-		priv->dai[count].bus_down_in_recovery = true;
-	return 0;
-}
-
-static int wcd9xxx_prepare_static_pa(struct wcd9xxx_mbhc *mbhc,
-				     struct list_head *lh)
-{
-	int i;
-	struct snd_soc_codec *codec = mbhc->codec;
-	u32 delay;
-
-	const struct wcd9xxx_reg_mask_val reg_set_paon[] = {
-		{TOMTOM_A_TX_COM_BIAS, 0xff, 0xF0},
-		{WCD9XXX_A_CDC_RX1_B6_CTL, 0xff, 0x81},
-		{WCD9XXX_A_CDC_CLK_RX_B1_CTL, 0x01, 0x01},
-		{WCD9XXX_A_BUCK_MODE_2, 0xff, 0xEF},
-		{WCD9XXX_A_BUCK_MODE_2, 0xff, 0xEE},
-		{TOMTOM_A_NCP_DTEST, 0xff, 0x20},
-		{WCD9XXX_A_CDC_CLK_OTHR_CTL, 0xff, 0x21},
-		{WCD9XXX_A_CDC_RX2_B6_CTL, 0xff, 0x81},
-		{WCD9XXX_A_CDC_CLK_RX_B1_CTL, 0x02, 0x02},
-
-		{WCD9XXX_A_BUCK_MODE_2, 0xff, 0xAE},
-		{WCD9XXX_A_BUCK_MODE_2, 0xff, 0xAA},
-		{WCD9XXX_A_NCP_CLK, 0xff, 0x9C},
-		{WCD9XXX_A_NCP_CLK, 0xff, 0xFC},
-		{WCD9XXX_A_RX_COM_BIAS, 0xff, 0xA0},
-		{WCD9XXX_A_BUCK_MODE_3, 0xff, 0xC6},
-		{WCD9XXX_A_BUCK_MODE_4, 0xff, 0xE6},
-		{WCD9XXX_A_BUCK_MODE_5, 0xff, 0x02},
-		{WCD9XXX_A_BUCK_MODE_1, 0xff, 0xA1},
-		/* Add a delay of 1ms after this reg write */
-
-		{WCD9XXX_A_NCP_STATIC, 0xff, 0x28},
-		{WCD9XXX_A_NCP_EN, 0xff, 0xFF},
-		/* Add a delay of 1ms after this reg write */
-
-		/* set HPHL */
-		{WCD9XXX_A_RX_HPH_L_TEST, 0xff, 0x00},
-		{TOMTOM_A_RX_HPH_L_PA_CTL, 0xff, 0x42},
-		{TOMTOM_A_RX_HPH_BIAS_LDO, 0xff, 0x8C},
-		{TOMTOM_A_RX_HPH_CHOP_CTL, 0xff, 0xA4},
-		{WCD9XXX_A_RX_HPH_L_GAIN, 0xff, 0xE0},
-		{WCD9XXX_A_RX_HPH_L_GAIN, 0xff, 0xEC},
-
-		/* set HPHR */
-		{WCD9XXX_A_RX_HPH_R_TEST, 0xff, 0x00},
-		{TOMTOM_A_RX_HPH_R_PA_CTL, 0xff, 0x42},
-		{WCD9XXX_A_RX_HPH_R_GAIN, 0xff, 0x20},
-		{WCD9XXX_A_RX_HPH_R_GAIN, 0xff, 0x2C},
-
-		/* set HPH PAs */
-		{WCD9XXX_A_RX_HPH_BIAS_WG_OCP, 0xff, 0x2A},
-		{WCD9XXX_A_RX_HPH_CNP_WG_CTL, 0xff, 0xDA},
-		{WCD9XXX_A_RX_HPH_CNP_WG_TIME, 0xff, 0x15},
-		{WCD9XXX_A_CDC_CLSH_B1_CTL, 0xff, 0xE6},
-		{WCD9XXX_A_RX_HPH_L_DAC_CTL, 0xff, 0x40},
-		{WCD9XXX_A_RX_HPH_L_DAC_CTL, 0xff, 0xC0},
-		{WCD9XXX_A_RX_HPH_R_DAC_CTL, 0xff, 0x40},
-		{WCD9XXX_A_RX_HPH_R_DAC_CTL, 0xff, 0xC0},
-
-		{TOMTOM_A_RX_HPH_L_ATEST, 0xff, 0x00},
-		{TOMTOM_A_RX_HPH_R_ATEST, 0xff, 0x00},
-	};
-
-	for (i = 0; i < ARRAY_SIZE(reg_set_paon); i++) {
-		/*
-		 * Some codec registers, such as BUCK_MODE_1 and
-		 * NCP_EN, require a 1 ms wait time to take effect.
-		 * Other register writes for PA configuration do not
-		 * require any wait time.
-		 */
-		if (reg_set_paon[i].reg == WCD9XXX_A_BUCK_MODE_1 ||
-		    reg_set_paon[i].reg == WCD9XXX_A_NCP_EN)
-			delay = 1000;
-		else
-			delay = 0;
-		wcd9xxx_soc_update_bits_push(codec, lh,
-					     reg_set_paon[i].reg,
-					     reg_set_paon[i].mask,
-					     reg_set_paon[i].val, delay);
-	}
-	pr_debug("%s: PAs are prepared\n", __func__);
-
-	return 0;
-}
-
-static int wcd9xxx_enable_static_pa(struct wcd9xxx_mbhc *mbhc, bool enable,
-				    u8 hph_pa)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-	const int wg_time = snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_WG_TIME) *
-			    TOMTOM_WG_TIME_FACTOR_US;
-	u8 mask = (hph_pa << 4);
-	u8 pa_en = enable ? mask : ~mask;
-
-	snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CNP_EN, mask, pa_en);
-	/* Wait for wave gen time to avoid pop noise */
-	usleep_range(wg_time, wg_time + WCD9XXX_USLEEP_RANGE_MARGIN_US);
-	pr_debug("%s: PAs are %s as static mode (wg_time %d)\n", __func__,
-		 enable ? "enabled" : "disabled", wg_time);
-	return 0;
-}
-
-static int tomtom_setup_zdet(struct wcd9xxx_mbhc *mbhc,
-			    enum mbhc_impedance_detect_stages stage)
-{
-	int ret = 0;
-	struct snd_soc_codec *codec = mbhc->codec;
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-#define __wr(reg, mask, value)						  \
-	do {								  \
-		ret = wcd9xxx_soc_update_bits_push(codec,		  \
-						   &tomtom->reg_save_restore, \
-						   reg, mask, value, 0);  \
-		if (ret < 0)						  \
-			return ret;					  \
-	} while (0)
-
-	switch (stage) {
-
-	case MBHC_ZDET_PRE_MEASURE:
-		INIT_LIST_HEAD(&tomtom->reg_save_restore);
-		wcd9xxx_prepare_static_pa(mbhc, &tomtom->reg_save_restore);
-		/* Set HPH_MBHC for zdet */
-		__wr(WCD9XXX_A_MBHC_HPH, 0xff, 0xC4);
-		usleep_range(10, 10 + WCD9XXX_USLEEP_RANGE_MARGIN_US);
-		wcd9xxx_enable_static_pa(mbhc, HPH_PA_ENABLE, HPH_PA_L_R);
-
-		/* save old value of registers and write the new value */
-		__wr(WCD9XXX_A_RX_HPH_OCP_CTL, 0xff, 0x69);
-		__wr(WCD9XXX_A_CDC_RX1_B6_CTL, 0xff, 0x80);
-		__wr(WCD9XXX_A_CDC_RX2_B6_CTL, 0xff, 0x80);
-		/* Enable MBHC MUX, Set MUX current to 37.5uA and ADC7 */
-		__wr(WCD9XXX_A_MBHC_SCALING_MUX_1, 0xff, 0xC0);
-		__wr(WCD9XXX_A_MBHC_SCALING_MUX_2, 0xff, 0xF0);
-		__wr(TOMTOM_A_TX_7_TXFE_CLKDIV, 0xff, 0x8B);
-		__wr(WCD9XXX_A_TX_7_MBHC_TEST_CTL, 0xff, 0x78);
-		__wr(WCD9XXX_A_TX_7_MBHC_EN, 0xff, 0x8C);
-		__wr(WCD9XXX_A_CDC_MBHC_B1_CTL, 0xff, 0xDC);
-		/* Reset MBHC and set it up for STA */
-		__wr(WCD9XXX_A_CDC_MBHC_CLK_CTL, 0xff, 0x0A);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x00);
-		__wr(WCD9XXX_A_CDC_MBHC_CLK_CTL, 0xff, 0x02);
-		__wr(WCD9XXX_A_CDC_MBHC_TIMER_B5_CTL, 0xff, 0x80);
-		__wr(WCD9XXX_A_CDC_MBHC_TIMER_B4_CTL, 0xff, 0x25);
-		/* Wait for ~50us to let MBHC hardware settle down */
-		usleep_range(50, 50 + WCD9XXX_USLEEP_RANGE_MARGIN_US);
-		break;
-	case MBHC_ZDET_POST_MEASURE:
-		/* 0x69 selects 105 samples for the PA ramp */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B4_CTL, 0x69);
-		/* Program the PA Ramp to FS_16K, L shift 1 */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B3_CTL,
-			      0x1 << 4 | 0x6);
-		/* Reset the PA Ramp */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x1C);
-		/*
-		 * Connect the PA ramp to the PA chain and release reset
-		 * while keeping it connected.
-		 */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x1F);
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x03);
-
-		/* Start the PA ramp on HPH L and R */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x05);
-		/* Ramp generator takes ~30ms */
-		usleep_range(TOMTOM_HPH_PA_RAMP_DELAY,
-			     TOMTOM_HPH_PA_RAMP_DELAY +
-			     WCD9XXX_USLEEP_RANGE_MARGIN_US);
-
-		/*
-		 * Set the multiplication factor for zdet calculation
-		 * based on the Ramp voltage and Gain used
-		 */
-		tomtom->zdet_gain_mul_fact = TOMTOM_ZDET_MUL_FACTOR_1X;
-		break;
-	case MBHC_ZDET_GAIN_0:
-		/* Set Gain at 1x */
-		snd_soc_write(codec, TOMTOM_A_RX_HPH_L_ATEST, 0x00);
-		snd_soc_write(codec, TOMTOM_A_RX_HPH_R_ATEST, 0x00);
-		snd_soc_write(codec, TOMTOM_A_RX_HPH_L_PA_CTL, 0x42);
-		/* Allow 100us for gain registers to settle */
-		usleep_range(100,
-			     100 + WCD9XXX_USLEEP_RANGE_MARGIN_US);
-		break;
-	case MBHC_ZDET_GAIN_UPDATE_1X:
-		/*
-		 * Set the multiplication factor for zdet calculation
-		 * based on the Gain value used
-		 */
-		tomtom->zdet_gain_mul_fact = TOMTOM_ZDET_MUL_FACTOR_1X;
-		break;
-	case MBHC_ZDET_GAIN_1:
-		/* Set Gain at 10x */
-		snd_soc_write(codec, TOMTOM_A_RX_HPH_L_ATEST, 0x10);
-		snd_soc_write(codec, TOMTOM_A_RX_HPH_R_ATEST, 0x00);
-		snd_soc_write(codec, TOMTOM_A_RX_HPH_L_PA_CTL, 0x42);
-		/* Allow 100us for gain registers to settle */
-		usleep_range(100,
-			     100 + WCD9XXX_USLEEP_RANGE_MARGIN_US);
-
-		/*
-		 * Set the multiplication factor for zdet calculation
-		 * based on the Gain value used
-		 */
-		tomtom->zdet_gain_mul_fact = TOMTOM_ZDET_MUL_FACTOR_10X;
-		break;
-	case MBHC_ZDET_GAIN_2:
-		/* Set Gain at 100x */
-		snd_soc_write(codec, TOMTOM_A_RX_HPH_L_ATEST, 0x00);
-		snd_soc_write(codec, TOMTOM_A_RX_HPH_R_ATEST, 0x10);
-		snd_soc_write(codec, TOMTOM_A_RX_HPH_L_PA_CTL, 0x43);
-		/* Allow 100us for gain registers to settle */
-		usleep_range(100,
-			     100 + WCD9XXX_USLEEP_RANGE_MARGIN_US);
-
-		/*
-		 * Set the multiplication factor for zdet calculation
-		 * based on the Gain value used
-		 */
-		tomtom->zdet_gain_mul_fact = TOMTOM_ZDET_MUL_FACTOR_100X;
-		break;
-	case MBHC_ZDET_RAMP_DISABLE:
-		/* Ramp HPH L & R back to Zero */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x00);
-		/* 0x69 selects 105 samples for the PA ramp */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B4_CTL, 0x69);
-		/* Program the PA Ramp to FS_16K, L shift 1 */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B3_CTL,
-			      0x1 << 4 | 0x6);
-		/* Reset the PA Ramp */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x17);
-		/*
-		 * Connect the PA ramp to the PA chain and release reset
-		 * while keeping it connected.
-		 */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x03);
-		/* Start the PA ramp on HPH L and R */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x0A);
-		/* Ramp generator takes ~30ms to settle down */
-		usleep_range(TOMTOM_HPH_PA_RAMP_DELAY,
-			     TOMTOM_HPH_PA_RAMP_DELAY +
-			     WCD9XXX_USLEEP_RANGE_MARGIN_US);
-		break;
-	case MBHC_ZDET_HPHR_RAMP_DISABLE:
-		/* Ramp HPHR back to Zero */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x00);
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B4_CTL, 0x69);
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B3_CTL,
-			      0x1 << 4 | 0x6);
-		/* Reset the PA Ramp */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x17);
-		/*
-		 * Connect the PA ramp to the PA chain and release reset
-		 * while keeping it connected.
-		 */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x03);
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x08);
-		/* Ramp generator takes ~30ms to settle down */
-		usleep_range(TOMTOM_HPH_PA_RAMP_DELAY,
-			     TOMTOM_HPH_PA_RAMP_DELAY +
-			     WCD9XXX_USLEEP_RANGE_MARGIN_US);
-		break;
-	case MBHC_ZDET_HPHL_RAMP_DISABLE:
-		/* Ramp back to Zero */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x00);
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B4_CTL, 0x69);
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B3_CTL,
-			      0x1 << 4 | 0x6);
-		/* Reset the PA Ramp */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x17);
-		/*
-		 * Connect the PA ramp to the PA chain and release reset
-		 * while keeping it connected.
-		 */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x03);
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x02);
-		/* Ramp generator takes ~30ms to settle down */
-		usleep_range(TOMTOM_HPH_PA_RAMP_DELAY,
-			     TOMTOM_HPH_PA_RAMP_DELAY +
-			     WCD9XXX_USLEEP_RANGE_MARGIN_US);
-		break;
-	case MBHC_ZDET_HPHR_PA_DISABLE:
-		/* Disable PA */
-		wcd9xxx_enable_static_pa(mbhc, HPH_PA_DISABLE, HPH_PA_R);
-		break;
-	case MBHC_ZDET_PA_DISABLE:
-		/* Disable PA */
-		if (!mbhc->hph_pa_dac_state &&
-			(!(test_bit(MBHC_EVENT_PA_HPHL, &mbhc->event_state) ||
-			test_bit(MBHC_EVENT_PA_HPHR, &mbhc->event_state))))
-			wcd9xxx_enable_static_pa(mbhc, HPH_PA_DISABLE,
-						 HPH_PA_L_R);
-		else if (!(snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_EN) & 0x10))
-			wcd9xxx_enable_static_pa(mbhc, HPH_PA_ENABLE, HPH_PA_R);
-
-		/* Turn off PA ramp generator */
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x00);
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x00);
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B3_CTL, 0x00);
-		snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B4_CTL, 0x00);
-
-		/* Restore registers */
-		wcd9xxx_restore_registers(codec, &tomtom->reg_save_restore);
-		break;
-	}
-#undef __wr
-
-	return ret;
-}
-
-/* Calculate final impedance values for HPH left and right based on formulae */
-static void tomtom_compute_impedance(struct wcd9xxx_mbhc *mbhc, s16 *l, s16 *r,
-				     uint32_t *zl, uint32_t *zr)
-{
-	s64 zln, zrn;
-	int zld, zrd;
-	s64 rl = 0, rr = 0;
-	struct snd_soc_codec *codec;
-	struct tomtom_priv *tomtom;
-
-	if (!mbhc) {
-		pr_err("%s: Invalid parameters mbhc = %pK\n",
-			__func__,  mbhc);
-		return;
-	}
-	codec = mbhc->codec;
-	tomtom = snd_soc_codec_get_drvdata(codec);
-
-	if (l && zl) {
-		zln = (s64) (l[1] - l[0]) * tomtom->zdet_gain_mul_fact;
-		zld = (l[2] - l[0]);
-		if (zld)
-			rl = div_s64(zln, zld);
-		else
-			/* If L0 and L2 are the same, Z has to be in Zone 3.
-			 * Assign a default value so that at least the value
-			 * is read again with ramp-up.
-			 */
-			rl = TOMTOM_ZDET_ZONE_3_DEFAULT_VAL;
-
-		/* 32-bit LSBs are enough to hold Impedance values */
-		*zl = (u32) rl;
-	}
-	if (r && zr) {
-		zrn = (s64) (r[1] - r[0]) * tomtom->zdet_gain_mul_fact;
-		zrd = (r[2] - r[0]);
-		if (zrd)
-			rr = div_s64(zrn, zrd);
-		else
-			/* If R0 and R2 are the same, Z has to be in Zone 3.
-			 * Assign a default value so that at least the value
-			 * is read again with ramp-up.
-			 */
-			rr = TOMTOM_ZDET_ZONE_3_DEFAULT_VAL;
-
-		/* 32-bit LSBs are enough to hold Impedance values */
-		*zr = (u32) rr;
-	}
-}
-
-/*
- * Calculate error approximation of impedance values for HPH left
- * and HPH right based on QFuse values
- */
-static void tomtom_zdet_error_approx(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
-				     uint32_t *zr)
-{
-	struct snd_soc_codec *codec;
-	struct tomtom_priv *tomtom;
-	s8 q1_t, q2_t;
-	s8 q1_m, q2_m;
-	s8 q1, q2;
-	u8 div_shift;
-	int rl_alpha = 0, rr_alpha = 0;
-	int rl_beta = 0, rr_beta = 0;
-	u64 rl = 0, rr = 0;
-	const int mult_factor = TOMTOM_ZDET_ERROR_APPROX_MUL_FACTOR;
-	const int shift = TOMTOM_ZDET_ERROR_APPROX_SHIFT;
-
-	if (!zl || !zr || !mbhc) {
-		pr_err("%s: Invalid parameters zl = %pK zr = %pK, mbhc = %pK\n",
-			__func__, zl, zr, mbhc);
-		return;
-	}
-	codec = mbhc->codec;
-	tomtom = snd_soc_codec_get_drvdata(codec);
-
-	if ((tomtom->zdet_gain_mul_fact == TOMTOM_ZDET_MUL_FACTOR_1X) ||
-	    (tomtom->zdet_gain_mul_fact == TOMTOM_ZDET_MUL_FACTOR_10X)) {
-		q1_t = ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT0) &
-			0x3) << 0x5);
-		q1_t |= ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT1) &
-			0xF8) >> 0x3);
-		q2_t = ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT1) &
-			0x7) << 0x4);
-		q2_t |= ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT2) &
-			0xF0) >> 0x4);
-		/* Take out the numeric part of the Qfuse value */
-		q1_m = q1_t & 0x3F;
-		q2_m = q2_t & 0x3F;
-		/* Check the sign part of the Qfuse and adjust value */
-		q1 = (q1_t & 0x40) ? -q1_m : q1_m;
-		q2 = (q2_t & 0x40) ? -q2_m : q2_m;
-		div_shift = 1;
-	} else {
-		q1_t = ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT2) &
-			0xF) << 0x2);
-		q1_t |= ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT3) &
-			0xC0) >> 0x6);
-		q2_t = (snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT3) & 0x3F);
-		/* Take out the numeric part of the Qfuse value */
-		q1_m = q1_t & 0x1F;
-		q2_m = q2_t & 0x1F;
-		/* Check the sign part of the Qfuse and adjust value */
-		q1 = (q1_t & 0x20) ? -q1_m : q1_m;
-		q2 = (q2_t & 0x20) ? -q2_m : q2_m;
-		div_shift = 0;
-	}
-
-	dev_dbg(codec->dev, "%s: qfuse1 = %d, qfuse2 = %d\n",
-		__func__, q1, q2);
-	if (!q1 && !q2) {
-		dev_dbg(codec->dev, "%s: qfuse1 and qfuse2 are 0. Exiting\n",
-			__func__);
-		return;
-	}
-
-	/*
-	 * Use multiplication and shift to avoid floating point math.
-	 * The Z value is calculated with the formulae below, using
-	 * the Qfuse values:
-	 * zl = zl * [1 - {(Q1 / div) / 100}] (Include sign for Q1)
-	 * zr = zr * [1 - {(Q2 / div) / 100}] (Include sign for Q2)
-	 * We multiply by 65536 and shift 16 times to get the approx result
-	 * div = 4 for 1x gain, div = 2 for 10x/100x gain
-	 */
-	/* Q1/4 */
-	rl_alpha = q1 >> div_shift;
-	rl_alpha = 100 - rl_alpha;
-	/* {rl_alpha/100} * 65536 */
-	rl_beta = rl_alpha * mult_factor;
-	rl = (u64) *zl * rl_beta;
-	/* rl/65536 */
-	rl = (u64) rl >> shift;
-
-	rr_alpha = q2 >> div_shift;
-	rr_alpha = 100 - rr_alpha;
-	rr_beta = rr_alpha * mult_factor;
-	rr = (u64) *zr * rr_beta;
-	rr = (u64) rr >> shift;
-
-	dev_dbg(codec->dev, "%s: rl = 0x%llx (%lld) \t rr = 0x%llx (%lld)\n",
-		__func__, rl, rl, rr, rr);
-
-	*zl = (u32) rl;
-	*zr = (u32) rr;
-}
-
-static enum wcd9xxx_cdc_type tomtom_get_cdc_type(void)
-{
-	return WCD9XXX_CDC_TYPE_TOMTOM;
-}
-
-static bool tomtom_mbhc_ins_rem_status(struct snd_soc_codec *codec)
-{
-	return !(snd_soc_read(codec, WCD9XXX_A_MBHC_INSERT_DET_STATUS) &
-			    (1 << 4));
-}
-
-static void tomtom_mbhc_micb_pulldown_ctrl(struct wcd9xxx_mbhc *mbhc,
-					   bool enable)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	if (!enable) {
-		/* Remove automatic pulldown on micbias */
-		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl,
-				    0x01, 0x00);
-	} else {
-		/* Enable automatic pulldown on micbias */
-		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl,
-				    0x01, 0x01);
-	}
-}
-
-static void tomtom_codec_hph_auto_pull_down(struct snd_soc_codec *codec,
-					    bool enable)
-{
-	struct wcd9xxx *tomtom_core = dev_get_drvdata(codec->dev->parent);
-
-	if (TOMTOM_IS_1_0(tomtom_core->version))
-		return;
-
-	dev_dbg(codec->dev, "%s: %s auto pull down\n", __func__,
-			enable ? "enable" : "disable");
-	if (enable) {
-		snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_L_TEST, 0x08, 0x08);
-		snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_R_TEST, 0x08, 0x08);
-	} else {
-		snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_L_TEST, 0x08, 0x00);
-		snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_R_TEST, 0x08, 0x00);
-	}
-}
-
-static const struct wcd9xxx_mbhc_cb mbhc_cb = {
-	.get_cdc_type = tomtom_get_cdc_type,
-	.setup_zdet = tomtom_setup_zdet,
-	.compute_impedance = tomtom_compute_impedance,
-	.zdet_error_approx = tomtom_zdet_error_approx,
-	.insert_rem_status = tomtom_mbhc_ins_rem_status,
-	.micbias_pulldown_ctrl = tomtom_mbhc_micb_pulldown_ctrl,
-	.codec_rco_ctrl = tomtom_codec_internal_rco_ctrl,
-	.hph_auto_pulldown_ctrl = tomtom_codec_hph_auto_pull_down,
-	.get_hwdep_fw_cal = tomtom_get_hwdep_fw_cal,
-};
-
-static const struct wcd9xxx_mbhc_intr cdc_intr_ids = {
-	.poll_plug_rem = WCD9XXX_IRQ_MBHC_REMOVAL,
-	.shortavg_complete = WCD9XXX_IRQ_MBHC_SHORT_TERM,
-	.potential_button_press = WCD9XXX_IRQ_MBHC_PRESS,
-	.button_release = WCD9XXX_IRQ_MBHC_RELEASE,
-	.dce_est_complete = WCD9XXX_IRQ_MBHC_POTENTIAL,
-	.insertion = WCD9XXX_IRQ_MBHC_INSERTION,
-	.hph_left_ocp = WCD9XXX_IRQ_HPH_PA_OCPL_FAULT,
-	.hph_right_ocp = WCD9XXX_IRQ_HPH_PA_OCPR_FAULT,
-	.hs_jack_switch = WCD9330_IRQ_MBHC_JACK_SWITCH,
-};
-
-static int tomtom_post_reset_cb(struct wcd9xxx *wcd9xxx)
-{
-	int ret = 0;
-	struct snd_soc_codec *codec;
-	struct tomtom_priv *tomtom;
-	int rco_clk_rate;
-
-	codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
-	tomtom = snd_soc_codec_get_drvdata(codec);
-
-	snd_soc_card_change_online_state(codec->component.card, 1);
-	clear_bit(BUS_DOWN, &tomtom->status_mask);
-
-	mutex_lock(&tomtom->codec_mutex);
-
-	tomtom_update_reg_defaults(codec);
-	if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_12P288MHZ)
-		snd_soc_update_bits(codec, TOMTOM_A_CHIP_CTL, 0x06, 0x0);
-	else if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_9P6MHZ)
-		snd_soc_update_bits(codec, TOMTOM_A_CHIP_CTL, 0x06, 0x2);
-	tomtom_codec_init_reg(codec);
-
-	snd_soc_cache_sync(codec);
-
-	ret = tomtom_handle_pdata(tomtom);
-	if (ret < 0)
-		pr_err("%s: bad pdata\n", __func__);
-
-	tomtom_init_slim_slave_cfg(codec);
-	tomtom_slim_interface_init_reg(codec);
-	wcd_cpe_ssr_event(tomtom->cpe_core, WCD_CPE_BUS_UP_EVENT);
-	wcd9xxx_resmgr_post_ssr(&tomtom->resmgr);
-
-	if (tomtom->mbhc_started) {
-		wcd9xxx_mbhc_deinit(&tomtom->mbhc);
-		tomtom->mbhc_started = false;
-
-		if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_12P288MHZ)
-			rco_clk_rate = TOMTOM_MCLK_CLK_12P288MHZ;
-		else
-			rco_clk_rate = TOMTOM_MCLK_CLK_9P6MHZ;
-
-		ret = wcd9xxx_mbhc_init(&tomtom->mbhc, &tomtom->resmgr, codec,
-					tomtom_enable_mbhc_micbias,
-					&mbhc_cb, &cdc_intr_ids,
-					rco_clk_rate, TOMTOM_ZDET_SUPPORTED);
-		if (ret)
-			pr_err("%s: mbhc init failed %d\n", __func__, ret);
-		else
-			tomtom_hs_detect(codec, tomtom->mbhc.mbhc_cfg);
-	}
-
-	if (tomtom->machine_codec_event_cb)
-		tomtom->machine_codec_event_cb(codec,
-				       WCD9XXX_CODEC_EVENT_CODEC_UP);
-
-	tomtom_cleanup_irqs(tomtom);
-	ret = tomtom_setup_irqs(tomtom);
-	if (ret)
-		pr_err("%s: Failed to setup irq: %d\n", __func__, ret);
-
-	/*
-	 * After SSR, the qfuse sensing is lost.
-	 * Perform qfuse sensing again after SSR
-	 * handling is finished.
-	 */
-	tomtom_enable_qfuse_sensing(codec);
-	mutex_unlock(&tomtom->codec_mutex);
-	return ret;
-}
-
-void *tomtom_get_afe_config(struct snd_soc_codec *codec,
-			   enum afe_config_type config_type)
-{
-	struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-
-	switch (config_type) {
-	case AFE_SLIMBUS_SLAVE_CONFIG:
-		return &priv->slimbus_slave_cfg;
-	case AFE_CDC_REGISTERS_CONFIG:
-		return &tomtom_audio_reg_cfg;
-	case AFE_SLIMBUS_SLAVE_PORT_CONFIG:
-		return &tomtom_slimbus_slave_port_cfg;
-	case AFE_AANC_VERSION:
-		return &tomtom_cdc_aanc_version;
-	case AFE_CLIP_BANK_SEL:
-		return &clip_bank_sel;
-	case AFE_CDC_CLIP_REGISTERS_CONFIG:
-		return &tomtom_clip_reg_cfg;
-	default:
-		pr_err("%s: Unknown config_type 0x%x\n", __func__, config_type);
-		return NULL;
-	}
-}
-
-static struct wcd9xxx_reg_address tomtom_reg_address = {
-	.micb_4_mbhc = TOMTOM_A_MICB_4_MBHC,
-	.micb_4_int_rbias = TOMTOM_A_MICB_4_INT_RBIAS,
-	.micb_4_ctl = TOMTOM_A_MICB_4_CTL,
-};
-
-static int wcd9xxx_ssr_register(struct wcd9xxx *control,
-				int (*device_down_cb)(struct wcd9xxx *wcd9xxx),
-				int (*device_up_cb)(struct wcd9xxx *wcd9xxx),
-				void *priv)
-{
-	control->dev_down = device_down_cb;
-	control->post_reset = device_up_cb;
-	control->ssr_priv = priv;
-	return 0;
-}
-
-static const struct snd_soc_dapm_widget tomtom_1_dapm_widgets[] = {
-	SND_SOC_DAPM_ADC_E("ADC1", NULL, TOMTOM_A_TX_1_GAIN, 7, 0,
-			   tomtom_codec_enable_adc,
-			   SND_SOC_DAPM_PRE_PMU |
-			   SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_ADC_E("ADC2", NULL, TOMTOM_A_TX_2_GAIN, 7, 0,
-			   tomtom_codec_enable_adc,
-			   SND_SOC_DAPM_PRE_PMU |
-			   SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_ADC_E("ADC3", NULL, TOMTOM_A_TX_3_GAIN, 7, 0,
-			   tomtom_codec_enable_adc,
-			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-			   SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_ADC_E("ADC4", NULL, TOMTOM_A_TX_4_GAIN, 7, 0,
-			   tomtom_codec_enable_adc,
-			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-			   SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_ADC_E("ADC5", NULL, TOMTOM_A_TX_5_GAIN, 7, 0,
-			   tomtom_codec_enable_adc,
-			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-			   SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_ADC_E("ADC6", NULL, TOMTOM_A_TX_6_GAIN, 7, 0,
-			   tomtom_codec_enable_adc,
-			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
-			   SND_SOC_DAPM_POST_PMD),
-};
-
-static struct regulator *tomtom_codec_find_regulator(struct snd_soc_codec *cdc,
-						    const char *name)
-{
-	int i;
-	struct wcd9xxx *core = dev_get_drvdata(cdc->dev->parent);
-
-	for (i = 0; i < core->num_of_supplies; i++) {
-		if (core->supplies[i].supply &&
-		    !strcmp(core->supplies[i].supply, name))
-			return core->supplies[i].consumer;
-	}
-
-	return NULL;
-}
-
-static struct wcd_cpe_core *tomtom_codec_get_cpe_core(
-		struct snd_soc_codec *codec)
-{
-	struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-
-	return priv->cpe_core;
-}
-
-static int tomtom_codec_fll_enable(struct snd_soc_codec *codec,
-				   bool enable)
-{
-	struct wcd9xxx *wcd9xxx;
-
-	if (!codec || !codec->control_data) {
-		pr_err("%s: Invalid codec handle, %pK\n",
-		       __func__, codec);
-		return -EINVAL;
-	}
-
-	wcd9xxx = codec->control_data;
-
-	dev_dbg(codec->dev, "%s: %s, mclk_rate = %d\n",
-		__func__, (enable ? "enable" : "disable"),
-		wcd9xxx->mclk_rate);
-
-	switch (wcd9xxx->mclk_rate) {
-	case TOMTOM_MCLK_CLK_9P6MHZ:
-		snd_soc_update_bits(codec, TOMTOM_A_FLL_NREF,
-				    0x1F, 0x15);
-		snd_soc_update_bits(codec, TOMTOM_A_FLL_KDCO_TUNE,
-				    0x07, 0x06);
-		snd_soc_write(codec, TOMTOM_A_FLL_LOCK_THRESH, 0xD1);
-		snd_soc_write(codec, TOMTOM_A_FLL_LOCK_DET_COUNT,
-			      0x40);
-		break;
-	case TOMTOM_MCLK_CLK_12P288MHZ:
-		snd_soc_update_bits(codec, TOMTOM_A_FLL_NREF,
-				    0x1F, 0x11);
-		snd_soc_update_bits(codec, TOMTOM_A_FLL_KDCO_TUNE,
-				    0x07, 0x05);
-		snd_soc_write(codec, TOMTOM_A_FLL_LOCK_THRESH, 0xB1);
-		snd_soc_write(codec, TOMTOM_A_FLL_LOCK_DET_COUNT,
-			      0x40);
-		break;
-	}
-
-	return 0;
-}
-
-static int tomtom_codec_slim_reserve_bw(struct snd_soc_codec *codec,
-		u32 bw_ops, bool commit)
-{
-	struct wcd9xxx *wcd9xxx;
-
-	if (!codec) {
-		pr_err("%s: Invalid handle to codec\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	wcd9xxx = dev_get_drvdata(codec->dev->parent);
-
-	if (!wcd9xxx) {
-		dev_err(codec->dev, "%s: Invalid parent drv_data\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	return wcd9xxx_slim_reserve_bw(wcd9xxx, bw_ops, commit);
-}
-
-static int tomtom_codec_vote_max_bw(struct snd_soc_codec *codec,
-			bool vote)
-{
-	u32 bw_ops;
-
-	if (vote)
-		bw_ops = SLIM_BW_CLK_GEAR_9;
-	else
-		bw_ops = SLIM_BW_UNVOTE;
-
-	return tomtom_codec_slim_reserve_bw(codec,
-			bw_ops, true);
-}
-
-static const struct wcd9xxx_resmgr_cb resmgr_cb = {
-	.cdc_rco_ctrl = tomtom_codec_internal_rco_ctrl,
-};
-
-static int tomtom_cpe_err_irq_control(struct snd_soc_codec *codec,
-	enum cpe_err_irq_cntl_type cntl_type, u8 *status)
-{
-	switch (cntl_type) {
-	case CPE_ERR_IRQ_MASK:
-		snd_soc_update_bits(codec,
-				    TOMTOM_A_SVASS_INT_MASK,
-				    0x3F, 0x3F);
-		break;
-	case CPE_ERR_IRQ_UNMASK:
-		snd_soc_update_bits(codec,
-				    TOMTOM_A_SVASS_INT_MASK,
-				    0x3F, 0x0C);
-		break;
-	case CPE_ERR_IRQ_CLEAR:
-		snd_soc_update_bits(codec,
-				    TOMTOM_A_SVASS_INT_CLR,
-				    0x3F, 0x3F);
-		break;
-	case CPE_ERR_IRQ_STATUS:
-		if (!status)
-			return -EINVAL;
-		*status = snd_soc_read(codec,
-				       TOMTOM_A_SVASS_INT_STATUS);
-		break;
-	}
-
-	return 0;
-}
-
-static const struct wcd_cpe_cdc_cb cpe_cb = {
-	.cdc_clk_en = tomtom_codec_internal_rco_ctrl,
-	.cpe_clk_en = tomtom_codec_fll_enable,
-	.lab_cdc_ch_ctl = tomtom_codec_enable_slimtx_mad,
-	.cdc_ext_clk = tomtom_codec_ext_clk_en,
-	.bus_vote_bw = tomtom_codec_vote_max_bw,
-	.cpe_err_irq_control = tomtom_cpe_err_irq_control,
-};
-
-static struct cpe_svc_init_param cpe_svc_params = {
-	.version = 0,
-	.query_freq_plans_cb = NULL,
-	.change_freq_plan_cb = NULL,
-};
-
-static int tomtom_cpe_initialize(struct snd_soc_codec *codec)
-{
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	struct wcd_cpe_params cpe_params;
-
-	memset(&cpe_params, 0,
-	       sizeof(struct wcd_cpe_params));
-	cpe_params.codec = codec;
-	cpe_params.get_cpe_core = tomtom_codec_get_cpe_core;
-	cpe_params.cdc_cb = &cpe_cb;
-	cpe_params.dbg_mode = cpe_debug_mode;
-	cpe_params.cdc_major_ver = CPE_SVC_CODEC_TOMTOM;
-	cpe_params.cdc_minor_ver = CPE_SVC_CODEC_V1P0;
-	cpe_params.cdc_id = CPE_SVC_CODEC_TOMTOM;
-
-	cpe_params.cdc_irq_info.cpe_engine_irq =
-			WCD9330_IRQ_SVASS_ENGINE;
-	cpe_params.cdc_irq_info.cpe_err_irq =
-			WCD9330_IRQ_SVASS_ERR_EXCEPTION;
-	cpe_params.cdc_irq_info.cpe_fatal_irqs =
-			TOMTOM_CPE_FATAL_IRQS;
-
-	cpe_svc_params.context = codec;
-	cpe_params.cpe_svc_params = &cpe_svc_params;
-
-	tomtom->cpe_core = wcd_cpe_init("cpe", codec,
-					&cpe_params);
-	if (IS_ERR_OR_NULL(tomtom->cpe_core)) {
-		dev_err(codec->dev,
-			"%s: Failed to enable CPE\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int tomtom_codec_probe(struct snd_soc_codec *codec)
-{
-	struct wcd9xxx *control;
-	struct tomtom_priv *tomtom;
-	struct wcd9xxx_pdata *pdata;
-	struct wcd9xxx *wcd9xxx;
-	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
-	int ret = 0;
-	int i, rco_clk_rate;
-	void *ptr = NULL;
-	struct wcd9xxx_core_resource *core_res;
-	struct clk *wcd_ext_clk = NULL;
-
-	dev_info(codec->dev, "%s()\n", __func__);
-
-	control = dev_get_drvdata(codec->dev->parent);
-
-	tomtom = snd_soc_codec_get_drvdata(codec);
-
-	wcd9xxx_ssr_register(control, tomtom_device_down,
-			     tomtom_post_reset_cb, (void *)codec);
-
-	for (i = 0; i < NUM_DECIMATORS; i++) {
-		tx_hpf_work[i].tomtom = tomtom;
-		tx_hpf_work[i].decimator = i + 1;
-		tx_hpf_work[i].tx_hpf_bypass = false;
-		INIT_DELAYED_WORK(&tx_hpf_work[i].dwork,
-			tx_hpf_corner_freq_callback);
-	}
-
-	wcd9xxx = control;
-	if (!of_find_property(wcd9xxx->dev->of_node, "clock-names", NULL)) {
-		dev_dbg(wcd9xxx->dev, "%s: codec not using audio-ext-clk driver\n",
-			__func__);
-	} else {
-		wcd_ext_clk = clk_get(wcd9xxx->dev, "wcd_clk");
-		if (IS_ERR(wcd_ext_clk)) {
-			dev_err(codec->dev, "%s: clk get %s failed\n",
-					__func__, "wcd_ext_clk");
-			goto err_nomem_slimch;
-		}
-	}
-	tomtom->wcd_ext_clk = wcd_ext_clk;
-	core_res = &wcd9xxx->core_res;
-	pdata = dev_get_platdata(codec->dev->parent);
-	/* codec resmgr module init */
-	ret = wcd9xxx_resmgr_init(&tomtom->resmgr, codec, core_res, pdata,
-				  &pdata->micbias, &tomtom_reg_address,
-				  &resmgr_cb, WCD9XXX_CDC_TYPE_TOMTOM);
-	if (ret) {
-		pr_err("%s: wcd9xxx init failed %d\n", __func__, ret);
-		goto err_nomem_slimch;
-	}
-
-	tomtom->clsh_d.buck_mv = tomtom_codec_get_buck_mv(codec);
-	/* TomTom does not support dynamic switching of vdd_cp */
-	tomtom->clsh_d.is_dynamic_vdd_cp = false;
-	wcd9xxx_clsh_init(&tomtom->clsh_d, &tomtom->resmgr);
-
-	if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_12P288MHZ)
-		rco_clk_rate = TOMTOM_MCLK_CLK_12P288MHZ;
-	else
-		rco_clk_rate = TOMTOM_MCLK_CLK_9P6MHZ;
-
-	tomtom->fw_data = kzalloc(sizeof(*(tomtom->fw_data)), GFP_KERNEL);
-	if (!tomtom->fw_data)
-		goto err_nomem_slimch;
-	set_bit(WCD9XXX_ANC_CAL, tomtom->fw_data->cal_bit);
-	set_bit(WCD9XXX_MAD_CAL, tomtom->fw_data->cal_bit);
-	set_bit(WCD9XXX_MBHC_CAL, tomtom->fw_data->cal_bit);
-	ret = wcd_cal_create_hwdep(tomtom->fw_data,
-				WCD9XXX_CODEC_HWDEP_NODE, codec);
-	if (ret < 0) {
-		dev_err(codec->dev, "%s hwdep failed %d\n", __func__, ret);
-		goto err_hwdep;
-	}
-
-	/* init and start mbhc */
-	ret = wcd9xxx_mbhc_init(&tomtom->mbhc, &tomtom->resmgr, codec,
-				tomtom_enable_mbhc_micbias,
-				&mbhc_cb, &cdc_intr_ids,
-				rco_clk_rate, TOMTOM_ZDET_SUPPORTED);
-	if (ret) {
-		pr_err("%s: mbhc init failed %d\n", __func__, ret);
-		goto err_hwdep;
-	}
-
-	tomtom->codec = codec;
-	for (i = 0; i < COMPANDER_MAX; i++) {
-		tomtom->comp_enabled[i] = 0;
-		tomtom->comp_fs[i] = COMPANDER_FS_48KHZ;
-	}
-	tomtom->intf_type = wcd9xxx_get_intf_type();
-	tomtom->aux_pga_cnt = 0;
-	tomtom->aux_l_gain = 0x1F;
-	tomtom->aux_r_gain = 0x1F;
-	tomtom->ldo_h_users = 0;
-	tomtom->micb_2_users = 0;
-	tomtom_update_reg_defaults(codec);
-	pr_debug("%s: MCLK Rate = %x\n", __func__, wcd9xxx->mclk_rate);
-	if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_12P288MHZ)
-		snd_soc_update_bits(codec, TOMTOM_A_CHIP_CTL, 0x06, 0x0);
-	else if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_9P6MHZ)
-		snd_soc_update_bits(codec, TOMTOM_A_CHIP_CTL, 0x06, 0x2);
-	tomtom_codec_init_reg(codec);
-
-	ret = tomtom_handle_pdata(tomtom);
-	if (ret < 0) {
-		pr_err("%s: bad pdata\n", __func__);
-		goto err_hwdep;
-	}
-
-	tomtom->spkdrv_reg = tomtom_codec_find_regulator(codec,
-					       WCD9XXX_VDD_SPKDRV_NAME);
-	tomtom->spkdrv2_reg = tomtom_codec_find_regulator(codec,
-					       WCD9XXX_VDD_SPKDRV2_NAME);
-
-	ptr = kmalloc((sizeof(tomtom_rx_chs) +
-		       sizeof(tomtom_tx_chs)), GFP_KERNEL);
-	if (!ptr) {
-		ret = -ENOMEM;
-		goto err_hwdep;
-	}
-
-	if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
-		snd_soc_dapm_new_controls(dapm, tomtom_dapm_i2s_widgets,
-			ARRAY_SIZE(tomtom_dapm_i2s_widgets));
-		snd_soc_dapm_add_routes(dapm, audio_i2s_map,
-			ARRAY_SIZE(audio_i2s_map));
-		for (i = 0; i < ARRAY_SIZE(tomtom_i2s_dai); i++)
-			INIT_LIST_HEAD(&tomtom->dai[i].wcd9xxx_ch_list);
-	} else if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
-		for (i = 0; i < NUM_CODEC_DAIS; i++) {
-			INIT_LIST_HEAD(&tomtom->dai[i].wcd9xxx_ch_list);
-			init_waitqueue_head(&tomtom->dai[i].dai_wait);
-		}
-		tomtom_slimbus_slave_port_cfg.slave_dev_intfdev_la =
-		    control->slim_slave->laddr;
-		tomtom_slimbus_slave_port_cfg.slave_dev_pgd_la =
-		    control->slim->laddr;
-		tomtom_slimbus_slave_port_cfg.slave_port_mapping[0] =
-		    TOMTOM_MAD_SLIMBUS_TX_PORT;
-
-		tomtom_init_slim_slave_cfg(codec);
-	}
-
-	snd_soc_dapm_new_controls(dapm, tomtom_1_dapm_widgets,
-			ARRAY_SIZE(tomtom_1_dapm_widgets));
-	snd_soc_add_codec_controls(codec,
-			tomtom_1_x_analog_gain_controls,
-			ARRAY_SIZE(tomtom_1_x_analog_gain_controls));
-
-	snd_soc_add_codec_controls(codec, impedance_detect_controls,
-				   ARRAY_SIZE(impedance_detect_controls));
-	snd_soc_add_codec_controls(codec, hph_type_detect_controls,
-				   ARRAY_SIZE(hph_type_detect_controls));
-
-	control->num_rx_port = TOMTOM_RX_MAX;
-	control->rx_chs = ptr;
-	memcpy(control->rx_chs, tomtom_rx_chs, sizeof(tomtom_rx_chs));
-	control->num_tx_port = TOMTOM_TX_MAX;
-	control->tx_chs = ptr + sizeof(tomtom_rx_chs);
-	memcpy(control->tx_chs, tomtom_tx_chs, sizeof(tomtom_tx_chs));
-
-	snd_soc_dapm_sync(dapm);
-
-	ret = tomtom_setup_irqs(tomtom);
-	if (ret) {
-		pr_err("%s: tomtom irq setup failed %d\n", __func__, ret);
-		goto err_pdata;
-	}
-
-	atomic_set(&kp_tomtom_priv, (unsigned long)tomtom);
-	mutex_lock(&tomtom->codec_mutex);
-	snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
-	snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
-	snd_soc_dapm_disable_pin(dapm, "ANC HEADPHONE");
-	snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
-	snd_soc_dapm_disable_pin(dapm, "ANC EAR");
-	mutex_unlock(&tomtom->codec_mutex);
-	snd_soc_dapm_sync(dapm);
-
-	codec->component.ignore_pmdown_time = 1;
-	ret = tomtom_cpe_initialize(codec);
-	if (ret) {
-		dev_info(codec->dev,
-			"%s: cpe initialization failed, ret = %d\n",
-			__func__, ret);
-		/* Do not fail probe if CPE failed */
-		ret = 0;
-	}
-	return ret;
-
-err_pdata:
-	kfree(ptr);
-	control->rx_chs = NULL;
-	control->tx_chs = NULL;
-err_hwdep:
-	kfree(tomtom->fw_data);
-	tomtom->fw_data = NULL;
-err_nomem_slimch:
-	devm_kfree(codec->dev, tomtom);
-	return ret;
-}
-static int tomtom_codec_remove(struct snd_soc_codec *codec)
-{
-	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-	struct wcd9xxx *control;
-
-	WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
-	atomic_set(&kp_tomtom_priv, 0);
-
-	WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
-
-	control = dev_get_drvdata(codec->dev->parent);
-	control->rx_chs = NULL;
-	control->tx_chs = NULL;
-
-	if (tomtom->wcd_ext_clk)
-		clk_put(tomtom->wcd_ext_clk);
-	tomtom_cleanup_irqs(tomtom);
-
-	/* cleanup MBHC */
-	wcd9xxx_mbhc_deinit(&tomtom->mbhc);
-	/* cleanup resmgr */
-	wcd9xxx_resmgr_deinit(&tomtom->resmgr);
-
-	tomtom->spkdrv_reg = NULL;
-	tomtom->spkdrv2_reg = NULL;
-
-	devm_kfree(codec->dev, tomtom);
-	return 0;
-}
-
-static struct regmap *tomtom_get_regmap(struct device *dev)
-{
-	struct wcd9xxx *control = dev_get_drvdata(dev->parent);
-
-	return control->regmap;
-}
-
-static struct snd_soc_codec_driver soc_codec_dev_tomtom = {
-	.probe = tomtom_codec_probe,
-	.remove = tomtom_codec_remove,
-	.get_regmap = tomtom_get_regmap,
-	.component_driver = {
-		.controls = tomtom_snd_controls,
-		.num_controls = ARRAY_SIZE(tomtom_snd_controls),
-		.dapm_widgets = tomtom_dapm_widgets,
-		.num_dapm_widgets = ARRAY_SIZE(tomtom_dapm_widgets),
-		.dapm_routes = audio_map,
-		.num_dapm_routes = ARRAY_SIZE(audio_map),
-	},
-};
-
-#ifdef CONFIG_PM
-static int tomtom_suspend(struct device *dev)
-{
-	dev_dbg(dev, "%s: system suspend\n", __func__);
-	return 0;
-}
-
-static int tomtom_resume(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct tomtom_priv *tomtom = platform_get_drvdata(pdev);
-
-	if (!tomtom) {
-		dev_err(dev, "%s: tomtom private data is NULL\n", __func__);
-		return -EINVAL;
-	}
-	dev_dbg(dev, "%s: system resume\n", __func__);
-	/* Notify */
-	wcd9xxx_resmgr_notifier_call(&tomtom->resmgr,
-				     WCD9XXX_EVENT_POST_RESUME);
-	return 0;
-}
-
-static const struct dev_pm_ops tomtom_pm_ops = {
-	.suspend	= tomtom_suspend,
-	.resume		= tomtom_resume,
-};
-#endif
-
-static int tomtom_probe(struct platform_device *pdev)
-{
-	int ret = 0;
-	struct tomtom_priv *tomtom;
-
-	tomtom = devm_kzalloc(&pdev->dev, sizeof(struct tomtom_priv),
-			      GFP_KERNEL);
-	if (!tomtom)
-		return -ENOMEM;
-
-	platform_set_drvdata(pdev, tomtom);
-
-	if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_SLIMBUS)
-		ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_tomtom,
-			tomtom_dai, ARRAY_SIZE(tomtom_dai));
-	else if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
-		ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_tomtom,
-			tomtom_i2s_dai, ARRAY_SIZE(tomtom_i2s_dai));
-	mutex_init(&tomtom->codec_mutex);
-	return ret;
-}
-static int tomtom_remove(struct platform_device *pdev)
-{
-	struct tomtom_priv *tomtom = platform_get_drvdata(pdev);
-
-	mutex_destroy(&tomtom->codec_mutex);
-	snd_soc_unregister_codec(&pdev->dev);
-	return 0;
-}
-static struct platform_driver tomtom_codec_driver = {
-	.probe = tomtom_probe,
-	.remove = tomtom_remove,
-	.driver = {
-		.name = "tomtom_codec",
-		.owner = THIS_MODULE,
-#ifdef CONFIG_PM
-		.pm = &tomtom_pm_ops,
-#endif
-	},
-};
-
-static int __init tomtom_codec_init(void)
-{
-	return platform_driver_register(&tomtom_codec_driver);
-}
-
-static void __exit tomtom_codec_exit(void)
-{
-	platform_driver_unregister(&tomtom_codec_driver);
-}
-
-module_init(tomtom_codec_init);
-module_exit(tomtom_codec_exit);
-
-MODULE_DESCRIPTION("TomTom codec driver");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/wcd9330.h b/sound/soc/codecs/wcd9330.h
deleted file mode 100644
index 8679d01..0000000
--- a/sound/soc/codecs/wcd9330.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* Copyright (c) 2012-2015, 2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#ifndef WCD9330_H
-#define WCD9330_H
-
-#include <sound/soc.h>
-#include <sound/jack.h>
-#include <sound/apr_audio-v2.h>
-#include <linux/mfd/wcd9xxx/wcd9xxx-slimslave.h>
-#include "wcd9xxx-mbhc.h"
-#include "wcd9xxx-resmgr.h"
-#include "wcd9xxx-common.h"
-
-#define TOMTOM_NUM_REGISTERS 0x400
-#define TOMTOM_MAX_REGISTER (TOMTOM_NUM_REGISTERS-1)
-#define TOMTOM_CACHE_SIZE TOMTOM_NUM_REGISTERS
-
-#define TOMTOM_REG_VAL(reg, val)		{reg, 0, val}
-#define TOMTOM_MCLK_ID 0
-
-#define TOMTOM_REGISTER_START_OFFSET 0x800
-#define TOMTOM_SB_PGD_PORT_RX_BASE   0x40
-#define TOMTOM_SB_PGD_PORT_TX_BASE   0x50
-
-#define WCD9330_DMIC_CLK_DIV_2 0x00
-#define WCD9330_DMIC_CLK_DIV_3 0x01
-#define WCD9330_DMIC_CLK_DIV_4 0x02
-#define WCD9330_DMIC_CLK_DIV_6 0x03
-#define WCD9330_DMIC_CLK_DIV_16 0x04
-
-#define TOMTOM_ZDET_SUPPORTED true
-
-extern const u8 tomtom_reset_reg_defaults[TOMTOM_CACHE_SIZE];
-struct tomtom_codec_dai_data {
-	u32 rate;
-	u32 *ch_num;
-	u32 ch_act;
-	u32 ch_tot;
-};
-
-enum tomtom_pid_current {
-	TOMTOM_PID_MIC_2P5_UA,
-	TOMTOM_PID_MIC_5_UA,
-	TOMTOM_PID_MIC_10_UA,
-	TOMTOM_PID_MIC_20_UA,
-};
-
-enum tomtom_mbhc_analog_pwr_cfg {
-	TOMTOM_ANALOG_PWR_COLLAPSED = 0,
-	TOMTOM_ANALOG_PWR_ON,
-	TOMTOM_NUM_ANALOG_PWR_CONFIGS,
-};
-
-enum {
-	HPH_PA_NONE = 0,
-	HPH_PA_R,
-	HPH_PA_L,
-	HPH_PA_L_R,
-};
-
-/* Number of input and output Slimbus port */
-enum {
-	TOMTOM_RX1 = 0,
-	TOMTOM_RX2,
-	TOMTOM_RX3,
-	TOMTOM_RX4,
-	TOMTOM_RX5,
-	TOMTOM_RX6,
-	TOMTOM_RX7,
-	TOMTOM_RX8,
-	TOMTOM_RX9,
-	TOMTOM_RX10,
-	TOMTOM_RX11,
-	TOMTOM_RX12,
-	TOMTOM_RX13,
-	TOMTOM_RX_MAX,
-};
-
-enum {
-	TOMTOM_TX1 = 0,
-	TOMTOM_TX2,
-	TOMTOM_TX3,
-	TOMTOM_TX4,
-	TOMTOM_TX5,
-	TOMTOM_TX6,
-	TOMTOM_TX7,
-	TOMTOM_TX8,
-	TOMTOM_TX9,
-	TOMTOM_TX10,
-	TOMTOM_TX11,
-	TOMTOM_TX12,
-	TOMTOM_TX13,
-	TOMTOM_TX14,
-	TOMTOM_TX15,
-	TOMTOM_TX16,
-	TOMTOM_TX_MAX,
-};
-
-extern int tomtom_mclk_enable(struct snd_soc_codec *codec, int mclk_enable,
-			     bool dapm);
-extern int tomtom_codec_mclk_enable(struct snd_soc_codec *codec,
-				    int mclk_enable, bool dapm);
-extern int tomtom_hs_detect(struct snd_soc_codec *codec,
-			   struct wcd9xxx_mbhc_config *mbhc_cfg);
-extern void tomtom_hs_detect_exit(struct snd_soc_codec *codec);
-extern void *tomtom_get_afe_config(struct snd_soc_codec *codec,
-				  enum afe_config_type config_type);
-
-extern void tomtom_event_register(
-	int (*machine_event_cb)(struct snd_soc_codec *codec,
-				enum wcd9xxx_codec_event),
-	struct snd_soc_codec *codec);
-extern void tomtom_register_ext_clk_cb(
-	int (*codec_ext_clk_en)(struct snd_soc_codec *codec,
-				int enable, bool dapm),
-	int (*get_ext_clk_cnt)(void),
-	struct snd_soc_codec *codec);
-extern int tomtom_enable_qfuse_sensing(struct snd_soc_codec *codec);
-#endif
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 90d16fb..329aa7a 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -26,6 +26,7 @@
 #include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
 #include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
 #include <linux/mfd/wcd9335/registers.h>
+#include <linux/mfd/wcd9335/irq.h>
 #include <linux/mfd/wcd9xxx/pdata.h>
 #include <linux/regulator/consumer.h>
 #include <linux/clk.h>
@@ -546,38 +547,6 @@
 	SPLINE_SRC_MAX,
 };
 
-/* wcd9335 interrupt table  */
-static const struct intr_data wcd9335_intr_table[] = {
-	{WCD9XXX_IRQ_SLIMBUS, false},
-	{WCD9335_IRQ_MBHC_SW_DET, true},
-	{WCD9335_IRQ_MBHC_BUTTON_PRESS_DET, true},
-	{WCD9335_IRQ_MBHC_BUTTON_RELEASE_DET, true},
-	{WCD9335_IRQ_MBHC_ELECT_INS_REM_DET, true},
-	{WCD9335_IRQ_MBHC_ELECT_INS_REM_LEG_DET, true},
-	{WCD9335_IRQ_FLL_LOCK_LOSS, false},
-	{WCD9335_IRQ_HPH_PA_CNPL_COMPLETE, false},
-	{WCD9335_IRQ_HPH_PA_CNPR_COMPLETE, false},
-	{WCD9335_IRQ_EAR_PA_CNP_COMPLETE, false},
-	{WCD9335_IRQ_LINE_PA1_CNP_COMPLETE, false},
-	{WCD9335_IRQ_LINE_PA2_CNP_COMPLETE, false},
-	{WCD9335_IRQ_LINE_PA3_CNP_COMPLETE, false},
-	{WCD9335_IRQ_LINE_PA4_CNP_COMPLETE, false},
-	{WCD9335_IRQ_HPH_PA_OCPL_FAULT, false},
-	{WCD9335_IRQ_HPH_PA_OCPR_FAULT, false},
-	{WCD9335_IRQ_EAR_PA_OCP_FAULT, false},
-	{WCD9335_IRQ_SOUNDWIRE, false},
-	{WCD9335_IRQ_VDD_DIG_RAMP_COMPLETE, false},
-	{WCD9335_IRQ_RCO_ERROR, false},
-	{WCD9335_IRQ_SVA_ERROR, false},
-	{WCD9335_IRQ_MAD_AUDIO, false},
-	{WCD9335_IRQ_MAD_BEACON, false},
-	{WCD9335_IRQ_SVA_OUTBOX1, true},
-	{WCD9335_IRQ_SVA_OUTBOX2, true},
-	{WCD9335_IRQ_MAD_ULTRASOUND, false},
-	{WCD9335_IRQ_VBAT_ATTACK, false},
-	{WCD9335_IRQ_VBAT_RESTORE, false},
-};
-
 static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
 static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1);
 static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
@@ -875,176 +844,6 @@
 	{WCD9335_CDC_BOOST1_BOOST_CTL, 0x7C, 0x44},
 };
 
-/*
- * wcd9335_get_codec_info: Get codec specific information
- *
- * @wcd9xxx: pointer to wcd9xxx structure
- * @wcd_type: pointer to wcd9xxx_codec_type structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd9335_get_codec_info(struct wcd9xxx *wcd9xxx,
-			   struct wcd9xxx_codec_type *wcd_type)
-{
-	u16 id_minor, id_major;
-	struct regmap *wcd_regmap;
-	int rc, val, version = 0;
-
-	if (!wcd9xxx || !wcd_type)
-		return -EINVAL;
-
-	if (!wcd9xxx->regmap) {
-		dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
-			__func__);
-		return -EINVAL;
-	}
-	wcd_regmap = wcd9xxx->regmap;
-
-	rc = regmap_bulk_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0,
-			(u8 *)&id_minor, sizeof(u16));
-	if (rc)
-		return -EINVAL;
-
-	rc = regmap_bulk_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE2,
-			      (u8 *)&id_major, sizeof(u16));
-	if (rc)
-		return -EINVAL;
-
-	dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n",
-		 __func__, id_major, id_minor);
-
-	/* Version detection */
-	if (id_major == TASHA_MAJOR) {
-		regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0,
-			    &val);
-		version = ((u8)val & 0x80) >> 7;
-	} else if (id_major == TASHA2P0_MAJOR)
-		version = 2;
-	else
-		dev_err(wcd9xxx->dev, "%s: wcd9335 version unknown (major 0x%x, minor 0x%x)\n",
-			__func__, id_major, id_minor);
-
-	/* Fill codec type info */
-	wcd_type->id_major = id_major;
-	wcd_type->id_minor = id_minor;
-	wcd_type->num_irqs = WCD9335_NUM_IRQS;
-	wcd_type->version = version;
-	wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1;
-	wcd_type->i2c_chip_status = 0x01;
-	wcd_type->intr_tbl = wcd9335_intr_table;
-	wcd_type->intr_tbl_size = ARRAY_SIZE(wcd9335_intr_table);
-
-	wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] =
-						WCD9335_INTR_PIN1_STATUS0;
-	wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] =
-						WCD9335_INTR_PIN1_CLEAR0;
-	wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] =
-						WCD9335_INTR_PIN1_MASK0;
-	wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] =
-						WCD9335_INTR_LEVEL0;
-	wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] =
-						WCD9335_INTR_CLR_COMMIT;
-
-	return rc;
-}
-EXPORT_SYMBOL(wcd9335_get_codec_info);
-
-/*
- * wcd9335_bringdown: Bringdown WCD Codec
- *
- * @wcd9xxx: Pointer to wcd9xxx structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd9335_bringdown(struct wcd9xxx *wcd9xxx)
-{
-	if (!wcd9xxx || !wcd9xxx->regmap)
-		return -EINVAL;
-
-	regmap_write(wcd9xxx->regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
-		     0x04);
-
-	return 0;
-}
-EXPORT_SYMBOL(wcd9335_bringdown);
-
-/*
- * wcd9335_bringup: Bringup WCD Codec
- *
- * @wcd9xxx: Pointer to the wcd9xxx structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd9335_bringup(struct wcd9xxx *wcd9xxx)
-{
-	int ret = 0;
-	int val, byte0;
-	struct regmap *wcd_regmap;
-
-	if (!wcd9xxx)
-		return -EINVAL;
-
-	if (!wcd9xxx->regmap) {
-		dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
-			__func__);
-		return -EINVAL;
-	}
-	wcd_regmap = wcd9xxx->regmap;
-
-	regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0, &val);
-	regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0, &byte0);
-
-	if ((val < 0) || (byte0 < 0)) {
-		dev_err(wcd9xxx->dev, "%s: tasha codec version detection fail!\n",
-			__func__);
-		return -EINVAL;
-	}
-	if ((val & 0x80) && (byte0 == 0x0)) {
-		dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v1.1\n",
-			 __func__);
-		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01);
-		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_2, 0xFC);
-		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_4, 0x21);
-		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
-			     0x5);
-		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
-			     0x7);
-		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
-			     0x3);
-		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3);
-	} else if (byte0 == 0x1) {
-		dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v2.0\n",
-			 __func__);
-		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01);
-		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_TEST_2, 0x00);
-		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_8, 0x6F);
-		regmap_write(wcd_regmap, WCD9335_BIAS_VBG_FINE_ADJ, 0x65);
-		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
-			     0x5);
-		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
-			     0x7);
-		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
-			     0x3);
-		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3);
-	} else if ((byte0 == 0) && (!(val & 0x80))) {
-		dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v1.0\n",
-			 __func__);
-		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01);
-		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_2, 0xFC);
-		regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_4, 0x21);
-		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
-			     0x3);
-		regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3);
-	} else {
-		dev_err(wcd9xxx->dev, "%s: tasha codec version unknown\n",
-			__func__);
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL(wcd9335_bringup);
-
 /**
  * tasha_set_spkr_gain_offset - offset the speaker path
  * gain with the given offset value.
diff --git a/sound/soc/codecs/wcd9335.h b/sound/soc/codecs/wcd9335.h
index d27bb96..c76461e 100644
--- a/sound/soc/codecs/wcd9335.h
+++ b/sound/soc/codecs/wcd9335.h
@@ -83,44 +83,6 @@
 	TASHA_TX_MAX,
 };
 
-enum {
-	/* INTR_REG 0 */
-	WCD9335_IRQ_FLL_LOCK_LOSS = 1,
-	WCD9335_IRQ_HPH_PA_OCPL_FAULT,
-	WCD9335_IRQ_HPH_PA_OCPR_FAULT,
-	WCD9335_IRQ_EAR_PA_OCP_FAULT,
-	WCD9335_IRQ_HPH_PA_CNPL_COMPLETE,
-	WCD9335_IRQ_HPH_PA_CNPR_COMPLETE,
-	WCD9335_IRQ_EAR_PA_CNP_COMPLETE,
-	/* INTR_REG 1 */
-	WCD9335_IRQ_MBHC_SW_DET,
-	WCD9335_IRQ_MBHC_ELECT_INS_REM_DET,
-	WCD9335_IRQ_MBHC_BUTTON_PRESS_DET,
-	WCD9335_IRQ_MBHC_BUTTON_RELEASE_DET,
-	WCD9335_IRQ_MBHC_ELECT_INS_REM_LEG_DET,
-	WCD9335_IRQ_RESERVED_0,
-	WCD9335_IRQ_RESERVED_1,
-	WCD9335_IRQ_RESERVED_2,
-	/* INTR_REG 2 */
-	WCD9335_IRQ_LINE_PA1_CNP_COMPLETE,
-	WCD9335_IRQ_LINE_PA2_CNP_COMPLETE,
-	WCD9335_IRQ_LINE_PA3_CNP_COMPLETE,
-	WCD9335_IRQ_LINE_PA4_CNP_COMPLETE,
-	WCD9335_IRQ_SOUNDWIRE,
-	WCD9335_IRQ_VDD_DIG_RAMP_COMPLETE,
-	WCD9335_IRQ_RCO_ERROR,
-	WCD9335_IRQ_SVA_ERROR,
-	/* INTR_REG 3 */
-	WCD9335_IRQ_MAD_AUDIO,
-	WCD9335_IRQ_MAD_BEACON,
-	WCD9335_IRQ_MAD_ULTRASOUND,
-	WCD9335_IRQ_VBAT_ATTACK,
-	WCD9335_IRQ_VBAT_RESTORE,
-	WCD9335_IRQ_SVA_OUTBOX1,
-	WCD9335_IRQ_SVA_OUTBOX2,
-	WCD9335_NUM_IRQS,
-};
-
 enum wcd9335_codec_event {
 	WCD9335_CODEC_EVENT_CODEC_UP = 0,
 };
diff --git a/sound/soc/codecs/wcd934x/Makefile b/sound/soc/codecs/wcd934x/Makefile
index 2843fa1..12781f6 100644
--- a/sound/soc/codecs/wcd934x/Makefile
+++ b/sound/soc/codecs/wcd934x/Makefile
@@ -1,9 +1,6 @@
 #
 # Makefile for wcd934x codec driver.
 #
-snd-soc-wcd934x-objs := wcd934x.o wcd934x-dsp-cntl.o
+snd-soc-wcd934x-objs := wcd934x.o wcd934x-dsp-cntl.o \
+			wcd934x-mbhc.o wcd934x-dsd.o
 obj-$(CONFIG_SND_SOC_WCD934X) += snd-soc-wcd934x.o
-snd-soc-wcd934x-mbhc-objs := wcd934x-mbhc.o
-obj-$(CONFIG_SND_SOC_WCD934X_MBHC) += snd-soc-wcd934x-mbhc.o
-snd-soc-wcd934x-dsd-objs := wcd934x-dsd.o
-obj-$(CONFIG_SND_SOC_WCD934X_DSD) += snd-soc-wcd934x-dsd.o
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsd.h b/sound/soc/codecs/wcd934x/wcd934x-dsd.h
index 4982883..834b96c 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsd.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsd.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -43,7 +43,7 @@
 	int version;
 };
 
-#ifdef CONFIG_SND_SOC_WCD934X_DSD
+#if IS_ENABLED(CONFIG_SND_SOC_WCD934X_DSD)
 int tavil_dsd_set_mixer_value(struct tavil_dsd_config *dsd_conf,
 			      int interp_num, int sw_value);
 int tavil_dsd_get_current_mixer_value(struct tavil_dsd_config *dsd_conf,
diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
index 578c347..a1a5e2d 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
@@ -24,6 +24,7 @@
 #include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
 #include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
 #include <linux/mfd/wcd934x/registers.h>
+#include <linux/mfd/wcd934x/irq.h>
 #include <linux/mfd/wcd9xxx/pdata.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.h b/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
index d40546a..53c886d 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
@@ -35,7 +35,7 @@
 	bool is_hph_recover;
 };
 
-#ifdef CONFIG_SND_SOC_WCD934X_MBHC
+#if IS_ENABLED(CONFIG_SND_SOC_WCD934X_MBHC)
 extern int tavil_mbhc_init(struct wcd934x_mbhc **mbhc,
 			   struct snd_soc_codec *codec,
 			   struct fw_info *fw_data);
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index ff08ccb..ca16ed8 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -31,6 +31,7 @@
 #include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
 #include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
 #include <linux/mfd/wcd934x/registers.h>
+#include <linux/mfd/wcd934x/irq.h>
 #include <linux/mfd/wcd9xxx/pdata.h>
 #include <linux/regulator/consumer.h>
 #include <linux/soundwire/swr-wcd.h>
@@ -233,37 +234,6 @@
 	u8 hph_idle_detect_en;
 };
 
-static const struct intr_data wcd934x_intr_table[] = {
-	{WCD9XXX_IRQ_SLIMBUS, false},
-	{WCD934X_IRQ_MBHC_SW_DET, true},
-	{WCD934X_IRQ_MBHC_BUTTON_PRESS_DET, true},
-	{WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET, true},
-	{WCD934X_IRQ_MBHC_ELECT_INS_REM_DET, true},
-	{WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET, true},
-	{WCD934X_IRQ_MISC, false},
-	{WCD934X_IRQ_HPH_PA_CNPL_COMPLETE, false},
-	{WCD934X_IRQ_HPH_PA_CNPR_COMPLETE, false},
-	{WCD934X_IRQ_EAR_PA_CNP_COMPLETE, false},
-	{WCD934X_IRQ_LINE_PA1_CNP_COMPLETE, false},
-	{WCD934X_IRQ_LINE_PA2_CNP_COMPLETE, false},
-	{WCD934X_IRQ_SLNQ_ANALOG_ERROR, false},
-	{WCD934X_IRQ_RESERVED_3, false},
-	{WCD934X_IRQ_HPH_PA_OCPL_FAULT, false},
-	{WCD934X_IRQ_HPH_PA_OCPR_FAULT, false},
-	{WCD934X_IRQ_EAR_PA_OCP_FAULT, false},
-	{WCD934X_IRQ_SOUNDWIRE, false},
-	{WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE, false},
-	{WCD934X_IRQ_RCO_ERROR, false},
-	{WCD934X_IRQ_CPE_ERROR, false},
-	{WCD934X_IRQ_MAD_AUDIO, false},
-	{WCD934X_IRQ_MAD_BEACON, false},
-	{WCD934X_IRQ_CPE1_INTR, true},
-	{WCD934X_IRQ_RESERVED_4, false},
-	{WCD934X_IRQ_MAD_ULTRASOUND, false},
-	{WCD934X_IRQ_VBAT_ATTACK, false},
-	{WCD934X_IRQ_VBAT_RESTORE, false},
-};
-
 struct tavil_cpr_reg_defaults {
 	int wr_data;
 	int wr_addr;
@@ -676,140 +646,6 @@
 
 static int __tavil_enable_efuse_sensing(struct tavil_priv *tavil);
 
-/*
- * wcd934x_get_codec_info: Get codec specific information
- *
- * @wcd9xxx: pointer to wcd9xxx structure
- * @wcd_type: pointer to wcd9xxx_codec_type structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd934x_get_codec_info(struct wcd9xxx *wcd9xxx,
-			   struct wcd9xxx_codec_type *wcd_type)
-{
-	u16 id_minor, id_major;
-	struct regmap *wcd_regmap;
-	int rc, version = -1;
-
-	if (!wcd9xxx || !wcd_type)
-		return -EINVAL;
-
-	if (!wcd9xxx->regmap) {
-		dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null\n", __func__);
-		return -EINVAL;
-	}
-	wcd_regmap = wcd9xxx->regmap;
-
-	rc = regmap_bulk_read(wcd_regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0,
-			      (u8 *)&id_minor, sizeof(u16));
-	if (rc)
-		return -EINVAL;
-
-	rc = regmap_bulk_read(wcd_regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2,
-			      (u8 *)&id_major, sizeof(u16));
-	if (rc)
-		return -EINVAL;
-
-	dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n",
-		 __func__, id_major, id_minor);
-
-	if (id_major != TAVIL_MAJOR)
-		goto version_unknown;
-
-	/*
-	 * As fine version info cannot be retrieved before tavil probe.
-	 * Assign coarse versions for possible future use before tavil probe.
-	 */
-	if (id_minor == cpu_to_le16(0))
-		version = TAVIL_VERSION_1_0;
-	else if (id_minor == cpu_to_le16(0x01))
-		version = TAVIL_VERSION_1_1;
-
-version_unknown:
-	if (version < 0)
-		dev_err(wcd9xxx->dev, "%s: wcd934x version unknown\n",
-			__func__);
-
-	/* Fill codec type info */
-	wcd_type->id_major = id_major;
-	wcd_type->id_minor = id_minor;
-	wcd_type->num_irqs = WCD934X_NUM_IRQS;
-	wcd_type->version = version;
-	wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1;
-	wcd_type->i2c_chip_status = 0x01;
-	wcd_type->intr_tbl = wcd934x_intr_table;
-	wcd_type->intr_tbl_size = ARRAY_SIZE(wcd934x_intr_table);
-
-	wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] =
-						WCD934X_INTR_PIN1_STATUS0;
-	wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] =
-						WCD934X_INTR_PIN1_CLEAR0;
-	wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] =
-						WCD934X_INTR_PIN1_MASK0;
-	wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] =
-						WCD934X_INTR_LEVEL0;
-	wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] =
-						WCD934X_INTR_CLR_COMMIT;
-
-	return rc;
-}
-EXPORT_SYMBOL(wcd934x_get_codec_info);
-
-/*
- * wcd934x_bringdown: Bringdown WCD Codec
- *
- * @wcd9xxx: Pointer to wcd9xxx structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd934x_bringdown(struct wcd9xxx *wcd9xxx)
-{
-	if (!wcd9xxx || !wcd9xxx->regmap)
-		return -EINVAL;
-
-	regmap_write(wcd9xxx->regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
-		     0x04);
-
-	return 0;
-}
-EXPORT_SYMBOL(wcd934x_bringdown);
-
-/*
- * wcd934x_bringup: Bringup WCD Codec
- *
- * @wcd9xxx: Pointer to the wcd9xxx structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd934x_bringup(struct wcd9xxx *wcd9xxx)
-{
-	struct regmap *wcd_regmap;
-
-	if (!wcd9xxx)
-		return -EINVAL;
-
-	if (!wcd9xxx->regmap) {
-		dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
-			__func__);
-		return -EINVAL;
-	}
-	wcd_regmap = wcd9xxx->regmap;
-
-	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x01);
-	regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_A_STARTUP, 0x19);
-	regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_D_STARTUP, 0x15);
-	/* Add 1msec delay for VOUT to settle */
-	usleep_range(1000, 1100);
-	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5);
-	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7);
-	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x3);
-	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x7);
-	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
-
-	return 0;
-}
-EXPORT_SYMBOL(wcd934x_bringup);
-
 /**
  * tavil_set_spkr_gain_offset - offset the speaker path
  * gain with the given offset value.
diff --git a/sound/soc/codecs/wcd934x/wcd934x.h b/sound/soc/codecs/wcd934x/wcd934x.h
index ae70175..c3bf50a 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.h
+++ b/sound/soc/codecs/wcd934x/wcd934x.h
@@ -95,45 +95,6 @@
 	INTERP_MAX,
 };
 
-enum {
-	/* INTR_REG 0 */
-	WCD934X_IRQ_MISC = 1,
-	WCD934X_IRQ_HPH_PA_OCPL_FAULT,
-	WCD934X_IRQ_HPH_PA_OCPR_FAULT,
-	WCD934X_IRQ_EAR_PA_OCP_FAULT,
-	WCD934X_IRQ_HPH_PA_CNPL_COMPLETE,
-	WCD934X_IRQ_HPH_PA_CNPR_COMPLETE,
-	WCD934X_IRQ_EAR_PA_CNP_COMPLETE,
-	/* INTR_REG 1 */
-	WCD934X_IRQ_MBHC_SW_DET,
-	WCD934X_IRQ_MBHC_ELECT_INS_REM_DET,
-	WCD934X_IRQ_MBHC_BUTTON_PRESS_DET,
-	WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET,
-	WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET,
-	WCD934X_IRQ_RESERVED_0,
-	WCD934X_IRQ_RESERVED_1,
-	WCD934X_IRQ_RESERVED_2,
-	/* INTR_REG 2 */
-	WCD934X_IRQ_LINE_PA1_CNP_COMPLETE,
-	WCD934X_IRQ_LINE_PA2_CNP_COMPLETE,
-	WCD934X_IRQ_SLNQ_ANALOG_ERROR,
-	WCD934X_IRQ_RESERVED_3,
-	WCD934X_IRQ_SOUNDWIRE,
-	WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE,
-	WCD934X_IRQ_RCO_ERROR,
-	WCD934X_IRQ_CPE_ERROR,
-	/* INTR_REG 3 */
-	WCD934X_IRQ_MAD_AUDIO,
-	WCD934X_IRQ_MAD_BEACON,
-	WCD934X_IRQ_MAD_ULTRASOUND,
-	WCD934X_IRQ_VBAT_ATTACK,
-	WCD934X_IRQ_VBAT_RESTORE,
-	WCD934X_IRQ_CPE1_INTR,
-	WCD934X_IRQ_RESERVED_4,
-	WCD934X_IRQ_SLNQ_DIGITAL,
-	WCD934X_NUM_IRQS,
-};
-
 /*
  * Selects compander and smart boost settings
  * for a given speaker mode
diff --git a/sound/soc/codecs/wcd9xxx-common-v2.c b/sound/soc/codecs/wcd9xxx-common-v2.c
index 9ac38c2..6216657 100644
--- a/sound/soc/codecs/wcd9xxx-common-v2.c
+++ b/sound/soc/codecs/wcd9xxx-common-v2.c
@@ -1316,6 +1316,7 @@
 		break;
 	};
 }
+EXPORT_SYMBOL(wcd_clsh_fsm);
 
 int wcd_clsh_get_clsh_state(struct wcd_clsh_cdc_data *clsh)
 {
diff --git a/sound/soc/codecs/wcd9xxx-common.c b/sound/soc/codecs/wcd9xxx-common.c
deleted file mode 100644
index 7b2e68a..0000000
--- a/sound/soc/codecs/wcd9xxx-common.c
+++ /dev/null
@@ -1,1480 +0,0 @@
-/* Copyright (c) 2013-2015, 2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <sound/soc.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
-#include "wcd9xxx-common.h"
-
-#define CLSH_COMPUTE_EAR 0x01
-#define CLSH_COMPUTE_HPH_L 0x02
-#define CLSH_COMPUTE_HPH_R 0x03
-
-#define BUCK_VREF_0P494V 0x3F
-#define BUCK_VREF_2V 0xFF
-#define BUCK_VREF_0P494V 0x3F
-#define BUCK_VREF_1P8V 0xE6
-
-#define BUCK_SETTLE_TIME_US 50
-#define NCP_SETTLE_TIME_US 50
-
-#define MAX_IMPED_PARAMS 13
-
-#define USLEEP_RANGE_MARGIN_US 100
-
-struct wcd9xxx_imped_val {
-	u32 imped_val;
-	u8 index;
-};
-
-static const struct wcd9xxx_reg_mask_val imped_table[][MAX_IMPED_PARAMS] = {
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x46},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x04},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x11},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x9B},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x15},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x04},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x0C},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x47},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x11},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x9B},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x15},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x05},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x0C},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x49},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x07},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x12},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x35},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x4E},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x06},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x0E},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x49},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x16},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x17},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x5F},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xCF},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x06},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x0F},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x59},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x15},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x9C},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xCE},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xBD},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x07},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x10},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x66},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x04},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x9A},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2E},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xBD},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xA6},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x07},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x11},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x79},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x04},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x11},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x37},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xA6},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAD},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x08},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x12},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x76},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x04},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x11},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x4E},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAD},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x09},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x12},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x78},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x12},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xD0},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x13},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0A},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x13},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x7A},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x06},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xB7},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x13},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0B},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x14},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x60},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x09},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xA4},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1F},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0C},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x14},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x79},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x17},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAE},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1F},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1D},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0D},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x15},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x78},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x16},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1D},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0E},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x16},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x89},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x40},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x13},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x10},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x16},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x97},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xD0},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x12},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x17},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x8A},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x06},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xB7},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x10},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x13},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x17},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x8A},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x07},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xA4},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1D},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x15},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x18},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x9A},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x08},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAE},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x27},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x18},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x19},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x8B},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x18},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x20},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2E},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x1A},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x19},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x9A},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x17},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x13},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2E},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2D},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x1D},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x1A},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0xA9},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x06},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2D},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x1F},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x19},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0xB9},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x06},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x10},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x23},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x18},
-	},
-	{
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0xA9},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x07},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1D},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x27},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x35},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x26},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x16},
-	},
-};
-
-static const struct wcd9xxx_imped_val imped_index[] = {
-	{4000, 0},
-	{4500, 1},
-	{5000, 2},
-	{5500, 3},
-	{6000, 4},
-	{6500, 5},
-	{7000, 6},
-	{7700, 7},
-	{8470, 8},
-	{9317, 9},
-	{10248, 10},
-	{11273, 11},
-	{12400, 12},
-	{13641, 13},
-	{15005, 14},
-	{16505, 15},
-	{18156, 16},
-	{19971, 17},
-	{21969, 18},
-	{24165, 19},
-	{26582, 20},
-	{29240, 21},
-	{32164, 22},
-};
-
-static inline void
-wcd9xxx_enable_clsh_block(struct snd_soc_codec *codec,
-			  struct wcd9xxx_clsh_cdc_data *clsh_d, bool enable)
-{
-	if ((enable && ++clsh_d->clsh_users == 1) ||
-	    (!enable && --clsh_d->clsh_users == 0))
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_B1_CTL,
-				    0x01, enable ? 0x01 : 0x00);
-	dev_dbg(codec->dev, "%s: clsh_users %d, enable %d", __func__,
-		clsh_d->clsh_users, enable);
-}
-
-static inline void wcd9xxx_enable_anc_delay(
-	struct snd_soc_codec *codec,
-	bool on)
-{
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_B1_CTL,
-		0x02, on ? 0x02 : 0x00);
-}
-
-static inline void
-wcd9xxx_enable_buck(struct snd_soc_codec *codec,
-		    struct wcd9xxx_clsh_cdc_data *clsh_d, bool enable)
-{
-	if ((enable && ++clsh_d->buck_users == 1) ||
-	    (!enable && --clsh_d->buck_users == 0))
-		snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_1,
-				    0x80, enable ? 0x80 : 0x00);
-	dev_dbg(codec->dev, "%s: buck_users %d, enable %d", __func__,
-		clsh_d->buck_users, enable);
-}
-
-static void (*clsh_state_fp[NUM_CLSH_STATES])(struct snd_soc_codec *,
-					      struct wcd9xxx_clsh_cdc_data *,
-					      u8 req_state, bool req_type);
-
-static const char *state_to_str(u8 state, char *buf, size_t buflen)
-{
-	int i;
-	int cnt = 0;
-	/*
-	 * This array of strings should match with enum wcd9xxx_clsh_state_bit.
-	 */
-	static const char *const states[] = {
-		"STATE_EAR",
-		"STATE_HPH_L",
-		"STATE_HPH_R",
-		"STATE_LO",
-	};
-
-	if (state == WCD9XXX_CLSH_STATE_IDLE) {
-		snprintf(buf, buflen, "[STATE_IDLE]");
-		goto done;
-	}
-
-	buf[0] = '\0';
-	for (i = 0; i < ARRAY_SIZE(states); i++) {
-		if (!(state & (1 << i)))
-			continue;
-		cnt = snprintf(buf, buflen - cnt - 1, "%s%s%s", buf,
-			       buf[0] == '\0' ? "[" : "|",
-			       states[i]);
-	}
-	if (cnt > 0)
-		strlcat(buf + cnt, "]", buflen);
-
-done:
-	if (buf[0] == '\0')
-		snprintf(buf, buflen, "[STATE_UNKNOWN]");
-	return buf;
-}
-
-static void wcd9xxx_cfg_clsh_param_common(
-		struct snd_soc_codec *codec)
-{
-	int i;
-	const struct wcd9xxx_reg_mask_val reg_set[] = {
-		{WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS, 0x3 << 0, 0},
-		{WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS, 0x3 << 2, 1 << 2},
-		{WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS, (0x1 << 4), 0},
-		{WCD9XXX_A_CDC_CLSH_B2_CTL, (0x3 << 0), 0x01},
-		{WCD9XXX_A_CDC_CLSH_B2_CTL, (0x3 << 2), (0x01 << 2)},
-		{WCD9XXX_A_CDC_CLSH_B2_CTL, (0xf << 4), (0x03 << 4)},
-		{WCD9XXX_A_CDC_CLSH_B3_CTL, (0xf << 4), (0x03 << 4)},
-		{WCD9XXX_A_CDC_CLSH_B3_CTL, (0xf << 0), (0x0B)},
-		{WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 5), (0x01 << 5)},
-		{WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 1), (0x01 << 1)},
-	};
-
-	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
-		snd_soc_update_bits(codec, reg_set[i].reg, reg_set[i].mask,
-						    reg_set[i].val);
-
-	dev_dbg(codec->dev, "%s: Programmed class H controller common parameters",
-			 __func__);
-}
-
-static void wcd9xxx_chargepump_request(struct snd_soc_codec *codec, bool on)
-{
-	static int cp_count;
-
-	if (on && (++cp_count == 1)) {
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL,
-				    0x01, 0x01);
-		dev_dbg(codec->dev, "%s: Charge Pump enabled, count = %d\n",
-			__func__, cp_count);
-	} else if (!on) {
-		if (--cp_count < 0) {
-			dev_dbg(codec->dev,
-				"%s: Unbalanced disable for charge pump\n",
-				__func__);
-			if (snd_soc_read(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL) &
-			    0x01) {
-				dev_dbg(codec->dev,
-					"%s: Actual chargepump is ON\n",
-					__func__);
-			}
-			cp_count = 0;
-			WARN_ON(1);
-		}
-
-		if (cp_count == 0) {
-			snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL,
-					    0x01, 0x00);
-			dev_dbg(codec->dev,
-				"%s: Charge pump disabled, count = %d\n",
-				__func__, cp_count);
-		}
-	}
-}
-
-void wcd9xxx_enable_high_perf_mode(struct snd_soc_codec *codec,
-				struct wcd9xxx_clsh_cdc_data *clsh_d,
-				u8 uhqa_mode, u8 req_state, bool req_type)
-{
-	dev_dbg(codec->dev, "%s: users fclk8 %d, fclk5 %d", __func__,
-			clsh_d->ncp_users[NCP_FCLK_LEVEL_8],
-			clsh_d->ncp_users[NCP_FCLK_LEVEL_5]);
-
-	if (req_type == WCD9XXX_CLSAB_REQ_ENABLE) {
-		clsh_d->ncp_users[NCP_FCLK_LEVEL_8]++;
-		snd_soc_write(codec, WCD9XXX_A_RX_HPH_BIAS_PA,
-					WCD9XXX_A_RX_HPH_BIAS_PA__POR);
-		snd_soc_write(codec, WCD9XXX_A_RX_HPH_L_PA_CTL, 0x48);
-		snd_soc_write(codec, WCD9XXX_A_RX_HPH_R_PA_CTL, 0x48);
-		if (uhqa_mode)
-			snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CHOP_CTL,
-						0x20, 0x00);
-		wcd9xxx_chargepump_request(codec, true);
-		wcd9xxx_enable_anc_delay(codec, true);
-		wcd9xxx_enable_buck(codec, clsh_d, false);
-		if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] > 0)
-			snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
-						0x0F, 0x08);
-		snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x30, 0x30);
-
-		/* Enable NCP and wait until settles down */
-		if (snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN, 0x01, 0x01))
-			usleep_range(NCP_SETTLE_TIME_US, NCP_SETTLE_TIME_US+10);
-	} else {
-		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CHOP_CTL,
-					0x20, 0x20);
-		snd_soc_write(codec, WCD9XXX_A_RX_HPH_L_PA_CTL,
-					WCD9XXX_A_RX_HPH_L_PA_CTL__POR);
-		snd_soc_write(codec, WCD9XXX_A_RX_HPH_R_PA_CTL,
-					WCD9XXX_A_RX_HPH_R_PA_CTL__POR);
-		snd_soc_write(codec, WCD9XXX_A_RX_HPH_BIAS_PA, 0x57);
-		wcd9xxx_enable_buck(codec, clsh_d, true);
-		wcd9xxx_chargepump_request(codec, false);
-		wcd9xxx_enable_anc_delay(codec, false);
-		clsh_d->ncp_users[NCP_FCLK_LEVEL_8]--;
-		if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] == 0 &&
-		    clsh_d->ncp_users[NCP_FCLK_LEVEL_5] == 0)
-			snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN,
-						0x01, 0x00);
-		else if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] == 0)
-			snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
-						0x0F, 0x05);
-	}
-	dev_dbg(codec->dev, "%s: leave\n", __func__);
-}
-EXPORT_SYMBOL(wcd9xxx_enable_high_perf_mode);
-
-static int get_impedance_index(u32 imped)
-{
-	int i = 0;
-
-	if (imped < imped_index[i].imped_val) {
-		pr_debug("%s, detected impedance is less than 4 Ohm\n",
-				__func__);
-		goto ret;
-	}
-	if (imped >= imped_index[ARRAY_SIZE(imped_index) - 1].imped_val) {
-		pr_debug("%s, detected impedance is greater than 32164 Ohm\n",
-				__func__);
-		i = ARRAY_SIZE(imped_index) - 1;
-		goto ret;
-	}
-	for (i = 0; i < ARRAY_SIZE(imped_index) - 1; i++) {
-		if (imped >= imped_index[i].imped_val &&
-			imped < imped_index[i + 1].imped_val)
-			break;
-	}
-ret:
-	pr_debug("%s: selected impedance index = %d\n",
-			__func__, imped_index[i].index);
-	return imped_index[i].index;
-}
-
-void wcd9xxx_clsh_imped_config(struct snd_soc_codec *codec,
-				  int imped)
-{
-	int i  = 0;
-	int index = 0;
-
-	index = get_impedance_index(imped);
-	if (index >= ARRAY_SIZE(imped_index)) {
-		pr_err("%s, invalid imped = %d\n", __func__, imped);
-		return;
-	}
-	for (i = 0; i < MAX_IMPED_PARAMS; i++)
-		snd_soc_write(codec, imped_table[index][i].reg,
-					imped_table[index][i].val);
-}
-
-static void wcd9xxx_clsh_comp_req(struct snd_soc_codec *codec,
-				  struct wcd9xxx_clsh_cdc_data *clsh_d,
-				  int compute_pa, bool on)
-{
-	u8 shift;
-
-	if (compute_pa == CLSH_COMPUTE_EAR) {
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_B1_CTL, 0x10,
-				    (on ? 0x10 : 0));
-	} else {
-		if (compute_pa == CLSH_COMPUTE_HPH_L) {
-			shift = 3;
-		} else if (compute_pa == CLSH_COMPUTE_HPH_R) {
-			shift = 2;
-		} else {
-			dev_dbg(codec->dev,
-				"%s: classh computation request is incorrect\n",
-				__func__);
-			return;
-		}
-
-		if (on)
-			wcd9xxx_resmgr_add_cond_update_bits(clsh_d->resmgr,
-						  WCD9XXX_COND_HPH,
-						  WCD9XXX_A_CDC_CLSH_B1_CTL,
-						  shift, false);
-		else
-			wcd9xxx_resmgr_rm_cond_update_bits(clsh_d->resmgr,
-						  WCD9XXX_COND_HPH,
-						  WCD9XXX_A_CDC_CLSH_B1_CTL,
-						  shift, false);
-	}
-}
-
-int wcd9xxx_soc_update_bits_push(struct snd_soc_codec *codec,
-					struct list_head *list,
-					uint16_t reg, uint8_t mask,
-					uint8_t value, int delay)
-{
-	int rc;
-	struct wcd9xxx_register_save_node *node;
-
-	node = kmalloc(sizeof(*node), GFP_KERNEL);
-	if (unlikely(!node)) {
-		pr_err("%s: Not enough memory\n", __func__);
-		return -ENOMEM;
-	}
-	node->reg = reg;
-	node->value = snd_soc_read(codec, reg);
-	list_add(&node->lh, list);
-	if (mask == 0xFF)
-		rc = snd_soc_write(codec, reg, value);
-	else
-		rc = snd_soc_update_bits(codec, reg, mask, value);
-	if (delay)
-		usleep_range(delay, delay + USLEEP_RANGE_MARGIN_US);
-	return rc;
-}
-EXPORT_SYMBOL(wcd9xxx_soc_update_bits_push);
-
-void wcd9xxx_restore_registers(struct snd_soc_codec *codec,
-			       struct list_head *lh)
-{
-	struct wcd9xxx_register_save_node *node, *nodetmp;
-
-	list_for_each_entry_safe(node, nodetmp, lh, lh) {
-		snd_soc_write(codec, node->reg, node->value);
-		list_del(&node->lh);
-		kfree(node);
-	}
-}
-EXPORT_SYMBOL(wcd9xxx_restore_registers);
-
-static void wcd9xxx_dynamic_bypass_buck_ctrl_lo(struct snd_soc_codec *cdc,
-						bool enable)
-{
-	int i;
-	const struct wcd9xxx_reg_mask_val reg_set[] = {
-		{WCD9XXX_A_BUCK_MODE_3, (0x1 << 3), (enable << 3)},
-		{WCD9XXX_A_BUCK_MODE_5, enable ? 0xFF : 0x02, 0x02},
-		{WCD9XXX_A_BUCK_MODE_5, 0x1, 0x01}
-	};
-
-	if (!enable) {
-		snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_1,
-					(0x1 << 3), 0x00);
-		snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_4,
-					0xFF, BUCK_VREF_2V);
-	}
-	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
-		snd_soc_update_bits(cdc, reg_set[i].reg, reg_set[i].mask,
-							reg_set[i].val);
-
-	/* 50us sleep is reqd. as per the class H HW design sequence */
-	usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US+10);
-}
-
-static void wcd9xxx_dynamic_bypass_buck_ctrl(struct snd_soc_codec *cdc,
-						bool enable)
-{
-	int i;
-	const struct wcd9xxx_reg_mask_val reg_set[] = {
-		{WCD9XXX_A_BUCK_MODE_3, (0x1 << 3), (enable << 3)},
-		{WCD9XXX_A_BUCK_MODE_5, (0x1 << 1), ((!enable) << 1)},
-		{WCD9XXX_A_BUCK_MODE_5, 0x1, !enable}
-	};
-	if (!enable) {
-		snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_1,
-					(0x1 << 3), 0x00);
-		snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_4,
-					0xFF, BUCK_VREF_2V);
-	}
-	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
-		snd_soc_update_bits(cdc, reg_set[i].reg, reg_set[i].mask,
-							reg_set[i].val);
-
-	/* 50us sleep is reqd. as per the class H HW design sequence */
-	usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US+10);
-}
-
-static void wcd9xxx_set_buck_mode(struct snd_soc_codec *codec, u8 buck_vref)
-{
-	int i;
-	const struct wcd9xxx_reg_mask_val reg_set[] = {
-		{WCD9XXX_A_BUCK_MODE_5, 0x02, 0x02},
-		{WCD9XXX_A_BUCK_MODE_4, 0xFF, buck_vref},
-		{WCD9XXX_A_BUCK_MODE_1, 0x04, 0x04},
-		{WCD9XXX_A_BUCK_MODE_3, 0x04, 0x00},
-		{WCD9XXX_A_BUCK_MODE_3, 0x08, 0x00},
-	};
-
-	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
-		snd_soc_update_bits(codec, reg_set[i].reg,
-					reg_set[i].mask, reg_set[i].val);
-
-	dev_dbg(codec->dev, "%s: Done\n", __func__);
-	usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US + 10);
-}
-
-
-/* This will be called for all states except Lineout */
-static void wcd9xxx_clsh_enable_post_pa(struct snd_soc_codec *codec,
-	struct wcd9xxx_clsh_cdc_data *cdc_clsh_d)
-{
-	int i;
-	const struct wcd9xxx_reg_mask_val reg_set[] = {
-		{WCD9XXX_A_BUCK_MODE_5, 0x02, 0x00},
-		{WCD9XXX_A_NCP_STATIC, 0x20, 0x00},
-		{WCD9XXX_A_BUCK_MODE_3, 0x04, 0x04},
-	};
-
-	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
-		snd_soc_update_bits(codec, reg_set[i].reg,
-					reg_set[i].mask, reg_set[i].val);
-
-	if (!cdc_clsh_d->is_dynamic_vdd_cp)
-		snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_3,
-							0x08, 0x08);
-
-	dev_dbg(codec->dev, "%s: completed clsh mode settings after PA enable\n",
-		   __func__);
-
-}
-
-static void wcd9xxx_set_fclk_get_ncp(struct snd_soc_codec *codec,
-				     struct wcd9xxx_clsh_cdc_data *clsh_d,
-				     enum ncp_fclk_level fclk_level)
-{
-	clsh_d->ncp_users[fclk_level]++;
-
-	pr_debug("%s: enter ncp type %d users fclk8 %d, fclk5 %d\n", __func__,
-		 fclk_level, clsh_d->ncp_users[NCP_FCLK_LEVEL_8],
-		 clsh_d->ncp_users[NCP_FCLK_LEVEL_5]);
-
-	snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x10, 0x00);
-	/* fclk level 8 dominates level 5 */
-	if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] > 0)
-		snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x0F, 0x08);
-	else if (clsh_d->ncp_users[NCP_FCLK_LEVEL_5] > 0)
-		snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x0F, 0x05);
-	else
-		WARN_ONCE(1, "Unexpected users %d,%d\n",
-			  clsh_d->ncp_users[NCP_FCLK_LEVEL_8],
-			  clsh_d->ncp_users[NCP_FCLK_LEVEL_5]);
-	snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x20, 0x20);
-
-	/* enable NCP and wait until settles down */
-	if (snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN, 0x01, 0x01))
-		usleep_range(NCP_SETTLE_TIME_US, NCP_SETTLE_TIME_US + 50);
-	pr_debug("%s: leave\n", __func__);
-}
-
-static void wcd9xxx_set_fclk_put_ncp(struct snd_soc_codec *codec,
-				     struct wcd9xxx_clsh_cdc_data *clsh_d,
-				     enum ncp_fclk_level fclk_level)
-{
-	clsh_d->ncp_users[fclk_level]--;
-
-	pr_debug("%s: enter ncp type %d users fclk8 %d, fclk5 %d\n", __func__,
-		 fclk_level, clsh_d->ncp_users[NCP_FCLK_LEVEL_8],
-		 clsh_d->ncp_users[NCP_FCLK_LEVEL_5]);
-
-	if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] == 0 &&
-	    clsh_d->ncp_users[NCP_FCLK_LEVEL_5] == 0)
-		snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN, 0x01, 0x00);
-	else if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] == 0)
-		/* if dominating level 8 has gone, switch to 5 */
-		snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x0F, 0x05);
-	pr_debug("%s: leave\n", __func__);
-}
-
-static void wcd9xxx_cfg_clsh_param_ear(struct snd_soc_codec *codec)
-{
-	int i;
-	const struct wcd9xxx_reg_mask_val reg_set[] = {
-		{WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 7), 0},
-		{WCD9XXX_A_CDC_CLSH_V_PA_HD_EAR, (0x3f << 0), 0x0D},
-		{WCD9XXX_A_CDC_CLSH_V_PA_MIN_EAR, (0x3f << 0), 0x3A},
-
-		/* Under assumption that EAR load is 10.7ohm */
-		{WCD9XXX_A_CDC_CLSH_IDLE_EAR_THSD, (0x3f << 0), 0x26},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_EAR_THSD, (0x3f << 0), 0x2C},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_EAR_L, 0xff, 0xA9},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_EAR_U, 0xff, 0x07},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, (0x1 << 7), 0},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, (0xf << 0), 0x08},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1b},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2d},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x36},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x37},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-	};
-
-	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
-		snd_soc_update_bits(codec, reg_set[i].reg,
-					reg_set[i].mask, reg_set[i].val);
-
-	dev_dbg(codec->dev, "%s: Programmed Class H controller EAR specific params\n",
-			 __func__);
-}
-
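-/* Program the Class H controller parameters specific to the HPH path */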
-static void wcd9xxx_cfg_clsh_param_hph(struct snd_soc_codec *codec)
-{
-	int i;
-	const struct wcd9xxx_reg_mask_val reg_set[] = {
-		{WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 6), 0},
-		{WCD9XXX_A_CDC_CLSH_V_PA_HD_HPH, 0x3f, 0x0D},
-		{WCD9XXX_A_CDC_CLSH_V_PA_MIN_HPH, 0x3f, 0x1D},
-
-		/* Under assumption that HPH load is 16ohm per channel */
-		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0x3f, 0x13},
-		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0x1f, 0x19},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x97},
-		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, (0x1 << 7), 0},
-		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0x0f, 0},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAE},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25},
-		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
-	};
-
-	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
-		snd_soc_update_bits(codec, reg_set[i].reg, reg_set[i].mask,
-							reg_set[i].val);
-	dev_dbg(codec->dev, "%s: Programmed Class H controller HPH specific params\n",
-			 __func__);
-}
-
-static void wcd9xxx_ncp_bypass_enable(struct snd_soc_codec *cdc, bool enable)
-{
-	snd_soc_update_bits(cdc, WCD9XXX_A_NCP_STATIC, 0x10, (enable << 4));
-	/* 50us sleep is required as per the class H HW design sequence */
-	usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US+10);
-}
-
-static void wcd9xxx_clsh_set_Iest(struct snd_soc_codec *codec,
-		u8 value)
-{
-	snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5,
-				    0x01, (0x01 & 0x03));
-	snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5,
-				    0xFC, (value << 2));
-}
-
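-/* Handle Class H configuration when HPH and EAR paths are active together */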
-static void wcd9xxx_clsh_state_hph_ear(struct snd_soc_codec *codec,
-			struct wcd9xxx_clsh_cdc_data *clsh_d,
-			u8 req_state, bool is_enable)
-{
-	int compute_pa = 0;
-
-	dev_dbg(codec->dev, "%s: enter %s\n", __func__,
-			is_enable ? "enable" : "disable");
-
-	if (is_enable) {
-		/*
-		 * The check below is required to make sure the functions
-		 * inside the if condition execute only once.
-		 */
-		if ((clsh_d->state == WCD9XXX_CLSH_STATE_EAR) ||
-			(req_state == WCD9XXX_CLSH_STATE_EAR)) {
-			wcd9xxx_dynamic_bypass_buck_ctrl(codec, false);
-			wcd9xxx_ncp_bypass_enable(codec, true);
-		}
-		switch (req_state) {
-		case WCD9XXX_CLSH_STATE_HPHL:
-			compute_pa = CLSH_COMPUTE_HPH_L;
-			break;
-		case WCD9XXX_CLSH_STATE_HPHR:
-			compute_pa = CLSH_COMPUTE_HPH_R;
-			break;
-		case WCD9XXX_CLSH_STATE_EAR:
-			compute_pa = CLSH_COMPUTE_EAR;
-			break;
-		default:
-			dev_dbg(codec->dev,
-				"%s:Invalid state:0x%x,enable:0x%x\n",
-				__func__, req_state, is_enable);
-			break;
-		}
-		wcd9xxx_clsh_comp_req(codec, clsh_d, compute_pa, true);
-
-		dev_dbg(codec->dev, "%s: Enabled hph+ear mode clsh\n",
-				__func__);
-	} else {
-		switch (req_state) {
-		case WCD9XXX_CLSH_STATE_HPHL:
-			compute_pa = CLSH_COMPUTE_HPH_L;
-			break;
-		case WCD9XXX_CLSH_STATE_HPHR:
-			compute_pa = CLSH_COMPUTE_HPH_R;
-			break;
-		case WCD9XXX_CLSH_STATE_EAR:
-			compute_pa = CLSH_COMPUTE_EAR;
-			break;
-		default:
-			dev_dbg(codec->dev,
-				"%s:Invalid state:0x%x,enable:0x%x\n",
-				__func__, req_state, is_enable);
-			break;
-		}
-		wcd9xxx_clsh_comp_req(codec, clsh_d, compute_pa, false);
-
-		if (((clsh_d->state & (~req_state)) ==
-				WCD9XXX_CLSH_STATE_EAR) ||
-			(req_state == WCD9XXX_CLSH_STATE_EAR)) {
-			wcd9xxx_ncp_bypass_enable(codec, false);
-			wcd9xxx_dynamic_bypass_buck_ctrl(codec, true);
-		}
-	}
-}
-
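-/* Handle Class H configuration when HPH and Lineout paths are active together */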
-static void wcd9xxx_clsh_state_hph_lo(struct snd_soc_codec *codec,
-			struct wcd9xxx_clsh_cdc_data *clsh_d,
-			u8 req_state, bool is_enable)
-{
-
-	dev_dbg(codec->dev, "%s: enter %s\n", __func__,
-			is_enable ? "enable" : "disable");
-	if (is_enable) {
-		if ((clsh_d->state == WCD9XXX_CLSH_STATE_LO) ||
-			(req_state == WCD9XXX_CLSH_STATE_LO)) {
-			wcd9xxx_dynamic_bypass_buck_ctrl_lo(codec, false);
-			wcd9xxx_enable_buck(codec, clsh_d, true);
-			wcd9xxx_ncp_bypass_enable(codec, true);
-			if (req_state & WCD9XXX_CLSH_STATE_HPH_ST) {
-				wcd9xxx_set_fclk_get_ncp(codec, clsh_d,
-							NCP_FCLK_LEVEL_8);
-				wcd9xxx_set_fclk_put_ncp(codec, clsh_d,
-							NCP_FCLK_LEVEL_5);
-				wcd9xxx_enable_clsh_block(codec, clsh_d, true);
-				wcd9xxx_chargepump_request(codec, true);
-				wcd9xxx_enable_anc_delay(codec, true);
-			}
-		}
-		if (req_state == WCD9XXX_CLSH_STATE_HPHL)
-			wcd9xxx_clsh_comp_req(codec, clsh_d,
-						CLSH_COMPUTE_HPH_L, true);
-		if (req_state == WCD9XXX_CLSH_STATE_HPHR)
-			wcd9xxx_clsh_comp_req(codec, clsh_d,
-						CLSH_COMPUTE_HPH_R, true);
-	} else {
-		switch (req_state) {
-		case WCD9XXX_CLSH_STATE_LO:
-			snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
-						0x20, 0x00);
-			wcd9xxx_dynamic_bypass_buck_ctrl_lo(codec, true);
-			break;
-		case WCD9XXX_CLSH_STATE_HPHL:
-			wcd9xxx_clsh_comp_req(codec, clsh_d,
-						CLSH_COMPUTE_HPH_L, false);
-			break;
-		case WCD9XXX_CLSH_STATE_HPHR:
-			wcd9xxx_clsh_comp_req(codec, clsh_d,
-						CLSH_COMPUTE_HPH_R, false);
-			break;
-		default:
-			dev_dbg(codec->dev,
-				 "%s:Invalid state:0x%x,enable:0x%x\n",
-				__func__, req_state, is_enable);
-			break;
-		}
-		if ((req_state == WCD9XXX_CLSH_STATE_LO) ||
-		((clsh_d->state & (~req_state)) == WCD9XXX_CLSH_STATE_LO)) {
-			wcd9xxx_ncp_bypass_enable(codec, false);
-
-			if ((clsh_d->state & (~req_state)) ==
-						WCD9XXX_CLSH_STATE_LO) {
-				wcd9xxx_set_fclk_get_ncp(codec, clsh_d,
-							NCP_FCLK_LEVEL_5);
-				wcd9xxx_set_fclk_put_ncp(codec, clsh_d,
-							NCP_FCLK_LEVEL_8);
-			}
-
-			if (req_state & WCD9XXX_CLSH_STATE_HPH_ST) {
-				usleep_range(BUCK_SETTLE_TIME_US,
-						BUCK_SETTLE_TIME_US + 10);
-				if (clsh_d->buck_mv ==
-						WCD9XXX_CDC_BUCK_MV_1P8) {
-					wcd9xxx_enable_buck(codec, clsh_d,
-								false);
-					wcd9xxx_ncp_bypass_enable(codec, true);
-				} else {
-					/*
-					 * NCP settle time recommended by codec
-					 * specification
-					 */
-					usleep_range(NCP_SETTLE_TIME_US,
-						NCP_SETTLE_TIME_US + 10);
-					wcd9xxx_clsh_set_Iest(codec, 0x02);
-				}
-				snd_soc_update_bits(codec,
-						WCD9XXX_A_BUCK_MODE_1,
-						0x04, 0x00);
-				snd_soc_update_bits(codec,
-						 WCD9XXX_A_BUCK_MODE_4,
-						0xFF, BUCK_VREF_1P8V);
-			}
-		}
-	}
-}
-
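-/* Handle Class H configuration when EAR and Lineout paths are active together */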
-static void wcd9xxx_clsh_state_ear_lo(struct snd_soc_codec *codec,
-			struct wcd9xxx_clsh_cdc_data *clsh_d,
-			u8 req_state, bool is_enable)
-{
-
-	dev_dbg(codec->dev, "%s: enter %s\n", __func__,
-			is_enable ? "enable" : "disable");
-	if (is_enable) {
-		wcd9xxx_dynamic_bypass_buck_ctrl(codec, false);
-		wcd9xxx_enable_buck(codec, clsh_d, true);
-		wcd9xxx_ncp_bypass_enable(codec, true);
-		if (req_state & WCD9XXX_CLSH_STATE_EAR) {
-			wcd9xxx_set_fclk_get_ncp(codec, clsh_d,
-						NCP_FCLK_LEVEL_8);
-			wcd9xxx_set_fclk_put_ncp(codec, clsh_d,
-						NCP_FCLK_LEVEL_5);
-			wcd9xxx_enable_clsh_block(codec, clsh_d, true);
-			wcd9xxx_chargepump_request(codec, true);
-			wcd9xxx_enable_anc_delay(codec, true);
-			wcd9xxx_clsh_comp_req(codec, clsh_d,
-						CLSH_COMPUTE_EAR, true);
-		}
-	} else {
-		wcd9xxx_ncp_bypass_enable(codec, false);
-
-		if ((clsh_d->state & (~req_state)) == WCD9XXX_CLSH_STATE_LO) {
-			wcd9xxx_set_fclk_get_ncp(codec, clsh_d,
-						NCP_FCLK_LEVEL_5);
-			wcd9xxx_set_fclk_put_ncp(codec, clsh_d,
-						NCP_FCLK_LEVEL_8);
-		}
-
-		if (req_state & WCD9XXX_CLSH_STATE_LO) {
-			snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
-						0x20, 0x00);
-			wcd9xxx_dynamic_bypass_buck_ctrl(codec, true);
-		} else if (req_state & WCD9XXX_CLSH_STATE_EAR) {
-			wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_EAR,
-						false);
-			/* sleep 5ms */
-			if (clsh_d->buck_mv == WCD9XXX_CDC_BUCK_MV_1P8) {
-				wcd9xxx_enable_buck(codec, clsh_d, false);
-				wcd9xxx_ncp_bypass_enable(codec, true);
-			} else {
-				/* NCP settle time recommended by codec spec */
-				usleep_range(NCP_SETTLE_TIME_US,
-					     NCP_SETTLE_TIME_US + 10);
-				wcd9xxx_clsh_set_Iest(codec, 0x02);
-			}
-			snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_1,
-						0x04, 0x00);
-			snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_4,
-						0xFF, BUCK_VREF_1P8V);
-		}
-	}
-}
-
-static void wcd9xxx_clsh_state_hph_ear_lo(struct snd_soc_codec *codec,
-			struct wcd9xxx_clsh_cdc_data *clsh_d,
-			u8 req_state, bool is_enable)
-{
-	dev_dbg(codec->dev, "%s: enter %s\n", __func__,
-			is_enable ? "enable" : "disable");
-
-	if (req_state & WCD9XXX_CLSH_STATE_HPHL)
-		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L,
-					is_enable);
-
-	if (req_state & WCD9XXX_CLSH_STATE_HPHR)
-		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R,
-					is_enable);
-
-	if (req_state & WCD9XXX_CLSH_STATE_EAR)
-		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_EAR,
-					is_enable);
-}
-
-static void wcd9xxx_clsh_state_ear(struct snd_soc_codec *codec,
-			struct wcd9xxx_clsh_cdc_data *clsh_d,
-			u8 req_state, bool is_enable)
-{
-	pr_debug("%s: enter %s\n", __func__, is_enable ? "enable" : "disable");
-	if (is_enable) {
-		wcd9xxx_cfg_clsh_param_common(codec);
-		wcd9xxx_cfg_clsh_param_ear(codec);
-		wcd9xxx_enable_clsh_block(codec, clsh_d, true);
-		wcd9xxx_chargepump_request(codec, true);
-		wcd9xxx_enable_anc_delay(codec, true);
-		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_EAR, true);
-		wcd9xxx_set_buck_mode(codec, BUCK_VREF_2V);
-		wcd9xxx_enable_buck(codec, clsh_d, true);
-		wcd9xxx_set_fclk_get_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
-
-		dev_dbg(codec->dev, "%s: Enabled ear mode class h\n", __func__);
-	} else {
-		dev_dbg(codec->dev, "%s: stub fallback to ear\n", __func__);
-		wcd9xxx_set_fclk_put_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
-		wcd9xxx_enable_buck(codec, clsh_d, false);
-		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_EAR, false);
-		wcd9xxx_chargepump_request(codec, false);
-		wcd9xxx_enable_clsh_block(codec, clsh_d, false);
-	}
-}
-
-static void wcd9xxx_clsh_state_hph_l(struct snd_soc_codec *codec,
-		struct wcd9xxx_clsh_cdc_data *clsh_d,
-		u8 req_state, bool is_enable)
-{
-	pr_debug("%s: enter %s\n", __func__, is_enable ? "enable" : "disable");
-
-	if (is_enable) {
-		wcd9xxx_cfg_clsh_param_common(codec);
-		wcd9xxx_cfg_clsh_param_hph(codec);
-		wcd9xxx_enable_clsh_block(codec, clsh_d, true);
-		wcd9xxx_chargepump_request(codec, true);
-		wcd9xxx_enable_anc_delay(codec, true);
-		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, true);
-		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, true);
-		wcd9xxx_set_buck_mode(codec, BUCK_VREF_0P494V);
-		wcd9xxx_enable_buck(codec, clsh_d, true);
-		wcd9xxx_set_fclk_get_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
-
-		dev_dbg(codec->dev, "%s: Done\n", __func__);
-	} else {
-		wcd9xxx_set_fclk_put_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
-		wcd9xxx_enable_buck(codec, clsh_d, false);
-		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, false);
-		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, false);
-		wcd9xxx_enable_clsh_block(codec, clsh_d, false);
-		wcd9xxx_chargepump_request(codec, false);
-	}
-}
-
-static void wcd9xxx_clsh_state_hph_r(struct snd_soc_codec *codec,
-		struct wcd9xxx_clsh_cdc_data *clsh_d,
-		u8 req_state, bool is_enable)
-{
-	pr_debug("%s: enter %s\n", __func__, is_enable ? "enable" : "disable");
-
-	if (is_enable) {
-		wcd9xxx_cfg_clsh_param_common(codec);
-		wcd9xxx_cfg_clsh_param_hph(codec);
-		wcd9xxx_enable_clsh_block(codec, clsh_d, true);
-		wcd9xxx_chargepump_request(codec, true);
-		wcd9xxx_enable_anc_delay(codec, true);
-		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, true);
-		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, true);
-		wcd9xxx_set_buck_mode(codec, BUCK_VREF_0P494V);
-		wcd9xxx_enable_buck(codec, clsh_d, true);
-		wcd9xxx_set_fclk_get_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
-
-		dev_dbg(codec->dev, "%s: Done\n", __func__);
-	} else {
-		wcd9xxx_set_fclk_put_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
-		wcd9xxx_enable_buck(codec, clsh_d, false);
-		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, false);
-		wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, false);
-		wcd9xxx_enable_clsh_block(codec, clsh_d, false);
-		wcd9xxx_chargepump_request(codec, false);
-	}
-}
-
-static void wcd9xxx_clsh_state_hph_st(struct snd_soc_codec *codec,
-		struct wcd9xxx_clsh_cdc_data *clsh_d,
-		u8 req_state, bool is_enable)
-{
-	pr_debug("%s: enter %s\n", __func__, is_enable ? "enable" : "disable");
-
-	if (is_enable)
-		dev_dbg(codec->dev, "%s: stub fallback to hph_st\n", __func__);
-	else
-		dev_dbg(codec->dev, "%s: stub fallback to hph_st\n", __func__);
-}
-
-static void wcd9xxx_clsh_state_lo(struct snd_soc_codec *codec,
-		struct wcd9xxx_clsh_cdc_data *clsh_d,
-		u8 req_state, bool is_enable)
-{
-	pr_debug("%s: enter %s, buck_mv %d\n", __func__,
-		 is_enable ? "enable" : "disable", clsh_d->buck_mv);
-
-	if (is_enable) {
-		wcd9xxx_set_buck_mode(codec, BUCK_VREF_1P8V);
-		wcd9xxx_enable_buck(codec, clsh_d, true);
-		wcd9xxx_set_fclk_get_ncp(codec, clsh_d, NCP_FCLK_LEVEL_5);
-
-		if (clsh_d->buck_mv == WCD9XXX_CDC_BUCK_MV_1P8) {
-			wcd9xxx_enable_buck(codec, clsh_d, false);
-			snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
-					    1 << 4, 1 << 4);
-			/* NCP settle time recommended by codec specification */
-			usleep_range(NCP_SETTLE_TIME_US,
-				     NCP_SETTLE_TIME_US + 10);
-		} else {
-			/* NCP settle time recommended by codec specification */
-			usleep_range(NCP_SETTLE_TIME_US,
-				     NCP_SETTLE_TIME_US + 10);
-			snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5,
-					    0x01, (0x01 & 0x03));
-			snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5,
-					    0xFC, (0xFC & 0xB));
-		}
-		snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_1, 0x04, 0x00);
-	} else {
-		dev_dbg(codec->dev, "%s: stub fallback to lineout\n", __func__);
-		wcd9xxx_set_fclk_put_ncp(codec, clsh_d, NCP_FCLK_LEVEL_5);
-		if (clsh_d->buck_mv != WCD9XXX_CDC_BUCK_MV_1P8)
-			wcd9xxx_enable_buck(codec, clsh_d, false);
-	}
-}
-
-static void wcd9xxx_clsh_state_err(struct snd_soc_codec *codec,
-		struct wcd9xxx_clsh_cdc_data *clsh_d,
-		u8 req_state, bool is_enable)
-{
-	char msg[128];
-
-	dev_dbg(codec->dev,
-		"%s Wrong request for class H state machine requested to %s %s",
-		__func__, is_enable ? "enable" : "disable",
-		state_to_str(req_state, msg, sizeof(msg)));
-	WARN_ON(1);
-}
-
-/*
- * Function: wcd9xxx_clsh_is_state_valid
- * Params: state
- * Description:
- * Provides information on valid states of Class H configuration
- */
-static int wcd9xxx_clsh_is_state_valid(u8 state)
-{
-	switch (state) {
-	case WCD9XXX_CLSH_STATE_IDLE:
-	case WCD9XXX_CLSH_STATE_EAR:
-	case WCD9XXX_CLSH_STATE_HPHL:
-	case WCD9XXX_CLSH_STATE_HPHR:
-	case WCD9XXX_CLSH_STATE_HPH_ST:
-	case WCD9XXX_CLSH_STATE_LO:
-	case WCD9XXX_CLSH_STATE_HPHL_EAR:
-	case WCD9XXX_CLSH_STATE_HPHR_EAR:
-	case WCD9XXX_CLSH_STATE_HPH_ST_EAR:
-	case WCD9XXX_CLSH_STATE_HPHL_LO:
-	case WCD9XXX_CLSH_STATE_HPHR_LO:
-	case WCD9XXX_CLSH_STATE_HPH_ST_LO:
-	case WCD9XXX_CLSH_STATE_EAR_LO:
-	case WCD9XXX_CLSH_STATE_HPHL_EAR_LO:
-	case WCD9XXX_CLSH_STATE_HPHR_EAR_LO:
-	case WCD9XXX_CLSH_STATE_HPH_ST_EAR_LO:
-		return 1;
-	default:
-		break;
-	}
-	return 0;
-}
-
-/*
- * Function: wcd9xxx_clsh_fsm
- * Params: codec, cdc_clsh_d, req_state, req_type, clsh_event
- * Description:
- * This function handles the PRE DAC and POST PA events for the different
- * devices and updates the class H configuration for the requested
- * combination of devices based on the validity of their states.
- * cdc_clsh_d holds the current class H state information.
- */
-void wcd9xxx_clsh_fsm(struct snd_soc_codec *codec,
-		struct wcd9xxx_clsh_cdc_data *cdc_clsh_d,
-		u8 req_state, bool req_type, u8 clsh_event)
-{
-	u8 old_state, new_state;
-	char msg0[128], msg1[128];
-
-	switch (clsh_event) {
-	case WCD9XXX_CLSH_EVENT_PRE_DAC:
-		/* PRE_DAC event should be used only for Enable */
-		BUG_ON(req_type != WCD9XXX_CLSH_REQ_ENABLE);
-
-		old_state = cdc_clsh_d->state;
-		new_state = old_state | req_state;
-
-		if (!wcd9xxx_clsh_is_state_valid(new_state)) {
-			dev_dbg(codec->dev,
-				"%s: classH not a valid new state: %s\n",
-				__func__,
-				state_to_str(new_state, msg0, sizeof(msg0)));
-			return;
-		}
-		if (new_state == old_state) {
-			dev_dbg(codec->dev,
-				"%s: classH already in requested state: %s\n",
-				__func__,
-				state_to_str(new_state, msg0, sizeof(msg0)));
-			return;
-		}
-		(*clsh_state_fp[new_state]) (codec, cdc_clsh_d, req_state,
-					     req_type);
-		cdc_clsh_d->state = new_state;
-		dev_dbg(codec->dev,
-			"%s: ClassH state transition from %s to %s\n",
-			__func__, state_to_str(old_state, msg0, sizeof(msg0)),
-			state_to_str(cdc_clsh_d->state, msg1, sizeof(msg1)));
-
-		break;
-	case WCD9XXX_CLSH_EVENT_POST_PA:
-		if (req_type == WCD9XXX_CLSH_REQ_DISABLE) {
-			old_state = cdc_clsh_d->state;
-			new_state = old_state & (~req_state);
-
-			if (new_state < NUM_CLSH_STATES) {
-				if (!wcd9xxx_clsh_is_state_valid(old_state)) {
-					dev_dbg(codec->dev,
-						"%s:Invalid old state:%s\n",
-						__func__,
-						state_to_str(old_state, msg0,
-						sizeof(msg0)));
-					return;
-				}
-				if (new_state == old_state) {
-					dev_dbg(codec->dev,
-					"%s: clsH already in old state: %s\n",
-					__func__,
-					state_to_str(new_state, msg0,
-					sizeof(msg0)));
-					return;
-				}
-				(*clsh_state_fp[old_state]) (codec, cdc_clsh_d,
-							     req_state,
-							     req_type);
-				cdc_clsh_d->state = new_state;
-				dev_dbg(codec->dev, "%s: ClassH state transition from %s to %s\n",
-					__func__, state_to_str(old_state, msg0,
-							       sizeof(msg0)),
-					state_to_str(cdc_clsh_d->state, msg1,
-						     sizeof(msg1)));
-
-			} else {
-				dev_dbg(codec->dev, "%s:wrong new state=0x%x\n",
-						__func__, new_state);
-			}
-		} else if (!(cdc_clsh_d->state & WCD9XXX_CLSH_STATE_LO)) {
-			wcd9xxx_clsh_enable_post_pa(codec, cdc_clsh_d);
-		}
-
-		break;
-	}
-
-}
-EXPORT_SYMBOL(wcd9xxx_clsh_fsm);
-
-void wcd9xxx_clsh_init(struct wcd9xxx_clsh_cdc_data *clsh,
-		       struct wcd9xxx_resmgr *resmgr)
-{
-	int i;
-
-	clsh->state = WCD9XXX_CLSH_STATE_IDLE;
-	clsh->resmgr = resmgr;
-
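-	/*
-	 * Default every state handler to the error handler, then install
-	 * the handlers for each supported state combination below.
-	 */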
-	for (i = 0; i < NUM_CLSH_STATES; i++)
-		clsh_state_fp[i] = wcd9xxx_clsh_state_err;
-
-	clsh_state_fp[WCD9XXX_CLSH_STATE_EAR] = wcd9xxx_clsh_state_ear;
-	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL] =
-						wcd9xxx_clsh_state_hph_l;
-	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR] =
-						wcd9xxx_clsh_state_hph_r;
-	clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST] =
-						wcd9xxx_clsh_state_hph_st;
-	clsh_state_fp[WCD9XXX_CLSH_STATE_LO] = wcd9xxx_clsh_state_lo;
-	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL_EAR] =
-						wcd9xxx_clsh_state_hph_ear;
-	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR_EAR] =
-						wcd9xxx_clsh_state_hph_ear;
-	clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST_EAR] =
-						wcd9xxx_clsh_state_hph_ear;
-	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL_LO] = wcd9xxx_clsh_state_hph_lo;
-	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR_LO] = wcd9xxx_clsh_state_hph_lo;
-	clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST_LO] =
-						wcd9xxx_clsh_state_hph_lo;
-	clsh_state_fp[WCD9XXX_CLSH_STATE_EAR_LO] = wcd9xxx_clsh_state_ear_lo;
-	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL_EAR_LO] =
-						wcd9xxx_clsh_state_hph_ear_lo;
-	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR_EAR_LO] =
-						wcd9xxx_clsh_state_hph_ear_lo;
-	clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST_EAR_LO] =
-						wcd9xxx_clsh_state_hph_ear_lo;
-
-}
-EXPORT_SYMBOL(wcd9xxx_clsh_init);
-
-MODULE_DESCRIPTION("WCD9XXX Common");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/wcd9xxx-common.h b/sound/soc/codecs/wcd9xxx-common.h
deleted file mode 100644
index 5c0c4a9..0000000
--- a/sound/soc/codecs/wcd9xxx-common.h
+++ /dev/null
@@ -1,286 +0,0 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef WCD9XXX_CODEC_COMMON
-
-#define WCD9XXX_CODEC_COMMON
-
-#include "wcd9xxx-resmgr.h"
-
-#define WCD9XXX_CLSH_REQ_ENABLE true
-#define WCD9XXX_CLSH_REQ_DISABLE false
-
-#define WCD9XXX_CLSH_EVENT_PRE_DAC 0x01
-#define WCD9XXX_CLSH_EVENT_POST_PA 0x02
-
-/* Basic states for the Class H state machine,
- * represented as a bit mask within a u8 data type:
- * bit 0: EAR mode
- * bit 1: HPH Left mode
- * bit 2: HPH Right mode
- * bit 3: Lineout mode
- * bit 4: Ultrasound mode
- */
-#define	WCD9XXX_CLSH_STATE_IDLE 0x00
-#define	WCD9XXX_CLSH_STATE_EAR (0x01 << 0)
-#define	WCD9XXX_CLSH_STATE_HPHL (0x01 << 1)
-#define	WCD9XXX_CLSH_STATE_HPHR (0x01 << 2)
-#define	WCD9XXX_CLSH_STATE_LO (0x01 << 3)
-#define NUM_CLSH_STATES (0x01 << 4)
-
-#define	WCD9XXX_CLSAB_STATE_IDLE  0x00
-#define WCD9XXX_CLSAB_STATE_HPHL (0x01 << 1)
-#define WCD9XXX_CLSAB_STATE_HPHR (0x01 << 2)
-
-#define WCD9XXX_CLSAB_REQ_ENABLE  true
-#define WCD9XXX_CLSAB_REQ_DISABLE false
-
-#define WCD9XXX_NON_UHQA_MODE	0
-
-#define WCD9XXX_DMIC_SAMPLE_RATE_DIV_2    0x0
-#define WCD9XXX_DMIC_SAMPLE_RATE_DIV_3    0x1
-#define WCD9XXX_DMIC_SAMPLE_RATE_DIV_4    0x2
-
-#define WCD9XXX_DMIC_B1_CTL_DIV_2 0x00
-#define WCD9XXX_DMIC_B1_CTL_DIV_3 0x22
-#define WCD9XXX_DMIC_B1_CTL_DIV_4 0x44
-
-#define WCD9XXX_DMIC_B2_CTL_DIV_2 0x00
-#define WCD9XXX_DMIC_B2_CTL_DIV_3 0x02
-#define WCD9XXX_DMIC_B2_CTL_DIV_4 0x04
-
-#define WCD9XXX_ANC_DMIC_X2_ON    0x1
-#define WCD9XXX_ANC_DMIC_X2_OFF   0x0
-
-/* Derived State: Bits 1 and 2 should be set for Headphone stereo */
-#define WCD9XXX_CLSH_STATE_HPH_ST (WCD9XXX_CLSH_STATE_HPHL | \
-						WCD9XXX_CLSH_STATE_HPHR)
-
-#define WCD9XXX_CLSH_STATE_HPHL_EAR (WCD9XXX_CLSH_STATE_HPHL | \
-						WCD9XXX_CLSH_STATE_EAR)
-#define WCD9XXX_CLSH_STATE_HPHR_EAR (WCD9XXX_CLSH_STATE_HPHR | \
-						WCD9XXX_CLSH_STATE_EAR)
-
-#define WCD9XXX_CLSH_STATE_HPH_ST_EAR (WCD9XXX_CLSH_STATE_HPH_ST | \
-						WCD9XXX_CLSH_STATE_EAR)
-
-#define WCD9XXX_CLSH_STATE_HPHL_LO (WCD9XXX_CLSH_STATE_HPHL | \
-						WCD9XXX_CLSH_STATE_LO)
-#define WCD9XXX_CLSH_STATE_HPHR_LO (WCD9XXX_CLSH_STATE_HPHR | \
-						WCD9XXX_CLSH_STATE_LO)
-
-#define WCD9XXX_CLSH_STATE_HPH_ST_LO (WCD9XXX_CLSH_STATE_HPH_ST | \
-						WCD9XXX_CLSH_STATE_LO)
-
-#define WCD9XXX_CLSH_STATE_EAR_LO (WCD9XXX_CLSH_STATE_EAR | \
-						WCD9XXX_CLSH_STATE_LO)
-
-#define WCD9XXX_CLSH_STATE_HPHL_EAR_LO (WCD9XXX_CLSH_STATE_HPHL | \
-						WCD9XXX_CLSH_STATE_EAR | \
-						WCD9XXX_CLSH_STATE_LO)
-#define WCD9XXX_CLSH_STATE_HPHR_EAR_LO (WCD9XXX_CLSH_STATE_HPHR | \
-						WCD9XXX_CLSH_STATE_EAR | \
-						WCD9XXX_CLSH_STATE_LO)
-#define WCD9XXX_CLSH_STATE_HPH_ST_EAR_LO (WCD9XXX_CLSH_STATE_HPH_ST | \
-						WCD9XXX_CLSH_STATE_EAR | \
-						WCD9XXX_CLSH_STATE_LO)
-
-struct wcd9xxx_reg_mask_val {
-	u16	reg;
-	u8	mask;
-	u8	val;
-};
-
-enum ncp_fclk_level {
-	NCP_FCLK_LEVEL_8,
-	NCP_FCLK_LEVEL_5,
-	NCP_FCLK_LEVEL_MAX,
-};
-
-/* Class H data that the codec driver will maintain */
-struct wcd9xxx_clsh_cdc_data {
-	u8 state;
-	int buck_mv;
-	bool is_dynamic_vdd_cp;
-	int clsh_users;
-	int buck_users;
-	int ncp_users[NCP_FCLK_LEVEL_MAX];
-	struct wcd9xxx_resmgr *resmgr;
-};
-
-struct wcd9xxx_anc_header {
-	u32 reserved[3];
-	u32 num_anc_slots;
-};
-
-enum wcd9xxx_buck_volt {
-	WCD9XXX_CDC_BUCK_UNSUPPORTED = 0,
-	WCD9XXX_CDC_BUCK_MV_1P8 = 1800000,
-	WCD9XXX_CDC_BUCK_MV_2P15 = 2150000,
-};
-
-struct mad_audio_header {
-	u32 reserved[3];
-	u32 num_reg_cfg;
-};
-
-struct mad_microphone_info {
-	uint8_t input_microphone;
-	uint8_t cycle_time;
-	uint8_t settle_time;
-	uint8_t padding;
-} __packed;
-
-struct mad_micbias_info {
-	uint8_t micbias;
-	uint8_t k_factor;
-	uint8_t external_bypass_capacitor;
-	uint8_t internal_biasing;
-	uint8_t cfilter;
-	uint8_t padding[3];
-} __packed;
-
-struct mad_rms_audio_beacon_info {
-	uint8_t rms_omit_samples;
-	uint8_t rms_comp_time;
-	uint8_t detection_mechanism;
-	uint8_t rms_diff_threshold;
-	uint8_t rms_threshold_lsb;
-	uint8_t rms_threshold_msb;
-	uint8_t padding[2];
-	uint8_t iir_coefficients[36];
-} __packed;
-
-struct mad_rms_ultrasound_info {
-	uint8_t rms_comp_time;
-	uint8_t detection_mechanism;
-	uint8_t rms_diff_threshold;
-	uint8_t rms_threshold_lsb;
-	uint8_t rms_threshold_msb;
-	uint8_t padding[3];
-	uint8_t iir_coefficients[36];
-} __packed;
-
-struct mad_audio_cal {
-	uint32_t version;
-	struct mad_microphone_info microphone_info;
-	struct mad_micbias_info micbias_info;
-	struct mad_rms_audio_beacon_info audio_info;
-	struct mad_rms_audio_beacon_info beacon_info;
-	struct mad_rms_ultrasound_info ultrasound_info;
-} __packed;
-
-extern void wcd9xxx_clsh_fsm(struct snd_soc_codec *codec,
-		struct wcd9xxx_clsh_cdc_data *cdc_clsh_d,
-		u8 req_state, bool req_type, u8 clsh_event);
-
-extern void wcd9xxx_enable_high_perf_mode(struct snd_soc_codec *codec,
-				struct wcd9xxx_clsh_cdc_data *clsh_d,
-				u8 uhqa_mode, u8 req_state, bool req_type);
-
-extern void wcd9xxx_clsh_init(struct wcd9xxx_clsh_cdc_data *clsh,
-			      struct wcd9xxx_resmgr *resmgr);
-
-extern void wcd9xxx_clsh_imped_config(struct snd_soc_codec *codec,
-				  int imped);
-
-enum wcd9xxx_codec_event {
-	WCD9XXX_CODEC_EVENT_CODEC_UP = 0,
-};
-
-struct wcd9xxx_register_save_node {
-	struct list_head lh;
-	u16 reg;
-	u16 value;
-};
-
-extern int wcd9xxx_soc_update_bits_push(struct snd_soc_codec *codec,
-					struct list_head *lh,
-					uint16_t reg, uint8_t mask,
-					uint8_t value, int delay);
-extern void wcd9xxx_restore_registers(struct snd_soc_codec *codec,
-				      struct list_head *lh);
-enum {
-	RESERVED = 0,
-	AANC_LPF_FF_FB = 1,
-	AANC_LPF_COEFF_MSB,
-	AANC_LPF_COEFF_LSB,
-	HW_MAD_AUDIO_ENABLE,
-	HW_MAD_ULTR_ENABLE,
-	HW_MAD_BEACON_ENABLE,
-	HW_MAD_AUDIO_SLEEP_TIME,
-	HW_MAD_ULTR_SLEEP_TIME,
-	HW_MAD_BEACON_SLEEP_TIME,
-	HW_MAD_TX_AUDIO_SWITCH_OFF,
-	HW_MAD_TX_ULTR_SWITCH_OFF,
-	HW_MAD_TX_BEACON_SWITCH_OFF,
-	MAD_AUDIO_INT_DEST_SELECT_REG,
-	MAD_ULT_INT_DEST_SELECT_REG,
-	MAD_BEACON_INT_DEST_SELECT_REG,
-	MAD_CLIP_INT_DEST_SELECT_REG,
-	MAD_VBAT_INT_DEST_SELECT_REG,
-	MAD_AUDIO_INT_MASK_REG,
-	MAD_ULT_INT_MASK_REG,
-	MAD_BEACON_INT_MASK_REG,
-	MAD_CLIP_INT_MASK_REG,
-	MAD_VBAT_INT_MASK_REG,
-	MAD_AUDIO_INT_STATUS_REG,
-	MAD_ULT_INT_STATUS_REG,
-	MAD_BEACON_INT_STATUS_REG,
-	MAD_CLIP_INT_STATUS_REG,
-	MAD_VBAT_INT_STATUS_REG,
-	MAD_AUDIO_INT_CLEAR_REG,
-	MAD_ULT_INT_CLEAR_REG,
-	MAD_BEACON_INT_CLEAR_REG,
-	MAD_CLIP_INT_CLEAR_REG,
-	MAD_VBAT_INT_CLEAR_REG,
-	SB_PGD_PORT_TX_WATERMARK_N,
-	SB_PGD_PORT_TX_ENABLE_N,
-	SB_PGD_PORT_RX_WATERMARK_N,
-	SB_PGD_PORT_RX_ENABLE_N,
-	SB_PGD_TX_PORTn_MULTI_CHNL_0,
-	SB_PGD_TX_PORTn_MULTI_CHNL_1,
-	SB_PGD_RX_PORTn_MULTI_CHNL_0,
-	SB_PGD_RX_PORTn_MULTI_CHNL_1,
-	AANC_FF_GAIN_ADAPTIVE,
-	AANC_FFGAIN_ADAPTIVE_EN,
-	AANC_GAIN_CONTROL,
-	SPKR_CLIP_PIPE_BANK_SEL,
-	SPKR_CLIPDET_VAL0,
-	SPKR_CLIPDET_VAL1,
-	SPKR_CLIPDET_VAL2,
-	SPKR_CLIPDET_VAL3,
-	SPKR_CLIPDET_VAL4,
-	SPKR_CLIPDET_VAL5,
-	SPKR_CLIPDET_VAL6,
-	SPKR_CLIPDET_VAL7,
-	VBAT_RELEASE_INT_DEST_SELECT_REG,
-	VBAT_RELEASE_INT_MASK_REG,
-	VBAT_RELEASE_INT_STATUS_REG,
-	VBAT_RELEASE_INT_CLEAR_REG,
-	MAD2_CLIP_INT_DEST_SELECT_REG,
-	MAD2_CLIP_INT_MASK_REG,
-	MAD2_CLIP_INT_STATUS_REG,
-	MAD2_CLIP_INT_CLEAR_REG,
-	SPKR2_CLIP_PIPE_BANK_SEL,
-	SPKR2_CLIPDET_VAL0,
-	SPKR2_CLIPDET_VAL1,
-	SPKR2_CLIPDET_VAL2,
-	SPKR2_CLIPDET_VAL3,
-	SPKR2_CLIPDET_VAL4,
-	SPKR2_CLIPDET_VAL5,
-	SPKR2_CLIPDET_VAL6,
-	SPKR2_CLIPDET_VAL7,
-	MAX_CFG_REGISTERS,
-};
-
-#endif
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
deleted file mode 100644
index 3754b57..0000000
--- a/sound/soc/codecs/wcd9xxx-mbhc.c
+++ /dev/null
@@ -1,5671 +0,0 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/printk.h>
-#include <linux/ratelimit.h>
-#include <linux/debugfs.h>
-#include <linux/list.h>
-#include <linux/mfd/wcd9xxx/core.h>
-#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
-#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
-#include <linux/mfd/wcd9xxx/wcd9320_registers.h>
-#include <linux/mfd/wcd9xxx/pdata.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/jack.h>
-#include <sound/tlv.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/pm_runtime.h>
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/input.h>
-#include "wcd9xxx-mbhc.h"
-#include "wcdcal-hwdep.h"
-#include "wcd9xxx-resmgr.h"
-#include "wcd9xxx-common.h"
-
-#define WCD9XXX_JACK_MASK (SND_JACK_HEADSET | SND_JACK_OC_HPHL | \
-			   SND_JACK_OC_HPHR | SND_JACK_LINEOUT | \
-			   SND_JACK_UNSUPPORTED | SND_JACK_MICROPHONE2 | \
-			   SND_JACK_MECHANICAL)
-#define WCD9XXX_JACK_BUTTON_MASK (SND_JACK_BTN_0 | SND_JACK_BTN_1 | \
-				  SND_JACK_BTN_2 | SND_JACK_BTN_3 | \
-				  SND_JACK_BTN_4 | SND_JACK_BTN_5)
-
-#define NUM_DCE_PLUG_DETECT 3
-#define NUM_DCE_PLUG_INS_DETECT 5
-#define NUM_ATTEMPTS_INSERT_DETECT 25
-#define NUM_ATTEMPTS_TO_REPORT 5
-
-#define FAKE_INS_LOW 10
-#define FAKE_INS_HIGH 80
-#define FAKE_INS_HIGH_NO_SWCH 150
-#define FAKE_REMOVAL_MIN_PERIOD_MS 50
-#define FAKE_INS_DELTA_SCALED_MV 300
-
-#define BUTTON_MIN 0x8000
-#define STATUS_REL_DETECTION 0x0C
-
-#define HS_DETECT_PLUG_TIME_MS (5 * 1000)
-#define ANC_HPH_DETECT_PLUG_TIME_MS (5 * 1000)
-#define HS_DETECT_PLUG_INERVAL_MS 100
-#define SWCH_REL_DEBOUNCE_TIME_MS 50
-#define SWCH_IRQ_DEBOUNCE_TIME_US 5000
-#define BTN_RELEASE_DEBOUNCE_TIME_MS 25
-
-#define GND_MIC_SWAP_THRESHOLD 2
-#define OCP_ATTEMPT 1
-
-#define FW_READ_ATTEMPTS 15
-#define FW_READ_TIMEOUT 4000000
-
-#define BUTTON_POLLING_SUPPORTED true
-
-#define MCLK_RATE_12288KHZ 12288000
-#define MCLK_RATE_9600KHZ 9600000
-
-#define DEFAULT_DCE_STA_WAIT 55
-#define DEFAULT_DCE_WAIT 60000
-#define DEFAULT_STA_WAIT 5000
-
-#define VDDIO_MICBIAS_MV 1800
-
-#define WCD9XXX_MICBIAS_PULLDOWN_SETTLE_US 5000
-
-#define WCD9XXX_HPHL_STATUS_READY_WAIT_US 1000
-#define WCD9XXX_MUX_SWITCH_READY_WAIT_MS 50
-#define WCD9XXX_MEAS_DELTA_MAX_MV 120
-#define WCD9XXX_MEAS_INVALD_RANGE_LOW_MV 20
-#define WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV 80
-
-/* Threshold in milliohm used for mono/stereo
- * plug classification
- */
-#define WCD9XXX_MONO_HS_DIFF_THR 20000000
-#define WCD9XXX_MONO_HS_MIN_THR 2000
-
-/*
- * Invalid voltage range for the detection
- * of plug type with current source
- */
-#define WCD9XXX_CS_MEAS_INVALD_RANGE_LOW_MV 160
-#define WCD9XXX_CS_MEAS_INVALD_RANGE_HIGH_MV 265
-
-/*
- * Threshold used to detect euro headset
- * with current source
- */
-#define WCD9XXX_CS_GM_SWAP_THRES_MIN_MV 10
-#define WCD9XXX_CS_GM_SWAP_THRES_MAX_MV 40
-
-#define WCD9XXX_MBHC_NSC_CS 9
-#define WCD9XXX_GM_SWAP_THRES_MIN_MV 150
-#define WCD9XXX_GM_SWAP_THRES_MAX_MV 650
-#define WCD9XXX_THRESHOLD_MIC_THRESHOLD 200
-
-#define WCD9XXX_USLEEP_RANGE_MARGIN_US 100
-
-/* RX_HPH_CNP_WG_TIME increases by 0.24ms */
-#define WCD9XXX_WG_TIME_FACTOR_US	240
-
-#define WCD9XXX_V_CS_HS_MAX 500
-#define WCD9XXX_V_CS_NO_MIC 5
-#define WCD9XXX_MB_MEAS_DELTA_MAX_MV 80
-#define WCD9XXX_CS_MEAS_DELTA_MAX_MV 12
-
-#define WCD9XXX_ZDET_ZONE_1 80000
-#define WCD9XXX_ZDET_ZONE_2 800000
-
-#define WCD9XXX_IS_IN_ZDET_ZONE_1(x) (x < WCD9XXX_ZDET_ZONE_1 ? 1 : 0)
-#define WCD9XXX_IS_IN_ZDET_ZONE_2(x) ((x > WCD9XXX_ZDET_ZONE_1 && \
-				x < WCD9XXX_ZDET_ZONE_2) ? 1 : 0)
-#define WCD9XXX_IS_IN_ZDET_ZONE_3(x) (x > WCD9XXX_ZDET_ZONE_2 ? 1 : 0)
-#define WCD9XXX_BOX_CAR_AVRG_MIN 1
-#define WCD9XXX_BOX_CAR_AVRG_MAX 10
-
-/*
- * Need to report LINEIN if H/L impedance
- * is larger than 5K ohm
- */
-#define WCD9XXX_LINEIN_THRESHOLD 5000000
-
-static int impedance_detect_en;
-module_param(impedance_detect_en, int, 0664);
-MODULE_PARM_DESC(impedance_detect_en, "enable/disable impedance detect");
-static unsigned int z_det_box_car_avg = 1;
-module_param(z_det_box_car_avg, int, 0664);
-MODULE_PARM_DESC(z_det_box_car_avg,
-		 "Number of samples for impedance detection");
-
-static bool detect_use_vddio_switch;
-
-struct wcd9xxx_mbhc_detect {
-	u16 dce;
-	u16 sta;
-	u16 hphl_status;
-	bool swap_gnd;
-	bool vddio;
-	bool hwvalue;
-	bool mic_bias;
-	/* internal purpose from here */
-	bool _above_no_mic;
-	bool _below_v_hs_max;
-	s16 _vdces;
-	enum wcd9xxx_mbhc_plug_type _type;
-};
-
-enum meas_type {
-	STA = 0,
-	DCE,
-};
-
-enum {
-	MBHC_USE_HPHL_TRIGGER = 1,
-	MBHC_USE_MB_TRIGGER = 2
-};
-
-/*
- * Flags to keep track of the PA and DAC state.
- * PA and DAC should be tracked separately as AUXPGA loopback requires
- * only PA to be turned on without DAC being on.
- */
-enum pa_dac_ack_flags {
-	WCD9XXX_HPHL_PA_OFF_ACK = 0,
-	WCD9XXX_HPHR_PA_OFF_ACK,
-	WCD9XXX_HPHL_DAC_OFF_ACK,
-	WCD9XXX_HPHR_DAC_OFF_ACK
-};
-
-enum wcd9xxx_current_v_idx {
-	WCD9XXX_CURRENT_V_INS_H,
-	WCD9XXX_CURRENT_V_INS_HU,
-	WCD9XXX_CURRENT_V_B1_H,
-	WCD9XXX_CURRENT_V_B1_HU,
-	WCD9XXX_CURRENT_V_BR_H,
-};
-
-static int wcd9xxx_detect_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
-				    uint32_t *zr);
-static s16 wcd9xxx_get_current_v(struct wcd9xxx_mbhc *mbhc,
-				 const enum wcd9xxx_current_v_idx idx);
-static void wcd9xxx_get_z(struct wcd9xxx_mbhc *mbhc, s16 *dce_z, s16 *sta_z,
-			  struct mbhc_micbias_regs *micb_regs,
-			  bool norel);
-
-static void wcd9xxx_mbhc_calc_thres(struct wcd9xxx_mbhc *mbhc);
-
-static u16 wcd9xxx_codec_v_sta_dce(struct wcd9xxx_mbhc *mbhc,
-				   enum meas_type dce, s16 vin_mv,
-				   bool cs_enable);
-
-static bool wcd9xxx_mbhc_polling(struct wcd9xxx_mbhc *mbhc)
-{
-	return snd_soc_read(mbhc->codec, WCD9XXX_A_CDC_MBHC_EN_CTL) & 0x1;
-}
-
-static void wcd9xxx_turn_onoff_override(struct wcd9xxx_mbhc *mbhc, bool on)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
-			    0x04, on ? 0x04 : 0x00);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_pause_hs_polling(struct wcd9xxx_mbhc *mbhc)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	pr_debug("%s: enter\n", __func__);
-	if (!mbhc->polling_active) {
-		pr_debug("polling not active, nothing to pause\n");
-		return;
-	}
-
-	/* Soft reset MBHC block */
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x8);
-	pr_debug("%s: leave\n", __func__);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_start_hs_polling(struct wcd9xxx_mbhc *mbhc)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-	int mbhc_state = mbhc->mbhc_state;
-
-	pr_debug("%s: enter\n", __func__);
-	if (!mbhc->polling_active) {
-		pr_debug("Polling is not active, do not start polling\n");
-		return;
-	}
-
-	/*
-	 * setup internal micbias if codec uses internal micbias for
-	 * headset detection
-	 */
-	if (mbhc->mbhc_cfg->use_int_rbias) {
-		if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias)
-			mbhc->mbhc_cb->setup_int_rbias(codec, true);
-		else
-			pr_err("%s: internal bias requested but codec did not provide callback\n",
-				__func__);
-	}
-
-	snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x04);
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
-		mbhc->mbhc_cb->enable_mux_bias_block(codec);
-	else
-		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
-				    0x80, 0x80);
-
-	if (!mbhc->no_mic_headset_override &&
-	    mbhc_state == MBHC_STATE_POTENTIAL) {
-		pr_debug("%s recovering MBHC state machine\n", __func__);
-		mbhc->mbhc_state = MBHC_STATE_POTENTIAL_RECOVERY;
-		/* set to max button press threshold */
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL, 0x7F);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL, 0xFF);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL, 0x7F);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL, 0xFF);
-		/* set to max */
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL, 0x7F);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL, 0xFF);
-	}
-
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x1);
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x0);
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x1);
-	pr_debug("%s: leave\n", __func__);
-}
-
-static int __wcd9xxx_resmgr_get_k_val(struct wcd9xxx_mbhc *mbhc,
-		unsigned int cfilt_mv)
-{
-	return wcd9xxx_resmgr_get_k_val(mbhc->resmgr, cfilt_mv);
-}
-
-/*
- * called under codec_resource_lock acquisition
- * return old status
- */
-static bool __wcd9xxx_switch_micbias(struct wcd9xxx_mbhc *mbhc,
-				     int vddio_switch, bool restartpolling,
-				     bool checkpolling)
-{
-	bool ret;
-	int cfilt_k_val;
-	bool override;
-	struct snd_soc_codec *codec;
-	struct mbhc_internal_cal_data *d = &mbhc->mbhc_data;
-
-	codec = mbhc->codec;
-
-	if (mbhc->micbias_enable) {
-		pr_debug("%s: micbias is already on\n", __func__);
-		ret = mbhc->mbhc_micbias_switched;
-		return ret;
-	}
-
-	ret = mbhc->mbhc_micbias_switched;
-	if (vddio_switch && !mbhc->mbhc_micbias_switched &&
-	    (!checkpolling || mbhc->polling_active)) {
-		if (restartpolling)
-			wcd9xxx_pause_hs_polling(mbhc);
-		override = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL) &
-			   0x04;
-		if (!override)
-			wcd9xxx_turn_onoff_override(mbhc, true);
-
-		snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL,
-				    0x10, 0x00);
-		snd_soc_update_bits(codec, WCD9XXX_A_LDO_H_MODE_1,
-				    0x20, 0x00);
-		/* Adjust threshold if Mic Bias voltage changes */
-		if (d->micb_mv != VDDIO_MICBIAS_MV) {
-			cfilt_k_val = __wcd9xxx_resmgr_get_k_val(mbhc,
-							      VDDIO_MICBIAS_MV);
-			usleep_range(10000, 10100);
-			snd_soc_update_bits(codec,
-					mbhc->mbhc_bias_regs.cfilt_val,
-					0xFC, (cfilt_k_val << 2));
-			usleep_range(10000, 10100);
-			/* Thresholds for insertion/removal */
-			snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL,
-				      d->v_ins_hu[MBHC_V_IDX_VDDIO] & 0xFF);
-			snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL,
-				      (d->v_ins_hu[MBHC_V_IDX_VDDIO] >> 8) &
-				      0xFF);
-
-			if (mbhc->mbhc_state != MBHC_STATE_POTENTIAL_RECOVERY) {
-				/* Thresholds for button press */
-				snd_soc_write(codec,
-					WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL,
-					d->v_b1_hu[MBHC_V_IDX_VDDIO] & 0xFF);
-				snd_soc_write(codec,
-					WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL,
-					(d->v_b1_hu[MBHC_V_IDX_VDDIO] >> 8) &
-					0xFF);
-				snd_soc_write(codec,
-					WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL,
-					d->v_b1_h[MBHC_V_IDX_VDDIO] & 0xFF);
-				snd_soc_write(codec,
-					WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL,
-					(d->v_b1_h[MBHC_V_IDX_VDDIO] >> 8) &
-					0xFF);
-				/* Thresholds for button release */
-				snd_soc_write(codec,
-					WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL,
-					d->v_brh[MBHC_V_IDX_VDDIO] & 0xFF);
-				snd_soc_write(codec,
-					WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL,
-					(d->v_brh[MBHC_V_IDX_VDDIO] >> 8) &
-					0xFF);
-			}
-			pr_debug("%s: Programmed MBHC thresholds to VDDIO\n",
-				 __func__);
-		}
-
-		/* Enable MIC BIAS Switch to VDDIO */
-		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg,
-				    0x80, 0x80);
-		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg,
-				    0x10, 0x00);
-		if (!override)
-			wcd9xxx_turn_onoff_override(mbhc, false);
-		if (restartpolling)
-			wcd9xxx_start_hs_polling(mbhc);
-
-		mbhc->mbhc_micbias_switched = true;
-		pr_debug("%s: VDDIO switch enabled\n", __func__);
-	} else if (!vddio_switch && mbhc->mbhc_micbias_switched) {
-		if ((!checkpolling || mbhc->polling_active) &&
-		    restartpolling)
-			wcd9xxx_pause_hs_polling(mbhc);
-
-		snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL,
-				    0x10, 0x10);
-		snd_soc_update_bits(codec, WCD9XXX_A_LDO_H_MODE_1,
-				    0x20, 0x20);
-		/* Reprogram thresholds */
-		if (d->micb_mv != VDDIO_MICBIAS_MV) {
-			cfilt_k_val =
-			    __wcd9xxx_resmgr_get_k_val(mbhc,
-						     d->micb_mv);
-			snd_soc_update_bits(codec,
-					mbhc->mbhc_bias_regs.cfilt_val,
-					0xFC, (cfilt_k_val << 2));
-			usleep_range(10000, 10100);
-			/* Revert thresholds for insertion/removal */
-			snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL,
-					d->v_ins_hu[MBHC_V_IDX_CFILT] & 0xFF);
-			snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL,
-					(d->v_ins_hu[MBHC_V_IDX_CFILT] >> 8) &
-					0xFF);
-			if (mbhc->mbhc_state != MBHC_STATE_POTENTIAL_RECOVERY) {
-				/* Revert thresholds for button press */
-				snd_soc_write(codec,
-					WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL,
-					d->v_b1_hu[MBHC_V_IDX_CFILT] & 0xFF);
-				snd_soc_write(codec,
-					WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL,
-					(d->v_b1_hu[MBHC_V_IDX_CFILT] >> 8) &
-					0xFF);
-				snd_soc_write(codec,
-					WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL,
-					d->v_b1_h[MBHC_V_IDX_CFILT] & 0xFF);
-				snd_soc_write(codec,
-					WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL,
-					(d->v_b1_h[MBHC_V_IDX_CFILT] >> 8) &
-					0xFF);
-				/* Revert thresholds for button release */
-				snd_soc_write(codec,
-					WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL,
-					d->v_brh[MBHC_V_IDX_CFILT] & 0xFF);
-				snd_soc_write(codec,
-					WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL,
-					(d->v_brh[MBHC_V_IDX_CFILT] >> 8) &
-					0xFF);
-			}
-			pr_debug("%s: Programmed MBHC thresholds to MICBIAS\n",
-					__func__);
-		}
-
-		/* Disable MIC BIAS Switch to VDDIO */
-		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x80,
-				    0x00);
-		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x10,
-				    0x00);
-
-		if ((!checkpolling || mbhc->polling_active) && restartpolling)
-			wcd9xxx_start_hs_polling(mbhc);
-
-		mbhc->mbhc_micbias_switched = false;
-		pr_debug("%s: VDDIO switch disabled\n", __func__);
-	}
-
-	return ret;
-}
-
-static void wcd9xxx_switch_micbias(struct wcd9xxx_mbhc *mbhc, int vddio_switch)
-{
-	__wcd9xxx_switch_micbias(mbhc, vddio_switch, true, true);
-}
-
-static s16 wcd9xxx_get_current_v(struct wcd9xxx_mbhc *mbhc,
-				 const enum wcd9xxx_current_v_idx idx)
-{
-	enum mbhc_v_index vidx;
-	s16 ret = -EINVAL;
-
-	if ((mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV) &&
-	    mbhc->mbhc_micbias_switched)
-		vidx = MBHC_V_IDX_VDDIO;
-	else
-		vidx = MBHC_V_IDX_CFILT;
-
-	switch (idx) {
-	case WCD9XXX_CURRENT_V_INS_H:
-		ret = (s16)mbhc->mbhc_data.v_ins_h[vidx];
-		break;
-	case WCD9XXX_CURRENT_V_INS_HU:
-		ret = (s16)mbhc->mbhc_data.v_ins_hu[vidx];
-		break;
-	case WCD9XXX_CURRENT_V_B1_H:
-		ret = (s16)mbhc->mbhc_data.v_b1_h[vidx];
-		break;
-	case WCD9XXX_CURRENT_V_B1_HU:
-		ret = (s16)mbhc->mbhc_data.v_b1_hu[vidx];
-		break;
-	case WCD9XXX_CURRENT_V_BR_H:
-		ret = (s16)mbhc->mbhc_data.v_brh[vidx];
-		break;
-	}
-
-	return ret;
-}
-
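-/*
- * Return a pointer to the requested member of the button detection
- * calibration data. The switch intentionally falls through so that each
- * case accumulates the size of the members preceding the one requested.
- */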
-void *wcd9xxx_mbhc_cal_btn_det_mp(
-			    const struct wcd9xxx_mbhc_btn_detect_cfg *btn_det,
-			    const enum wcd9xxx_mbhc_btn_det_mem mem)
-{
-	void *ret = (void *)&btn_det->_v_btn_low;
-
-	switch (mem) {
-	case MBHC_BTN_DET_GAIN:
-		ret += sizeof(btn_det->_n_cic);
-		/* fallthrough */
-	case MBHC_BTN_DET_N_CIC:
-		ret += sizeof(btn_det->_n_ready);
-		/* fallthrough */
-	case MBHC_BTN_DET_N_READY:
-		ret += sizeof(btn_det->_v_btn_high[0]) * btn_det->num_btn;
-		/* fallthrough */
-	case MBHC_BTN_DET_V_BTN_HIGH:
-		ret += sizeof(btn_det->_v_btn_low[0]) * btn_det->num_btn;
-		/* fallthrough */
-	case MBHC_BTN_DET_V_BTN_LOW:
-		/* do nothing */
-		break;
-	default:
-		ret = NULL;
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL(wcd9xxx_mbhc_cal_btn_det_mp);
-
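-/*
- * Write the current insertion and button press/release thresholds
- * (for the active micbias/VDDIO source) into the MBHC volt registers.
- */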
-static void wcd9xxx_calibrate_hs_polling(struct wcd9xxx_mbhc *mbhc)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-	const s16 v_ins_hu = wcd9xxx_get_current_v(mbhc,
-						   WCD9XXX_CURRENT_V_INS_HU);
-	const s16 v_b1_hu = wcd9xxx_get_current_v(mbhc,
-						  WCD9XXX_CURRENT_V_B1_HU);
-	const s16 v_b1_h = wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_H);
-	const s16 v_brh = wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_BR_H);
-
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL, v_ins_hu & 0xFF);
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL,
-		      (v_ins_hu >> 8) & 0xFF);
-
-	if (mbhc->mbhc_state != MBHC_STATE_POTENTIAL_RECOVERY) {
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL, v_b1_hu &
-				0xFF);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL,
-				(v_b1_hu >> 8) & 0xFF);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL, v_b1_h &
-				0xFF);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL,
-				(v_b1_h >> 8) & 0xFF);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL, v_brh &
-				0xFF);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL,
-				(v_brh >> 8) & 0xFF);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B11_CTL,
-				mbhc->mbhc_data.v_brl & 0xFF);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B12_CTL,
-				(mbhc->mbhc_data.v_brl >> 8) & 0xFF);
-	}
-}
-
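-/*
- * Switch the CFILT between fast and slow mode, pausing and restarting
- * polling around the register update when polling is active.
- */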
-static void wcd9xxx_codec_switch_cfilt_mode(struct wcd9xxx_mbhc *mbhc,
-					    bool fast)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-	struct wcd9xxx_cfilt_mode cfilt_mode;
-
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->switch_cfilt_mode) {
-		cfilt_mode = mbhc->mbhc_cb->switch_cfilt_mode(mbhc, fast);
-	} else {
-		if (fast)
-			cfilt_mode.reg_mode_val = WCD9XXX_CFILT_FAST_MODE;
-		else
-			cfilt_mode.reg_mode_val = WCD9XXX_CFILT_SLOW_MODE;
-
-		cfilt_mode.reg_mask = 0x40;
-		cfilt_mode.cur_mode_val =
-		    snd_soc_read(codec, mbhc->mbhc_bias_regs.cfilt_ctl) & 0x40;
-	}
-
-	if (cfilt_mode.cur_mode_val
-			!= cfilt_mode.reg_mode_val) {
-		if (mbhc->polling_active && wcd9xxx_mbhc_polling(mbhc))
-			wcd9xxx_pause_hs_polling(mbhc);
-		snd_soc_update_bits(codec,
-				    mbhc->mbhc_bias_regs.cfilt_ctl,
-					cfilt_mode.reg_mask,
-					cfilt_mode.reg_mode_val);
-		if (mbhc->polling_active && wcd9xxx_mbhc_polling(mbhc))
-			wcd9xxx_start_hs_polling(mbhc);
-		pr_debug("%s: CFILT mode change (%x to %x)\n", __func__,
-			cfilt_mode.cur_mode_val,
-			cfilt_mode.reg_mode_val);
-	} else {
-		pr_debug("%s: CFILT Value is already %x\n",
-			 __func__, cfilt_mode.cur_mode_val);
-	}
-}
-
-static void wcd9xxx_jack_report(struct wcd9xxx_mbhc *mbhc,
-				struct snd_soc_jack *jack, int status, int mask)
-{
-	if (jack == &mbhc->headset_jack) {
-		wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
-						WCD9XXX_COND_HPH_MIC,
-						status & SND_JACK_MICROPHONE);
-		wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
-						WCD9XXX_COND_HPH,
-						status & SND_JACK_HEADPHONE);
-	}
-
-	snd_soc_jack_report(jack, status, mask);
-}
-
-static void __hphocp_off_report(struct wcd9xxx_mbhc *mbhc, u32 jack_status,
-				int irq)
-{
-	struct snd_soc_codec *codec;
-
-	pr_debug("%s: clear ocp status %x\n", __func__, jack_status);
-	codec = mbhc->codec;
-	if (mbhc->hph_status & jack_status) {
-		mbhc->hph_status &= ~jack_status;
-		wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
-				    mbhc->hph_status, WCD9XXX_JACK_MASK);
-		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
-				    0x00);
-		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
-				    0x10);
-		/*
-		 * reset retry counter as PA is turned off signifying
-		 * start of new OCP detection session
-		 */
-		if (mbhc->intr_ids->hph_left_ocp)
-			mbhc->hphlocp_cnt = 0;
-		else
-			mbhc->hphrocp_cnt = 0;
-		wcd9xxx_enable_irq(mbhc->resmgr->core_res, irq);
-	}
-}
-
-static void hphrocp_off_report(struct wcd9xxx_mbhc *mbhc, u32 jack_status)
-{
-	__hphocp_off_report(mbhc, SND_JACK_OC_HPHR,
-			    mbhc->intr_ids->hph_right_ocp);
-}
-
-static void hphlocp_off_report(struct wcd9xxx_mbhc *mbhc, u32 jack_status)
-{
-	__hphocp_off_report(mbhc, SND_JACK_OC_HPHL,
-			    mbhc->intr_ids->hph_left_ocp);
-}
-
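-/*
- * Resolve the register addresses and CFILT selection for the configured
- * micbias (primary or ANC MIC) and, for the primary MIC, record the
- * corresponding micbias voltage.
- */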
-static void wcd9xxx_get_mbhc_micbias_regs(struct wcd9xxx_mbhc *mbhc,
-				enum wcd9xxx_mbhc_micbias_type mb_type)
-{
-	unsigned int cfilt;
-	struct wcd9xxx_micbias_setting *micbias_pdata =
-		mbhc->resmgr->micbias_pdata;
-	struct mbhc_micbias_regs *micbias_regs;
-	enum wcd9xxx_micbias_num mb_num;
-
-	if (mb_type == MBHC_ANC_MIC_MB) {
-		micbias_regs = &mbhc->mbhc_anc_bias_regs;
-		mb_num = mbhc->mbhc_cfg->anc_micbias;
-	} else {
-		micbias_regs = &mbhc->mbhc_bias_regs;
-		mb_num = mbhc->mbhc_cfg->micbias;
-	}
-
-	switch (mb_num) {
-	case MBHC_MICBIAS1:
-		cfilt = micbias_pdata->bias1_cfilt_sel;
-		micbias_regs->mbhc_reg = WCD9XXX_A_MICB_1_MBHC;
-		micbias_regs->int_rbias = WCD9XXX_A_MICB_1_INT_RBIAS;
-		micbias_regs->ctl_reg = WCD9XXX_A_MICB_1_CTL;
-		break;
-	case MBHC_MICBIAS2:
-		cfilt = micbias_pdata->bias2_cfilt_sel;
-		micbias_regs->mbhc_reg = WCD9XXX_A_MICB_2_MBHC;
-		micbias_regs->int_rbias = WCD9XXX_A_MICB_2_INT_RBIAS;
-		micbias_regs->ctl_reg = WCD9XXX_A_MICB_2_CTL;
-		break;
-	case MBHC_MICBIAS3:
-		cfilt = micbias_pdata->bias3_cfilt_sel;
-		micbias_regs->mbhc_reg = WCD9XXX_A_MICB_3_MBHC;
-		micbias_regs->int_rbias = WCD9XXX_A_MICB_3_INT_RBIAS;
-		micbias_regs->ctl_reg = WCD9XXX_A_MICB_3_CTL;
-		break;
-	case MBHC_MICBIAS4:
-		cfilt = micbias_pdata->bias4_cfilt_sel;
-		micbias_regs->mbhc_reg = mbhc->resmgr->reg_addr->micb_4_mbhc;
-		micbias_regs->int_rbias =
-		    mbhc->resmgr->reg_addr->micb_4_int_rbias;
-		micbias_regs->ctl_reg = mbhc->resmgr->reg_addr->micb_4_ctl;
-		break;
-	default:
-		/* Should never reach here */
-		pr_err("%s: Invalid MIC BIAS for MBHC\n", __func__);
-		return;
-	}
-
-	micbias_regs->cfilt_sel = cfilt;
-
-	switch (cfilt) {
-	case WCD9XXX_CFILT1_SEL:
-		micbias_regs->cfilt_val = WCD9XXX_A_MICB_CFILT_1_VAL;
-		micbias_regs->cfilt_ctl = WCD9XXX_A_MICB_CFILT_1_CTL;
-		break;
-	case WCD9XXX_CFILT2_SEL:
-		micbias_regs->cfilt_val = WCD9XXX_A_MICB_CFILT_2_VAL;
-		micbias_regs->cfilt_ctl = WCD9XXX_A_MICB_CFILT_2_CTL;
-		break;
-	case WCD9XXX_CFILT3_SEL:
-		micbias_regs->cfilt_val = WCD9XXX_A_MICB_CFILT_3_VAL;
-		micbias_regs->cfilt_ctl = WCD9XXX_A_MICB_CFILT_3_CTL;
-		break;
-	}
-
-	if (mb_type == MBHC_PRIMARY_MIC_MB) {
-		switch (cfilt) {
-		case WCD9XXX_CFILT1_SEL:
-			mbhc->mbhc_data.micb_mv = micbias_pdata->cfilt1_mv;
-			break;
-		case WCD9XXX_CFILT2_SEL:
-			mbhc->mbhc_data.micb_mv = micbias_pdata->cfilt2_mv;
-			break;
-		case WCD9XXX_CFILT3_SEL:
-			mbhc->mbhc_data.micb_mv = micbias_pdata->cfilt3_mv;
-			break;
-		}
-	}
-
-}
-
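-/*
- * Re-enable any HPH DAC/PA blocks that MBHC turned off (tracked via the
- * *_OFF_ACK flags) and wait one wave-gen time for the PA to ramp up.
- */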
-static void wcd9xxx_clr_and_turnon_hph_padac(struct wcd9xxx_mbhc *mbhc)
-{
-	bool pa_turned_on = false;
-	struct snd_soc_codec *codec = mbhc->codec;
-	u8 wg_time;
-
-	wg_time = snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_WG_TIME);
-	wg_time += 1;
-
-	if (test_and_clear_bit(WCD9XXX_HPHR_DAC_OFF_ACK,
-			       &mbhc->hph_pa_dac_state)) {
-		pr_debug("%s: HPHR clear flag and enable DAC\n", __func__);
-		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_R_DAC_CTL,
-				    0xC0, 0xC0);
-	}
-	if (test_and_clear_bit(WCD9XXX_HPHL_DAC_OFF_ACK,
-				&mbhc->hph_pa_dac_state)) {
-		pr_debug("%s: HPHL clear flag and enable DAC\n", __func__);
-		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_L_DAC_CTL,
-				    0x80, 0x80);
-	}
-
-	if (test_and_clear_bit(WCD9XXX_HPHR_PA_OFF_ACK,
-			       &mbhc->hph_pa_dac_state)) {
-		pr_debug("%s: HPHR clear flag and enable PA\n", __func__);
-		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CNP_EN, 0x10,
-				    1 << 4);
-		pa_turned_on = true;
-	}
-	if (test_and_clear_bit(WCD9XXX_HPHL_PA_OFF_ACK,
-			       &mbhc->hph_pa_dac_state)) {
-		pr_debug("%s: HPHL clear flag and enable PA\n", __func__);
-		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CNP_EN, 0x20, 1
-				    << 5);
-		pa_turned_on = true;
-	}
-
-	if (pa_turned_on) {
-		pr_debug("%s: PA was turned on by MBHC and not by DAPM\n",
-			 __func__);
-		usleep_range(wg_time * 1000, wg_time * 1000 + 50);
-	}
-}
-
-static int wcd9xxx_cancel_btn_work(struct wcd9xxx_mbhc *mbhc)
-{
-	int r;
-
-	r = cancel_delayed_work_sync(&mbhc->mbhc_btn_dwork);
-	if (r)
-		/* if the scheduled mbhc.mbhc_btn_dwork is canceled from here,
-		 * we have to unlock from here instead of from btn_work
-		 */
-		wcd9xxx_unlock_sleep(mbhc->resmgr->core_res);
-	return r;
-}
-
-static bool wcd9xxx_is_hph_dac_on(struct snd_soc_codec *codec, int left)
-{
-	u8 hph_reg_val = 0;
-
-	if (left)
-		hph_reg_val = snd_soc_read(codec, WCD9XXX_A_RX_HPH_L_DAC_CTL);
-	else
-		hph_reg_val = snd_soc_read(codec, WCD9XXX_A_RX_HPH_R_DAC_CTL);
-
-	return (hph_reg_val & 0xC0) ? true : false;
-}
-
-static bool wcd9xxx_is_hph_pa_on(struct snd_soc_codec *codec)
-{
-	u8 hph_reg_val = 0;
-
-	hph_reg_val = snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_EN);
-
-	return (hph_reg_val & 0x30) ? true : false;
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_set_and_turnoff_hph_padac(struct wcd9xxx_mbhc *mbhc)
-{
-	u8 wg_time;
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	wg_time = snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_WG_TIME);
-	wg_time += 1;
-
-	/* If headphone PA is on, check if userspace receives
-	 * removal event to sync-up PA's state
-	 */
-	if (wcd9xxx_is_hph_pa_on(codec)) {
-		pr_debug("%s PA is on, setting PA_OFF_ACK\n", __func__);
-		set_bit(WCD9XXX_HPHL_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
-		set_bit(WCD9XXX_HPHR_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
-	} else {
-		pr_debug("%s PA is off\n", __func__);
-	}
-
-	if (wcd9xxx_is_hph_dac_on(codec, 1))
-		set_bit(WCD9XXX_HPHL_DAC_OFF_ACK, &mbhc->hph_pa_dac_state);
-	if (wcd9xxx_is_hph_dac_on(codec, 0))
-		set_bit(WCD9XXX_HPHR_DAC_OFF_ACK, &mbhc->hph_pa_dac_state);
-
-	snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CNP_EN, 0x30, 0x00);
-	snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_L_DAC_CTL, 0x80, 0x00);
-	snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_R_DAC_CTL, 0xC0, 0x00);
-	usleep_range(wg_time * 1000, wg_time * 1000 + 50);
-}
-
-static void wcd9xxx_insert_detect_setup(struct wcd9xxx_mbhc *mbhc, bool ins)
-{
-	if (!mbhc->mbhc_cfg->insert_detect)
-		return;
-	pr_debug("%s: Setting up %s detection\n", __func__,
-		 ins ? "insert" : "removal");
-	/* Disable detection to avoid glitch */
-	snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MBHC_INSERT_DETECT, 1, 0);
-	if (mbhc->mbhc_cfg->gpio_level_insert)
-		snd_soc_write(mbhc->codec, WCD9XXX_A_MBHC_INSERT_DETECT,
-			      (0x68 | (ins ? (1 << 1) : 0)));
-	else
-		snd_soc_write(mbhc->codec, WCD9XXX_A_MBHC_INSERT_DETECT,
-			      (0x6C | (ins ? (1 << 1) : 0)));
-	/* Re-enable detection */
-	snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MBHC_INSERT_DETECT, 1, 1);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_report_plug(struct wcd9xxx_mbhc *mbhc, int insertion,
-				enum snd_jack_types jack_type)
-{
-	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
-	pr_debug("%s: enter insertion %d hph_status %x\n",
-		 __func__, insertion, mbhc->hph_status);
-	if (!insertion) {
-		/* Report removal */
-		mbhc->hph_status &= ~jack_type;
-		/*
-		 * cancel possibly scheduled btn work and
-		 * report release if we reported button press
-		 */
-		if (wcd9xxx_cancel_btn_work(mbhc))
-			pr_debug("%s: button press is canceled\n", __func__);
-		else if (mbhc->buttons_pressed) {
-			pr_debug("%s: release of button press%d\n",
-				 __func__, jack_type);
-			wcd9xxx_jack_report(mbhc, &mbhc->button_jack, 0,
-					    mbhc->buttons_pressed);
-			mbhc->buttons_pressed &=
-				~WCD9XXX_JACK_BUTTON_MASK;
-		}
-
-		if (mbhc->micbias_enable && mbhc->micbias_enable_cb) {
-			pr_debug("%s: Disabling micbias\n", __func__);
-			mbhc->micbias_enable = false;
-			mbhc->micbias_enable_cb(mbhc->codec, false,
-						mbhc->mbhc_cfg->micbias);
-		}
-		mbhc->zl = mbhc->zr = 0;
-		mbhc->hph_type = MBHC_HPH_NONE;
-		pr_debug("%s: Reporting removal %d(%x)\n", __func__,
-			 jack_type, mbhc->hph_status);
-		wcd9xxx_jack_report(mbhc, &mbhc->headset_jack, mbhc->hph_status,
-				    WCD9XXX_JACK_MASK);
-		wcd9xxx_set_and_turnoff_hph_padac(mbhc);
-		hphrocp_off_report(mbhc, SND_JACK_OC_HPHR);
-		hphlocp_off_report(mbhc, SND_JACK_OC_HPHL);
-		mbhc->current_plug = PLUG_TYPE_NONE;
-		mbhc->polling_active = false;
-		if (mbhc->mbhc_cb && mbhc->mbhc_cb->hph_auto_pulldown_ctrl)
-			mbhc->mbhc_cb->hph_auto_pulldown_ctrl(mbhc->codec,
-								false);
-	} else {
-		/*
-		 * Report removal of current jack type.
-		 * Headphone to headset shouldn't report headphone
-		 * removal.
-		 */
-		if (mbhc->mbhc_cfg->detect_extn_cable &&
-		    !(mbhc->current_plug == PLUG_TYPE_HEADPHONE &&
-		      jack_type == SND_JACK_HEADSET) &&
-		    (mbhc->hph_status && mbhc->hph_status != jack_type)) {
-			if (mbhc->micbias_enable && mbhc->micbias_enable_cb &&
-			    mbhc->hph_status == SND_JACK_HEADSET) {
-				pr_debug("%s: Disabling micbias\n", __func__);
-				mbhc->micbias_enable = false;
-				mbhc->micbias_enable_cb(mbhc->codec, false,
-						mbhc->mbhc_cfg->micbias);
-			}
-
-			pr_debug("%s: Reporting removal (%x)\n",
-				 __func__, mbhc->hph_status);
-			mbhc->zl = mbhc->zr = 0;
-			wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
-					    0, WCD9XXX_JACK_MASK);
-			mbhc->hph_status &= ~(SND_JACK_HEADSET |
-						SND_JACK_LINEOUT |
-						SND_JACK_ANC_HEADPHONE |
-						SND_JACK_UNSUPPORTED);
-			if (mbhc->mbhc_cb &&
-				 mbhc->mbhc_cb->hph_auto_pulldown_ctrl)
-				mbhc->mbhc_cb->hph_auto_pulldown_ctrl(
-								mbhc->codec,
-								false);
-		}
-
-		/* Report insertion */
-		if (jack_type == SND_JACK_HEADPHONE) {
-			mbhc->current_plug = PLUG_TYPE_HEADPHONE;
-		} else if (jack_type == SND_JACK_UNSUPPORTED) {
-			mbhc->current_plug = PLUG_TYPE_GND_MIC_SWAP;
-		} else if (jack_type == SND_JACK_HEADSET) {
-			mbhc->polling_active = BUTTON_POLLING_SUPPORTED;
-			mbhc->current_plug = PLUG_TYPE_HEADSET;
-			mbhc->update_z = true;
-		} else if (jack_type == SND_JACK_LINEOUT) {
-			mbhc->current_plug = PLUG_TYPE_HIGH_HPH;
-		} else if (jack_type == SND_JACK_ANC_HEADPHONE) {
-			mbhc->polling_active = BUTTON_POLLING_SUPPORTED;
-			mbhc->current_plug = PLUG_TYPE_ANC_HEADPHONE;
-		}
-
-		if (mbhc->impedance_detect && impedance_detect_en) {
-			wcd9xxx_detect_impedance(mbhc,
-					&mbhc->zl, &mbhc->zr);
-			if ((mbhc->zl > WCD9XXX_LINEIN_THRESHOLD) &&
-				(mbhc->zr > WCD9XXX_LINEIN_THRESHOLD)) {
-				jack_type = SND_JACK_LINEOUT;
-				mbhc->current_plug = PLUG_TYPE_HIGH_HPH;
-				pr_debug("%s: Replace with SND_JACK_LINEOUT\n",
-				__func__);
-			}
-		}
-
-		mbhc->hph_status |= jack_type;
-
-		if (mbhc->micbias_enable && mbhc->micbias_enable_cb) {
-			pr_debug("%s: Enabling micbias\n", __func__);
-			mbhc->micbias_enable_cb(mbhc->codec, true,
-						mbhc->mbhc_cfg->micbias);
-		}
-
-		pr_debug("%s: Reporting insertion %d(%x)\n", __func__,
-			 jack_type, mbhc->hph_status);
-		wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
-				    (mbhc->hph_status | SND_JACK_MECHANICAL),
-				    WCD9XXX_JACK_MASK);
-		/*
-		 * if PA is already on, switch micbias
-		 * source to VDDIO
-		 */
-		if (((mbhc->current_plug == PLUG_TYPE_HEADSET) ||
-		     (mbhc->current_plug == PLUG_TYPE_ANC_HEADPHONE)) &&
-		    ((mbhc->event_state & (1 << MBHC_EVENT_PA_HPHL |
-			1 << MBHC_EVENT_PA_HPHR))))
-			__wcd9xxx_switch_micbias(mbhc, 1, false,
-						 false);
-		wcd9xxx_clr_and_turnon_hph_padac(mbhc);
-	}
-	/* Setup insert detect */
-	wcd9xxx_insert_detect_setup(mbhc, !insertion);
-
-	pr_debug("%s: leave hph_status %x\n", __func__, mbhc->hph_status);
-}
-
-/* should be called under interrupt context that holds suspend */
-static void wcd9xxx_schedule_hs_detect_plug(struct wcd9xxx_mbhc *mbhc,
-					    struct work_struct *work)
-{
-	pr_debug("%s: scheduling wcd9xxx_correct_swch_plug\n", __func__);
-	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-	mbhc->hs_detect_work_stop = false;
-	wcd9xxx_lock_sleep(mbhc->resmgr->core_res);
-	schedule_work(work);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_cancel_hs_detect_plug(struct wcd9xxx_mbhc *mbhc,
-					 struct work_struct *work)
-{
-	pr_debug("%s: Canceling correct_plug_swch\n", __func__);
-	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-	mbhc->hs_detect_work_stop = true;
-
-	/* Make sure mbhc state update complete before unlocking. */
-	wmb();
-	WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-	if (cancel_work_sync(work)) {
-		pr_debug("%s: correct_plug_swch is canceled\n",
-			 __func__);
-		wcd9xxx_unlock_sleep(mbhc->resmgr->core_res);
-	}
-	WCD9XXX_BCL_LOCK(mbhc->resmgr);
-}
-
-static s16 scale_v_micb_vddio(struct wcd9xxx_mbhc *mbhc, int v, bool tovddio)
-{
-	int r;
-	int vddio_k, mb_k;
-
-	vddio_k = __wcd9xxx_resmgr_get_k_val(mbhc, VDDIO_MICBIAS_MV);
-	mb_k = __wcd9xxx_resmgr_get_k_val(mbhc, mbhc->mbhc_data.micb_mv);
-	if (tovddio)
-		r = v * (vddio_k + 4) / (mb_k + 4);
-	else
-		r = v * (mb_k + 4) / (vddio_k + 4);
-	return r;
-}
-
-static s16 wcd9xxx_get_current_v_hs_max(struct wcd9xxx_mbhc *mbhc)
-{
-	s16 v_hs_max;
-	struct wcd9xxx_mbhc_plug_type_cfg *plug_type;
-
-	plug_type = WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
-	if ((mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV) &&
-	    mbhc->mbhc_micbias_switched)
-		v_hs_max = scale_v_micb_vddio(mbhc, plug_type->v_hs_max, true);
-	else
-		v_hs_max = plug_type->v_hs_max;
-	return v_hs_max;
-}
-
-static short wcd9xxx_read_sta_result(struct snd_soc_codec *codec)
-{
-	u8 bias_msb, bias_lsb;
-	short bias_value;
-
-	bias_msb = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B3_STATUS);
-	bias_lsb = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B2_STATUS);
-	bias_value = (bias_msb << 8) | bias_lsb;
-	return bias_value;
-}
-
-static short wcd9xxx_read_dce_result(struct snd_soc_codec *codec)
-{
-	u8 bias_msb, bias_lsb;
-	short bias_value;
-
-	bias_msb = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B5_STATUS);
-	bias_lsb = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B4_STATUS);
-	bias_value = (bias_msb << 8) | bias_lsb;
-	return bias_value;
-}
-
-static void wcd9xxx_turn_onoff_rel_detection(struct snd_soc_codec *codec,
-					     bool on)
-{
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x02, on << 1);
-}
-
-static short __wcd9xxx_codec_sta_dce(struct wcd9xxx_mbhc *mbhc, int dce,
-				     bool override_bypass, bool noreldetection)
-{
-	short bias_value;
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	wcd9xxx_disable_irq(mbhc->resmgr->core_res,
-			    mbhc->intr_ids->dce_est_complete);
-	if (noreldetection)
-		wcd9xxx_turn_onoff_rel_detection(codec, false);
-
-	if (mbhc->mbhc_cfg->do_recalibration)
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2,
-				    0x0);
-	/* Turn on the override */
-	if (!override_bypass)
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x4, 0x4);
-	if (dce) {
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
-				    0x8);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x4);
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
-				    0x0);
-		if (mbhc->mbhc_cfg->do_recalibration)
-			snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
-					    0x2, 0x2);
-		usleep_range(mbhc->mbhc_data.t_sta_dce,
-			     mbhc->mbhc_data.t_sta_dce + 50);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x4);
-		usleep_range(mbhc->mbhc_data.t_dce, mbhc->mbhc_data.t_dce + 50);
-		bias_value = wcd9xxx_read_dce_result(codec);
-	} else {
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
-				    0x8);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x2);
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
-				    0x0);
-		if (mbhc->mbhc_cfg->do_recalibration)
-			snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
-					    0x2, 0x2);
-		usleep_range(mbhc->mbhc_data.t_sta_dce,
-			     mbhc->mbhc_data.t_sta_dce + 50);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x2);
-		usleep_range(mbhc->mbhc_data.t_sta,
-			     mbhc->mbhc_data.t_sta + 50);
-		bias_value = wcd9xxx_read_sta_result(codec);
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
-				    0x8);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x0);
-	}
-	/* Turn off the override after measuring mic voltage */
-	if (!override_bypass)
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x04,
-				    0x00);
-
-	if (noreldetection)
-		wcd9xxx_turn_onoff_rel_detection(codec, true);
-	wcd9xxx_enable_irq(mbhc->resmgr->core_res,
-			   mbhc->intr_ids->dce_est_complete);
-
-	return bias_value;
-}
-
-static short wcd9xxx_codec_sta_dce(struct wcd9xxx_mbhc *mbhc, int dce,
-				   bool norel)
-{
-	bool override_bypass;
-
-	/* Bypass override if it is already enabled */
-	override_bypass = (snd_soc_read(mbhc->codec,
-					WCD9XXX_A_CDC_MBHC_B1_CTL) &
-			   0x04) ? true : false;
-
-	return __wcd9xxx_codec_sta_dce(mbhc, dce, override_bypass, norel);
-}
-
-static s32 __wcd9xxx_codec_sta_dce_v(struct wcd9xxx_mbhc *mbhc, s8 dce,
-				     u16 bias_value, s16 z, u32 micb_mv)
-{
-	s16 value, mb;
-	s32 mv = 0;
-
-	value = bias_value;
-	if (dce) {
-		mb = (mbhc->mbhc_data.dce_mb);
-		if (mb - z)
-			mv = (value - z) * (s32)micb_mv / (mb - z);
-	} else {
-		mb = (mbhc->mbhc_data.sta_mb);
-		if (mb - z)
-			mv = (value - z) * (s32)micb_mv / (mb - z);
-	}
-
-	return mv;
-}
-
-static s32 wcd9xxx_codec_sta_dce_v(struct wcd9xxx_mbhc *mbhc, s8 dce,
-				   u16 bias_value)
-{
-	s16 z;
-
-	z = dce ? (s16)mbhc->mbhc_data.dce_z : (s16)mbhc->mbhc_data.sta_z;
-	return __wcd9xxx_codec_sta_dce_v(mbhc, dce, bias_value, z,
-					 mbhc->mbhc_data.micb_mv);
-}
-
-/* To enable/disable bandgap and RC oscillator */
-static void wcd9xxx_mbhc_ctrl_clk_bandgap(struct wcd9xxx_mbhc *mbhc,
-		bool enable)
-{
-	if (enable) {
-		WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
-		wcd9xxx_resmgr_get_bandgap(mbhc->resmgr,
-				WCD9XXX_BANDGAP_AUDIO_MODE);
-		if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_rco_ctrl) {
-			WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
-			mbhc->mbhc_cb->codec_rco_ctrl(mbhc->codec, true);
-		} else {
-			wcd9xxx_resmgr_get_clk_block(mbhc->resmgr,
-					WCD9XXX_CLK_RCO);
-			WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
-		}
-	} else {
-		if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_rco_ctrl) {
-			mbhc->mbhc_cb->codec_rco_ctrl(mbhc->codec, false);
-			WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
-		} else {
-			WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
-			wcd9xxx_resmgr_put_clk_block(mbhc->resmgr,
-					WCD9XXX_CLK_RCO);
-		}
-		wcd9xxx_resmgr_put_bandgap(mbhc->resmgr,
-				WCD9XXX_BANDGAP_AUDIO_MODE);
-		WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
-	}
-}
-
-/* called only from interrupt which is under codec_resource_lock acquisition */
-static short wcd9xxx_mbhc_setup_hs_polling(struct wcd9xxx_mbhc *mbhc,
-				struct mbhc_micbias_regs *mbhc_micb_regs,
-				bool is_cs_enable)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-	short bias_value;
-	u8 cfilt_mode;
-
-	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
-	pr_debug("%s: enter\n", __func__);
-	if (!mbhc->mbhc_cfg->calibration) {
-		pr_err("%s: Error, no calibration exists\n", __func__);
-		return -ENODEV;
-	}
-
-	/* Enable external voltage source to micbias if present */
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source)
-		mbhc->mbhc_cb->enable_mb_source(codec, true, true);
-
-	/*
-	 * setup internal micbias if codec uses internal micbias for
-	 * headset detection
-	 */
-	if (mbhc->mbhc_cfg->use_int_rbias) {
-		if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias)
-			mbhc->mbhc_cb->setup_int_rbias(codec, true);
-		else
-			pr_err("%s: internal bias requested but codec did not provide callback\n",
-				__func__);
-	}
-
-	snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x05, 0x01);
-
-	/* Make sure CFILT is in fast mode, save current mode */
-	cfilt_mode = snd_soc_read(codec, mbhc_micb_regs->cfilt_ctl);
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->cfilt_fast_mode)
-		mbhc->mbhc_cb->cfilt_fast_mode(codec, mbhc);
-	else
-		snd_soc_update_bits(codec, mbhc_micb_regs->cfilt_ctl,
-				    0x70, 0x00);
-
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2, 0x2);
-	snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
-		      mbhc->scaling_mux_in);
-	pr_debug("%s:  scaling_mux_input: %d\n", __func__,
-						 mbhc->scaling_mux_in);
-
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
-		mbhc->mbhc_cb->enable_mux_bias_block(codec);
-	else
-		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
-				    0x80, 0x80);
-
-	snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_EN, 0x80, 0x80);
-	snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_EN, 0x1F, 0x1C);
-	snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_TEST_CTL, 0x40, 0x40);
-
-	snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_EN, 0x80, 0x00);
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x8);
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x00);
-
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x2, 0x2);
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x8);
-
-	if (!mbhc->mbhc_cfg->do_recalibration) {
-		if (!is_cs_enable)
-			wcd9xxx_calibrate_hs_polling(mbhc);
-	}
-
-	/* don't flip override */
-	bias_value = __wcd9xxx_codec_sta_dce(mbhc, 1, true, true);
-	snd_soc_write(codec, mbhc_micb_regs->cfilt_ctl, cfilt_mode);
-	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x13, 0x00);
-
-	return bias_value;
-}
-
-static void wcd9xxx_recalibrate(struct wcd9xxx_mbhc *mbhc,
-				struct mbhc_micbias_regs *mbhc_micb_regs,
-				bool is_cs_enable)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-	s16 reg;
-	int change;
-	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
-	s16 sta_z = 0, dce_z = 0;
-
-	btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
-
-	if (mbhc->mbhc_cfg->do_recalibration) {
-		/* recalibrate dce_z and sta_z */
-		reg = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL);
-		change = snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
-					     0x78, btn_det->mbhc_nsc << 3);
-		wcd9xxx_get_z(mbhc, &dce_z, &sta_z, mbhc_micb_regs, true);
-		if (change)
-			snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, reg);
-		if (dce_z && sta_z) {
-			pr_debug("%s: sta_z 0x%x -> 0x%x, dce_z 0x%x -> 0x%x\n",
-				 __func__,
-				 mbhc->mbhc_data.sta_z, sta_z & 0xffff,
-				 mbhc->mbhc_data.dce_z, dce_z & 0xffff);
-			mbhc->mbhc_data.dce_z = dce_z;
-			mbhc->mbhc_data.sta_z = sta_z;
-			wcd9xxx_mbhc_calc_thres(mbhc);
-			wcd9xxx_calibrate_hs_polling(mbhc);
-		} else {
-			pr_warn("%s: failed get new dce_z/sta_z 0x%x/0x%x\n",
-				__func__, dce_z, sta_z);
-		}
-
-		if (is_cs_enable) {
-			/* recalibrate dce_nsc_cs_z */
-			reg = snd_soc_read(mbhc->codec,
-					   WCD9XXX_A_CDC_MBHC_B1_CTL);
-			snd_soc_update_bits(mbhc->codec,
-					    WCD9XXX_A_CDC_MBHC_B1_CTL,
-					    0x78, WCD9XXX_MBHC_NSC_CS << 3);
-			wcd9xxx_get_z(mbhc, &dce_z, NULL, mbhc_micb_regs,
-				      true);
-			snd_soc_write(mbhc->codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
-				      reg);
-			if (dce_z) {
-				mbhc->mbhc_data.dce_nsc_cs_z = dce_z;
-				/* update v_cs_ins_h with new dce_nsc_cs_z */
-				mbhc->mbhc_data.v_cs_ins_h =
-						wcd9xxx_codec_v_sta_dce(
-							mbhc, DCE,
-							WCD9XXX_V_CS_HS_MAX,
-							is_cs_enable);
-				pr_debug("%s: dce_nsc_cs_z 0x%x -> 0x%x, v_cs_ins_h 0x%x\n",
-					  __func__,
-					  mbhc->mbhc_data.dce_nsc_cs_z,
-					  dce_z & 0xffff,
-					  mbhc->mbhc_data.v_cs_ins_h);
-			} else {
-				pr_debug("%s: failed get new dce_nsc_cs_z\n",
-					 __func__);
-			}
-		}
-	}
-}
-
-static void wcd9xxx_shutdown_hs_removal_detect(struct wcd9xxx_mbhc *mbhc)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-	const struct wcd9xxx_mbhc_general_cfg *generic =
-	    WCD9XXX_MBHC_CAL_GENERAL_PTR(mbhc->mbhc_cfg->calibration);
-
-	/* Need MBHC clock */
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_rco_ctrl)
-		mbhc->mbhc_cb->codec_rco_ctrl(mbhc->codec, true);
-	else {
-		WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
-		wcd9xxx_resmgr_get_clk_block(mbhc->resmgr, WCD9XXX_CLK_RCO);
-		WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
-	}
-
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2, 0x2);
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x6, 0x0);
-	__wcd9xxx_switch_micbias(mbhc, 0, false, false);
-
-	usleep_range(generic->t_shutdown_plug_rem,
-		     generic->t_shutdown_plug_rem + 50);
-
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0xA, 0x8);
-
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_rco_ctrl)
-		mbhc->mbhc_cb->codec_rco_ctrl(mbhc->codec, false);
-	else {
-		WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
-		/* Put requested CLK back */
-		wcd9xxx_resmgr_put_clk_block(mbhc->resmgr, WCD9XXX_CLK_RCO);
-		WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
-	}
-
-	snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x00);
-}
-
-static void wcd9xxx_cleanup_hs_polling(struct wcd9xxx_mbhc *mbhc)
-{
-
-	pr_debug("%s: enter\n", __func__);
-	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
-	wcd9xxx_shutdown_hs_removal_detect(mbhc);
-
-
-	/* Disable external voltage source to micbias if present */
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source)
-		mbhc->mbhc_cb->enable_mb_source(mbhc->codec, false, true);
-
-	mbhc->polling_active = false;
-	mbhc->mbhc_state = MBHC_STATE_NONE;
-	pr_debug("%s: leave\n", __func__);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_codec_hphr_gnd_switch(struct snd_soc_codec *codec, bool on)
-{
-	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01, on);
-	if (on)
-		usleep_range(5000, 5100);
-}
-
-static void wcd9xxx_onoff_vddio_switch(struct wcd9xxx_mbhc *mbhc, bool on)
-{
-	pr_debug("%s: vddio %d\n", __func__, on);
-
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->pull_mb_to_vddio) {
-		mbhc->mbhc_cb->pull_mb_to_vddio(mbhc->codec, on);
-		goto exit;
-	}
-
-	if (on) {
-		snd_soc_update_bits(mbhc->codec, mbhc->mbhc_bias_regs.mbhc_reg,
-				    1 << 7, 1 << 7);
-		snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL,
-				    1 << 4, 0);
-	} else {
-		snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL,
-				    1 << 4, 1 << 4);
-		snd_soc_update_bits(mbhc->codec, mbhc->mbhc_bias_regs.mbhc_reg,
-				    1 << 7, 0);
-	}
-
-exit:
-	/*
-	 * Wait for the micbias to settle down to vddio
-	 * when the micbias to vddio switch is enabled.
-	 */
-	if (on)
-		usleep_range(10000, 10100);
-}
-
-static int wcd9xxx_hphl_status(struct wcd9xxx_mbhc *mbhc)
-{
-	u16 hph, status;
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-	hph = snd_soc_read(codec, WCD9XXX_A_MBHC_HPH);
-	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x12, 0x02);
-	usleep_range(WCD9XXX_HPHL_STATUS_READY_WAIT_US,
-		     WCD9XXX_HPHL_STATUS_READY_WAIT_US +
-		     WCD9XXX_USLEEP_RANGE_MARGIN_US);
-	status = snd_soc_read(codec, WCD9XXX_A_RX_HPH_L_STATUS);
-	snd_soc_write(codec, WCD9XXX_A_MBHC_HPH, hph);
-	return status;
-}
-
-static enum wcd9xxx_mbhc_plug_type
-wcd9xxx_cs_find_plug_type(struct wcd9xxx_mbhc *mbhc,
-			  struct wcd9xxx_mbhc_detect *dt, const int size,
-			  bool highhph,
-			  unsigned long event_state)
-{
-	int i;
-	int vdce, mb_mv;
-	int ch, sz, delta_thr;
-	int minv = 0, maxv = INT_MIN;
-	struct wcd9xxx_mbhc_detect *d = dt;
-	struct wcd9xxx_mbhc_detect *dprev = d, *dmicbias = NULL, *dgnd = NULL;
-	enum wcd9xxx_mbhc_plug_type type = PLUG_TYPE_INVALID;
-
-	const struct wcd9xxx_mbhc_plug_type_cfg *plug_type =
-	    WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
-	s16 hs_max, no_mic, dce_z;
-	int highhph_cnt = 0;
-
-	pr_debug("%s: enter\n", __func__);
-	pr_debug("%s: event_state 0x%lx\n", __func__, event_state);
-
-	sz = size - 1;
-	for (i = 0, d = dt, ch = 0; i < sz; i++, d++) {
-		if (d->mic_bias) {
-			dce_z = mbhc->mbhc_data.dce_z;
-			mb_mv = mbhc->mbhc_data.micb_mv;
-			hs_max = plug_type->v_hs_max;
-			no_mic = plug_type->v_no_mic;
-		} else {
-			dce_z = mbhc->mbhc_data.dce_nsc_cs_z;
-			mb_mv = VDDIO_MICBIAS_MV;
-			hs_max = WCD9XXX_V_CS_HS_MAX;
-			no_mic = WCD9XXX_V_CS_NO_MIC;
-		}
-
-		vdce = __wcd9xxx_codec_sta_dce_v(mbhc, true, d->dce,
-						 dce_z, (u32)mb_mv);
-		d->_vdces = vdce;
-		if (d->_vdces < no_mic)
-			d->_type = PLUG_TYPE_HEADPHONE;
-		else if (d->_vdces >= hs_max) {
-			d->_type = PLUG_TYPE_HIGH_HPH;
-			highhph_cnt++;
-		} else
-			d->_type = PLUG_TYPE_HEADSET;
-
-		pr_debug("%s: DCE #%d, %04x, V %04d(%04d), HPHL %d TYPE %d\n",
-			 __func__, i, d->dce, vdce, d->_vdces,
-			 d->hphl_status & 0x01,
-			 d->_type);
-
-		ch += d->hphl_status & 0x01;
-		if (!d->swap_gnd && !d->mic_bias) {
-			if (maxv < d->_vdces)
-				maxv = d->_vdces;
-			if (!minv || minv > d->_vdces)
-				minv = d->_vdces;
-		}
-		if ((!d->mic_bias &&
-		    (d->_vdces >= WCD9XXX_CS_MEAS_INVALD_RANGE_LOW_MV &&
-		     d->_vdces <= WCD9XXX_CS_MEAS_INVALD_RANGE_HIGH_MV)) ||
-		    (d->mic_bias &&
-		    (d->_vdces >= WCD9XXX_MEAS_INVALD_RANGE_LOW_MV &&
-		     d->_vdces <= WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV))) {
-			pr_debug("%s: within invalid range\n", __func__);
-			type = PLUG_TYPE_INVALID;
-			goto exit;
-		}
-	}
-
-	delta_thr = ((highhph_cnt == sz) || highhph) ?
-			      WCD9XXX_MB_MEAS_DELTA_MAX_MV :
-			      WCD9XXX_CS_MEAS_DELTA_MAX_MV;
-
-	for (i = 0, d = dt; i < sz; i++, d++) {
-		if ((i > 0) && !d->mic_bias && !d->swap_gnd &&
-		    (d->_type != dprev->_type)) {
-			pr_debug("%s: Invalid, inconsistent types\n", __func__);
-			type = PLUG_TYPE_INVALID;
-			goto exit;
-		}
-
-		if (!d->swap_gnd && !d->mic_bias &&
-		    (abs(minv - d->_vdces) > delta_thr ||
-		     abs(maxv - d->_vdces) > delta_thr)) {
-			pr_debug("%s: Invalid, delta %dmv, %dmv and %dmv\n",
-				 __func__, d->_vdces, minv, maxv);
-			type = PLUG_TYPE_INVALID;
-			goto exit;
-		} else if (d->swap_gnd) {
-			dgnd = d;
-		}
-
-		if (!d->mic_bias && !d->swap_gnd)
-			dprev = d;
-		else if (d->mic_bias)
-			dmicbias = d;
-	}
-	if (dgnd && dt->_type != PLUG_TYPE_HEADSET &&
-	    dt->_type != dgnd->_type) {
-		pr_debug("%s: Invalid, inconsistent types\n", __func__);
-		type = PLUG_TYPE_INVALID;
-		goto exit;
-	}
-
-	type = dt->_type;
-	if (dmicbias) {
-		if (dmicbias->_type == PLUG_TYPE_HEADSET &&
-		    (dt->_type == PLUG_TYPE_HIGH_HPH ||
-		     dt->_type == PLUG_TYPE_HEADSET)) {
-			type = PLUG_TYPE_HEADSET;
-			if (dt->_type == PLUG_TYPE_HIGH_HPH) {
-				pr_debug("%s: Headset with threshold on MIC detected\n",
-					 __func__);
-				if (mbhc->mbhc_cfg->micbias_enable_flags &
-				 (1 << MBHC_MICBIAS_ENABLE_THRESHOLD_HEADSET))
-					mbhc->micbias_enable = true;
-			}
-		}
-	}
-
-	if (type == PLUG_TYPE_HEADSET && dgnd && !dgnd->mic_bias) {
-		/* if the plug type is Headphone, report GND_MIC_SWAP */
-		if (dgnd->_type == PLUG_TYPE_HEADPHONE) {
-			pr_debug("%s: GND_MIC_SWAP\n", __func__);
-			type = PLUG_TYPE_GND_MIC_SWAP;
-			/*
-			 * if type is GND_MIC_SWAP we should not check
-			 * HPHL status hence goto exit
-			 */
-			goto exit;
-		} else if (dgnd->_type != PLUG_TYPE_HEADSET && !dmicbias) {
-			pr_debug("%s: Invalid, inconsistent types\n", __func__);
-			type = PLUG_TYPE_INVALID;
-		}
-	}
-
-	if (event_state & (1 << MBHC_EVENT_PA_HPHL)) {
-		pr_debug("%s: HPHL PA was ON\n", __func__);
-	} else if (ch != sz && ch > 0) {
-		pr_debug("%s: Invalid, inconsistent HPHL..\n", __func__);
-		type = PLUG_TYPE_INVALID;
-		goto exit;
-	}
-
-	if (!(event_state & (1UL << MBHC_EVENT_PA_HPHL))) {
-		if (((type == PLUG_TYPE_HEADSET ||
-		      type == PLUG_TYPE_HEADPHONE) && ch != sz)) {
-			pr_debug("%s: Invalid, not fully inserted, TYPE %d\n",
-				 __func__, type);
-			type = PLUG_TYPE_INVALID;
-		}
-	}
-
-	if (type == PLUG_TYPE_HEADSET &&
-	    (mbhc->mbhc_cfg->micbias_enable_flags &
-	    (1 << MBHC_MICBIAS_ENABLE_REGULAR_HEADSET)))
-		mbhc->micbias_enable = true;
-
-exit:
-	pr_debug("%s: Plug type %d detected\n", __func__, type);
-	return type;
-}
-
-/*
- * wcd9xxx_find_plug_type : Find and return the best plug type from the given
- *			    list of wcd9xxx_mbhc_detect structures.
- * param mbhc wcd9xxx_mbhc structure
- * param dt collected measurements
- * param size array size of dt
- * param event_state mbhc->event_state when dt is collected
- */
-static enum wcd9xxx_mbhc_plug_type
-wcd9xxx_find_plug_type(struct wcd9xxx_mbhc *mbhc,
-		       struct wcd9xxx_mbhc_detect *dt, const int size,
-		       unsigned long event_state)
-{
-	int i;
-	int ch;
-	enum wcd9xxx_mbhc_plug_type type;
-	int vdce;
-	struct wcd9xxx_mbhc_detect *d, *dprev, *dgnd = NULL, *dvddio = NULL;
-	int maxv = 0, minv = 0;
-	const struct wcd9xxx_mbhc_plug_type_cfg *plug_type =
-	    WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
-	const s16 hs_max = plug_type->v_hs_max;
-	const s16 no_mic = plug_type->v_no_mic;
-
-	pr_debug("%s: event_state 0x%lx\n", __func__, event_state);
-
-	for (i = 0, d = dt, ch = 0; i < size; i++, d++) {
-		vdce = wcd9xxx_codec_sta_dce_v(mbhc, true, d->dce);
-		if (d->vddio)
-			d->_vdces = scale_v_micb_vddio(mbhc, vdce, false);
-		else
-			d->_vdces = vdce;
-
-		if (d->_vdces >= no_mic && d->_vdces < hs_max)
-			d->_type = PLUG_TYPE_HEADSET;
-		else if (d->_vdces < no_mic)
-			d->_type = PLUG_TYPE_HEADPHONE;
-		else
-			d->_type = PLUG_TYPE_HIGH_HPH;
-
-		ch += d->hphl_status & 0x01;
-		if (!d->swap_gnd && !d->hwvalue && !d->vddio) {
-			if (maxv < d->_vdces)
-				maxv = d->_vdces;
-			if (!minv || minv > d->_vdces)
-				minv = d->_vdces;
-		}
-
-		pr_debug("%s: DCE #%d, %04x, V %04d(%04d), GND %d, VDDIO %d, HPHL %d TYPE %d\n",
-			 __func__, i, d->dce, vdce, d->_vdces,
-			 d->swap_gnd, d->vddio, d->hphl_status & 0x01,
-			 d->_type);
-
-
-		/*
-		 * If GND and MIC prongs are aligned to HPHR and GND of
-		 * headphone, the codec measures the voltage based on the
-		 * impedance between HPHR and GND, which results in ~80mV.
-		 * Avoid this.
-		 */
-		if (d->_vdces >= WCD9XXX_MEAS_INVALD_RANGE_LOW_MV &&
-		    d->_vdces <= WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV) {
-			pr_debug("%s: within invalid range\n", __func__);
-			type = PLUG_TYPE_INVALID;
-			goto exit;
-		}
-	}
-
-	if (event_state & (1 << MBHC_EVENT_PA_HPHL)) {
-		pr_debug("%s: HPHL PA was ON\n", __func__);
-	} else if (ch != size && ch > 0) {
-		pr_debug("%s: Invalid, inconsistent HPHL\n", __func__);
-		type = PLUG_TYPE_INVALID;
-		goto exit;
-	}
-
-	for (i = 0, dprev = NULL, d = dt; i < size; i++, d++) {
-		if (d->vddio) {
-			dvddio = d;
-			continue;
-		}
-
-		if ((i > 0) && (dprev != NULL) && (d->_type != dprev->_type)) {
-			pr_debug("%s: Invalid, inconsistent types\n", __func__);
-			type = PLUG_TYPE_INVALID;
-			goto exit;
-		}
-
-		if (!d->swap_gnd && !d->hwvalue &&
-		    (abs(minv - d->_vdces) > WCD9XXX_MEAS_DELTA_MAX_MV ||
-		     abs(maxv - d->_vdces) > WCD9XXX_MEAS_DELTA_MAX_MV)) {
-			pr_debug("%s: Invalid, delta %dmv, %dmv and %dmv\n",
-				 __func__, d->_vdces, minv, maxv);
-			type = PLUG_TYPE_INVALID;
-			goto exit;
-		} else if (d->swap_gnd) {
-			dgnd = d;
-		}
-		dprev = d;
-	}
-
-	WARN_ON(i != size);
-	type = dt->_type;
-	if (type == PLUG_TYPE_HEADSET && dgnd) {
-		if ((dgnd->_vdces + WCD9XXX_GM_SWAP_THRES_MIN_MV <
-		     minv) &&
-		    (dgnd->_vdces + WCD9XXX_GM_SWAP_THRES_MAX_MV >
-		     maxv))
-			type = PLUG_TYPE_GND_MIC_SWAP;
-	}
-
-	/* if HPHL PA was on, we cannot use hphl status */
-	if (!(event_state & (1UL << MBHC_EVENT_PA_HPHL))) {
-		if (((type == PLUG_TYPE_HEADSET ||
-		      type == PLUG_TYPE_HEADPHONE) && ch != size) ||
-		    (type == PLUG_TYPE_GND_MIC_SWAP && ch)) {
-			pr_debug("%s: Invalid, not fully inserted, TYPE %d\n",
-				 __func__, type);
-			type = PLUG_TYPE_INVALID;
-		}
-	}
-
-	if (type == PLUG_TYPE_HEADSET) {
-		if (dvddio && ((dvddio->_vdces > hs_max) ||
-		   (dvddio->_vdces > minv + WCD9XXX_THRESHOLD_MIC_THRESHOLD))) {
-			pr_debug("%s: Headset with threshold on MIC detected\n",
-				 __func__);
-			if (mbhc->mbhc_cfg->micbias_enable_flags &
-			    (1 << MBHC_MICBIAS_ENABLE_THRESHOLD_HEADSET))
-				mbhc->micbias_enable = true;
-		} else {
-			pr_debug("%s: Headset with regular MIC detected\n",
-				 __func__);
-			if (mbhc->mbhc_cfg->micbias_enable_flags &
-			    (1 << MBHC_MICBIAS_ENABLE_REGULAR_HEADSET))
-				mbhc->micbias_enable = true;
-		}
-	}
-exit:
-	pr_debug("%s: Plug type %d detected, micbias_enable %d\n", __func__,
-		 type, mbhc->micbias_enable);
-	return type;
-}
-
-/*
- * Pull down MBHC micbias for the provided duration in microseconds.
- */
-static int wcd9xxx_pull_down_micbias(struct wcd9xxx_mbhc *mbhc, int us)
-{
-	bool micbiasconn = false;
-	struct snd_soc_codec *codec = mbhc->codec;
-	const u16 ctlreg = mbhc->mbhc_bias_regs.ctl_reg;
-
-	/*
-	 * Disable MBHC to micbias connection to pull down
-	 * micbias and pull down micbias for a moment.
-	 */
-	if ((snd_soc_read(mbhc->codec, ctlreg) & 0x01)) {
-		WARN_ONCE(1, "MBHC micbias is already pulled down unexpectedly\n");
-		return -EFAULT;
-	}
-
-	if ((snd_soc_read(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL) & 1 << 4)) {
-		snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL,
-				    1 << 4, 0);
-		micbiasconn = true;
-	}
-
-	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x01);
-
-	/*
-	 * Pull down for 1ms to discharge the bias. Give a small margin (10us)
-	 * to get consistent results across DCEs.
-	 */
-	usleep_range(1000, 1000 + 10);
-
-	if (micbiasconn)
-		snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL,
-				    1 << 4, 1 << 4);
-	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00);
-	usleep_range(us, us + WCD9XXX_USLEEP_RANGE_MARGIN_US);
-
-	return 0;
-}
-
-/* Called under codec resource lock acquisition */
-void wcd9xxx_turn_onoff_current_source(struct wcd9xxx_mbhc *mbhc,
-				       struct mbhc_micbias_regs *mbhc_micb_regs,
-				       bool on, bool highhph)
-{
-	struct snd_soc_codec *codec;
-	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
-	const struct wcd9xxx_mbhc_plug_detect_cfg *plug_det =
-	    WCD9XXX_MBHC_CAL_PLUG_DET_PTR(mbhc->mbhc_cfg->calibration);
-
-	btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
-	codec = mbhc->codec;
-
-	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
-	if ((on && mbhc->is_cs_enabled) ||
-	    (!on && !mbhc->is_cs_enabled)) {
-		pr_debug("%s: Current source is already %s\n",
-			__func__, on ? "ON" : "OFF");
-		return;
-	}
-
-	if (on) {
-		pr_debug("%s: enabling current source\n", __func__);
-		/* Nsc to 9 */
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
-				    0x78, 0x48);
-		/* pull down diode bit to 0 */
-		snd_soc_update_bits(codec, mbhc_micb_regs->mbhc_reg,
-				    0x01, 0x00);
-		/*
-		 * Keep the low power insertion/removal
-		 * detection (reg 0x3DD) disabled
-		 */
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL,
-				    0x01, 0x00);
-		/*
-		 * Enable the Mic Bias current source
-		 * Write bits[6:5] of register MICB_2_MBHC to 0x3 (V_20_UA)
-		 * Write bit[7] of register MICB_2_MBHC to 1
-		 * (INS_DET_ISRC_EN__ENABLE)
-		 * MICB_2_MBHC__SCHT_TRIG_EN to 1
-		 */
-		snd_soc_update_bits(codec, mbhc_micb_regs->mbhc_reg,
-				    0xF0, 0xF0);
-		/* Disconnect MBHC Override from MicBias and LDOH */
-		snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 0x10, 0x00);
-		mbhc->is_cs_enabled = true;
-	} else {
-		pr_debug("%s: disabling current source\n", __func__);
-		/* Connect MBHC Override from MicBias and LDOH */
-		snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 0x10, 0x10);
-		/* INS_DET_ISRC_CTL to acdb value */
-		snd_soc_update_bits(codec, mbhc_micb_regs->mbhc_reg,
-				    0x60, plug_det->mic_current << 5);
-		if (!highhph) {
-			/* INS_DET_ISRC_EN__ENABLE to 0 */
-			snd_soc_update_bits(codec,
-					    mbhc_micb_regs->mbhc_reg,
-					    0x80, 0x00);
-			/* MICB_2_MBHC__SCHT_TRIG_EN  to 0 */
-			snd_soc_update_bits(codec,
-					    mbhc_micb_regs->mbhc_reg,
-					    0x10, 0x00);
-		}
-		/* Nsc to acdb value */
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x78,
-				    btn_det->mbhc_nsc << 3);
-		mbhc->is_cs_enabled = false;
-	}
-}
-
-static enum wcd9xxx_mbhc_plug_type
-wcd9xxx_codec_cs_get_plug_type(struct wcd9xxx_mbhc *mbhc, bool highhph)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-	struct wcd9xxx_mbhc_detect rt[NUM_DCE_PLUG_INS_DETECT];
-	enum wcd9xxx_mbhc_plug_type type = PLUG_TYPE_INVALID;
-	int i;
-
-	pr_debug("%s: enter\n", __func__);
-	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
-	BUG_ON(NUM_DCE_PLUG_INS_DETECT < 4);
-
-	wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, true);
-	rt[0].swap_gnd = false;
-	rt[0].vddio = false;
-	rt[0].hwvalue = true;
-	rt[0].hphl_status = wcd9xxx_hphl_status(mbhc);
-	rt[0].dce = wcd9xxx_mbhc_setup_hs_polling(mbhc, &mbhc->mbhc_bias_regs,
-						  true);
-	rt[0].mic_bias = false;
-
-	for (i = 1; i < NUM_DCE_PLUG_INS_DETECT - 1; i++) {
-		rt[i].swap_gnd = (i == NUM_DCE_PLUG_INS_DETECT - 3);
-		rt[i].mic_bias = ((i == NUM_DCE_PLUG_INS_DETECT - 4) &&
-				   highhph);
-		rt[i].hphl_status = wcd9xxx_hphl_status(mbhc);
-		if (rt[i].swap_gnd)
-			wcd9xxx_codec_hphr_gnd_switch(codec, true);
-
-		if (rt[i].mic_bias)
-			wcd9xxx_turn_onoff_current_source(mbhc,
-							  &mbhc->mbhc_bias_regs,
-							  false, false);
-
-		rt[i].dce = __wcd9xxx_codec_sta_dce(mbhc, 1, !highhph, true);
-		if (rt[i].mic_bias)
-			wcd9xxx_turn_onoff_current_source(mbhc,
-							  &mbhc->mbhc_bias_regs,
-							  true, false);
-		if (rt[i].swap_gnd)
-			wcd9xxx_codec_hphr_gnd_switch(codec, false);
-	}
-
-	/* recalibrate DCE/STA GND voltages */
-	wcd9xxx_recalibrate(mbhc, &mbhc->mbhc_bias_regs, true);
-
-	type = wcd9xxx_cs_find_plug_type(mbhc, rt, ARRAY_SIZE(rt), highhph,
-					 mbhc->event_state);
-
-	wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
-	pr_debug("%s: plug_type:%d\n", __func__, type);
-
-	return type;
-}
-
-static enum wcd9xxx_mbhc_plug_type
-wcd9xxx_codec_get_plug_type(struct wcd9xxx_mbhc *mbhc, bool highhph)
-{
-	int i;
-	bool vddioon;
-	struct wcd9xxx_mbhc_plug_type_cfg *plug_type_ptr;
-	struct wcd9xxx_mbhc_detect rt[NUM_DCE_PLUG_INS_DETECT];
-	enum wcd9xxx_mbhc_plug_type type = PLUG_TYPE_INVALID;
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	pr_debug("%s: enter\n", __func__);
-	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
-	/* make sure override is on */
-	WARN_ON(!(snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL) & 0x04));
-
-	/* GND and MIC swap detection requires at least 2 rounds of DCE */
-	BUG_ON(NUM_DCE_PLUG_INS_DETECT < 2);
-	detect_use_vddio_switch = mbhc->mbhc_cfg->use_vddio_meas;
-
-	/*
-	 * There is a chance the vddio switch is on and the cfilt voltage is
-	 * adjusted to the vddio voltage even after plug removal is reported.
-	 */
-	vddioon = __wcd9xxx_switch_micbias(mbhc, 0, false, false);
-	pr_debug("%s: vddio switch was %s\n", __func__, vddioon ? "on" : "off");
-
-	plug_type_ptr =
-	    WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
-
-	/*
-	 * cfilter in fast mode requires 1ms to fully charge and discharge
-	 * the micbias.
-	 */
-	(void) wcd9xxx_pull_down_micbias(mbhc,
-					 WCD9XXX_MICBIAS_PULLDOWN_SETTLE_US);
-
-	wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, true);
-	rt[0].hphl_status = wcd9xxx_hphl_status(mbhc);
-	rt[0].dce = wcd9xxx_mbhc_setup_hs_polling(mbhc, &mbhc->mbhc_bias_regs,
-						  false);
-	rt[0].swap_gnd = false;
-	rt[0].vddio = false;
-	rt[0].hwvalue = true;
-	for (i = 1; i < NUM_DCE_PLUG_INS_DETECT; i++) {
-		rt[i].swap_gnd = (i == NUM_DCE_PLUG_INS_DETECT - 2);
-		if (detect_use_vddio_switch)
-			rt[i].vddio = (i == 1);
-		else
-			rt[i].vddio = false;
-		rt[i].hphl_status = wcd9xxx_hphl_status(mbhc);
-		rt[i].hwvalue = false;
-		if (rt[i].swap_gnd)
-			wcd9xxx_codec_hphr_gnd_switch(codec, true);
-		if (rt[i].vddio)
-			wcd9xxx_onoff_vddio_switch(mbhc, true);
-		/*
-		 * Pull down micbias to detect a headset whose mic has a
-		 * threshold and to get more consistent voltage measurements.
-		 *
-		 * cfilter in fast mode requires 1ms to fully charge and
-		 * discharge the micbias.
-		 */
-		(void) wcd9xxx_pull_down_micbias(mbhc,
-					    WCD9XXX_MICBIAS_PULLDOWN_SETTLE_US);
-		rt[i].dce = __wcd9xxx_codec_sta_dce(mbhc, 1, true, true);
-		if (rt[i].vddio)
-			wcd9xxx_onoff_vddio_switch(mbhc, false);
-		if (rt[i].swap_gnd)
-			wcd9xxx_codec_hphr_gnd_switch(codec, false);
-	}
-	/* recalibrate DCE/STA GND voltages */
-	wcd9xxx_recalibrate(mbhc, &mbhc->mbhc_bias_regs, false);
-
-	if (vddioon)
-		__wcd9xxx_switch_micbias(mbhc, 1, false, false);
-
-	type = wcd9xxx_find_plug_type(mbhc, rt, ARRAY_SIZE(rt),
-				      mbhc->event_state);
-
-	wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
-	pr_debug("%s: leave\n", __func__);
-	return type;
-}
-
-static bool wcd9xxx_swch_level_remove(struct wcd9xxx_mbhc *mbhc)
-{
-	if (mbhc->mbhc_cfg->gpio)
-		return (gpio_get_value_cansleep(mbhc->mbhc_cfg->gpio) !=
-			mbhc->mbhc_cfg->gpio_level_insert);
-	else if (mbhc->mbhc_cfg->insert_detect) {
-		if (mbhc->mbhc_cb && mbhc->mbhc_cb->insert_rem_status)
-			return mbhc->mbhc_cb->insert_rem_status(mbhc->codec);
-		else
-			return snd_soc_read(mbhc->codec,
-				    WCD9XXX_A_MBHC_INSERT_DET_STATUS) &
-				    (1 << 2);
-	} else
-		WARN(1, "Invalid jack detection configuration\n");
-
-	return true;
-}
-
-static bool is_clk_active(struct snd_soc_codec *codec)
-{
-	return !!(snd_soc_read(codec, WCD9XXX_A_CDC_CLK_MCLK_CTL) & 0x05);
-}
-
-static int wcd9xxx_enable_hs_detect(struct wcd9xxx_mbhc *mbhc,
-				    int insertion, int trigger, bool padac_off)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-	int central_bias_enabled = 0;
-	const struct wcd9xxx_mbhc_general_cfg *generic =
-	    WCD9XXX_MBHC_CAL_GENERAL_PTR(mbhc->mbhc_cfg->calibration);
-	const struct wcd9xxx_mbhc_plug_detect_cfg *plug_det =
-	    WCD9XXX_MBHC_CAL_PLUG_DET_PTR(mbhc->mbhc_cfg->calibration);
-
-	pr_debug("%s: enter insertion(%d) trigger(0x%x)\n",
-		 __func__, insertion, trigger);
-
-	if (!mbhc->mbhc_cfg->calibration) {
-		pr_err("Error, no wcd9xxx calibration\n");
-		return -EINVAL;
-	}
-
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x1, 0);
-
-	/*
-	 * Make sure mic bias and Mic line schmitt trigger
-	 * are turned OFF
-	 */
-	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x01);
-	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x90, 0x00);
-
-	if (insertion) {
-		wcd9xxx_switch_micbias(mbhc, 0);
-
-		/* DAPM can manipulate PA/DAC bits concurrently */
-		if (padac_off == true)
-			wcd9xxx_set_and_turnoff_hph_padac(mbhc);
-
-		if (trigger & MBHC_USE_HPHL_TRIGGER) {
-			/* Enable HPH Schmitt Trigger */
-			snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x11,
-					0x11);
-			snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x0C,
-					plug_det->hph_current << 2);
-			snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x02,
-					0x02);
-		}
-		if (trigger & MBHC_USE_MB_TRIGGER) {
-			/* enable the mic line schmitt trigger */
-			snd_soc_update_bits(codec,
-					mbhc->mbhc_bias_regs.mbhc_reg,
-					0x60, plug_det->mic_current << 5);
-			snd_soc_update_bits(codec,
-					mbhc->mbhc_bias_regs.mbhc_reg,
-					0x80, 0x80);
-			usleep_range(plug_det->t_mic_pid, plug_det->t_mic_pid +
-						WCD9XXX_USLEEP_RANGE_MARGIN_US);
-			snd_soc_update_bits(codec,
-					mbhc->mbhc_bias_regs.ctl_reg, 0x01,
-					0x00);
-			snd_soc_update_bits(codec,
-					mbhc->mbhc_bias_regs.mbhc_reg,
-					0x10, 0x10);
-		}
-
-		/* setup for insertion detection */
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x2, 0);
-	} else {
-		pr_debug("setup for removal detection\n");
-		/* Make sure the HPH schmitt trigger is OFF */
-		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x12, 0x00);
-
-		/* enable the mic line schmitt trigger */
-		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg,
-				    0x01, 0x00);
-		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x60,
-				    plug_det->mic_current << 5);
-		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg,
-				    0x80, 0x80);
-		usleep_range(plug_det->t_mic_pid, plug_det->t_mic_pid +
-					WCD9XXX_USLEEP_RANGE_MARGIN_US);
-		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg,
-				    0x10, 0x10);
-
-		/* Setup for low power removal detection */
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x2,
-				    0x2);
-	}
-
-	if (snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL) & 0x4) {
-		/* called by interrupt */
-		if (!is_clk_active(codec)) {
-			wcd9xxx_resmgr_enable_config_mode(mbhc->resmgr, 1);
-			snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
-					0x06, 0);
-			usleep_range(generic->t_shutdown_plug_rem,
-					generic->t_shutdown_plug_rem +
-					WCD9XXX_USLEEP_RANGE_MARGIN_US);
-			wcd9xxx_resmgr_enable_config_mode(mbhc->resmgr, 0);
-		} else
-			snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
-					0x06, 0);
-	}
-
-	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.int_rbias, 0x80, 0);
-
-	/* If the central bandgap is disabled */
-	if (!(snd_soc_read(codec, WCD9XXX_A_PIN_CTL_OE1) & 1)) {
-		snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE1, 0x3, 0x3);
-		usleep_range(generic->t_bg_fast_settle,
-			     generic->t_bg_fast_settle +
-			     WCD9XXX_USLEEP_RANGE_MARGIN_US);
-		central_bias_enabled = 1;
-	}
-
-	/* If LDO_H is disabled */
-	if (snd_soc_read(codec, WCD9XXX_A_PIN_CTL_OE0) & 0x80) {
-		snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE0, 0x10, 0);
-		snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE0, 0x80, 0x80);
-		usleep_range(generic->t_ldoh, generic->t_ldoh +
-					      WCD9XXX_USLEEP_RANGE_MARGIN_US);
-		snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE0, 0x80, 0);
-
-		if (central_bias_enabled)
-			snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE1, 0x1,
-					    0);
-	}
-
-	if (mbhc->resmgr->reg_addr && mbhc->resmgr->reg_addr->micb_4_mbhc)
-		snd_soc_update_bits(codec, mbhc->resmgr->reg_addr->micb_4_mbhc,
-				    0x3, mbhc->mbhc_cfg->micbias);
-
-	wcd9xxx_enable_irq(mbhc->resmgr->core_res, mbhc->intr_ids->insertion);
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x1, 0x1);
-	pr_debug("%s: leave\n", __func__);
-
-	return 0;
-}
-
-/*
- * Function to determine whether an ANC microphone is present or not.
- * Returns true if an ANC microphone is detected, false otherwise.
- */
-static bool wcd9xxx_detect_anc_plug_type(struct wcd9xxx_mbhc *mbhc)
-{
-	struct wcd9xxx_mbhc_detect rt[NUM_DCE_PLUG_INS_DETECT - 1];
-	bool anc_mic_found = true;
-	int i, mb_mv;
-	const struct wcd9xxx_mbhc_plug_type_cfg *plug_type =
-	    WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
-	s16 hs_max, dce_z;
-	s16 no_mic;
-	bool override_en;
-	bool timedout;
-	unsigned long timeout, retry = 0;
-	enum wcd9xxx_mbhc_plug_type type;
-	bool cs_enable;
-
-	if (mbhc->mbhc_cfg->anc_micbias != MBHC_MICBIAS3 &&
-	    mbhc->mbhc_cfg->anc_micbias != MBHC_MICBIAS2)
-		return false;
-
-	pr_debug("%s: enter\n", __func__);
-
-	override_en = (snd_soc_read(mbhc->codec, WCD9XXX_A_CDC_MBHC_B1_CTL) &
-		       0x04) ? true : false;
-	cs_enable = ((mbhc->mbhc_cfg->cs_enable_flags &
-		    (1 << MBHC_CS_ENABLE_DET_ANC)) != 0) &&
-		    (!(snd_soc_read(mbhc->codec,
-		       mbhc->mbhc_anc_bias_regs.ctl_reg) & 0x80)) &&
-		     (mbhc->mbhc_cfg->micbias != mbhc->mbhc_cfg->anc_micbias);
-
-	if (cs_enable) {
-		wcd9xxx_turn_onoff_current_source(mbhc,
-						  &mbhc->mbhc_anc_bias_regs,
-						  true, false);
-	} else {
-		if (mbhc->mbhc_cfg->anc_micbias == MBHC_MICBIAS3) {
-			if (mbhc->micbias_enable_cb)
-				mbhc->micbias_enable_cb(mbhc->codec, true,
-						mbhc->mbhc_cfg->anc_micbias);
-			else
-				return false;
-		} else {
-			/* Enable override */
-			if (!override_en)
-				wcd9xxx_turn_onoff_override(mbhc, true);
-		}
-	}
-
-	if (!cs_enable) {
-		hs_max = plug_type->v_hs_max;
-		no_mic = plug_type->v_no_mic;
-		dce_z = mbhc->mbhc_data.dce_z;
-		mb_mv = mbhc->mbhc_data.micb_mv;
-	} else {
-		hs_max = WCD9XXX_V_CS_HS_MAX;
-		no_mic = WCD9XXX_V_CS_NO_MIC;
-		mb_mv = VDDIO_MICBIAS_MV;
-		dce_z = mbhc->mbhc_data.dce_nsc_cs_z;
-	}
-
-	wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, true);
-
-	timeout = jiffies + msecs_to_jiffies(ANC_HPH_DETECT_PLUG_TIME_MS);
-	anc_mic_found = true;
-
-	while (!(timedout = time_after(jiffies, timeout))) {
-		retry++;
-
-		if (wcd9xxx_swch_level_remove(mbhc)) {
-			pr_debug("%s: Switch level is low\n", __func__);
-			anc_mic_found = false;
-			break;
-		}
-
-		pr_debug("%s: Retry attempt %lu", __func__, retry - 1);
-
-		rt[0].hphl_status = wcd9xxx_hphl_status(mbhc);
-		rt[0].dce = wcd9xxx_mbhc_setup_hs_polling(mbhc,
-						  &mbhc->mbhc_anc_bias_regs,
-						  cs_enable);
-		rt[0]._vdces = __wcd9xxx_codec_sta_dce_v(mbhc, true, rt[0].dce,
-							 dce_z, (u32)mb_mv);
-
-		if (rt[0]._vdces >= no_mic && rt[0]._vdces < hs_max)
-			rt[0]._type = PLUG_TYPE_HEADSET;
-		else if (rt[0]._vdces < no_mic)
-			rt[0]._type = PLUG_TYPE_HEADPHONE;
-		else
-			rt[0]._type = PLUG_TYPE_HIGH_HPH;
-
-		pr_debug("%s: DCE #%d, V %04d, HPHL %d TYPE %d\n",
-				__func__, 0, rt[0]._vdces,
-				rt[0].hphl_status & 0x01,
-				rt[0]._type);
-
-		for (i = 1; i < NUM_DCE_PLUG_INS_DETECT - 1; i++) {
-			rt[i].dce = __wcd9xxx_codec_sta_dce(mbhc, 1,
-							    true, true);
-			rt[i]._vdces = __wcd9xxx_codec_sta_dce_v(mbhc, true,
-							 rt[i].dce, dce_z,
-							 (u32) mb_mv);
-
-			if (rt[i]._vdces >= no_mic && rt[i]._vdces < hs_max)
-				rt[i]._type = PLUG_TYPE_HEADSET;
-			else if (rt[i]._vdces < no_mic)
-				rt[i]._type = PLUG_TYPE_HEADPHONE;
-			else
-				rt[i]._type = PLUG_TYPE_HIGH_HPH;
-
-			rt[i].hphl_status = wcd9xxx_hphl_status(mbhc);
-
-			pr_debug("%s: DCE #%d, V %04d, HPHL %d TYPE %d\n",
-					__func__, i, rt[i]._vdces,
-					rt[i].hphl_status & 0x01,
-					rt[i]._type);
-		}
-
-		/*
-		 * Check for the "type" of all the 4 measurements
-		 * If all 4 measurements have the Type as PLUG_TYPE_HEADSET
-		 * then it is proper mic and declare that the plug has two mics
-		 */
-		for (i = 0; i < NUM_DCE_PLUG_INS_DETECT - 1; i++) {
-			if (i > 0 && (rt[i - 1]._type != rt[i]._type)) {
-				type = PLUG_TYPE_INVALID;
-				break;
-			} else {
-				type = rt[0]._type;
-			}
-		}
-
-		pr_debug("%s: Plug type found in ANC detection :%d",
-			__func__, type);
-
-		if (type != PLUG_TYPE_HEADSET)
-			anc_mic_found = false;
-		if (anc_mic_found || (type == PLUG_TYPE_HEADPHONE &&
-		    mbhc->mbhc_cfg->hw_jack_type == FIVE_POLE_JACK) ||
-		    (type == PLUG_TYPE_HIGH_HPH &&
-		    mbhc->mbhc_cfg->hw_jack_type == SIX_POLE_JACK))
-			break;
-	}
-
-	wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
-	if (cs_enable) {
-		wcd9xxx_turn_onoff_current_source(mbhc,
-						  &mbhc->mbhc_anc_bias_regs,
-						  false, false);
-	} else {
-		if (mbhc->mbhc_cfg->anc_micbias == MBHC_MICBIAS3) {
-			if (mbhc->micbias_enable_cb)
-				mbhc->micbias_enable_cb(mbhc->codec, false,
-						mbhc->mbhc_cfg->anc_micbias);
-		} else {
-			/* Disable override */
-			if (!override_en)
-				wcd9xxx_turn_onoff_override(mbhc, false);
-		}
-	}
-	pr_debug("%s: leave\n", __func__);
-	return anc_mic_found;
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_find_plug_and_report(struct wcd9xxx_mbhc *mbhc,
-					 enum wcd9xxx_mbhc_plug_type plug_type)
-{
-	bool anc_mic_found = false;
-
-	pr_debug("%s: enter current_plug(%d) new_plug(%d)\n",
-		 __func__, mbhc->current_plug, plug_type);
-
-	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
-	if (plug_type == PLUG_TYPE_HEADPHONE &&
-	    mbhc->current_plug == PLUG_TYPE_NONE) {
-		/*
-		 * Nothing was reported previously;
-		 * report a headphone or unsupported plug
-		 */
-		wcd9xxx_report_plug(mbhc, 1, SND_JACK_HEADPHONE);
-		wcd9xxx_cleanup_hs_polling(mbhc);
-	} else if (plug_type == PLUG_TYPE_GND_MIC_SWAP) {
-		if (!mbhc->mbhc_cfg->detect_extn_cable) {
-			if (mbhc->current_plug == PLUG_TYPE_HEADSET)
-				wcd9xxx_report_plug(mbhc, 0,
-							 SND_JACK_HEADSET);
-			else if (mbhc->current_plug == PLUG_TYPE_HEADPHONE)
-				wcd9xxx_report_plug(mbhc, 0,
-							 SND_JACK_HEADPHONE);
-		}
-		wcd9xxx_report_plug(mbhc, 1, SND_JACK_UNSUPPORTED);
-		wcd9xxx_cleanup_hs_polling(mbhc);
-	} else if (plug_type == PLUG_TYPE_HEADSET) {
-
-		if (mbhc->mbhc_cfg->enable_anc_mic_detect) {
-			/*
-			 * Do not report Headset, because at this point
-			 * it could be an ANC headphone having two mics.
-			 * So, proceed further to detect whether there is a
-			 * second mic.
-			 */
-			mbhc->scaling_mux_in = 0x08;
-			anc_mic_found = wcd9xxx_detect_anc_plug_type(mbhc);
-		}
-
-		if (anc_mic_found) {
-			/* Report ANC headphone */
-			wcd9xxx_report_plug(mbhc, 1, SND_JACK_ANC_HEADPHONE);
-		} else {
-			/*
-			 * If Headphone was reported previously, this will
-			 * only report the mic line
-			 */
-			wcd9xxx_report_plug(mbhc, 1, SND_JACK_HEADSET);
-		}
-		/* Button detection required RC oscillator */
-		wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, true);
-		/*
-		 * sleep so that the audio path completely tears down
-		 * before reporting plug insertion to user space
-		 */
-		msleep(100);
-
-		wcd9xxx_start_hs_polling(mbhc);
-	} else if (plug_type == PLUG_TYPE_HIGH_HPH) {
-		if (mbhc->mbhc_cfg->detect_extn_cable) {
-			/* High impedance device found. Report as LINEOUT */
-			if (mbhc->current_plug == PLUG_TYPE_NONE)
-				wcd9xxx_report_plug(mbhc, 1, SND_JACK_LINEOUT);
-			wcd9xxx_cleanup_hs_polling(mbhc);
-			pr_debug("%s: setup mic trigger for further detection\n",
-				 __func__);
-			mbhc->lpi_enabled = true;
-			/*
-			 * Do not enable HPHL trigger. If playback is active,
-			 * it might lead to continuous false HPHL triggers
-			 */
-			wcd9xxx_enable_hs_detect(mbhc, 1, MBHC_USE_MB_TRIGGER,
-						 false);
-		} else {
-			if (mbhc->current_plug == PLUG_TYPE_NONE)
-				wcd9xxx_report_plug(mbhc, 1,
-							 SND_JACK_HEADPHONE);
-			wcd9xxx_cleanup_hs_polling(mbhc);
-			pr_debug("setup mic trigger for further detection\n");
-			mbhc->lpi_enabled = true;
-			wcd9xxx_enable_hs_detect(mbhc, 1, MBHC_USE_MB_TRIGGER |
-							  MBHC_USE_HPHL_TRIGGER,
-						 false);
-		}
-	} else {
-		WARN(1, "Unexpected current plug_type %d, plug_type %d\n",
-		     mbhc->current_plug, plug_type);
-	}
-	pr_debug("%s: leave\n", __func__);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_mbhc_decide_swch_plug(struct wcd9xxx_mbhc *mbhc)
-{
-	enum wcd9xxx_mbhc_plug_type plug_type;
-	bool current_source_enable;
-
-	pr_debug("%s: enter\n", __func__);
-
-	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
-	current_source_enable = (((mbhc->mbhc_cfg->cs_enable_flags &
-		      (1 << MBHC_CS_ENABLE_INSERTION)) != 0) &&
-		     (!(snd_soc_read(mbhc->codec,
-				     mbhc->mbhc_bias_regs.ctl_reg) & 0x80)));
-
-	mbhc->scaling_mux_in = 0x04;
-
-	if (current_source_enable) {
-		wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
-						  true, false);
-		plug_type = wcd9xxx_codec_cs_get_plug_type(mbhc, false);
-		/*
-		 * For other plug types, the current source disable
-		 * will be done from wcd9xxx_correct_swch_plug
-		 */
-		if (plug_type == PLUG_TYPE_HEADSET)
-			wcd9xxx_turn_onoff_current_source(mbhc,
-						&mbhc->mbhc_bias_regs,
-						false, false);
-	} else {
-		wcd9xxx_turn_onoff_override(mbhc, true);
-		plug_type = wcd9xxx_codec_get_plug_type(mbhc, true);
-		wcd9xxx_turn_onoff_override(mbhc, false);
-	}
-
-	if (wcd9xxx_swch_level_remove(mbhc)) {
-		if (current_source_enable && mbhc->is_cs_enabled) {
-			wcd9xxx_turn_onoff_current_source(mbhc,
-					&mbhc->mbhc_bias_regs,
-					false, false);
-		}
-		pr_debug("%s: Switch level is low when determining plug\n",
-			 __func__);
-		return;
-	}
-
-	if (plug_type == PLUG_TYPE_INVALID ||
-	    plug_type == PLUG_TYPE_GND_MIC_SWAP) {
-		wcd9xxx_cleanup_hs_polling(mbhc);
-		wcd9xxx_schedule_hs_detect_plug(mbhc,
-						&mbhc->correct_plug_swch);
-	} else if (plug_type == PLUG_TYPE_HEADPHONE) {
-		wcd9xxx_report_plug(mbhc, 1, SND_JACK_HEADPHONE);
-		wcd9xxx_cleanup_hs_polling(mbhc);
-		wcd9xxx_schedule_hs_detect_plug(mbhc,
-						&mbhc->correct_plug_swch);
-	} else if (plug_type == PLUG_TYPE_HIGH_HPH) {
-		wcd9xxx_cleanup_hs_polling(mbhc);
-		wcd9xxx_schedule_hs_detect_plug(mbhc,
-						&mbhc->correct_plug_swch);
-	} else {
-		pr_debug("%s: Valid plug found, determine plug type %d\n",
-			 __func__, plug_type);
-		wcd9xxx_find_plug_and_report(mbhc, plug_type);
-	}
-	pr_debug("%s: leave\n", __func__);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_mbhc_detect_plug_type(struct wcd9xxx_mbhc *mbhc)
-{
-	pr_debug("%s: enter\n", __func__);
-	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
-	if (wcd9xxx_swch_level_remove(mbhc))
-		pr_debug("%s: Switch level low when determining plug\n",
-			 __func__);
-	else
-		wcd9xxx_mbhc_decide_swch_plug(mbhc);
-	pr_debug("%s: leave\n", __func__);
-}
-
-/* called only from interrupt which is under codec_resource_lock acquisition */
-static void wcd9xxx_hs_insert_irq_swch(struct wcd9xxx_mbhc *mbhc,
-				       bool is_removal)
-{
-	if (!is_removal) {
-		pr_debug("%s: MIC trigger insertion interrupt\n", __func__);
-
-		/* Make sure memory read is completed before reading
-		 * lpi_enabled.
-		 */
-		rmb();
-		if (mbhc->lpi_enabled)
-			msleep(100);
-
-		/* Make sure memory read is completed before reading
-		 * lpi_enabled.
-		 */
-		rmb();
-		if (!mbhc->lpi_enabled) {
-			pr_debug("%s: lpi is disabled\n", __func__);
-		} else if (!wcd9xxx_swch_level_remove(mbhc)) {
-			pr_debug("%s: Valid insertion, detect plug type\n",
-				 __func__);
-			wcd9xxx_mbhc_decide_swch_plug(mbhc);
-		} else {
-			pr_debug("%s: Invalid insertion stop plug detection\n",
-				 __func__);
-		}
-	} else if (mbhc->mbhc_cfg->detect_extn_cable) {
-		pr_debug("%s: Removal\n", __func__);
-		if (!wcd9xxx_swch_level_remove(mbhc)) {
-			/*
-			 * Switch indicates something is still inserted.
-			 * This could be an extension cable, i.e. the headset
-			 * was removed from the extension cable.
-			 */
-			/* cancel detect plug */
-			wcd9xxx_cancel_hs_detect_plug(mbhc,
-						      &mbhc->correct_plug_swch);
-			wcd9xxx_mbhc_decide_swch_plug(mbhc);
-		}
-	} else {
-		pr_err("%s: Switch IRQ used, invalid MBHC Removal\n", __func__);
-	}
-}
-
-static bool is_valid_mic_voltage(struct wcd9xxx_mbhc *mbhc, s32 mic_mv,
-				 bool cs_enable)
-{
-	const struct wcd9xxx_mbhc_plug_type_cfg *plug_type =
-	    WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
-	const s16 v_hs_max = wcd9xxx_get_current_v_hs_max(mbhc);
-
-	if (cs_enable)
-		return ((mic_mv > WCD9XXX_V_CS_NO_MIC) &&
-			 (mic_mv < WCD9XXX_V_CS_HS_MAX)) ? true : false;
-	else
-		return (!(mic_mv > WCD9XXX_MEAS_INVALD_RANGE_LOW_MV &&
-			  mic_mv < WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV) &&
-			(mic_mv > plug_type->v_no_mic) &&
-			(mic_mv < v_hs_max)) ? true : false;
-}
-
-/*
- * called under codec_resource_lock acquisition
- * returns true if the mic voltage range is back to normal insertion,
- * returns false if it either timed out or the plug was removed
- */
-static bool wcd9xxx_hs_remove_settle(struct wcd9xxx_mbhc *mbhc)
-{
-	int i;
-	bool timedout, settled = false;
-	s32 mic_mv[NUM_DCE_PLUG_DETECT];
-	short mb_v[NUM_DCE_PLUG_DETECT];
-	unsigned long retry = 0, timeout;
-	bool cs_enable;
-
-	cs_enable = (((mbhc->mbhc_cfg->cs_enable_flags &
-		      (1 << MBHC_CS_ENABLE_REMOVAL)) != 0) &&
-		     (!(snd_soc_read(mbhc->codec,
-				     mbhc->mbhc_bias_regs.ctl_reg) & 0x80)));
-	if (cs_enable)
-		wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
-						  true, false);
-
-	timeout = jiffies + msecs_to_jiffies(HS_DETECT_PLUG_TIME_MS);
-	while (!(timedout = time_after(jiffies, timeout))) {
-		retry++;
-		if (wcd9xxx_swch_level_remove(mbhc)) {
-			pr_debug("%s: Switch indicates removal\n", __func__);
-			break;
-		}
-
-		if (retry > 1)
-			msleep(250);
-		else
-			msleep(50);
-
-		if (wcd9xxx_swch_level_remove(mbhc)) {
-			pr_debug("%s: Switch indicates removal\n", __func__);
-			break;
-		}
-
-		if (cs_enable) {
-			for (i = 0; i < NUM_DCE_PLUG_DETECT; i++) {
-				mb_v[i] = __wcd9xxx_codec_sta_dce(mbhc, 1,
-								  true, true);
-				mic_mv[i] = __wcd9xxx_codec_sta_dce_v(mbhc,
-								      true,
-								      mb_v[i],
-						mbhc->mbhc_data.dce_nsc_cs_z,
-						(u32)VDDIO_MICBIAS_MV);
-				pr_debug("%s : DCE run %lu, mic_mv = %d(%x)\n",
-					 __func__, retry, mic_mv[i], mb_v[i]);
-			}
-		} else {
-			for (i = 0; i < NUM_DCE_PLUG_DETECT; i++) {
-				mb_v[i] = wcd9xxx_codec_sta_dce(mbhc, 1,
-								true);
-				mic_mv[i] = wcd9xxx_codec_sta_dce_v(mbhc, 1,
-								mb_v[i]);
-				pr_debug("%s : DCE run %lu, mic_mv = %d(%x)\n",
-					 __func__, retry, mic_mv[i],
-								mb_v[i]);
-			}
-		}
-
-		if (wcd9xxx_swch_level_remove(mbhc)) {
-			pr_debug("%s: Switcn indicates removal\n", __func__);
-			break;
-		}
-
-		if (mbhc->current_plug == PLUG_TYPE_NONE) {
-			pr_debug("%s : headset/headphone is removed\n",
-				 __func__);
-			break;
-		}
-
-		for (i = 0; i < NUM_DCE_PLUG_DETECT; i++)
-			if (!is_valid_mic_voltage(mbhc, mic_mv[i], cs_enable))
-				break;
-
-		if (i == NUM_DCE_PLUG_DETECT) {
-			pr_debug("%s: MIC voltage settled\n", __func__);
-			settled = true;
-			msleep(200);
-			break;
-		}
-	}
-
-	if (cs_enable)
-		wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
-						  false, false);
-
-	if (timedout)
-		pr_debug("%s: Microphone did not settle in %d seconds\n",
-			 __func__, HS_DETECT_PLUG_TIME_MS);
-	return settled;
-}
-
-/* called only from interrupt which is under codec_resource_lock acquisition */
-static void wcd9xxx_hs_remove_irq_swch(struct wcd9xxx_mbhc *mbhc)
-{
-	pr_debug("%s: enter\n", __func__);
-	if (wcd9xxx_hs_remove_settle(mbhc))
-		wcd9xxx_start_hs_polling(mbhc);
-	pr_debug("%s: leave\n", __func__);
-}
-
-/* called only from interrupt which is under codec_resource_lock acquisition */
-static void wcd9xxx_hs_remove_irq_noswch(struct wcd9xxx_mbhc *mbhc)
-{
-	s16 dce, dcez;
-	unsigned long timeout;
-	bool removed = true;
-	struct snd_soc_codec *codec = mbhc->codec;
-	const struct wcd9xxx_mbhc_general_cfg *generic =
-		WCD9XXX_MBHC_CAL_GENERAL_PTR(mbhc->mbhc_cfg->calibration);
-	bool cs_enable;
-	s16 cur_v_ins_h;
-	u32 mb_mv;
-
-	pr_debug("%s: enter\n", __func__);
-	if (mbhc->current_plug != PLUG_TYPE_HEADSET &&
-		mbhc->current_plug != PLUG_TYPE_ANC_HEADPHONE) {
-		pr_debug("%s(): Headset is not inserted, ignore removal\n",
-			 __func__);
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
-				0x08, 0x08);
-		return;
-	}
-
-	usleep_range(generic->t_shutdown_plug_rem,
-		     generic->t_shutdown_plug_rem +
-		     WCD9XXX_USLEEP_RANGE_MARGIN_US);
-
-	/* If micbias is enabled, don't enable current source */
-	cs_enable = (((mbhc->mbhc_cfg->cs_enable_flags &
-		      (1 << MBHC_CS_ENABLE_REMOVAL)) != 0) &&
-		     (!(snd_soc_read(codec,
-				     mbhc->mbhc_bias_regs.ctl_reg) & 0x80)));
-	if (cs_enable)
-		wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
-						  true, false);
-
-	timeout = jiffies + msecs_to_jiffies(FAKE_REMOVAL_MIN_PERIOD_MS);
-	do {
-		if (cs_enable) {
-			dce = __wcd9xxx_codec_sta_dce(mbhc, 1,  true, true);
-			dcez = mbhc->mbhc_data.dce_nsc_cs_z;
-			mb_mv = VDDIO_MICBIAS_MV;
-		} else {
-			dce = wcd9xxx_codec_sta_dce(mbhc, 1,  true);
-			dcez = mbhc->mbhc_data.dce_z;
-			mb_mv = mbhc->mbhc_data.micb_mv;
-		}
-
-		pr_debug("%s: DCE 0x%x,%d\n", __func__, dce,
-			  __wcd9xxx_codec_sta_dce_v(mbhc, true, dce,
-						    dcez, mb_mv));
-
-		cur_v_ins_h = cs_enable ? (s16) mbhc->mbhc_data.v_cs_ins_h :
-					  (wcd9xxx_get_current_v(mbhc,
-					   WCD9XXX_CURRENT_V_INS_H));
-
-		if (dce < cur_v_ins_h) {
-			removed = false;
-			break;
-		}
-	} while (!time_after(jiffies, timeout));
-	pr_debug("%s: headset %sactually removed\n", __func__,
-		  removed ? "" : "not ");
-
-	if (cs_enable)
-		wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
-						  false, false);
-
-	if (removed) {
-		if (mbhc->mbhc_cfg->detect_extn_cable) {
-			if (!wcd9xxx_swch_level_remove(mbhc)) {
-				/*
-				 * extension cable is still plugged in
-				 * report it as LINEOUT device
-				 */
-				if (mbhc->hph_status == SND_JACK_HEADSET)
-					wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc,
-							false);
-				wcd9xxx_report_plug(mbhc, 1, SND_JACK_LINEOUT);
-				wcd9xxx_cleanup_hs_polling(mbhc);
-				wcd9xxx_enable_hs_detect(mbhc, 1,
-							 MBHC_USE_MB_TRIGGER,
-							 false);
-			}
-		} else {
-			/* Cancel possibly running hs_detect_work */
-			wcd9xxx_cancel_hs_detect_plug(mbhc,
-						    &mbhc->correct_plug_noswch);
-			/*
-			 * If this removal is not false, first check the micbias
-			 * switch status and switch it to LDOH if it is already
-			 * switched to VDDIO.
-			 */
-			wcd9xxx_switch_micbias(mbhc, 0);
-
-			wcd9xxx_report_plug(mbhc, 0, SND_JACK_HEADSET);
-			wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
-			wcd9xxx_cleanup_hs_polling(mbhc);
-			wcd9xxx_enable_hs_detect(mbhc, 1, MBHC_USE_MB_TRIGGER |
-							  MBHC_USE_HPHL_TRIGGER,
-						 true);
-		}
-	} else {
-		wcd9xxx_start_hs_polling(mbhc);
-	}
-	pr_debug("%s: leave\n", __func__);
-}
-
-/* called only from interrupt which is under codec_resource_lock acquisition */
-static void wcd9xxx_hs_insert_irq_extn(struct wcd9xxx_mbhc *mbhc,
-				       bool is_mb_trigger)
-{
-	/* Cancel possibly running hs_detect_work */
-	wcd9xxx_cancel_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
-
-	if (is_mb_trigger) {
-		pr_debug("%s: Waiting for Headphone left trigger\n", __func__);
-		wcd9xxx_enable_hs_detect(mbhc, 1, MBHC_USE_HPHL_TRIGGER, false);
-	} else  {
-		pr_debug("%s: HPHL trigger received, detecting plug type\n",
-			 __func__);
-		wcd9xxx_mbhc_detect_plug_type(mbhc);
-	}
-}
-
-static irqreturn_t wcd9xxx_hs_remove_irq(int irq, void *data)
-{
-	struct wcd9xxx_mbhc *mbhc = data;
-
-	pr_debug("%s: enter, removal interrupt\n", __func__);
-	WCD9XXX_BCL_LOCK(mbhc->resmgr);
-	/*
-	 * While we don't know whether MIC is there or not, let the resmgr know
-	 * so micbias can be disabled temporarily
-	 */
-	if (mbhc->current_plug == PLUG_TYPE_HEADSET) {
-		wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
-						WCD9XXX_COND_HPH_MIC, false);
-		wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
-						WCD9XXX_COND_HPH, false);
-	} else if (mbhc->current_plug == PLUG_TYPE_HEADPHONE) {
-		wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
-						WCD9XXX_COND_HPH, false);
-	}
-
-	if (mbhc->mbhc_cfg->detect_extn_cable &&
-	    !wcd9xxx_swch_level_remove(mbhc))
-		wcd9xxx_hs_remove_irq_noswch(mbhc);
-	else
-		wcd9xxx_hs_remove_irq_swch(mbhc);
-
-	if (mbhc->current_plug == PLUG_TYPE_HEADSET) {
-		wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
-						WCD9XXX_COND_HPH, true);
-		wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
-						WCD9XXX_COND_HPH_MIC, true);
-	} else if (mbhc->current_plug == PLUG_TYPE_HEADPHONE) {
-		wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
-						WCD9XXX_COND_HPH, true);
-	}
-	WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t wcd9xxx_hs_insert_irq(int irq, void *data)
-{
-	bool is_mb_trigger, is_removal;
-	struct wcd9xxx_mbhc *mbhc = data;
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	pr_debug("%s: enter\n", __func__);
-	WCD9XXX_BCL_LOCK(mbhc->resmgr);
-	wcd9xxx_disable_irq(mbhc->resmgr->core_res, mbhc->intr_ids->insertion);
-
-	is_mb_trigger = !!(snd_soc_read(codec, mbhc->mbhc_bias_regs.mbhc_reg) &
-			   0x10);
-	is_removal = !!(snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_INT_CTL) & 0x02);
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x03, 0x00);
-
-	/* Turn off both HPH and MIC line schmitt triggers */
-	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x90, 0x00);
-	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x13, 0x00);
-	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00);
-
-	if (mbhc->mbhc_cfg->detect_extn_cable &&
-	    mbhc->current_plug == PLUG_TYPE_HIGH_HPH)
-		wcd9xxx_hs_insert_irq_extn(mbhc, is_mb_trigger);
-	else
-		wcd9xxx_hs_insert_irq_swch(mbhc, is_removal);
-
-	WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-	return IRQ_HANDLED;
-}
-
-static void wcd9xxx_btn_lpress_fn(struct work_struct *work)
-{
-	struct delayed_work *dwork;
-	short bias_value;
-	int dce_mv, sta_mv;
-	struct wcd9xxx_mbhc *mbhc;
-
-	pr_debug("%s:\n", __func__);
-
-	dwork = to_delayed_work(work);
-	mbhc = container_of(dwork, struct wcd9xxx_mbhc, mbhc_btn_dwork);
-
-	bias_value = wcd9xxx_read_sta_result(mbhc->codec);
-	sta_mv = wcd9xxx_codec_sta_dce_v(mbhc, 0, bias_value);
-
-	bias_value = wcd9xxx_read_dce_result(mbhc->codec);
-	dce_mv = wcd9xxx_codec_sta_dce_v(mbhc, 1, bias_value);
-	pr_debug("%s: STA: %d, DCE: %d\n", __func__, sta_mv, dce_mv);
-
-	pr_debug("%s: Reporting long button press event\n", __func__);
-	wcd9xxx_jack_report(mbhc, &mbhc->button_jack, mbhc->buttons_pressed,
-			    mbhc->buttons_pressed);
-
-	pr_debug("%s: leave\n", __func__);
-	wcd9xxx_unlock_sleep(mbhc->resmgr->core_res);
-}
-
-static void wcd9xxx_mbhc_insert_work(struct work_struct *work)
-{
-	struct delayed_work *dwork;
-	struct wcd9xxx_mbhc *mbhc;
-	struct snd_soc_codec *codec;
-	struct wcd9xxx_core_resource *core_res;
-
-	dwork = to_delayed_work(work);
-	mbhc = container_of(dwork, struct wcd9xxx_mbhc, mbhc_insert_dwork);
-	codec = mbhc->codec;
-	core_res = mbhc->resmgr->core_res;
-
-	pr_debug("%s:\n", __func__);
-
-	/* Turn off both HPH and MIC line schmitt triggers */
-	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x90, 0x00);
-	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x13, 0x00);
-	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00);
-	wcd9xxx_disable_irq_sync(core_res, mbhc->intr_ids->insertion);
-	wcd9xxx_mbhc_detect_plug_type(mbhc);
-	wcd9xxx_unlock_sleep(core_res);
-}
-
-static bool wcd9xxx_mbhc_fw_validate(const void *data, size_t size)
-{
-	u32 cfg_offset;
-	struct wcd9xxx_mbhc_imped_detect_cfg *imped_cfg;
-	struct wcd9xxx_mbhc_btn_detect_cfg *btn_cfg;
-	struct firmware_cal fw;
-
-	fw.data = (void *)data;
-	fw.size = size;
-
-	if (fw.size < WCD9XXX_MBHC_CAL_MIN_SIZE)
-		return false;
-
-	/*
-	 * Previous check guarantees that there is enough fw data up
-	 * to num_btn
-	 */
-	btn_cfg = WCD9XXX_MBHC_CAL_BTN_DET_PTR(fw.data);
-	cfg_offset = (u32) ((void *) btn_cfg - (void *) fw.data);
-	if (fw.size < (cfg_offset + WCD9XXX_MBHC_CAL_BTN_SZ(btn_cfg)))
-		return false;
-
-	/*
-	 * Previous check guarantees that there is enough fw data up
-	 * to start of impedance detection configuration
-	 */
-	imped_cfg = WCD9XXX_MBHC_CAL_IMPED_DET_PTR(fw.data);
-	cfg_offset = (u32) ((void *) imped_cfg - (void *) fw.data);
-
-	if (fw.size < (cfg_offset + WCD9XXX_MBHC_CAL_IMPED_MIN_SZ))
-		return false;
-
-	if (fw.size < (cfg_offset + WCD9XXX_MBHC_CAL_IMPED_SZ(imped_cfg)))
-		return false;
-
-	return true;
-}
-
-static u16 wcd9xxx_codec_v_sta_dce(struct wcd9xxx_mbhc *mbhc,
-				   enum meas_type dce, s16 vin_mv,
-				   bool cs_enable)
-{
-	s16 diff, zero;
-	u32 mb_mv, in;
-	u16 value;
-	s16 dce_z;
-
-	mb_mv = mbhc->mbhc_data.micb_mv;
-	dce_z = mbhc->mbhc_data.dce_z;
-
-	if (mb_mv == 0) {
-		pr_err("%s: Mic Bias voltage is set to zero\n", __func__);
-		return -EINVAL;
-	}
-	if (cs_enable) {
-		mb_mv = VDDIO_MICBIAS_MV;
-		dce_z = mbhc->mbhc_data.dce_nsc_cs_z;
-	}
-
-	if (dce) {
-		diff = (mbhc->mbhc_data.dce_mb) - (dce_z);
-		zero = (dce_z);
-	} else {
-		diff = (mbhc->mbhc_data.sta_mb) - (mbhc->mbhc_data.sta_z);
-		zero = (mbhc->mbhc_data.sta_z);
-	}
-	in = (u32) diff * vin_mv;
-
-	value = (u16) (in / mb_mv) + zero;
-	return value;
-}
-
-static void wcd9xxx_mbhc_calc_thres(struct wcd9xxx_mbhc *mbhc)
-{
-	struct snd_soc_codec *codec;
-	s16 adj_v_hs_max;
-	s16 btn_mv = 0, btn_mv_sta[MBHC_V_IDX_NUM], btn_mv_dce[MBHC_V_IDX_NUM];
-	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
-	struct wcd9xxx_mbhc_plug_type_cfg *plug_type;
-	u16 *btn_high;
-	int i;
-
-	pr_debug("%s: enter\n", __func__);
-	codec = mbhc->codec;
-	btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
-	plug_type = WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
-
-	mbhc->mbhc_data.v_ins_hu[MBHC_V_IDX_CFILT] =
-	    wcd9xxx_codec_v_sta_dce(mbhc, STA, plug_type->v_hs_max, false);
-	mbhc->mbhc_data.v_ins_h[MBHC_V_IDX_CFILT] =
-	    wcd9xxx_codec_v_sta_dce(mbhc, DCE, plug_type->v_hs_max, false);
-
-	mbhc->mbhc_data.v_inval_ins_low = FAKE_INS_LOW;
-	mbhc->mbhc_data.v_inval_ins_high = FAKE_INS_HIGH;
-
-	if (mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV) {
-		adj_v_hs_max = scale_v_micb_vddio(mbhc, plug_type->v_hs_max,
-						  true);
-		mbhc->mbhc_data.v_ins_hu[MBHC_V_IDX_VDDIO] =
-		    wcd9xxx_codec_v_sta_dce(mbhc, STA, adj_v_hs_max, false);
-		mbhc->mbhc_data.v_ins_h[MBHC_V_IDX_VDDIO] =
-		    wcd9xxx_codec_v_sta_dce(mbhc, DCE, adj_v_hs_max, false);
-		mbhc->mbhc_data.v_inval_ins_low =
-		    scale_v_micb_vddio(mbhc, mbhc->mbhc_data.v_inval_ins_low,
-				       false);
-		mbhc->mbhc_data.v_inval_ins_high =
-		    scale_v_micb_vddio(mbhc, mbhc->mbhc_data.v_inval_ins_high,
-				       false);
-	}
-	mbhc->mbhc_data.v_cs_ins_h = wcd9xxx_codec_v_sta_dce(mbhc, DCE,
-							WCD9XXX_V_CS_HS_MAX,
-							true);
-	pr_debug("%s: v_ins_h for current source: 0x%x\n", __func__,
-		  mbhc->mbhc_data.v_cs_ins_h);
-
-	btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_det,
-					       MBHC_BTN_DET_V_BTN_HIGH);
-	for (i = 0; i < btn_det->num_btn; i++)
-		btn_mv = btn_high[i] > btn_mv ? btn_high[i] : btn_mv;
-
-	btn_mv_sta[MBHC_V_IDX_CFILT] = btn_mv + btn_det->v_btn_press_delta_sta;
-	btn_mv_dce[MBHC_V_IDX_CFILT] = btn_mv + btn_det->v_btn_press_delta_cic;
-	btn_mv_sta[MBHC_V_IDX_VDDIO] =
-	    scale_v_micb_vddio(mbhc, btn_mv_sta[MBHC_V_IDX_CFILT], true);
-	btn_mv_dce[MBHC_V_IDX_VDDIO] =
-	    scale_v_micb_vddio(mbhc, btn_mv_dce[MBHC_V_IDX_CFILT], true);
-
-	mbhc->mbhc_data.v_b1_hu[MBHC_V_IDX_CFILT] =
-	    wcd9xxx_codec_v_sta_dce(mbhc, STA, btn_mv_sta[MBHC_V_IDX_CFILT],
-				    false);
-	mbhc->mbhc_data.v_b1_h[MBHC_V_IDX_CFILT] =
-	    wcd9xxx_codec_v_sta_dce(mbhc, DCE, btn_mv_dce[MBHC_V_IDX_CFILT],
-				    false);
-	mbhc->mbhc_data.v_b1_hu[MBHC_V_IDX_VDDIO] =
-	    wcd9xxx_codec_v_sta_dce(mbhc, STA, btn_mv_sta[MBHC_V_IDX_VDDIO],
-				    false);
-	mbhc->mbhc_data.v_b1_h[MBHC_V_IDX_VDDIO] =
-	    wcd9xxx_codec_v_sta_dce(mbhc, DCE, btn_mv_dce[MBHC_V_IDX_VDDIO],
-				    false);
-
-	mbhc->mbhc_data.v_brh[MBHC_V_IDX_CFILT] =
-	    mbhc->mbhc_data.v_b1_h[MBHC_V_IDX_CFILT];
-	mbhc->mbhc_data.v_brh[MBHC_V_IDX_VDDIO] =
-	    mbhc->mbhc_data.v_b1_h[MBHC_V_IDX_VDDIO];
-
-	mbhc->mbhc_data.v_brl = BUTTON_MIN;
-
-	mbhc->mbhc_data.v_no_mic =
-	    wcd9xxx_codec_v_sta_dce(mbhc, STA, plug_type->v_no_mic, false);
-	pr_debug("%s: leave\n", __func__);
-}
-
-static void wcd9xxx_onoff_ext_mclk(struct wcd9xxx_mbhc *mbhc, bool on)
-{
-	/*
-	 * XXX: {codec}_mclk_enable holds WCD9XXX_BCL_LOCK,
-	 * therefore wcd9xxx_onoff_ext_mclk caller SHOULDN'T hold
-	 * WCD9XXX_BCL_LOCK when it calls wcd9xxx_onoff_ext_mclk()
-	 */
-	if (mbhc && mbhc->mbhc_cfg && mbhc->mbhc_cfg->mclk_cb_fn)
-		mbhc->mbhc_cfg->mclk_cb_fn(mbhc->codec, on, false);
-}
-
-/*
- * Mic Bias Enable Decision
- * Return true if high_hph_cnt is a power of 2 greater than 2,
- * otherwise return false
- */
-static bool wcd9xxx_mbhc_enable_mb_decision(int high_hph_cnt)
-{
-	return (high_hph_cnt > 2) && !(high_hph_cnt & (high_hph_cnt - 1));
-}
-
-static inline void wcd9xxx_handle_gnd_mic_swap(struct wcd9xxx_mbhc *mbhc,
-					int pt_gnd_mic_swap_cnt,
-					enum wcd9xxx_mbhc_plug_type plug_type)
-{
-	if (mbhc->mbhc_cfg->swap_gnd_mic &&
-	    (pt_gnd_mic_swap_cnt == GND_MIC_SWAP_THRESHOLD)) {
-		/*
-		 * if switch is toggled, check again,
-		 * otherwise report unsupported plug
-		 */
-		mbhc->mbhc_cfg->swap_gnd_mic(mbhc->codec);
-	} else if (pt_gnd_mic_swap_cnt >= GND_MIC_SWAP_THRESHOLD) {
-		/* Report UNSUPPORTED plug
-		 * and continue polling
-		 */
-		WCD9XXX_BCL_LOCK(mbhc->resmgr);
-		if (!mbhc->mbhc_cfg->detect_extn_cable) {
-			if (mbhc->current_plug == PLUG_TYPE_HEADPHONE)
-				wcd9xxx_report_plug(mbhc, 0,
-						    SND_JACK_HEADPHONE);
-			else if (mbhc->current_plug == PLUG_TYPE_HEADSET)
-				wcd9xxx_report_plug(mbhc, 0,
-						    SND_JACK_HEADSET);
-		}
-		if (mbhc->current_plug != plug_type)
-			wcd9xxx_report_plug(mbhc, 1,
-					SND_JACK_UNSUPPORTED);
-		WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-	}
-}
-
-static void wcd9xxx_correct_swch_plug(struct work_struct *work)
-{
-	struct wcd9xxx_mbhc *mbhc;
-	struct snd_soc_codec *codec;
-	enum wcd9xxx_mbhc_plug_type plug_type = PLUG_TYPE_INVALID;
-	unsigned long timeout;
-	int retry = 0, pt_gnd_mic_swap_cnt = 0;
-	int highhph_cnt = 0;
-	bool correction = false;
-	bool current_source_enable;
-	bool wrk_complete = true, highhph = false;
-
-	pr_debug("%s: enter\n", __func__);
-
-	mbhc = container_of(work, struct wcd9xxx_mbhc, correct_plug_swch);
-	codec = mbhc->codec;
-
-	current_source_enable = (((mbhc->mbhc_cfg->cs_enable_flags &
-		      (1 << MBHC_CS_ENABLE_POLLING)) != 0) &&
-		     (!(snd_soc_read(codec,
-				     mbhc->mbhc_bias_regs.ctl_reg) & 0x80)));
-
-	wcd9xxx_onoff_ext_mclk(mbhc, true);
-
-	/*
-	 * Keep override on during entire plug type correction work.
-	 *
-	 * This is okay under the assumption that any switch irqs which use
-	 * MBHC block cancel and sync this work so override is off again
-	 * prior to switch interrupt handler's MBHC block usage.
-	 * Also while this correction work is running, we can guarantee
-	 * DAPM doesn't use any MBHC block as this work only runs with
-	 * headphone detection.
-	 */
-	if (current_source_enable) {
-		WCD9XXX_BCL_LOCK(mbhc->resmgr);
-		wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
-						  true, false);
-		WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-	} else {
-		wcd9xxx_turn_onoff_override(mbhc, true);
-	}
-
-	timeout = jiffies + msecs_to_jiffies(HS_DETECT_PLUG_TIME_MS);
-	while (!time_after(jiffies, timeout)) {
-		++retry;
-
-		/* Make sure any pending memory read is completed before
-		 * the hs_detect_work_stop value is read.
-		 */
-		rmb();
-		if (mbhc->hs_detect_work_stop) {
-			wrk_complete = false;
-			pr_debug("%s: stop requested\n", __func__);
-			break;
-		}
-
-		msleep(HS_DETECT_PLUG_INERVAL_MS);
-		if (wcd9xxx_swch_level_remove(mbhc)) {
-			wrk_complete = false;
-			pr_debug("%s: Switch level is low\n", __func__);
-			break;
-		}
-
-		/* can race with removal interrupt */
-		WCD9XXX_BCL_LOCK(mbhc->resmgr);
-		if (current_source_enable)
-			plug_type = wcd9xxx_codec_cs_get_plug_type(mbhc,
-								   highhph);
-		else
-			plug_type = wcd9xxx_codec_get_plug_type(mbhc, true);
-		WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-
-		pr_debug("%s: attempt(%d) current_plug(%d) new_plug(%d)\n",
-			 __func__, retry, mbhc->current_plug, plug_type);
-
-		highhph_cnt = (plug_type == PLUG_TYPE_HIGH_HPH) ?
-					(highhph_cnt + 1) :
-					0;
-		highhph = wcd9xxx_mbhc_enable_mb_decision(highhph_cnt);
-		if (plug_type == PLUG_TYPE_INVALID) {
-			pr_debug("Invalid plug in attempt # %d\n", retry);
-			if (!mbhc->mbhc_cfg->detect_extn_cable &&
-			    retry == NUM_ATTEMPTS_TO_REPORT &&
-			    mbhc->current_plug == PLUG_TYPE_NONE) {
-				WCD9XXX_BCL_LOCK(mbhc->resmgr);
-				wcd9xxx_report_plug(mbhc, 1,
-						    SND_JACK_HEADPHONE);
-				WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-			}
-		} else if (plug_type == PLUG_TYPE_HEADPHONE) {
-			pr_debug("Good headphone detected, continue polling\n");
-			WCD9XXX_BCL_LOCK(mbhc->resmgr);
-			if (mbhc->mbhc_cfg->detect_extn_cable) {
-				if (mbhc->current_plug != plug_type)
-					wcd9xxx_report_plug(mbhc, 1,
-							    SND_JACK_HEADPHONE);
-			} else if (mbhc->current_plug == PLUG_TYPE_NONE) {
-				wcd9xxx_report_plug(mbhc, 1,
-						    SND_JACK_HEADPHONE);
-			}
-			WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-		} else if (plug_type == PLUG_TYPE_HIGH_HPH) {
-			pr_debug("%s: High HPH detected, continue polling\n",
-				  __func__);
-			WCD9XXX_BCL_LOCK(mbhc->resmgr);
-			if (mbhc->mbhc_cfg->detect_extn_cable) {
-				if (mbhc->current_plug != plug_type)
-					wcd9xxx_report_plug(mbhc, 1,
-							    SND_JACK_LINEOUT);
-			} else if (mbhc->current_plug == PLUG_TYPE_NONE) {
-				wcd9xxx_report_plug(mbhc, 1,
-						    SND_JACK_HEADPHONE);
-			}
-			WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-		} else {
-			if (plug_type == PLUG_TYPE_GND_MIC_SWAP) {
-				pt_gnd_mic_swap_cnt++;
-				if (pt_gnd_mic_swap_cnt >=
-						GND_MIC_SWAP_THRESHOLD)
-					wcd9xxx_handle_gnd_mic_swap(mbhc,
-							pt_gnd_mic_swap_cnt,
-							plug_type);
-				pr_debug("%s: unsupported HS detected, continue polling\n",
-					 __func__);
-				continue;
-			} else {
-				pt_gnd_mic_swap_cnt = 0;
-
-				WCD9XXX_BCL_LOCK(mbhc->resmgr);
-				/* Turn off override/current source */
-				if (current_source_enable)
-					wcd9xxx_turn_onoff_current_source(mbhc,
-							&mbhc->mbhc_bias_regs,
-							false, false);
-				else
-					wcd9xxx_turn_onoff_override(mbhc,
-								    false);
-				/*
-				 * The valid plug also includes
-				 * PLUG_TYPE_GND_MIC_SWAP
-				 */
-				wcd9xxx_find_plug_and_report(mbhc, plug_type);
-				WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-				pr_debug("Attempt %d found correct plug %d\n",
-						retry,
-						plug_type);
-				correction = true;
-			}
-			break;
-		}
-	}
-
-	highhph = false;
-	if (wrk_complete && plug_type == PLUG_TYPE_HIGH_HPH) {
-		pr_debug("%s: polling is done, still HPH, so enabling MIC trigger\n",
-			 __func__);
-		WCD9XXX_BCL_LOCK(mbhc->resmgr);
-		wcd9xxx_find_plug_and_report(mbhc, plug_type);
-		highhph = true;
-		WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-	}
-
-	if (plug_type == PLUG_TYPE_HEADPHONE) {
-		if (mbhc->mbhc_cb && mbhc->mbhc_cb->hph_auto_pulldown_ctrl)
-			mbhc->mbhc_cb->hph_auto_pulldown_ctrl(codec, true);
-	}
-
-	if (!correction && current_source_enable) {
-		WCD9XXX_BCL_LOCK(mbhc->resmgr);
-		wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
-						  false, highhph);
-		WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-	} else if (!correction) {
-		wcd9xxx_turn_onoff_override(mbhc, false);
-	}
-
-	wcd9xxx_onoff_ext_mclk(mbhc, false);
-
-	if (mbhc->mbhc_cfg->detect_extn_cable) {
-		WCD9XXX_BCL_LOCK(mbhc->resmgr);
-		if ((mbhc->current_plug == PLUG_TYPE_HEADPHONE &&
-		    wrk_complete) ||
-		    mbhc->current_plug == PLUG_TYPE_GND_MIC_SWAP ||
-		    mbhc->current_plug == PLUG_TYPE_INVALID ||
-		    (plug_type == PLUG_TYPE_INVALID && wrk_complete)) {
-			/* Enable removal detection */
-			wcd9xxx_cleanup_hs_polling(mbhc);
-			wcd9xxx_enable_hs_detect(mbhc, 0, 0, false);
-		}
-		WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-	}
-	pr_debug("%s: leave current_plug(%d)\n", __func__, mbhc->current_plug);
-	/* unlock sleep */
-	wcd9xxx_unlock_sleep(mbhc->resmgr->core_res);
-}
-
-static void wcd9xxx_swch_irq_handler(struct wcd9xxx_mbhc *mbhc)
-{
-	bool insert;
-	bool is_removed = false;
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	pr_debug("%s: enter\n", __func__);
-
-	mbhc->in_swch_irq_handler = true;
-	/* Wait here for debounce time */
-	usleep_range(SWCH_IRQ_DEBOUNCE_TIME_US, SWCH_IRQ_DEBOUNCE_TIME_US +
-					WCD9XXX_USLEEP_RANGE_MARGIN_US);
-
-	WCD9XXX_BCL_LOCK(mbhc->resmgr);
-
-	/* cancel pending button press */
-	if (wcd9xxx_cancel_btn_work(mbhc))
-		pr_debug("%s: button press is canceled\n", __func__);
-
-	insert = !wcd9xxx_swch_level_remove(mbhc);
-	pr_debug("%s: Current plug type %d, insert %d\n", __func__,
-		 mbhc->current_plug, insert);
-	if ((mbhc->current_plug == PLUG_TYPE_NONE) && insert) {
-
-		mbhc->lpi_enabled = false;
-
-		/* Make sure the mbhc state update is complete before
-		 * canceling detect plug.
-		 */
-		wmb();
-		/* cancel detect plug */
-		wcd9xxx_cancel_hs_detect_plug(mbhc,
-				      &mbhc->correct_plug_swch);
-
-		if ((mbhc->current_plug != PLUG_TYPE_NONE) &&
-		    (mbhc->current_plug != PLUG_TYPE_HIGH_HPH) &&
-		    !(snd_soc_read(codec, WCD9XXX_A_MBHC_INSERT_DETECT) &
-				   (1 << 1))) {
-			pr_debug("%s: current plug: %d\n", __func__,
-				mbhc->current_plug);
-			goto exit;
-		}
-
-		/* Disable Mic Bias pull down and HPH Switch to GND */
-		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01,
-				    0x00);
-		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01, 0x00);
-		wcd9xxx_mbhc_detect_plug_type(mbhc);
-	} else if ((mbhc->current_plug != PLUG_TYPE_NONE) && !insert) {
-		mbhc->lpi_enabled = false;
-
-		/* Make sure the mbhc state update is complete before
-		 * canceling detect plug.
-		 */
-		wmb();
-		/* cancel detect plug */
-		wcd9xxx_cancel_hs_detect_plug(mbhc,
-				      &mbhc->correct_plug_swch);
-
-		if (mbhc->current_plug == PLUG_TYPE_HEADPHONE) {
-			wcd9xxx_report_plug(mbhc, 0, SND_JACK_HEADPHONE);
-			is_removed = true;
-		} else if (mbhc->current_plug == PLUG_TYPE_GND_MIC_SWAP) {
-			wcd9xxx_report_plug(mbhc, 0, SND_JACK_UNSUPPORTED);
-			is_removed = true;
-		} else if (mbhc->current_plug == PLUG_TYPE_HEADSET) {
-			wcd9xxx_pause_hs_polling(mbhc);
-			wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
-			wcd9xxx_cleanup_hs_polling(mbhc);
-			wcd9xxx_report_plug(mbhc, 0, SND_JACK_HEADSET);
-			is_removed = true;
-		} else if (mbhc->current_plug == PLUG_TYPE_HIGH_HPH) {
-			wcd9xxx_report_plug(mbhc, 0, SND_JACK_LINEOUT);
-			is_removed = true;
-		} else if (mbhc->current_plug == PLUG_TYPE_ANC_HEADPHONE) {
-			wcd9xxx_pause_hs_polling(mbhc);
-			wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
-			wcd9xxx_cleanup_hs_polling(mbhc);
-			wcd9xxx_report_plug(mbhc, 0, SND_JACK_ANC_HEADPHONE);
-			is_removed = true;
-		}
-
-		if (is_removed) {
-			snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
-				      0x00);
-			snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
-					    0x02, 0x00);
-
-			/* Enable Mic Bias pull down and HPH Switch to GND */
-			snd_soc_update_bits(codec,
-					mbhc->mbhc_bias_regs.ctl_reg, 0x01,
-					0x01);
-			snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01,
-					0x01);
-			/* Make sure mic trigger is turned off */
-			snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg,
-					    0x01, 0x01);
-			snd_soc_update_bits(codec,
-					    mbhc->mbhc_bias_regs.mbhc_reg,
-					    0x90, 0x00);
-			/* Reset MBHC State Machine */
-			snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
-					    0x08, 0x08);
-			snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
-					    0x08, 0x00);
-			/* Turn off override */
-			wcd9xxx_turn_onoff_override(mbhc, false);
-		}
-	}
-exit:
-	mbhc->in_swch_irq_handler = false;
-	WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-	pr_debug("%s: leave\n", __func__);
-}
-
-static irqreturn_t wcd9xxx_mech_plug_detect_irq(int irq, void *data)
-{
-	int r = IRQ_HANDLED;
-	struct wcd9xxx_mbhc *mbhc = data;
-
-	pr_debug("%s: enter\n", __func__);
-	if (unlikely(wcd9xxx_lock_sleep(mbhc->resmgr->core_res) == false)) {
-		pr_warn("%s: failed to hold suspend\n", __func__);
-		r = IRQ_NONE;
-	} else {
-		/* Call handler */
-		wcd9xxx_swch_irq_handler(mbhc);
-		wcd9xxx_unlock_sleep(mbhc->resmgr->core_res);
-	}
-
-	pr_debug("%s: leave %d\n", __func__, r);
-	return r;
-}
-
-static int wcd9xxx_is_false_press(struct wcd9xxx_mbhc *mbhc)
-{
-	s16 mb_v;
-	int i = 0;
-	int r = 0;
-	const s16 v_ins_hu =
-	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_INS_HU);
-	const s16 v_ins_h =
-	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_INS_H);
-	const s16 v_b1_hu =
-	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_HU);
-	const s16 v_b1_h =
-	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_H);
-	const unsigned long timeout =
-	    jiffies + msecs_to_jiffies(BTN_RELEASE_DEBOUNCE_TIME_MS);
-
-	while (time_before(jiffies, timeout)) {
-		/*
-		 * This function needs to run measurements just a few times
-		 * during the release debounce time.  Use a 1 ms interval to
-		 * avoid unnecessarily frequent measurements.
-		 */
-		usleep_range(1000, 1000 + WCD9XXX_USLEEP_RANGE_MARGIN_US);
-		if (i == 0) {
-			mb_v = wcd9xxx_codec_sta_dce(mbhc, 0, true);
-			pr_debug("%s: STA[0]: %d,%d\n", __func__, mb_v,
-				 wcd9xxx_codec_sta_dce_v(mbhc, 0, mb_v));
-			if (mb_v < v_b1_hu || mb_v > v_ins_hu) {
-				r = 1;
-				break;
-			}
-		} else {
-			mb_v = wcd9xxx_codec_sta_dce(mbhc, 1, true);
-			pr_debug("%s: DCE[%d]: %d,%d\n", __func__, i, mb_v,
-				 wcd9xxx_codec_sta_dce_v(mbhc, 1, mb_v));
-			if (mb_v < v_b1_h || mb_v > v_ins_h) {
-				r = 1;
-				break;
-			}
-		}
-		i++;
-	}
-
-	return r;
-}
-
-/* called under codec_resource_lock acquisition */
-static int wcd9xxx_determine_button(const struct wcd9xxx_mbhc *mbhc,
-				  const s32 micmv)
-{
-	s16 *v_btn_low, *v_btn_high;
-	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
-	int i, btn = -1;
-
-	btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
-	v_btn_low = wcd9xxx_mbhc_cal_btn_det_mp(btn_det,
-						MBHC_BTN_DET_V_BTN_LOW);
-	v_btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_det,
-						 MBHC_BTN_DET_V_BTN_HIGH);
-
-	for (i = 0; i < btn_det->num_btn; i++) {
-		if ((v_btn_low[i] <= micmv) && (v_btn_high[i] >= micmv)) {
-			btn = i;
-			break;
-		}
-	}
-
-	if (btn == -1)
-		pr_debug("%s: couldn't find button number for mic mv %d\n",
-			 __func__, micmv);
-
-	return btn;
-}
-
-static int wcd9xxx_get_button_mask(const int btn)
-{
-	int mask = 0;
-
-	switch (btn) {
-	case 0:
-		mask = SND_JACK_BTN_0;
-		break;
-	case 1:
-		mask = SND_JACK_BTN_1;
-		break;
-	case 2:
-		mask = SND_JACK_BTN_2;
-		break;
-	case 3:
-		mask = SND_JACK_BTN_3;
-		break;
-	case 4:
-		mask = SND_JACK_BTN_4;
-		break;
-	case 5:
-		mask = SND_JACK_BTN_5;
-		break;
-	}
-	return mask;
-}
-
-static void wcd9xxx_get_z(struct wcd9xxx_mbhc *mbhc, s16 *dce_z, s16 *sta_z,
-			  struct mbhc_micbias_regs *micb_regs,
-			  bool norel_detection)
-{
-	s16 reg0, reg1;
-	int change;
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-	/* Pull down micbias to ground and disconnect vddio switch */
-	reg0 = snd_soc_read(codec, micb_regs->ctl_reg);
-	snd_soc_update_bits(codec, micb_regs->ctl_reg, 0x81, 0x1);
-	reg1 = snd_soc_read(codec, micb_regs->mbhc_reg);
-	snd_soc_update_bits(codec, micb_regs->mbhc_reg, 1 << 7, 0);
-
-	/* Disconnect override from micbias */
-	change = snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 1 << 4,
-				     1 << 0);
-	usleep_range(1000, 1000 + 1000);
-	if (sta_z) {
-		*sta_z = wcd9xxx_codec_sta_dce(mbhc, 0, norel_detection);
-		pr_debug("%s: sta_z 0x%x\n", __func__, *sta_z & 0xFFFF);
-	}
-	if (dce_z) {
-		*dce_z = wcd9xxx_codec_sta_dce(mbhc, 1, norel_detection);
-		pr_debug("%s: dce_z 0x%x\n", __func__, *dce_z & 0xFFFF);
-	}
-
-	/* Connect override from micbias */
-	if (change)
-		snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 1 << 4,
-				    1 << 4);
-	/* Disable pull down micbias to ground */
-	snd_soc_write(codec, micb_regs->mbhc_reg, reg1);
-	snd_soc_write(codec, micb_regs->ctl_reg, reg0);
-}
-
-/*
- * This function recalibrates dce_z and sta_z parameters.
- * The no-release-detection flag (norel_detection) is false when
- * this function is used.
- */
-void wcd9xxx_update_z(struct wcd9xxx_mbhc *mbhc)
-{
-	const u16 sta_z = mbhc->mbhc_data.sta_z;
-	const u16 dce_z = mbhc->mbhc_data.dce_z;
-
-	wcd9xxx_get_z(mbhc, &mbhc->mbhc_data.dce_z, &mbhc->mbhc_data.sta_z,
-		      &mbhc->mbhc_bias_regs, false);
-	pr_debug("%s: sta_z 0x%x,dce_z 0x%x -> sta_z 0x%x,dce_z 0x%x\n",
-		 __func__, sta_z & 0xFFFF, dce_z & 0xFFFF,
-		 mbhc->mbhc_data.sta_z & 0xFFFF,
-		 mbhc->mbhc_data.dce_z & 0xFFFF);
-
-	wcd9xxx_mbhc_calc_thres(mbhc);
-	wcd9xxx_calibrate_hs_polling(mbhc);
-}
-
-/*
- * wcd9xxx_update_rel_threshold : update mbhc release upper bound threshold
- *				  to ceilmv + buffer
- */
-static int wcd9xxx_update_rel_threshold(struct wcd9xxx_mbhc *mbhc, int ceilmv,
-					bool vddio)
-{
-	u16 v_brh, v_b1_hu;
-	int mv;
-	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
-	void *calibration = mbhc->mbhc_cfg->calibration;
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration);
-	mv = ceilmv + btn_det->v_btn_press_delta_cic;
-	if (vddio)
-		mv = scale_v_micb_vddio(mbhc, mv, true);
-	pr_debug("%s: reprogram vb1hu/vbrh to %dmv\n", __func__, mv);
-
-	if (mbhc->mbhc_state != MBHC_STATE_POTENTIAL_RECOVERY) {
-		/*
-		 * update LSB first so mbhc hardware block
-		 * doesn't see too low a value.
-		 */
-		v_b1_hu = wcd9xxx_codec_v_sta_dce(mbhc, STA, mv, false);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL, v_b1_hu &
-				0xFF);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL,
-				(v_b1_hu >> 8) & 0xFF);
-		v_brh = wcd9xxx_codec_v_sta_dce(mbhc, DCE, mv, false);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL, v_brh &
-				0xFF);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL,
-				(v_brh >> 8) & 0xFF);
-	}
-	return 0;
-}
-
-irqreturn_t wcd9xxx_dce_handler(int irq, void *data)
-{
-	int i, mask;
-	bool vddio;
-	u8 mbhc_status;
-	s16 dce_z, sta_z;
-	s32 stamv, stamv_s;
-	s16 *v_btn_high;
-	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
-	int btn = -1, meas = 0;
-	struct wcd9xxx_mbhc *mbhc = data;
-	const struct wcd9xxx_mbhc_btn_detect_cfg *d =
-	    WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
-	short btnmeas[d->n_btn_meas + 1];
-	short dce[d->n_btn_meas + 1], sta;
-	s32 mv[d->n_btn_meas + 1], mv_s[d->n_btn_meas + 1];
-	struct snd_soc_codec *codec = mbhc->codec;
-	struct wcd9xxx_core_resource *core_res = mbhc->resmgr->core_res;
-	int n_btn_meas = d->n_btn_meas;
-	void *calibration = mbhc->mbhc_cfg->calibration;
-
-	pr_debug("%s: enter\n", __func__);
-
-	WCD9XXX_BCL_LOCK(mbhc->resmgr);
-	mutex_lock(&mbhc->mbhc_lock);
-	mbhc_status = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_STATUS) & 0x3E;
-
-	if (mbhc->mbhc_state == MBHC_STATE_POTENTIAL_RECOVERY) {
-		pr_debug("%s: mbhc is being recovered, skip button press\n",
-			 __func__);
-		goto done;
-	}
-
-	mbhc->mbhc_state = MBHC_STATE_POTENTIAL;
-
-	if (!mbhc->polling_active) {
-		pr_warn("%s: mbhc polling is not active, skip button press\n",
-			__func__);
-		goto done;
-	}
-
-	/* If the switch interrupt already kicked in, ignore button press */
-	if (mbhc->in_swch_irq_handler) {
-		pr_debug("%s: Switch level changed, ignore button press\n",
-			 __func__);
-		btn = -1;
-		goto done;
-	}
-
-	/*
-	 * setup internal micbias if codec uses internal micbias for
-	 * headset detection
-	 */
-	if (mbhc->mbhc_cfg->use_int_rbias) {
-		if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias)
-			mbhc->mbhc_cb->setup_int_rbias(codec, true);
-		else
-			pr_err("%s: internal bias requested but codec did not provide callback\n",
-				__func__);
-	}
-
-
-	/* Measure scaled HW DCE */
-	vddio = (mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV &&
-		 mbhc->mbhc_micbias_switched);
-
-	dce_z = mbhc->mbhc_data.dce_z;
-	sta_z = mbhc->mbhc_data.sta_z;
-
-	/* Measure scaled HW STA */
-	dce[0] = wcd9xxx_read_dce_result(codec);
-	sta = wcd9xxx_read_sta_result(codec);
-	if (mbhc_status != STATUS_REL_DETECTION) {
-		if (mbhc->mbhc_last_resume &&
-		    !time_after(jiffies, mbhc->mbhc_last_resume + HZ)) {
-			pr_debug("%s: Button is released after resume\n",
-				__func__);
-			n_btn_meas = 0;
-		} else {
-			pr_debug("%s: Button is released without resume\n",
-				 __func__);
-			if (mbhc->update_z) {
-				wcd9xxx_update_z(mbhc);
-				dce_z = mbhc->mbhc_data.dce_z;
-				sta_z = mbhc->mbhc_data.sta_z;
-				mbhc->update_z = true;
-			}
-			stamv = __wcd9xxx_codec_sta_dce_v(mbhc, 0, sta, sta_z,
-						mbhc->mbhc_data.micb_mv);
-			if (vddio)
-				stamv_s = scale_v_micb_vddio(mbhc, stamv,
-							     false);
-			else
-				stamv_s = stamv;
-			mv[0] = __wcd9xxx_codec_sta_dce_v(mbhc, 1, dce[0],
-					  dce_z, mbhc->mbhc_data.micb_mv);
-			mv_s[0] = vddio ? scale_v_micb_vddio(mbhc, mv[0],
-							     false) : mv[0];
-			btn = wcd9xxx_determine_button(mbhc, mv_s[0]);
-			if (btn != wcd9xxx_determine_button(mbhc, stamv_s))
-				btn = -1;
-			goto done;
-		}
-	}
-
-	for (meas = 1; ((d->n_btn_meas) && (meas < (d->n_btn_meas + 1)));
-	     meas++)
-		dce[meas] = wcd9xxx_codec_sta_dce(mbhc, 1, false);
-
-	if (mbhc->update_z) {
-		wcd9xxx_update_z(mbhc);
-		dce_z = mbhc->mbhc_data.dce_z;
-		sta_z = mbhc->mbhc_data.sta_z;
-		mbhc->update_z = true;
-	}
-
-	stamv = __wcd9xxx_codec_sta_dce_v(mbhc, 0, sta, sta_z,
-					  mbhc->mbhc_data.micb_mv);
-	if (vddio)
-		stamv_s = scale_v_micb_vddio(mbhc, stamv, false);
-	else
-		stamv_s = stamv;
-	pr_debug("%s: Meas HW - STA 0x%x,%d,%d\n", __func__,
-		 sta & 0xFFFF, stamv, stamv_s);
-
-	/* determine pressed button */
-	mv[0] = __wcd9xxx_codec_sta_dce_v(mbhc, 1, dce[0], dce_z,
-					  mbhc->mbhc_data.micb_mv);
-	mv_s[0] = vddio ? scale_v_micb_vddio(mbhc, mv[0], false) : mv[0];
-	btnmeas[0] = wcd9xxx_determine_button(mbhc, mv_s[0]);
-	pr_debug("%s: Meas HW - DCE 0x%x,%d,%d button %d\n", __func__,
-		 dce[0] & 0xFFFF, mv[0], mv_s[0], btnmeas[0]);
-	if (n_btn_meas == 0)
-		btn = btnmeas[0];
-	for (meas = 1; (n_btn_meas && d->n_btn_meas &&
-			(meas < (d->n_btn_meas + 1))); meas++) {
-		mv[meas] = __wcd9xxx_codec_sta_dce_v(mbhc, 1, dce[meas], dce_z,
-						     mbhc->mbhc_data.micb_mv);
-		mv_s[meas] = vddio ? scale_v_micb_vddio(mbhc, mv[meas], false) :
-				     mv[meas];
-		btnmeas[meas] = wcd9xxx_determine_button(mbhc, mv_s[meas]);
-		pr_debug("%s: Meas %d - DCE 0x%x,%d,%d button %d\n",
-			 __func__, meas, dce[meas] & 0xFFFF, mv[meas],
-			 mv_s[meas], btnmeas[meas]);
-		/*
-		 * once enough measurements are collected, start checking
-		 * whether the last n_btn_con measurements all fall in the
-		 * same button low/high range
-		 */
-		if (meas + 1 >= d->n_btn_con) {
-			for (i = 0; i < d->n_btn_con; i++)
-				if ((btnmeas[meas] < 0) ||
-				    (btnmeas[meas] != btnmeas[meas - i]))
-					break;
-			if (i == d->n_btn_con) {
-				/* button pressed */
-				btn = btnmeas[meas];
-				break;
-			} else if ((n_btn_meas - meas) < (d->n_btn_con - 1)) {
-				/*
-				 * if fewer measurements than n_btn_con remain,
-				 * it's impossible to determine the button number
-				 */
-				break;
-			}
-		}
-	}
-
-	if (btn >= 0) {
-		if (mbhc->in_swch_irq_handler) {
-			pr_debug(
-			"%s: Switch irq triggered, ignore button press\n",
-			__func__);
-			goto done;
-		}
-		btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration);
-		v_btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_det,
-						       MBHC_BTN_DET_V_BTN_HIGH);
-		WARN_ON(btn >= btn_det->num_btn);
-		/* reprogram release threshold to catch voltage ramp up early */
-		wcd9xxx_update_rel_threshold(mbhc, v_btn_high[btn], vddio);
-
-		mask = wcd9xxx_get_button_mask(btn);
-		mbhc->buttons_pressed |= mask;
-		wcd9xxx_lock_sleep(core_res);
-		if (schedule_delayed_work(&mbhc->mbhc_btn_dwork,
-					  msecs_to_jiffies(400)) == 0) {
-			WARN(1, "Button pressed twice without release event\n");
-			wcd9xxx_unlock_sleep(core_res);
-		}
-	} else {
-		pr_debug("%s: bogus button press, too short press?\n",
-			 __func__);
-	}
-
- done:
-	pr_debug("%s: leave\n", __func__);
-	mutex_unlock(&mbhc->mbhc_lock);
-	WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t wcd9xxx_release_handler(int irq, void *data)
-{
-	int ret;
-	bool waitdebounce = true;
-	struct wcd9xxx_mbhc *mbhc = data;
-
-	pr_debug("%s: enter\n", __func__);
-	WCD9XXX_BCL_LOCK(mbhc->resmgr);
-	mbhc->mbhc_state = MBHC_STATE_RELEASE;
-
-	if (mbhc->buttons_pressed & WCD9XXX_JACK_BUTTON_MASK) {
-		ret = wcd9xxx_cancel_btn_work(mbhc);
-		if (ret == 0) {
-			pr_debug("%s: Reporting long button release event\n",
-				 __func__);
-			wcd9xxx_jack_report(mbhc, &mbhc->button_jack, 0,
-					    mbhc->buttons_pressed);
-		} else {
-			if (wcd9xxx_is_false_press(mbhc)) {
-				pr_debug("%s: Fake button press interrupt\n",
-					 __func__);
-			} else {
-				if (mbhc->in_swch_irq_handler) {
-					pr_debug("%s: Switch irq kicked in, ignore\n",
-						 __func__);
-				} else {
-					pr_debug("%s: Reporting btn press\n",
-						 __func__);
-					wcd9xxx_jack_report(mbhc,
-							 &mbhc->button_jack,
-							 mbhc->buttons_pressed,
-							 mbhc->buttons_pressed);
-					pr_debug("%s: Reporting btn release\n",
-						 __func__);
-					wcd9xxx_jack_report(mbhc,
-						      &mbhc->button_jack,
-						      0, mbhc->buttons_pressed);
-					waitdebounce = false;
-				}
-			}
-		}
-
-		mbhc->buttons_pressed &= ~WCD9XXX_JACK_BUTTON_MASK;
-	}
-
-	wcd9xxx_calibrate_hs_polling(mbhc);
-
-	if (waitdebounce)
-		msleep(SWCH_REL_DEBOUNCE_TIME_MS);
-	wcd9xxx_start_hs_polling(mbhc);
-
-	pr_debug("%s: leave\n", __func__);
-	WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t wcd9xxx_hphl_ocp_irq(int irq, void *data)
-{
-	struct wcd9xxx_mbhc *mbhc = data;
-	struct snd_soc_codec *codec;
-
-	pr_info("%s: received HPHL OCP irq\n", __func__);
-
-	if (mbhc) {
-		codec = mbhc->codec;
-		if ((mbhc->hphlocp_cnt < OCP_ATTEMPT) &&
-		    (!mbhc->hphrocp_cnt)) {
-			pr_info("%s: retry\n", __func__);
-			mbhc->hphlocp_cnt++;
-			snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL,
-					    0x10, 0x00);
-			snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL,
-					    0x10, 0x10);
-		} else {
-			wcd9xxx_disable_irq(mbhc->resmgr->core_res,
-					  mbhc->intr_ids->hph_left_ocp);
-			mbhc->hph_status |= SND_JACK_OC_HPHL;
-			wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
-					    mbhc->hph_status,
-					    WCD9XXX_JACK_MASK);
-		}
-	} else {
-		pr_err("%s: Bad wcd9xxx private data\n", __func__);
-	}
-
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t wcd9xxx_hphr_ocp_irq(int irq, void *data)
-{
-	struct wcd9xxx_mbhc *mbhc = data;
-	struct snd_soc_codec *codec;
-
-	pr_info("%s: received HPHR OCP irq\n", __func__);
-	codec = mbhc->codec;
-	if ((mbhc->hphrocp_cnt < OCP_ATTEMPT) &&
-	    (!mbhc->hphlocp_cnt)) {
-		pr_info("%s: retry\n", __func__);
-		mbhc->hphrocp_cnt++;
-		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
-				    0x00);
-		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
-				    0x10);
-	} else {
-		wcd9xxx_disable_irq(mbhc->resmgr->core_res,
-				    mbhc->intr_ids->hph_right_ocp);
-		mbhc->hph_status |= SND_JACK_OC_HPHR;
-		wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
-				    mbhc->hph_status, WCD9XXX_JACK_MASK);
-	}
-
-	return IRQ_HANDLED;
-}
-
-static int wcd9xxx_acdb_mclk_index(const int rate)
-{
-	if (rate == MCLK_RATE_12288KHZ)
-		return 0;
-	else if (rate == MCLK_RATE_9600KHZ)
-		return 1;
-	else {
-		BUG_ON(1);
-		return -EINVAL;
-	}
-}
-
-static void wcd9xxx_update_mbhc_clk_rate(struct wcd9xxx_mbhc *mbhc, u32 rate)
-{
-	u32 dce_wait, sta_wait;
-	u8 ncic, nmeas, navg;
-	void *calibration;
-	u8 *n_cic, *n_ready;
-	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
-	u8 npoll = 4, nbounce_wait = 30;
-	struct snd_soc_codec *codec = mbhc->codec;
-	int idx = wcd9xxx_acdb_mclk_index(rate);
-	int idxmclk = wcd9xxx_acdb_mclk_index(mbhc->mbhc_cfg->mclk_rate);
-
-	pr_debug("%s: Updating clock rate dependents, rate = %u\n", __func__,
-		 rate);
-	calibration = mbhc->mbhc_cfg->calibration;
-
-	/*
-	 * First compute the DCE / STA wait times depending on tunable
-	 * parameters. The value is computed in microseconds
-	 */
-	btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration);
-	n_ready = wcd9xxx_mbhc_cal_btn_det_mp(btn_det, MBHC_BTN_DET_N_READY);
-	n_cic = wcd9xxx_mbhc_cal_btn_det_mp(btn_det, MBHC_BTN_DET_N_CIC);
-	nmeas = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration)->n_meas;
-	navg = WCD9XXX_MBHC_CAL_GENERAL_PTR(calibration)->mbhc_navg;
-
-	/* ncic stays the same as what we had during calibration */
-	ncic = n_cic[idxmclk];
-	dce_wait = (1000 * 512 * ncic * (nmeas + 1)) / (rate / 1000);
-	sta_wait = (1000 * 128 * (navg + 1)) / (rate / 1000);
-	mbhc->mbhc_data.t_dce = dce_wait;
-	/* give extra margin to sta for safety */
-	mbhc->mbhc_data.t_sta = sta_wait + 250;
-	mbhc->mbhc_data.t_sta_dce = ((1000 * 256) / (rate / 1000) *
-				     n_ready[idx]) + 10;
-
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B1_CTL, n_ready[idx]);
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B6_CTL, ncic);
-
-	if (rate == MCLK_RATE_12288KHZ) {
-		npoll = 4;
-		nbounce_wait = 30;
-	} else if (rate == MCLK_RATE_9600KHZ) {
-		npoll = 3;
-		nbounce_wait = 23;
-	}
-
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B2_CTL, npoll);
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B3_CTL, nbounce_wait);
-	pr_debug("%s: leave\n", __func__);
-}
-
-static void wcd9xxx_mbhc_cal(struct wcd9xxx_mbhc *mbhc)
-{
-	u8 cfilt_mode;
-	u16 reg0, reg1, reg2;
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	pr_debug("%s: enter\n", __func__);
-	wcd9xxx_disable_irq(mbhc->resmgr->core_res,
-			    mbhc->intr_ids->dce_est_complete);
-	wcd9xxx_turn_onoff_rel_detection(codec, false);
-
-	/* t_dce and t_sta are updated by wcd9xxx_update_mbhc_clk_rate() */
-	WARN_ON(!mbhc->mbhc_data.t_dce);
-	WARN_ON(!mbhc->mbhc_data.t_sta);
-
-	/*
-	 * LDOH and CFILT are already configured during pdata handling.
-	 * Only need to make sure CFILT and bandgap are in Fast mode.
-	 * Need to restore defaults once calculation is done.
-	 *
-	 * In case Micbias is powered by an external source, request that
-	 * the external voltage source be turned on for calibration.
-	 */
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source)
-		mbhc->mbhc_cb->enable_mb_source(codec, true, false);
-
-	cfilt_mode = snd_soc_read(codec, mbhc->mbhc_bias_regs.cfilt_ctl);
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->cfilt_fast_mode)
-		mbhc->mbhc_cb->cfilt_fast_mode(codec, mbhc);
-	else
-		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl,
-				    0x40, 0x00);
-
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->micbias_pulldown_ctrl)
-		mbhc->mbhc_cb->micbias_pulldown_ctrl(mbhc, false);
-
-	/*
-	 * Micbias, CFILT, LDOH, MBHC MUX mode settings
-	 * to perform ADC calibration
-	 */
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->select_cfilt)
-		mbhc->mbhc_cb->select_cfilt(codec, mbhc);
-	else
-		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x60,
-				    mbhc->mbhc_cfg->micbias << 5);
-	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00);
-	snd_soc_update_bits(codec, WCD9XXX_A_LDO_H_MODE_1, 0x60, 0x60);
-	snd_soc_write(codec, WCD9XXX_A_TX_7_MBHC_TEST_CTL, 0x78);
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_specific_cal)
-		mbhc->mbhc_cb->codec_specific_cal(codec, mbhc);
-	else
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
-				    0x04, 0x04);
-
-	/* Pull down micbias to ground */
-	reg0 = snd_soc_read(codec, mbhc->mbhc_bias_regs.ctl_reg);
-	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 1, 1);
-	/* Disconnect override from micbias */
-	reg1 = snd_soc_read(codec, WCD9XXX_A_MAD_ANA_CTRL);
-	snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 1 << 4, 1 << 0);
-	/* Connect the MUX to micbias */
-	snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x02);
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
-		mbhc->mbhc_cb->enable_mux_bias_block(codec);
-	else
-		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
-				    0x80, 0x80);
-	/*
-	 * Hardware that has an external cap can delay the mic bias ramp-down
-	 * by up to 50 ms.
-	 */
-	msleep(WCD9XXX_MUX_SWITCH_READY_WAIT_MS);
-	/* DCE measurement for 0 voltage */
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
-	mbhc->mbhc_data.dce_z = __wcd9xxx_codec_sta_dce(mbhc, 1, true, false);
-
-	/* compute dce_z for current source */
-	reg2 = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL);
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x78,
-			    WCD9XXX_MBHC_NSC_CS << 3);
-
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
-	mbhc->mbhc_data.dce_nsc_cs_z = __wcd9xxx_codec_sta_dce(mbhc, 1, true,
-							       false);
-	pr_debug("%s: dce_z with nsc cs: 0x%x\n", __func__,
-						 mbhc->mbhc_data.dce_nsc_cs_z);
-
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, reg2);
-
-	/* STA measurement for 0 voltage */
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
-	mbhc->mbhc_data.sta_z = __wcd9xxx_codec_sta_dce(mbhc, 0, true, false);
-
-	/* Restore registers */
-	snd_soc_write(codec, mbhc->mbhc_bias_regs.ctl_reg, reg0);
-	snd_soc_write(codec, WCD9XXX_A_MAD_ANA_CTRL, reg1);
-
-	/* DCE measurement for MB voltage */
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
-	snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x02);
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
-		mbhc->mbhc_cb->enable_mux_bias_block(codec);
-	else
-		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
-				    0x80, 0x80);
-	/*
-	 * Hardware that has an external cap can delay the mic bias ramp-down
-	 * by up to 50 ms.
-	 */
-	msleep(WCD9XXX_MUX_SWITCH_READY_WAIT_MS);
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x04);
-	usleep_range(mbhc->mbhc_data.t_dce, mbhc->mbhc_data.t_dce +
-					WCD9XXX_USLEEP_RANGE_MARGIN_US);
-	mbhc->mbhc_data.dce_mb = wcd9xxx_read_dce_result(codec);
-
-	/* STA Measurement for MB Voltage */
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x02);
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
-	snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x02);
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
-		mbhc->mbhc_cb->enable_mux_bias_block(codec);
-	else
-		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
-				    0x80, 0x80);
-	/*
-	 * Hardware that has an external cap can delay the mic bias ramp-down
-	 * by up to 50 ms.
-	 */
-	msleep(WCD9XXX_MUX_SWITCH_READY_WAIT_MS);
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x02);
-	usleep_range(mbhc->mbhc_data.t_sta, mbhc->mbhc_data.t_sta +
-					WCD9XXX_USLEEP_RANGE_MARGIN_US);
-	mbhc->mbhc_data.sta_mb = wcd9xxx_read_sta_result(codec);
-
-	/* Restore default settings. */
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x04, 0x00);
-	snd_soc_write(codec, mbhc->mbhc_bias_regs.cfilt_ctl, cfilt_mode);
-	snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x04);
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
-		mbhc->mbhc_cb->enable_mux_bias_block(codec);
-	else
-		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
-				    0x80, 0x80);
-	usleep_range(100, 110);
-
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source)
-		mbhc->mbhc_cb->enable_mb_source(codec, false, false);
-
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->micbias_pulldown_ctrl)
-		mbhc->mbhc_cb->micbias_pulldown_ctrl(mbhc, true);
-
-	wcd9xxx_enable_irq(mbhc->resmgr->core_res,
-			   mbhc->intr_ids->dce_est_complete);
-	wcd9xxx_turn_onoff_rel_detection(codec, true);
-
-	pr_debug("%s: leave\n", __func__);
-}
-
-static void wcd9xxx_mbhc_setup(struct wcd9xxx_mbhc *mbhc)
-{
-	int n;
-	u8 *gain;
-	struct wcd9xxx_mbhc_general_cfg *generic;
-	struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
-	struct snd_soc_codec *codec = mbhc->codec;
-	const int idx = wcd9xxx_acdb_mclk_index(mbhc->mbhc_cfg->mclk_rate);
-
-	pr_debug("%s: enter\n", __func__);
-	generic = WCD9XXX_MBHC_CAL_GENERAL_PTR(mbhc->mbhc_cfg->calibration);
-	btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
-
-	for (n = 0; n < 8; n++) {
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_FIR_B1_CFG,
-				    0x07, n);
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_FIR_B2_CFG,
-			      btn_det->c[n]);
-	}
-
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B2_CTL, 0x07,
-			    btn_det->nc);
-
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_TIMER_B4_CTL, 0x70,
-			    generic->mbhc_nsa << 4);
-
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_TIMER_B4_CTL, 0x0F,
-			    btn_det->n_meas);
-
-	snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B5_CTL,
-		      generic->mbhc_navg);
-
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x80, 0x80);
-
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x78,
-			    btn_det->mbhc_nsc << 3);
-
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->get_cdc_type &&
-			mbhc->mbhc_cb->get_cdc_type() !=
-					WCD9XXX_CDC_TYPE_HELICON) {
-		if (mbhc->resmgr->reg_addr->micb_4_mbhc)
-			snd_soc_update_bits(codec,
-					mbhc->resmgr->reg_addr->micb_4_mbhc,
-					0x03, MBHC_MICBIAS2);
-	}
-
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x02, 0x02);
-
-	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2, 0xF0, 0xF0);
-
-	gain = wcd9xxx_mbhc_cal_btn_det_mp(btn_det, MBHC_BTN_DET_GAIN);
-	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B2_CTL, 0x78,
-			    gain[idx] << 3);
-	snd_soc_update_bits(codec, WCD9XXX_A_MICB_2_MBHC, 0x04, 0x04);
-
-	pr_debug("%s: leave\n", __func__);
-}
-
-static int wcd9xxx_setup_jack_detect_irq(struct wcd9xxx_mbhc *mbhc)
-{
-	int ret = 0;
-	void *core_res = mbhc->resmgr->core_res;
-
-	if (mbhc->mbhc_cfg->gpio) {
-		ret = request_threaded_irq(mbhc->mbhc_cfg->gpio_irq, NULL,
-					   wcd9xxx_mech_plug_detect_irq,
-					   (IRQF_TRIGGER_RISING |
-					    IRQF_TRIGGER_FALLING),
-					   "headset detect", mbhc);
-		if (ret) {
-			pr_err("%s: Failed to request gpio irq %d\n", __func__,
-			       mbhc->mbhc_cfg->gpio_irq);
-		} else {
-			ret = enable_irq_wake(mbhc->mbhc_cfg->gpio_irq);
-			if (ret)
-				pr_err("%s: Failed to enable wake up irq %d\n",
-				       __func__, mbhc->mbhc_cfg->gpio_irq);
-		}
-	} else if (mbhc->mbhc_cfg->insert_detect) {
-		/* Enable HPHL_10K_SW */
-		snd_soc_update_bits(mbhc->codec, WCD9XXX_A_RX_HPH_OCP_CTL,
-				    1 << 1, 1 << 1);
-
-		ret = wcd9xxx_request_irq(core_res,
-					  mbhc->intr_ids->hs_jack_switch,
-					  wcd9xxx_mech_plug_detect_irq,
-					  "Jack Detect",
-					  mbhc);
-		if (ret)
-			pr_err("%s: Failed to request insert detect irq %d\n",
-				__func__, mbhc->intr_ids->hs_jack_switch);
-	}
-
-	return ret;
-}
-
-static int wcd9xxx_init_and_calibrate(struct wcd9xxx_mbhc *mbhc)
-{
-	int ret = 0;
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	pr_debug("%s: enter\n", __func__);
-
-	/* Enable MCLK during calibration */
-	wcd9xxx_onoff_ext_mclk(mbhc, true);
-	wcd9xxx_mbhc_setup(mbhc);
-	wcd9xxx_mbhc_cal(mbhc);
-	wcd9xxx_mbhc_calc_thres(mbhc);
-	wcd9xxx_onoff_ext_mclk(mbhc, false);
-	wcd9xxx_calibrate_hs_polling(mbhc);
-
-	/* Enable Mic Bias pull down and HPH Switch to GND */
-	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x01);
-	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01, 0x01);
-	INIT_WORK(&mbhc->correct_plug_swch, wcd9xxx_correct_swch_plug);
-
-	snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
-			    0x10);
-	wcd9xxx_enable_irq(mbhc->resmgr->core_res,
-			   mbhc->intr_ids->hph_left_ocp);
-	wcd9xxx_enable_irq(mbhc->resmgr->core_res,
-			   mbhc->intr_ids->hph_right_ocp);
-
-	/* Initialize mechanical mbhc */
-	ret = wcd9xxx_setup_jack_detect_irq(mbhc);
-
-	if (!ret && mbhc->mbhc_cfg->gpio) {
-		/* Requested with IRQF_DISABLED */
-		enable_irq(mbhc->mbhc_cfg->gpio_irq);
-
-		/* Bootup time detection */
-		wcd9xxx_swch_irq_handler(mbhc);
-	} else if (!ret && mbhc->mbhc_cfg->insert_detect) {
-		pr_debug("%s: Setting up codec's own insert detection\n",
-			 __func__);
-		/* Setup for insertion detection */
-		wcd9xxx_insert_detect_setup(mbhc, true);
-	}
-
-	pr_debug("%s: leave\n", __func__);
-
-	return ret;
-}
-
-static void wcd9xxx_mbhc_fw_read(struct work_struct *work)
-{
-	struct delayed_work *dwork;
-	struct wcd9xxx_mbhc *mbhc;
-	struct snd_soc_codec *codec;
-	const struct firmware *fw;
-	struct firmware_cal *fw_data = NULL;
-	int ret = -1, retry = 0;
-	bool use_default_cal = false;
-
-	dwork = to_delayed_work(work);
-	mbhc = container_of(dwork, struct wcd9xxx_mbhc, mbhc_firmware_dwork);
-	codec = mbhc->codec;
-
-	while (retry < FW_READ_ATTEMPTS) {
-		retry++;
-		pr_info("%s: Attempt %d to request MBHC firmware\n",
-				__func__, retry);
-		if (mbhc->mbhc_cb->get_hwdep_fw_cal)
-			fw_data = mbhc->mbhc_cb->get_hwdep_fw_cal(codec,
-					WCD9XXX_MBHC_CAL);
-		if (!fw_data)
-			ret = request_firmware(&fw, "wcd9320/wcd9320_mbhc.bin",
-					codec->dev);
-		/*
-		 * if request_firmware and hwdep cal both fail, then
-		 * retry a few times before bailing out
-		 */
-		if ((ret != 0) && !fw_data) {
-			usleep_range(FW_READ_TIMEOUT, FW_READ_TIMEOUT +
-					WCD9XXX_USLEEP_RANGE_MARGIN_US);
-		} else {
-			pr_info("%s: MBHC Firmware read successful\n",
-					__func__);
-			break;
-		}
-	}
-	if (!fw_data)
-		pr_info("%s: using request_firmware\n", __func__);
-	else
-		pr_info("%s: using hwdep cal\n", __func__);
-	if (ret != 0 && !fw_data) {
-		pr_err("%s: Cannot load MBHC firmware, using default cal\n",
-				__func__);
-		use_default_cal = true;
-	}
-	if (!use_default_cal) {
-		const void *data;
-		size_t size;
-
-		if (fw_data) {
-			data = fw_data->data;
-			size = fw_data->size;
-		} else {
-			data = fw->data;
-			size = fw->size;
-		}
-		if (wcd9xxx_mbhc_fw_validate(data, size) == false) {
-			pr_err("%s: Invalid MBHC cal data size, using default cal\n",
-			       __func__);
-			if (!fw_data)
-				release_firmware(fw);
-		} else {
-			if (fw_data) {
-				mbhc->mbhc_cfg->calibration =
-						(void *)fw_data->data;
-				mbhc->mbhc_cal = fw_data;
-			} else {
-				mbhc->mbhc_cfg->calibration =
-						(void *)fw->data;
-				mbhc->mbhc_fw = fw;
-			}
-		}
-	}
-
-	(void) wcd9xxx_init_and_calibrate(mbhc);
-}
-
-#ifdef CONFIG_DEBUG_FS
-ssize_t codec_mbhc_debug_read(struct file *file, char __user *buf,
-			      size_t count, loff_t *pos)
-{
-	const int size = 768;
-	char buffer[size];
-	int n = 0;
-	struct wcd9xxx_mbhc *mbhc = file->private_data;
-	const struct mbhc_internal_cal_data *p = &mbhc->mbhc_data;
-	const s16 v_ins_hu =
-	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_INS_HU);
-	const s16 v_ins_h =
-	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_INS_H);
-	const s16 v_b1_hu =
-	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_HU);
-	const s16 v_b1_h =
-	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_H);
-	const s16 v_br_h =
-	    wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_BR_H);
-
-	n = scnprintf(buffer, size - n, "dce_z = %x(%dmv)\n",
-		      p->dce_z, wcd9xxx_codec_sta_dce_v(mbhc, 1, p->dce_z));
-	n += scnprintf(buffer + n, size - n, "dce_mb = %x(%dmv)\n",
-		       p->dce_mb, wcd9xxx_codec_sta_dce_v(mbhc, 1, p->dce_mb));
-	n += scnprintf(buffer + n, size - n, "dce_nsc_cs_z = %x(%dmv)\n",
-		      p->dce_nsc_cs_z,
-		      __wcd9xxx_codec_sta_dce_v(mbhc, 1, p->dce_nsc_cs_z,
-						p->dce_nsc_cs_z,
-						VDDIO_MICBIAS_MV));
-	n += scnprintf(buffer + n, size - n, "sta_z = %x(%dmv)\n",
-		       p->sta_z, wcd9xxx_codec_sta_dce_v(mbhc, 0, p->sta_z));
-	n += scnprintf(buffer + n, size - n, "sta_mb = %x(%dmv)\n",
-		       p->sta_mb, wcd9xxx_codec_sta_dce_v(mbhc, 0, p->sta_mb));
-	n += scnprintf(buffer + n, size - n, "t_dce = %d\n",  p->t_dce);
-	n += scnprintf(buffer + n, size - n, "t_sta = %d\n",  p->t_sta);
-	n += scnprintf(buffer + n, size - n, "micb_mv = %dmv\n", p->micb_mv);
-	n += scnprintf(buffer + n, size - n, "v_ins_hu = %x(%dmv)\n",
-		       v_ins_hu, wcd9xxx_codec_sta_dce_v(mbhc, 0, v_ins_hu));
-	n += scnprintf(buffer + n, size - n, "v_ins_h = %x(%dmv)\n",
-		       v_ins_h, wcd9xxx_codec_sta_dce_v(mbhc, 1, v_ins_h));
-	n += scnprintf(buffer + n, size - n, "v_b1_hu = %x(%dmv)\n",
-		       v_b1_hu, wcd9xxx_codec_sta_dce_v(mbhc, 0, v_b1_hu));
-	n += scnprintf(buffer + n, size - n, "v_b1_h = %x(%dmv)\n",
-		       v_b1_h, wcd9xxx_codec_sta_dce_v(mbhc, 1, v_b1_h));
-	n += scnprintf(buffer + n, size - n, "v_brh = %x(%dmv)\n",
-		       v_br_h, wcd9xxx_codec_sta_dce_v(mbhc, 1, v_br_h));
-	n += scnprintf(buffer + n, size - n, "v_brl = %x(%dmv)\n",  p->v_brl,
-		       wcd9xxx_codec_sta_dce_v(mbhc, 0, p->v_brl));
-	n += scnprintf(buffer + n, size - n, "v_no_mic = %x(%dmv)\n",
-		       p->v_no_mic,
-		       wcd9xxx_codec_sta_dce_v(mbhc, 0, p->v_no_mic));
-	n += scnprintf(buffer + n, size - n, "v_inval_ins_low = %d\n",
-		       p->v_inval_ins_low);
-	n += scnprintf(buffer + n, size - n, "v_inval_ins_high = %d\n",
-		       p->v_inval_ins_high);
-	n += scnprintf(buffer + n, size - n, "Insert detect insert = %d\n",
-		       !wcd9xxx_swch_level_remove(mbhc));
-	buffer[n] = 0;
-
-	return simple_read_from_buffer(buf, count, pos, buffer, n);
-}
-
-static int codec_debug_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
-static ssize_t codec_debug_write(struct file *filp,
-				 const char __user *ubuf, size_t cnt,
-				 loff_t *ppos)
-{
-	char lbuf[32];
-	char *buf;
-	int rc;
-	struct wcd9xxx_mbhc *mbhc = filp->private_data;
-
-	if (cnt > sizeof(lbuf) - 1)
-		return -EINVAL;
-
-	rc = copy_from_user(lbuf, ubuf, cnt);
-	if (rc)
-		return -EFAULT;
-
-	lbuf[cnt] = '\0';
-	buf = (char *)lbuf;
-	mbhc->no_mic_headset_override = (*strsep(&buf, " ") == '0') ?
-					     false : true;
-	return cnt;
-}
-
-static const struct file_operations mbhc_trrs_debug_ops = {
-	.open = codec_debug_open,
-	.write = codec_debug_write,
-};
-
-static const struct file_operations mbhc_debug_ops = {
-	.open = codec_debug_open,
-	.read = codec_mbhc_debug_read,
-};
-
-static void wcd9xxx_init_debugfs(struct wcd9xxx_mbhc *mbhc)
-{
-	mbhc->debugfs_poke =
-	    debugfs_create_file("TRRS", S_IFREG | 0444, NULL, mbhc,
-				&mbhc_trrs_debug_ops);
-	mbhc->debugfs_mbhc =
-	    debugfs_create_file("wcd9xxx_mbhc", S_IFREG | 0444,
-				NULL, mbhc, &mbhc_debug_ops);
-}
-
-static void wcd9xxx_cleanup_debugfs(struct wcd9xxx_mbhc *mbhc)
-{
-	debugfs_remove(mbhc->debugfs_poke);
-	debugfs_remove(mbhc->debugfs_mbhc);
-}
-#else
-static void wcd9xxx_init_debugfs(struct wcd9xxx_mbhc *mbhc)
-{
-}
-
-static void wcd9xxx_cleanup_debugfs(struct wcd9xxx_mbhc *mbhc)
-{
-}
-#endif
-
-int wcd9xxx_mbhc_set_keycode(struct wcd9xxx_mbhc *mbhc)
-{
-	enum snd_jack_types type = SND_JACK_BTN_0;
-	int i, ret, result = 0;
-	int *btn_key_code;
-
-	btn_key_code = mbhc->mbhc_cfg->key_code;
-
-	for (i = 0; i < 8; i++) {
-		if (btn_key_code[i] != 0) {
-			switch (i) {
-			case 0:
-				type = SND_JACK_BTN_0;
-				break;
-			case 1:
-				type = SND_JACK_BTN_1;
-				break;
-			case 2:
-				type = SND_JACK_BTN_2;
-				break;
-			case 3:
-				type = SND_JACK_BTN_3;
-				break;
-			case 4:
-				type = SND_JACK_BTN_4;
-				break;
-			case 5:
-				type = SND_JACK_BTN_5;
-				break;
-			default:
-				WARN_ONCE(1, "Wrong button number:%d\n", i);
-				result = -1;
-				break;
-			}
-			ret = snd_jack_set_key(mbhc->button_jack.jack,
-					       type,
-					       btn_key_code[i]);
-			if (ret) {
-				pr_err("%s: Failed to set code for %d\n",
-					__func__, btn_key_code[i]);
-				result = -1;
-			}
-			input_set_capability(
-				mbhc->button_jack.jack->input_dev,
-				EV_KEY, btn_key_code[i]);
-			pr_debug("%s: set btn%d key code:%d\n", __func__,
-				i, btn_key_code[i]);
-		}
-	}
-	return result;
-}
-
-int wcd9xxx_mbhc_start(struct wcd9xxx_mbhc *mbhc,
-		       struct wcd9xxx_mbhc_config *mbhc_cfg)
-{
-	int rc = 0;
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	pr_debug("%s: enter\n", __func__);
-
-	if (!codec) {
-		pr_err("%s: no codec\n", __func__);
-		return -EINVAL;
-	}
-
-	if (mbhc_cfg->mclk_rate != MCLK_RATE_12288KHZ &&
-	    mbhc_cfg->mclk_rate != MCLK_RATE_9600KHZ) {
-		pr_err("Error: unsupported clock rate %d\n",
-		       mbhc_cfg->mclk_rate);
-		return -EINVAL;
-	}
-
-	/* Save mbhc config */
-	mbhc->mbhc_cfg = mbhc_cfg;
-
-	/* Set btn key code */
-	if (wcd9xxx_mbhc_set_keycode(mbhc))
-		pr_err("Set btn key code error!!!\n");
-
-	/* Get HW specific mbhc registers' address */
-	wcd9xxx_get_mbhc_micbias_regs(mbhc, MBHC_PRIMARY_MIC_MB);
-
-	/* Get HW specific mbhc registers' address for anc */
-	wcd9xxx_get_mbhc_micbias_regs(mbhc, MBHC_ANC_MIC_MB);
-
-	/* Put CFILT in fast mode by default */
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->cfilt_fast_mode)
-		mbhc->mbhc_cb->cfilt_fast_mode(codec, mbhc);
-	else
-		snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl,
-		0x40, WCD9XXX_CFILT_FAST_MODE);
-
-	/*
-	 * setup internal micbias if codec uses internal micbias for
-	 * headset detection
-	 */
-	if (mbhc->mbhc_cfg->use_int_rbias) {
-		if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias) {
-			mbhc->mbhc_cb->setup_int_rbias(codec, true);
-		} else {
-			pr_info("%s: internal bias requested but codec did not provide callback\n",
-				__func__);
-		}
-	}
-
-	/*
-	 * If codec has specific clock gating for MBHC,
-	 * remove the clock gate
-	 */
-	if (mbhc->mbhc_cb &&
-			mbhc->mbhc_cb->enable_clock_gate)
-		mbhc->mbhc_cb->enable_clock_gate(mbhc->codec, true);
-
-	if (!mbhc->mbhc_cfg->read_fw_bin ||
-	    (mbhc->mbhc_cfg->read_fw_bin && mbhc->mbhc_fw) ||
-	    (mbhc->mbhc_cfg->read_fw_bin && mbhc->mbhc_cal)) {
-		rc = wcd9xxx_init_and_calibrate(mbhc);
-	} else {
-		if (!mbhc->mbhc_fw || !mbhc->mbhc_cal)
-			schedule_delayed_work(&mbhc->mbhc_firmware_dwork,
-					     usecs_to_jiffies(FW_READ_TIMEOUT));
-		else
-			pr_debug("%s: Skipping to read mbhc fw, 0x%pK %pK\n",
-				 __func__, mbhc->mbhc_fw, mbhc->mbhc_cal);
-	}
-
-	pr_debug("%s: leave %d\n", __func__, rc);
-	return rc;
-}
-EXPORT_SYMBOL(wcd9xxx_mbhc_start);
-
-void wcd9xxx_mbhc_stop(struct wcd9xxx_mbhc *mbhc)
-{
-	if (mbhc->mbhc_fw || mbhc->mbhc_cal) {
-		cancel_delayed_work_sync(&mbhc->mbhc_firmware_dwork);
-		if (!mbhc->mbhc_cal)
-			release_firmware(mbhc->mbhc_fw);
-		mbhc->mbhc_fw = NULL;
-		mbhc->mbhc_cal = NULL;
-	}
-}
-EXPORT_SYMBOL(wcd9xxx_mbhc_stop);
-
-static enum wcd9xxx_micbias_num
-wcd9xxx_event_to_micbias(const enum wcd9xxx_notify_event event)
-{
-	enum wcd9xxx_micbias_num ret;
-
-	switch (event) {
-	case WCD9XXX_EVENT_PRE_MICBIAS_1_ON:
-	case WCD9XXX_EVENT_PRE_MICBIAS_1_OFF:
-	case WCD9XXX_EVENT_POST_MICBIAS_1_ON:
-	case WCD9XXX_EVENT_POST_MICBIAS_1_OFF:
-		ret = MBHC_MICBIAS1;
-		break;
-	case WCD9XXX_EVENT_PRE_MICBIAS_2_ON:
-	case WCD9XXX_EVENT_PRE_MICBIAS_2_OFF:
-	case WCD9XXX_EVENT_POST_MICBIAS_2_ON:
-	case WCD9XXX_EVENT_POST_MICBIAS_2_OFF:
-		ret = MBHC_MICBIAS2;
-		break;
-	case WCD9XXX_EVENT_PRE_MICBIAS_3_ON:
-	case WCD9XXX_EVENT_PRE_MICBIAS_3_OFF:
-	case WCD9XXX_EVENT_POST_MICBIAS_3_ON:
-	case WCD9XXX_EVENT_POST_MICBIAS_3_OFF:
-		ret = MBHC_MICBIAS3;
-		break;
-	case WCD9XXX_EVENT_PRE_MICBIAS_4_ON:
-	case WCD9XXX_EVENT_PRE_MICBIAS_4_OFF:
-	case WCD9XXX_EVENT_POST_MICBIAS_4_ON:
-	case WCD9XXX_EVENT_POST_MICBIAS_4_OFF:
-		ret = MBHC_MICBIAS4;
-		break;
-	default:
-		WARN_ONCE(1, "Cannot convert event %d to micbias\n", event);
-		ret = MBHC_MICBIAS_INVALID;
-		break;
-	}
-	return ret;
-}
-
-static int wcd9xxx_event_to_cfilt(const enum wcd9xxx_notify_event event)
-{
-	int ret;
-
-	switch (event) {
-	case WCD9XXX_EVENT_PRE_CFILT_1_OFF:
-	case WCD9XXX_EVENT_POST_CFILT_1_OFF:
-	case WCD9XXX_EVENT_PRE_CFILT_1_ON:
-	case WCD9XXX_EVENT_POST_CFILT_1_ON:
-		ret = WCD9XXX_CFILT1_SEL;
-		break;
-	case WCD9XXX_EVENT_PRE_CFILT_2_OFF:
-	case WCD9XXX_EVENT_POST_CFILT_2_OFF:
-	case WCD9XXX_EVENT_PRE_CFILT_2_ON:
-	case WCD9XXX_EVENT_POST_CFILT_2_ON:
-		ret = WCD9XXX_CFILT2_SEL;
-		break;
-	case WCD9XXX_EVENT_PRE_CFILT_3_OFF:
-	case WCD9XXX_EVENT_POST_CFILT_3_OFF:
-	case WCD9XXX_EVENT_PRE_CFILT_3_ON:
-	case WCD9XXX_EVENT_POST_CFILT_3_ON:
-		ret = WCD9XXX_CFILT3_SEL;
-		break;
-	default:
-		ret = -1;
-	}
-	return ret;
-}
-
-static int wcd9xxx_get_mbhc_cfilt_sel(struct wcd9xxx_mbhc *mbhc)
-{
-	int cfilt;
-	const struct wcd9xxx_micbias_setting *mb_pdata =
-		mbhc->resmgr->micbias_pdata;
-
-	switch (mbhc->mbhc_cfg->micbias) {
-	case MBHC_MICBIAS1:
-		cfilt = mb_pdata->bias1_cfilt_sel;
-		break;
-	case MBHC_MICBIAS2:
-		cfilt = mb_pdata->bias2_cfilt_sel;
-		break;
-	case MBHC_MICBIAS3:
-		cfilt = mb_pdata->bias3_cfilt_sel;
-		break;
-	case MBHC_MICBIAS4:
-		cfilt = mb_pdata->bias4_cfilt_sel;
-		break;
-	default:
-		cfilt = MBHC_MICBIAS_INVALID;
-		break;
-	}
-	return cfilt;
-}
-
-static void wcd9xxx_enable_mbhc_txfe(struct wcd9xxx_mbhc *mbhc, bool on)
-{
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mbhc_txfe)
-		mbhc->mbhc_cb->enable_mbhc_txfe(mbhc->codec, on);
-	else
-		snd_soc_update_bits(mbhc->codec, WCD9XXX_A_TX_7_MBHC_TEST_CTL,
-				    0x40, on ? 0x40 : 0x00);
-}
-
-static int wcd9xxx_event_notify(struct notifier_block *self, unsigned long val,
-				void *data)
-{
-	int ret = 0;
-	struct wcd9xxx_mbhc *mbhc = ((struct wcd9xxx_resmgr *)data)->mbhc;
-	struct snd_soc_codec *codec;
-	enum wcd9xxx_notify_event event = (enum wcd9xxx_notify_event)val;
-
-	pr_debug("%s: enter event %s(%d)\n", __func__,
-		 wcd9xxx_get_event_string(event), event);
-
-	if (!mbhc || !mbhc->mbhc_cfg) {
-		pr_debug("mbhc not initialized\n");
-		return 0;
-	}
-	codec = mbhc->codec;
-	mutex_lock(&mbhc->mbhc_lock);
-	switch (event) {
-	/* MICBIAS usage change */
-	case WCD9XXX_EVENT_PRE_MICBIAS_1_ON:
-	case WCD9XXX_EVENT_PRE_MICBIAS_2_ON:
-	case WCD9XXX_EVENT_PRE_MICBIAS_3_ON:
-	case WCD9XXX_EVENT_PRE_MICBIAS_4_ON:
-		if (mbhc->mbhc_cfg && mbhc->mbhc_cfg->micbias ==
-		    wcd9xxx_event_to_micbias(event)) {
-			wcd9xxx_switch_micbias(mbhc, 0);
-			/*
-			 * Enable MBHC TxFE whenever micbias is
-			 * turned ON and polling is active
-			 */
-			if (mbhc->polling_active)
-				wcd9xxx_enable_mbhc_txfe(mbhc, true);
-		}
-		break;
-	case WCD9XXX_EVENT_POST_MICBIAS_1_ON:
-	case WCD9XXX_EVENT_POST_MICBIAS_2_ON:
-	case WCD9XXX_EVENT_POST_MICBIAS_3_ON:
-	case WCD9XXX_EVENT_POST_MICBIAS_4_ON:
-		if (mbhc->mbhc_cfg && mbhc->mbhc_cfg->micbias ==
-		    wcd9xxx_event_to_micbias(event) &&
-		    wcd9xxx_mbhc_polling(mbhc)) {
-			/* if polling is on, restart it */
-			wcd9xxx_pause_hs_polling(mbhc);
-			wcd9xxx_start_hs_polling(mbhc);
-		}
-		break;
-	case WCD9XXX_EVENT_POST_MICBIAS_1_OFF:
-	case WCD9XXX_EVENT_POST_MICBIAS_2_OFF:
-	case WCD9XXX_EVENT_POST_MICBIAS_3_OFF:
-	case WCD9XXX_EVENT_POST_MICBIAS_4_OFF:
-		if (mbhc->mbhc_cfg && mbhc->mbhc_cfg->micbias ==
-		    wcd9xxx_event_to_micbias(event)) {
-			if (mbhc->event_state &
-			   (1 << MBHC_EVENT_PA_HPHL | 1 << MBHC_EVENT_PA_HPHR))
-				wcd9xxx_switch_micbias(mbhc, 1);
-			/*
-			 * Disable MBHC TxFE, in case it was enabled earlier
-			 * when micbias was enabled and polling is not active.
-			 */
-			if (!mbhc->polling_active)
-				wcd9xxx_enable_mbhc_txfe(mbhc, false);
-		}
-		if (mbhc->micbias_enable && mbhc->polling_active &&
-		    !(snd_soc_read(mbhc->codec, mbhc->mbhc_bias_regs.ctl_reg)
-		    & 0x80)) {
-			pr_debug("%s: Micbias turned off by recording, set up again\n",
-				 __func__);
-			snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg,
-					    0x80, 0x80);
-		}
-		break;
-	/* PA usage change */
-	case WCD9XXX_EVENT_PRE_HPHL_PA_ON:
-		set_bit(MBHC_EVENT_PA_HPHL, &mbhc->event_state);
-		if (!(snd_soc_read(codec, mbhc->mbhc_bias_regs.ctl_reg) & 0x80))
-			/* if micbias is not enabled, switch to vddio */
-			wcd9xxx_switch_micbias(mbhc, 1);
-		break;
-	case WCD9XXX_EVENT_PRE_HPHR_PA_ON:
-		set_bit(MBHC_EVENT_PA_HPHR, &mbhc->event_state);
-		break;
-	case WCD9XXX_EVENT_POST_HPHL_PA_OFF:
-		clear_bit(MBHC_EVENT_PA_HPHL, &mbhc->event_state);
-		/* if HPH PAs are off, report OCP and switch back to CFILT */
-		clear_bit(WCD9XXX_HPHL_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
-		clear_bit(WCD9XXX_HPHL_DAC_OFF_ACK, &mbhc->hph_pa_dac_state);
-		if (mbhc->hph_status & SND_JACK_OC_HPHL)
-			hphlocp_off_report(mbhc, SND_JACK_OC_HPHL);
-		if (!(mbhc->event_state &
-		      (1 << MBHC_EVENT_PA_HPHL | 1 << MBHC_EVENT_PA_HPHR |
-		       1 << MBHC_EVENT_PRE_TX_3_ON)))
-			wcd9xxx_switch_micbias(mbhc, 0);
-		break;
-	case WCD9XXX_EVENT_POST_HPHR_PA_OFF:
-		clear_bit(MBHC_EVENT_PA_HPHR, &mbhc->event_state);
-		/* if HPH PAs are off, report OCP and switch back to CFILT */
-		clear_bit(WCD9XXX_HPHR_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
-		clear_bit(WCD9XXX_HPHR_DAC_OFF_ACK, &mbhc->hph_pa_dac_state);
-		if (mbhc->hph_status & SND_JACK_OC_HPHR)
-			hphrocp_off_report(mbhc, SND_JACK_OC_HPHR);
-		if (!(mbhc->event_state &
-		      (1 << MBHC_EVENT_PA_HPHL | 1 << MBHC_EVENT_PA_HPHR |
-		       1 << MBHC_EVENT_PRE_TX_3_ON)))
-			wcd9xxx_switch_micbias(mbhc, 0);
-		break;
-	/* Clock usage change */
-	case WCD9XXX_EVENT_PRE_MCLK_ON:
-		break;
-	case WCD9XXX_EVENT_POST_MCLK_ON:
-		/* Change to lower TxAAF frequency */
-		snd_soc_update_bits(codec, WCD9XXX_A_TX_COM_BIAS, 1 << 4,
-				    1 << 4);
-		/* Re-calibrate clock rate dependent values */
-		wcd9xxx_update_mbhc_clk_rate(mbhc, mbhc->mbhc_cfg->mclk_rate);
-		/* If clock source changes, stop and restart polling */
-		if (wcd9xxx_mbhc_polling(mbhc)) {
-			wcd9xxx_calibrate_hs_polling(mbhc);
-			wcd9xxx_start_hs_polling(mbhc);
-		}
-		break;
-	case WCD9XXX_EVENT_PRE_MCLK_OFF:
-		/* If clock source changes, stop and restart polling */
-		if (wcd9xxx_mbhc_polling(mbhc))
-			wcd9xxx_pause_hs_polling(mbhc);
-		break;
-	case WCD9XXX_EVENT_POST_MCLK_OFF:
-		break;
-	case WCD9XXX_EVENT_PRE_RCO_ON:
-		break;
-	case WCD9XXX_EVENT_POST_RCO_ON:
-		/* Change to higher TxAAF frequency */
-		snd_soc_update_bits(codec, WCD9XXX_A_TX_COM_BIAS, 1 << 4,
-				    0 << 4);
-		/* Re-calibrate clock rate dependent values */
-		wcd9xxx_update_mbhc_clk_rate(mbhc, mbhc->rco_clk_rate);
-		/* If clock source changes, stop and restart polling */
-		if (wcd9xxx_mbhc_polling(mbhc)) {
-			wcd9xxx_calibrate_hs_polling(mbhc);
-			wcd9xxx_start_hs_polling(mbhc);
-		}
-		break;
-	case WCD9XXX_EVENT_PRE_RCO_OFF:
-		/* If clock source changes, stop and restart polling */
-		if (wcd9xxx_mbhc_polling(mbhc))
-			wcd9xxx_pause_hs_polling(mbhc);
-		break;
-	case WCD9XXX_EVENT_POST_RCO_OFF:
-		break;
-	/* CFILT usage change */
-	case WCD9XXX_EVENT_PRE_CFILT_1_ON:
-	case WCD9XXX_EVENT_PRE_CFILT_2_ON:
-	case WCD9XXX_EVENT_PRE_CFILT_3_ON:
-		if (wcd9xxx_get_mbhc_cfilt_sel(mbhc) ==
-		    wcd9xxx_event_to_cfilt(event))
-			/*
-			 * Switch CFILT to slow mode if MBHC CFILT is being
-			 * used.
-			 */
-			wcd9xxx_codec_switch_cfilt_mode(mbhc, false);
-		break;
-	case WCD9XXX_EVENT_POST_CFILT_1_OFF:
-	case WCD9XXX_EVENT_POST_CFILT_2_OFF:
-	case WCD9XXX_EVENT_POST_CFILT_3_OFF:
-		if (wcd9xxx_get_mbhc_cfilt_sel(mbhc) ==
-		    wcd9xxx_event_to_cfilt(event))
-			/*
-			 * Switch CFILT to fast mode if MBHC CFILT is not
-			 * used anymore.
-			 */
-			wcd9xxx_codec_switch_cfilt_mode(mbhc, true);
-		break;
-	/* System resume */
-	case WCD9XXX_EVENT_POST_RESUME:
-		mbhc->mbhc_last_resume = jiffies;
-		break;
-	/* BG mode change */
-	case WCD9XXX_EVENT_PRE_BG_OFF:
-	case WCD9XXX_EVENT_POST_BG_OFF:
-	case WCD9XXX_EVENT_PRE_BG_AUDIO_ON:
-	case WCD9XXX_EVENT_POST_BG_AUDIO_ON:
-	case WCD9XXX_EVENT_PRE_BG_MBHC_ON:
-	case WCD9XXX_EVENT_POST_BG_MBHC_ON:
-		/* Not used for now */
-		break;
-	case WCD9XXX_EVENT_PRE_TX_3_ON:
-		/*
-		 * If polling is ON and the MBHC micbias is not enabled,
-		 * switch the micbias source to VDDIO.
-		 */
-		set_bit(MBHC_EVENT_PRE_TX_3_ON, &mbhc->event_state);
-		if (!(snd_soc_read(codec, mbhc->mbhc_bias_regs.ctl_reg)
-		      & 0x80) &&
-		    mbhc->polling_active && !mbhc->mbhc_micbias_switched)
-			wcd9xxx_switch_micbias(mbhc, 1);
-		break;
-	case WCD9XXX_EVENT_POST_TX_3_OFF:
-		/*
-		 * Switch back to micbias if HPH PA or TX3 path
-		 * is disabled
-		 */
-		clear_bit(MBHC_EVENT_PRE_TX_3_ON, &mbhc->event_state);
-		if (mbhc->polling_active && mbhc->mbhc_micbias_switched &&
-		    !(mbhc->event_state & (1 << MBHC_EVENT_PA_HPHL |
-		      1 << MBHC_EVENT_PA_HPHR)))
-			wcd9xxx_switch_micbias(mbhc, 0);
-		break;
-	default:
-		WARN(1, "Unknown event %d\n", event);
-		ret = -EINVAL;
-	}
-	mutex_unlock(&mbhc->mbhc_lock);
-
-	pr_debug("%s: leave\n", __func__);
-
-	return ret;
-}
-
-static s16 wcd9xxx_read_impedance_regs(struct wcd9xxx_mbhc *mbhc)
-{
-	struct snd_soc_codec *codec = mbhc->codec;
-	short bias_value;
-	int i;
-	s32 z_t = 0;
-	s32 z_loop = z_det_box_car_avg;
-
-	/* A box car average over fewer than WCD9XXX_BOX_CAR_AVRG_MIN loops or
-	 * more than WCD9XXX_BOX_CAR_AVRG_MAX loops is not supported, so clamp
-	 * the z_loop counter to that range if it falls outside it.
-	 */
-	if (z_loop < WCD9XXX_BOX_CAR_AVRG_MIN) {
-		dev_dbg(codec->dev,
-			"%s: Box Car avrg counter < %d. Limiting it to %d\n",
-			__func__, WCD9XXX_BOX_CAR_AVRG_MIN,
-			WCD9XXX_BOX_CAR_AVRG_MIN);
-		z_loop = WCD9XXX_BOX_CAR_AVRG_MIN;
-	} else if (z_loop > WCD9XXX_BOX_CAR_AVRG_MAX) {
-		dev_dbg(codec->dev,
-			"%s: Box Car avrg counter > %d. Limiting it to %d\n",
-			__func__, WCD9XXX_BOX_CAR_AVRG_MAX,
-			WCD9XXX_BOX_CAR_AVRG_MAX);
-		z_loop = WCD9XXX_BOX_CAR_AVRG_MAX;
-	}
-
-	/* Take box car average if needed */
-	for (i = 0; i < z_loop; i++) {
-		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x2);
-		/* Wait for at least 1800us to let the register write settle */
-		usleep_range(1800, 1800 + WCD9XXX_USLEEP_RANGE_MARGIN_US);
-		z_t += wcd9xxx_read_sta_result(codec);
-	}
-	/* Take average of the Z values read */
-	bias_value = (s16) (z_t / z_loop);
-	return bias_value;
-}
-
-static int wcd9xxx_remeasure_z_values(struct wcd9xxx_mbhc *mbhc,
-					s16 l[3], s16 r[3],
-					uint32_t *zl, uint32_t *zr,
-					u32 *zl_stereo, u32 *zl_mono)
-{
-	s16 l_t[3] = {0}, r_t[3] = {0};
-	s16 l2_stereo, l2_mono;
-	bool left, right;
-	struct snd_soc_codec *codec = mbhc->codec;
-
-	if (!mbhc->mbhc_cb || !mbhc->mbhc_cb->setup_zdet ||
-		!mbhc->mbhc_cb->compute_impedance) {
-		dev_err(codec->dev, "%s: Invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-
-	left = !!(l);
-	right = !!(r);
-
-	dev_dbg(codec->dev, "%s: Remeasuring impedance values\n", __func__);
-	dev_dbg(codec->dev, "%s: l: %pK, r: %pK, left=%d, right=%d\n", __func__,
-		 l, r, left, right);
-
-	/* Remeasure V2 values */
-	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF0);
-	if (right)
-		r_t[2] = wcd9xxx_read_impedance_regs(mbhc);
-	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xC0);
-	if (left)
-		l_t[2] = wcd9xxx_read_impedance_regs(mbhc);
-
-	/* Ramp down HPHR */
-	mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_HPHR_RAMP_DISABLE);
-
-	if (right) {
-		/* Take R0'/R1' */
-		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2,
-				    0xFF, 0xF8);
-		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
-				    0xFF, 0xA0);
-		r_t[1] = wcd9xxx_read_impedance_regs(mbhc);
-		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2,
-				    0xFF, 0xF0);
-		r_t[0] = wcd9xxx_read_impedance_regs(mbhc);
-	}
-
-	/* Put back gain to 1x */
-	if (!left && right)
-		mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_GAIN_0);
-
-	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xC0);
-	/* Take L2'' measurement */
-	l2_stereo = wcd9xxx_read_impedance_regs(mbhc);
-
-	/* Turn off HPHR PA and take L2''' */
-	mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_HPHR_PA_DISABLE);
-	l2_mono = wcd9xxx_read_impedance_regs(mbhc);
-
-	/* Ramp HPHL from -15mV to 0V */
-	mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_HPHL_RAMP_DISABLE);
-
-	/* Take L0' and L1' with iCal */
-	l_t[0] = wcd9xxx_read_impedance_regs(mbhc);
-	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF8);
-	l_t[1] = wcd9xxx_read_impedance_regs(mbhc);
-
-	if (left) {
-		l[0] = l_t[0];
-		l[1] = l_t[1];
-		l[2] = l_t[2];
-	}
-	if (right) {
-		r[0] = r_t[0];
-		r[1] = r_t[1];
-		r[2] = r_t[2];
-	}
-
-	/* compute the new impedance values */
-	mbhc->mbhc_cb->compute_impedance(mbhc, l, r, zl, zr);
-
-	if (!left && right)
-		mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_GAIN_UPDATE_1X);
-	/* compute the new ZL'' value */
-	l_t[2] = l2_stereo;
-	mbhc->mbhc_cb->compute_impedance(mbhc, l_t, NULL, zl_stereo, NULL);
-	/* compute the new ZL''' value */
-	l_t[2] = l2_mono;
-	mbhc->mbhc_cb->compute_impedance(mbhc, l_t, NULL, zl_mono, NULL);
-
-	pr_debug("%s: L0': 0x%x, L1': 0x%x L2_stereo: 0x%x, L2_mono: 0x%x\n",
-		 __func__, l_t[0] & 0xffff, l_t[1] & 0xffff,
-		 l2_stereo & 0xffff, l2_mono & 0xffff);
-	pr_debug("%s: ZL_stereo = %u, ZL_mono = %u\n",
-		 __func__, *zl_stereo, *zl_mono);
-
-	return 0;
-}
-
-static enum mbhc_zdet_zones wcd9xxx_assign_zdet_zone(uint32_t zl, uint32_t zr,
-						     int32_t *gain)
-{
-	enum mbhc_zdet_zones zdet_zone;
-
-	if (WCD9XXX_IS_IN_ZDET_ZONE_1(zl) &&
-		 WCD9XXX_IS_IN_ZDET_ZONE_1(zr)) {
-		zdet_zone = ZL_ZONE1__ZR_ZONE1;
-		*gain = 0;
-	} else if (WCD9XXX_IS_IN_ZDET_ZONE_2(zl) &&
-		 WCD9XXX_IS_IN_ZDET_ZONE_2(zr)) {
-		zdet_zone = ZL_ZONE2__ZR_ZONE2;
-		*gain = MBHC_ZDET_GAIN_1;
-	} else if (WCD9XXX_IS_IN_ZDET_ZONE_3(zl) &&
-		 WCD9XXX_IS_IN_ZDET_ZONE_3(zr)) {
-		zdet_zone = ZL_ZONE3__ZR_ZONE3;
-		*gain = MBHC_ZDET_GAIN_2;
-	} else if (WCD9XXX_IS_IN_ZDET_ZONE_2(zl) &&
-		 WCD9XXX_IS_IN_ZDET_ZONE_1(zr)) {
-		zdet_zone = ZL_ZONE2__ZR_ZONE1;
-		*gain = MBHC_ZDET_GAIN_1;
-	} else if (WCD9XXX_IS_IN_ZDET_ZONE_3(zl) &&
-		 WCD9XXX_IS_IN_ZDET_ZONE_1(zr)) {
-		zdet_zone = ZL_ZONE3__ZR_ZONE1;
-		*gain = MBHC_ZDET_GAIN_2;
-	} else if (WCD9XXX_IS_IN_ZDET_ZONE_1(zl) &&
-		 WCD9XXX_IS_IN_ZDET_ZONE_2(zr)) {
-		zdet_zone = ZL_ZONE1__ZR_ZONE2;
-		*gain = MBHC_ZDET_GAIN_1;
-	} else if (WCD9XXX_IS_IN_ZDET_ZONE_1(zl) &&
-		 WCD9XXX_IS_IN_ZDET_ZONE_3(zr)) {
-		zdet_zone = ZL_ZONE1__ZR_ZONE3;
-		*gain = MBHC_ZDET_GAIN_2;
-	} else {
-		zdet_zone = ZL_ZR_NOT_IN_ZONE1;
-		*gain = MBHC_ZDET_GAIN_1;
-	}
-
-	return zdet_zone;
-}
-
-static int wcd9xxx_detect_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
-				    uint32_t *zr)
-{
-	int i;
-	int ret = 0;
-	u8 micb_mbhc_val;
-	s16 l[3], r[3];
-	s16 *z[] = {
-		&l[0], &r[0], &r[1], &l[1], &l[2], &r[2],
-	};
-	u32 zl_stereo, zl_mono;
-	u32 zl_diff_1, zl_diff_2;
-	bool override_en;
-	struct snd_soc_codec *codec = mbhc->codec;
-	const int mux_wait_us = 25;
-	const struct wcd9xxx_reg_mask_val reg_set_mux[] = {
-		/* Phase 1 */
-		/* Set MBHC_MUX for HPHL without ical */
-		{WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF0},
-		/* Set MBHC_MUX for HPHR without ical */
-		{WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xA0},
-		/* Set MBHC_MUX for HPHR with ical */
-		{WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF8},
-		/* Set MBHC_MUX for HPHL with ical */
-		{WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xC0},
-
-		/* Phase 2 */
-		{WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF0},
-		/* Set MBHC_MUX for HPHR without ical and wait for 25us */
-		{WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xA0},
-	};
-
-	pr_debug("%s: enter\n", __func__);
-	WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
-	if (!mbhc->mbhc_cb || !mbhc->mbhc_cb->setup_zdet ||
-	    !mbhc->mbhc_cb->compute_impedance || !zl || !zr) {
-		return -EINVAL;
-	}
-
-	/*
-	 * Impedance detection is an intrusive operation: it mutes RX paths,
-	 * enables PAs, etc.  Therefore the codec driver, including ALSA,
-	 * shouldn't read or write hardware registers during detection.
-	 */
-	wcd9xxx_onoff_ext_mclk(mbhc, true);
-
-	/*
-	 * For impedance detection, make sure to disable micbias from
-	 * override signal so that override does not cause micbias
-	 * to be enabled. This setting will be undone after completing
-	 * impedance measurement.
-	 */
-	micb_mbhc_val = snd_soc_read(codec, WCD9XXX_A_MAD_ANA_CTRL);
-	snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL,
-			    0x10, 0x00);
-
-	override_en = (snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL) & 0x04) ?
-					true : false;
-	if (!override_en)
-		wcd9xxx_turn_onoff_override(mbhc, true);
-	pr_debug("%s: Setting impedance detection\n", __func__);
-
-	/* Codec specific setup for L0, R0, L1 and R1 measurements */
-	mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_PRE_MEASURE);
-
-	pr_debug("%s: Performing impedance detection\n", __func__);
-	for (i = 0; i < ARRAY_SIZE(reg_set_mux) - 2; i++) {
-		snd_soc_update_bits(codec, reg_set_mux[i].reg,
-				    reg_set_mux[i].mask,
-				    reg_set_mux[i].val);
-		if (mbhc->mbhc_cb->get_cdc_type &&
-		    mbhc->mbhc_cb->get_cdc_type() ==
-				WCD9XXX_CDC_TYPE_TOMTOM) {
-			*(z[i]) = wcd9xxx_read_impedance_regs(mbhc);
-		} else {
-			if (mbhc->mbhc_cb->enable_mux_bias_block)
-				mbhc->mbhc_cb->enable_mux_bias_block(codec);
-			else
-				snd_soc_update_bits(codec,
-						WCD9XXX_A_MBHC_SCALING_MUX_1,
-						0x80, 0x80);
-			/* 25us is required after mux change to settle down */
-			usleep_range(mux_wait_us,
-				 mux_wait_us + WCD9XXX_USLEEP_RANGE_MARGIN_US);
-			*(z[i]) = __wcd9xxx_codec_sta_dce(mbhc, 0,
-							  true, false);
-		}
-	}
-
-	/* Codec specific setup for L2 and R2 measurements */
-	mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_POST_MEASURE);
-
-	for (; i < ARRAY_SIZE(reg_set_mux); i++) {
-		snd_soc_update_bits(codec, reg_set_mux[i].reg,
-				    reg_set_mux[i].mask,
-				    reg_set_mux[i].val);
-		if (mbhc->mbhc_cb->get_cdc_type &&
-		    mbhc->mbhc_cb->get_cdc_type() ==
-				WCD9XXX_CDC_TYPE_TOMTOM) {
-			*(z[i]) = wcd9xxx_read_impedance_regs(mbhc);
-		} else {
-			if (mbhc->mbhc_cb->enable_mux_bias_block)
-				mbhc->mbhc_cb->enable_mux_bias_block(codec);
-			else
-				snd_soc_update_bits(codec,
-						WCD9XXX_A_MBHC_SCALING_MUX_1,
-						0x80, 0x80);
-			/* 25us is required after mux change to settle down */
-			usleep_range(mux_wait_us,
-				mux_wait_us + WCD9XXX_USLEEP_RANGE_MARGIN_US);
-			*(z[i]) = __wcd9xxx_codec_sta_dce(mbhc, 0,
-							  true, false);
-		}
-	}
-
-	mbhc->mbhc_cb->compute_impedance(mbhc, l, r, zl, zr);
-
-	/*
-	 * For some codecs, an additional step of zdet is needed
-	 * to overcome effects of noise and for better accuracy of
-	 * z values
-	 */
-	if (mbhc->mbhc_cb->get_cdc_type &&
-	     mbhc->mbhc_cb->get_cdc_type() == WCD9XXX_CDC_TYPE_TOMTOM) {
-		uint32_t zl_t = 0, zr_t = 0;
-		s16 *l_p, *r_p;
-		enum mbhc_zdet_zones zdet_zone;
-		int32_t gain;
-
-		zdet_zone = wcd9xxx_assign_zdet_zone(*zl, *zr, &gain);
-		switch (zdet_zone) {
-		case ZL_ZONE1__ZR_ZONE1:
-			l_p = NULL;
-			r_p = NULL;
-			break;
-		case ZL_ZONE2__ZR_ZONE2:
-		case ZL_ZONE3__ZR_ZONE3:
-		case ZL_ZR_NOT_IN_ZONE1:
-			l_p = l;
-			r_p = r;
-			break;
-		case ZL_ZONE2__ZR_ZONE1:
-		case ZL_ZONE3__ZR_ZONE1:
-		/* If ZR falls in Zone 1, further computations with
-		 * gain update are not required
-		 */
-			l_p = l;
-			r_p = NULL;
-			break;
-		case ZL_ZONE1__ZR_ZONE2:
-		case ZL_ZONE1__ZR_ZONE3:
-		/* If ZL falls in Zone 1, further computations with
-		 * gain update are not required
-		 */
-			l_p = NULL;
-			r_p = r;
-			break;
-		}
-		pr_debug("%s: zdet_zone = %d, gain = %d\n", __func__,
-			 zdet_zone, gain);
-		if (gain)
-			mbhc->mbhc_cb->setup_zdet(mbhc, gain);
-
-		wcd9xxx_remeasure_z_values(mbhc, l_p, r_p, &zl_t, &zr_t,
-					   &zl_stereo, &zl_mono);
-
-		*zl = (zl_t) ? zl_t : *zl;
-		*zr = (zr_t) ? zr_t : *zr;
-
-		/* Check for Mono/Stereo Type
-		 * Conditions to classify Mono/Stereo
-		 * i. Difference of zl_stereo and zl_mono > (1/2) of zl_mono
-		 * ii. Absolute difference of zl and zr above a threshold
-		 */
-		zl_diff_1 = (zl_mono > zl_stereo) ? (zl_mono - zl_stereo) :
-						  (zl_stereo - zl_mono);
-		zl_diff_2 = (*zl > *zr) ? (*zl - *zr) : (*zr - *zl);
-
-		mbhc->hph_type = MBHC_HPH_NONE;
-		if (mbhc->current_plug != PLUG_TYPE_HIGH_HPH) {
-			if ((zl_diff_1 > (zl_mono >> 1)) ||
-			    (zl_diff_2 > WCD9XXX_MONO_HS_DIFF_THR) ||
-			    ((*zl < WCD9XXX_MONO_HS_MIN_THR) &&
-			     (*zr > WCD9XXX_MONO_HS_MIN_THR)) ||
-			    ((*zr < WCD9XXX_MONO_HS_MIN_THR) &&
-			     (*zl > WCD9XXX_MONO_HS_MIN_THR))) {
-				pr_debug("%s: MONO plug type detected\n",
-					 __func__);
-				mbhc->hph_type = MBHC_HPH_MONO;
-				*zl = zl_mono;
-			} else {
-				pr_debug("%s: STEREO plug type detected\n",
-					 __func__);
-				mbhc->hph_type = MBHC_HPH_STEREO;
-			}
-		}
-	}
-
-	mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_PA_DISABLE);
-
-	/* Calculate z values based on the Q-fuse registers, if used */
-	if (mbhc->mbhc_cb->zdet_error_approx)
-		mbhc->mbhc_cb->zdet_error_approx(mbhc, zl, zr);
-
-	wcd9xxx_onoff_ext_mclk(mbhc, false);
-
-	if (!override_en)
-		wcd9xxx_turn_onoff_override(mbhc, false);
-
-	/* Undo the micbias disable for override */
-	snd_soc_write(codec, WCD9XXX_A_MAD_ANA_CTRL, micb_mbhc_val);
-
-	pr_debug("%s: L0: 0x%x(%d), L1: 0x%x(%d), L2: 0x%x(%d)\n",
-		 __func__,
-		 l[0] & 0xffff, l[0], l[1] & 0xffff, l[1], l[2] & 0xffff, l[2]);
-	pr_debug("%s: R0: 0x%x(%d), R1: 0x%x(%d), R2: 0x%x(%d)\n",
-		 __func__,
-		 r[0] & 0xffff, r[0], r[1] & 0xffff, r[1], r[2] & 0xffff, r[2]);
-	pr_debug("%s: RL %u milliohm, RR %u milliohm\n", __func__, *zl, *zr);
-	pr_debug("%s: Impedance detection completed\n", __func__);
-
-	return ret;
-}
-
-int wcd9xxx_mbhc_get_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
-			       uint32_t *zr)
-{
-	*zl = mbhc->zl;
-	*zr = mbhc->zr;
-
-	if (*zl && *zr)
-		return 0;
-	else
-		return -EINVAL;
-}
-
-/*
- * wcd9xxx_mbhc_init : initialize MBHC internal structures.
- *
- * NOTE: mbhc->mbhc_cfg is not yet configured, so it must not be used here.
- */
-int wcd9xxx_mbhc_init(struct wcd9xxx_mbhc *mbhc, struct wcd9xxx_resmgr *resmgr,
-		      struct snd_soc_codec *codec,
-		      int (*micbias_enable_cb)(struct snd_soc_codec*,  bool,
-						enum wcd9xxx_micbias_num),
-		      const struct wcd9xxx_mbhc_cb *mbhc_cb,
-		      const struct wcd9xxx_mbhc_intr *mbhc_cdc_intr_ids,
-		      int rco_clk_rate,
-		      bool impedance_det_en)
-{
-	int ret;
-	void *core_res;
-
-	pr_debug("%s: enter\n", __func__);
-	memset(&mbhc->mbhc_bias_regs, 0, sizeof(struct mbhc_micbias_regs));
-	memset(&mbhc->mbhc_data, 0, sizeof(struct mbhc_internal_cal_data));
-
-	mbhc->mbhc_data.t_sta_dce = DEFAULT_DCE_STA_WAIT;
-	mbhc->mbhc_data.t_dce = DEFAULT_DCE_WAIT;
-	mbhc->mbhc_data.t_sta = DEFAULT_STA_WAIT;
-	mbhc->mbhc_micbias_switched = false;
-	mbhc->polling_active = false;
-	mbhc->mbhc_state = MBHC_STATE_NONE;
-	mbhc->in_swch_irq_handler = false;
-	mbhc->current_plug = PLUG_TYPE_NONE;
-	mbhc->lpi_enabled = false;
-	mbhc->no_mic_headset_override = false;
-	mbhc->mbhc_last_resume = 0;
-	mbhc->codec = codec;
-	mbhc->resmgr = resmgr;
-	mbhc->resmgr->mbhc = mbhc;
-	mbhc->micbias_enable_cb = micbias_enable_cb;
-	mbhc->rco_clk_rate = rco_clk_rate;
-	mbhc->mbhc_cb = mbhc_cb;
-	mbhc->intr_ids = mbhc_cdc_intr_ids;
-	mbhc->impedance_detect = impedance_det_en;
-	mbhc->hph_type = MBHC_HPH_NONE;
-
-	if (mbhc->intr_ids == NULL) {
-		pr_err("%s: Interrupt mapping not provided\n", __func__);
-		return -EINVAL;
-	}
-
-	if (mbhc->headset_jack.jack == NULL) {
-		ret = snd_soc_card_jack_new(codec->component.card,
-					    "Headset Jack", WCD9XXX_JACK_MASK,
-					    &mbhc->headset_jack, NULL, 0);
-		if (ret) {
-			pr_err("%s: Failed to create new jack\n", __func__);
-			return ret;
-		}
-
-		ret = snd_soc_card_jack_new(codec->component.card,
-					    "Button Jack",
-					    WCD9XXX_JACK_BUTTON_MASK,
-					    &mbhc->button_jack, NULL, 0);
-		if (ret) {
-			pr_err("Failed to create new jack\n");
-			return ret;
-		}
-
-		ret = snd_jack_set_key(mbhc->button_jack.jack,
-				       SND_JACK_BTN_0,
-				       KEY_MEDIA);
-		if (ret) {
-			pr_err("%s: Failed to set code for btn-0\n",
-				__func__);
-			return ret;
-		}
-
-		INIT_DELAYED_WORK(&mbhc->mbhc_firmware_dwork,
-				  wcd9xxx_mbhc_fw_read);
-		INIT_DELAYED_WORK(&mbhc->mbhc_btn_dwork, wcd9xxx_btn_lpress_fn);
-		INIT_DELAYED_WORK(&mbhc->mbhc_insert_dwork,
-				  wcd9xxx_mbhc_insert_work);
-	}
-
-	mutex_init(&mbhc->mbhc_lock);
-
-	/* Register event notifier */
-	mbhc->nblock.notifier_call = wcd9xxx_event_notify;
-	ret = wcd9xxx_resmgr_register_notifier(mbhc->resmgr, &mbhc->nblock);
-	if (ret) {
-		pr_err("%s: Failed to register notifier %d\n", __func__, ret);
-		mutex_destroy(&mbhc->mbhc_lock);
-		return ret;
-	}
-
-	wcd9xxx_init_debugfs(mbhc);
-
-	/* Disable Impedance detection by default for certain codec types */
-	if (mbhc->mbhc_cb && mbhc->mbhc_cb->get_cdc_type &&
-	    (mbhc->mbhc_cb->get_cdc_type() == WCD9XXX_CDC_TYPE_HELICON))
-		impedance_detect_en = 0;
-	else
-		impedance_detect_en = impedance_det_en ? 1 : 0;
-
-	core_res = mbhc->resmgr->core_res;
-	ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->insertion,
-				  wcd9xxx_hs_insert_irq,
-				  "Headset insert detect", mbhc);
-	if (ret) {
-		pr_err("%s: Failed to request irq %d, ret = %d\n", __func__,
-		       mbhc->intr_ids->insertion, ret);
-		goto err_insert_irq;
-	}
-	wcd9xxx_disable_irq(core_res, mbhc->intr_ids->insertion);
-
-	ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->poll_plug_rem,
-				  wcd9xxx_hs_remove_irq,
-				  "Headset remove detect", mbhc);
-	if (ret) {
-		pr_err("%s: Failed to request irq %d\n", __func__,
-			mbhc->intr_ids->poll_plug_rem);
-		goto err_remove_irq;
-	}
-
-	ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->dce_est_complete,
-				  wcd9xxx_dce_handler, "DC Estimation detect",
-				  mbhc);
-	if (ret) {
-		pr_err("%s: Failed to request irq %d\n", __func__,
-		       mbhc->intr_ids->dce_est_complete);
-		goto err_potential_irq;
-	}
-
-	ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->button_release,
-				  wcd9xxx_release_handler,
-				  "Button Release detect", mbhc);
-	if (ret) {
-		pr_err("%s: Failed to request irq %d\n", __func__,
-			mbhc->intr_ids->button_release);
-		goto err_release_irq;
-	}
-
-	ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->hph_left_ocp,
-				  wcd9xxx_hphl_ocp_irq, "HPH_L OCP detect",
-				  mbhc);
-	if (ret) {
-		pr_err("%s: Failed to request irq %d\n", __func__,
-		       mbhc->intr_ids->hph_left_ocp);
-		goto err_hphl_ocp_irq;
-	}
-	wcd9xxx_disable_irq(core_res, mbhc->intr_ids->hph_left_ocp);
-
-	ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->hph_right_ocp,
-				  wcd9xxx_hphr_ocp_irq, "HPH_R OCP detect",
-				  mbhc);
-	if (ret) {
-		pr_err("%s: Failed to request irq %d\n", __func__,
-		       mbhc->intr_ids->hph_right_ocp);
-		goto err_hphr_ocp_irq;
-	}
-	wcd9xxx_disable_irq(core_res, mbhc->intr_ids->hph_right_ocp);
-
-	wcd9xxx_regmgr_cond_register(resmgr, 1 << WCD9XXX_COND_HPH_MIC |
-					     1 << WCD9XXX_COND_HPH);
-
-	pr_debug("%s: leave ret %d\n", __func__, ret);
-	return ret;
-
-err_hphr_ocp_irq:
-	wcd9xxx_free_irq(core_res, mbhc->intr_ids->hph_left_ocp, mbhc);
-err_hphl_ocp_irq:
-	wcd9xxx_free_irq(core_res, mbhc->intr_ids->button_release, mbhc);
-err_release_irq:
-	wcd9xxx_free_irq(core_res, mbhc->intr_ids->dce_est_complete, mbhc);
-err_potential_irq:
-	wcd9xxx_free_irq(core_res, mbhc->intr_ids->poll_plug_rem, mbhc);
-err_remove_irq:
-	wcd9xxx_free_irq(core_res, mbhc->intr_ids->insertion, mbhc);
-err_insert_irq:
-	wcd9xxx_resmgr_unregister_notifier(mbhc->resmgr, &mbhc->nblock);
-
-	mutex_destroy(&mbhc->mbhc_lock);
-
-	pr_debug("%s: leave ret %d\n", __func__, ret);
-	return ret;
-}
-EXPORT_SYMBOL(wcd9xxx_mbhc_init);
-
-void wcd9xxx_mbhc_deinit(struct wcd9xxx_mbhc *mbhc)
-{
-	struct wcd9xxx_core_resource *core_res =
-				mbhc->resmgr->core_res;
-
-	wcd9xxx_regmgr_cond_deregister(mbhc->resmgr, 1 << WCD9XXX_COND_HPH_MIC |
-						     1 << WCD9XXX_COND_HPH);
-
-	wcd9xxx_free_irq(core_res, mbhc->intr_ids->button_release, mbhc);
-	wcd9xxx_free_irq(core_res, mbhc->intr_ids->dce_est_complete, mbhc);
-	wcd9xxx_free_irq(core_res, mbhc->intr_ids->poll_plug_rem, mbhc);
-	wcd9xxx_free_irq(core_res, mbhc->intr_ids->insertion, mbhc);
-	wcd9xxx_free_irq(core_res, mbhc->intr_ids->hs_jack_switch, mbhc);
-	wcd9xxx_free_irq(core_res, mbhc->intr_ids->hph_left_ocp, mbhc);
-	wcd9xxx_free_irq(core_res, mbhc->intr_ids->hph_right_ocp, mbhc);
-
-	mutex_destroy(&mbhc->mbhc_lock);
-	wcd9xxx_resmgr_unregister_notifier(mbhc->resmgr, &mbhc->nblock);
-	wcd9xxx_cleanup_debugfs(mbhc);
-}
-EXPORT_SYMBOL(wcd9xxx_mbhc_deinit);
-
-MODULE_DESCRIPTION("wcd9xxx MBHC module");
-MODULE_LICENSE("GPL v2");
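
For readers unfamiliar with the box-car averaging performed by wcd9xxx_read_impedance_regs()
above, the pattern reduces to clamping the requested loop count and averaging repeated raw
readings. The stand-alone sketch below illustrates only that pattern; the clamp limits and the
read_raw callback are placeholders for illustration, not the driver's actual values.

#include <stdint.h>

#define BOX_CAR_AVRG_MIN 1   /* placeholder lower clamp */
#define BOX_CAR_AVRG_MAX 10  /* placeholder upper clamp */

/* Average 'loops' raw readings, clamping the loop count to a sane range. */
static int16_t box_car_average(int16_t (*read_raw)(void), int loops)
{
	int32_t sum = 0;
	int i;

	if (loops < BOX_CAR_AVRG_MIN)
		loops = BOX_CAR_AVRG_MIN;
	else if (loops > BOX_CAR_AVRG_MAX)
		loops = BOX_CAR_AVRG_MAX;

	for (i = 0; i < loops; i++)
		sum += read_raw();

	return (int16_t)(sum / loops);
}
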
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.h b/sound/soc/codecs/wcd9xxx-mbhc.h
deleted file mode 100644
index e35f7d4..0000000
--- a/sound/soc/codecs/wcd9xxx-mbhc.h
+++ /dev/null
@@ -1,492 +0,0 @@
-/* Copyright (c) 2012-2015, 2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#ifndef __WCD9XXX_MBHC_H__
-#define __WCD9XXX_MBHC_H__
-
-#include "wcd9xxx-resmgr.h"
-#include "wcdcal-hwdep.h"
-
-#define WCD9XXX_CFILT_FAST_MODE 0x00
-#define WCD9XXX_CFILT_SLOW_MODE 0x40
-#define WCD9XXX_CFILT_EXT_PRCHG_EN 0x30
-#define WCD9XXX_CFILT_EXT_PRCHG_DSBL 0x00
-
-#define WCD9XXX_USLEEP_RANGE_MARGIN_US 100
-
-struct mbhc_micbias_regs {
-	u16 cfilt_val;
-	u16 cfilt_ctl;
-	u16 mbhc_reg;
-	u16 int_rbias;
-	u16 ctl_reg;
-	u8 cfilt_sel;
-};
-
-enum mbhc_v_index {
-	MBHC_V_IDX_CFILT,
-	MBHC_V_IDX_VDDIO,
-	MBHC_V_IDX_NUM,
-};
-
-enum mbhc_cal_type {
-	MBHC_CAL_MCLK,
-	MBHC_CAL_RCO,
-	MBHC_CAL_NUM,
-};
-
-enum mbhc_impedance_detect_stages {
-	MBHC_ZDET_PRE_MEASURE,
-	MBHC_ZDET_POST_MEASURE,
-	MBHC_ZDET_GAIN_0,
-	MBHC_ZDET_GAIN_1,
-	MBHC_ZDET_GAIN_2,
-	MBHC_ZDET_HPHR_RAMP_DISABLE,
-	MBHC_ZDET_HPHL_RAMP_DISABLE,
-	MBHC_ZDET_RAMP_DISABLE,
-	MBHC_ZDET_HPHR_PA_DISABLE,
-	MBHC_ZDET_PA_DISABLE,
-	MBHC_ZDET_GAIN_UPDATE_1X,
-};
-
-/* Zone assignments used in WCD9330 for Zdet */
-enum mbhc_zdet_zones {
-	ZL_ZONE1__ZR_ZONE1,
-	ZL_ZONE2__ZR_ZONE2,
-	ZL_ZONE3__ZR_ZONE3,
-	ZL_ZONE2__ZR_ZONE1,
-	ZL_ZONE3__ZR_ZONE1,
-	ZL_ZONE1__ZR_ZONE2,
-	ZL_ZONE1__ZR_ZONE3,
-	ZL_ZR_NOT_IN_ZONE1,
-};
-
-/* Data used by MBHC */
-struct mbhc_internal_cal_data {
-	u16 dce_z;
-	u16 dce_nsc_cs_z;
-	u16 dce_mb;
-	u16 sta_z;
-	u16 sta_mb;
-	u32 t_sta_dce;
-	u32 t_dce;
-	u32 t_sta;
-	u32 micb_mv;
-	u16 v_ins_hu[MBHC_V_IDX_NUM];
-	u16 v_ins_h[MBHC_V_IDX_NUM];
-	u16 v_b1_hu[MBHC_V_IDX_NUM];
-	u16 v_b1_h[MBHC_V_IDX_NUM];
-	u16 v_brh[MBHC_V_IDX_NUM];
-	u16 v_brl;
-	u16 v_no_mic;
-	s16 v_inval_ins_low;
-	s16 v_inval_ins_high;
-	u16 v_cs_ins_h;
-};
-
-enum wcd9xxx_mbhc_plug_type {
-	PLUG_TYPE_INVALID = -1,
-	PLUG_TYPE_NONE,
-	PLUG_TYPE_HEADSET,
-	PLUG_TYPE_HEADPHONE,
-	PLUG_TYPE_HIGH_HPH,
-	PLUG_TYPE_GND_MIC_SWAP,
-	PLUG_TYPE_ANC_HEADPHONE,
-};
-
-enum wcd9xxx_mbhc_micbias_type {
-	MBHC_PRIMARY_MIC_MB,
-	MBHC_ANC_MIC_MB,
-};
-
-enum wcd9xxx_micbias_num {
-	MBHC_MICBIAS_INVALID = -1,
-	MBHC_MICBIAS1,
-	MBHC_MICBIAS2,
-	MBHC_MICBIAS3,
-	MBHC_MICBIAS4,
-};
-
-enum hw_jack_type {
-	FOUR_POLE_JACK = 0,
-	FIVE_POLE_JACK,
-	SIX_POLE_JACK,
-};
-
-enum wcd9xx_mbhc_micbias_enable_bits {
-	MBHC_MICBIAS_ENABLE_THRESHOLD_HEADSET,
-	MBHC_MICBIAS_ENABLE_REGULAR_HEADSET,
-};
-
-enum wcd9xx_mbhc_cs_enable_bits {
-	MBHC_CS_ENABLE_POLLING,
-	MBHC_CS_ENABLE_INSERTION,
-	MBHC_CS_ENABLE_REMOVAL,
-	MBHC_CS_ENABLE_DET_ANC,
-};
-
-enum wcd9xxx_mbhc_state {
-	MBHC_STATE_NONE = -1,
-	MBHC_STATE_POTENTIAL,
-	MBHC_STATE_POTENTIAL_RECOVERY,
-	MBHC_STATE_RELEASE,
-};
-
-enum wcd9xxx_mbhc_btn_det_mem {
-	MBHC_BTN_DET_V_BTN_LOW,
-	MBHC_BTN_DET_V_BTN_HIGH,
-	MBHC_BTN_DET_N_READY,
-	MBHC_BTN_DET_N_CIC,
-	MBHC_BTN_DET_GAIN
-};
-
-enum wcd9xxx_mbhc_clk_freq {
-	TAIKO_MCLK_12P2MHZ = 0,
-	TAIKO_MCLK_9P6MHZ,
-	TAIKO_NUM_CLK_FREQS,
-};
-
-enum wcd9xxx_mbhc_event_state {
-	MBHC_EVENT_PA_HPHL,
-	MBHC_EVENT_PA_HPHR,
-	MBHC_EVENT_PRE_TX_3_ON,
-	MBHC_EVENT_POST_TX_3_OFF,
-};
-
-enum mbhc_hph_type {
-	MBHC_HPH_NONE = 0,
-	MBHC_HPH_MONO,
-	MBHC_HPH_STEREO,
-};
-
-struct wcd9xxx_mbhc_general_cfg {
-	u8 t_ldoh;
-	u8 t_bg_fast_settle;
-	u8 t_shutdown_plug_rem;
-	u8 mbhc_nsa;
-	u8 mbhc_navg;
-	u8 v_micbias_l;
-	u8 v_micbias;
-	u8 mbhc_reserved;
-	u16 settle_wait;
-	u16 t_micbias_rampup;
-	u16 t_micbias_rampdown;
-	u16 t_supply_bringup;
-} __packed;
-
-struct wcd9xxx_mbhc_plug_detect_cfg {
-	u32 mic_current;
-	u32 hph_current;
-	u16 t_mic_pid;
-	u16 t_ins_complete;
-	u16 t_ins_retry;
-	u16 v_removal_delta;
-	u8 micbias_slow_ramp;
-	u8 reserved0;
-	u8 reserved1;
-	u8 reserved2;
-} __packed;
-
-struct wcd9xxx_mbhc_plug_type_cfg {
-	u8 av_detect;
-	u8 mono_detect;
-	u8 num_ins_tries;
-	u8 reserved0;
-	s16 v_no_mic;
-	s16 v_av_min;
-	s16 v_av_max;
-	s16 v_hs_min;
-	s16 v_hs_max;
-	u16 reserved1;
-} __packed;
-
-struct wcd9xxx_mbhc_btn_detect_cfg {
-	s8 c[8];
-	u8 nc;
-	u8 n_meas;
-	u8 mbhc_nsc;
-	u8 n_btn_meas;
-	u8 n_btn_con;
-	u8 num_btn;
-	u8 reserved0;
-	u8 reserved1;
-	u16 t_poll;
-	u16 t_bounce_wait;
-	u16 t_rel_timeout;
-	s16 v_btn_press_delta_sta;
-	s16 v_btn_press_delta_cic;
-	u16 t_btn0_timeout;
-	s16 _v_btn_low[0]; /* v_btn_low[num_btn] */
-	s16 _v_btn_high[0]; /* v_btn_high[num_btn] */
-	u8 _n_ready[TAIKO_NUM_CLK_FREQS];
-	u8 _n_cic[TAIKO_NUM_CLK_FREQS];
-	u8 _gain[TAIKO_NUM_CLK_FREQS];
-} __packed;
-
-struct wcd9xxx_mbhc_imped_detect_cfg {
-	u8 _hs_imped_detect;
-	u8 _n_rload;
-	u8 _hph_keep_on;
-	u8 _repeat_rload_calc;
-	u16 _t_dac_ramp_time;
-	u16 _rhph_high;
-	u16 _rhph_low;
-	u16 _rload[0]; /* rload[n_rload] */
-	u16 _alpha[0]; /* alpha[n_rload] */
-	u16 _beta[3];
-} __packed;
-
-struct wcd9xxx_mbhc_config {
-	bool read_fw_bin;
-	/*
-	 * void* calibration contains:
-	 *  struct wcd9xxx_mbhc_general_cfg generic;
-	 *  struct wcd9xxx_mbhc_plug_detect_cfg plug_det;
-	 *  struct wcd9xxx_mbhc_plug_type_cfg plug_type;
-	 *  struct wcd9xxx_mbhc_btn_detect_cfg btn_det;
-	 *  struct wcd9xxx_mbhc_imped_detect_cfg imped_det;
-	 * Note: the size of each variable section depends on btn_det->num_btn
-	 */
-	void *calibration;
-	enum wcd9xxx_micbias_num micbias;
-	enum wcd9xxx_micbias_num anc_micbias;
-	int (*mclk_cb_fn)(struct snd_soc_codec*, int, bool);
-	unsigned int mclk_rate;
-	unsigned int gpio;
-	unsigned int gpio_irq;
-	int gpio_level_insert;
-	bool insert_detect; /* codec has own MBHC_INSERT_DETECT */
-	bool detect_extn_cable;
-	/* bit mask of enum wcd9xx_mbhc_micbias_enable_bits */
-	unsigned long micbias_enable_flags;
-	/* swap_gnd_mic returns true if extern GND/MIC swap switch toggled */
-	bool (*swap_gnd_mic)(struct snd_soc_codec *);
-	unsigned long cs_enable_flags;
-	bool use_int_rbias;
-	bool do_recalibration;
-	bool use_vddio_meas;
-	bool enable_anc_mic_detect;
-	enum hw_jack_type hw_jack_type;
-	int key_code[8];
-};
-
-struct wcd9xxx_cfilt_mode {
-	u8 reg_mode_val;
-	u8 cur_mode_val;
-	u8 reg_mask;
-};
-
-struct wcd9xxx_mbhc_intr {
-	int poll_plug_rem;
-	int shortavg_complete;
-	int potential_button_press;
-	int button_release;
-	int dce_est_complete;
-	int insertion;
-	int hph_left_ocp;
-	int hph_right_ocp;
-	int hs_jack_switch;
-};
-
-struct wcd9xxx_mbhc_cb {
-	void (*enable_mux_bias_block)(struct snd_soc_codec *);
-	void (*cfilt_fast_mode)(struct snd_soc_codec *, struct wcd9xxx_mbhc *);
-	void (*codec_specific_cal)(struct snd_soc_codec *,
-				    struct wcd9xxx_mbhc *);
-	struct wcd9xxx_cfilt_mode (*switch_cfilt_mode)(struct wcd9xxx_mbhc *,
-							bool);
-	void (*select_cfilt)(struct snd_soc_codec *, struct wcd9xxx_mbhc *);
-	enum wcd9xxx_cdc_type (*get_cdc_type)(void);
-	void (*enable_clock_gate)(struct snd_soc_codec *, bool);
-	int (*setup_zdet)(struct wcd9xxx_mbhc *,
-			   enum mbhc_impedance_detect_stages stage);
-	void (*compute_impedance)(struct wcd9xxx_mbhc *, s16 *, s16 *,
-				   uint32_t *, uint32_t *);
-	void (*zdet_error_approx)(struct wcd9xxx_mbhc *, uint32_t *,
-				    uint32_t *);
-	void (*enable_mbhc_txfe)(struct snd_soc_codec *, bool);
-	int (*enable_mb_source)(struct snd_soc_codec *, bool, bool);
-	void (*setup_int_rbias)(struct snd_soc_codec *, bool);
-	void (*pull_mb_to_vddio)(struct snd_soc_codec *, bool);
-	bool (*insert_rem_status)(struct snd_soc_codec *);
-	void (*micbias_pulldown_ctrl)(struct wcd9xxx_mbhc *, bool);
-	int (*codec_rco_ctrl)(struct snd_soc_codec *, bool);
-	void (*hph_auto_pulldown_ctrl)(struct snd_soc_codec *, bool);
-	struct firmware_cal * (*get_hwdep_fw_cal)(struct snd_soc_codec *,
-				enum wcd_cal_type);
-};
-
-struct wcd9xxx_mbhc {
-	bool polling_active;
-	/* Delayed work to report long button press */
-	struct delayed_work mbhc_btn_dwork;
-	int buttons_pressed;
-	enum wcd9xxx_mbhc_state mbhc_state;
-	struct wcd9xxx_mbhc_config *mbhc_cfg;
-	const struct wcd9xxx_mbhc_cb *mbhc_cb;
-
-	struct mbhc_internal_cal_data mbhc_data;
-
-	struct mbhc_micbias_regs mbhc_bias_regs;
-	struct mbhc_micbias_regs mbhc_anc_bias_regs;
-
-	bool mbhc_micbias_switched;
-
-	u32 hph_status; /* track headphone status */
-	u8 hphlocp_cnt; /* headphone left ocp retry */
-	u8 hphrocp_cnt; /* headphone right ocp retry */
-
-	/* Work to perform MBHC Firmware Read */
-	struct delayed_work mbhc_firmware_dwork;
-	const struct firmware *mbhc_fw;
-	struct firmware_cal *mbhc_cal;
-
-	struct delayed_work mbhc_insert_dwork;
-
-	u8 current_plug;
-	struct work_struct correct_plug_swch;
-	/*
-	 * Work to perform polling on microphone voltage
-	 * in order to correct plug type once plug type
-	 * is detected as headphone
-	 */
-	struct work_struct correct_plug_noswch;
-	bool hs_detect_work_stop;
-
-	bool lpi_enabled; /* low power insertion detection */
-	bool in_swch_irq_handler;
-
-	struct wcd9xxx_resmgr *resmgr;
-	struct snd_soc_codec *codec;
-
-	bool no_mic_headset_override;
-
-	/* track PA/DAC state to sync with userspace */
-	unsigned long hph_pa_dac_state;
-	/*
-	 * save codec's state with resmgr event notification
-	 * bit flags of enum wcd9xxx_mbhc_event_state
-	 */
-	unsigned long event_state;
-
-	unsigned long mbhc_last_resume; /* in jiffies */
-
-	bool insert_detect_level_insert;
-
-	struct snd_soc_jack headset_jack;
-	struct snd_soc_jack button_jack;
-
-	struct notifier_block nblock;
-
-	bool micbias_enable;
-	int (*micbias_enable_cb)(struct snd_soc_codec*,  bool,
-				  enum wcd9xxx_micbias_num);
-
-	bool impedance_detect;
-	/* impedance of hphl and hphr */
-	uint32_t zl, zr;
-
-	u32 rco_clk_rate;
-
-	bool update_z;
-
-	u8   scaling_mux_in;
-	/* Holds codec specific interrupt mapping */
-	const struct wcd9xxx_mbhc_intr *intr_ids;
-
-	/* Indicates status of current source switch */
-	bool is_cs_enabled;
-
-	/* Holds type of Headset - Mono/Stereo */
-	enum mbhc_hph_type hph_type;
-
-#ifdef CONFIG_DEBUG_FS
-	struct dentry *debugfs_poke;
-	struct dentry *debugfs_mbhc;
-#endif
-
-	struct mutex mbhc_lock;
-};
-
-#define WCD9XXX_MBHC_CAL_SIZE(buttons, rload) ( \
-	sizeof(enum wcd9xxx_micbias_num) + \
-	sizeof(struct wcd9xxx_mbhc_general_cfg) + \
-	sizeof(struct wcd9xxx_mbhc_plug_detect_cfg) + \
-	    ((sizeof(s16) + sizeof(s16)) * buttons) + \
-	sizeof(struct wcd9xxx_mbhc_plug_type_cfg) + \
-	sizeof(struct wcd9xxx_mbhc_btn_detect_cfg) + \
-	sizeof(struct wcd9xxx_mbhc_imped_detect_cfg) + \
-	    ((sizeof(u16) + sizeof(u16)) * rload) \
-	)
-
-#define WCD9XXX_MBHC_CAL_GENERAL_PTR(cali) ( \
-	    (struct wcd9xxx_mbhc_general_cfg *) cali)
-#define WCD9XXX_MBHC_CAL_PLUG_DET_PTR(cali) ( \
-	    (struct wcd9xxx_mbhc_plug_detect_cfg *) \
-	    &(WCD9XXX_MBHC_CAL_GENERAL_PTR(cali)[1]))
-#define WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(cali) ( \
-	    (struct wcd9xxx_mbhc_plug_type_cfg *) \
-	    &(WCD9XXX_MBHC_CAL_PLUG_DET_PTR(cali)[1]))
-#define WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali) ( \
-	    (struct wcd9xxx_mbhc_btn_detect_cfg *) \
-	    &(WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(cali)[1]))
-#define WCD9XXX_MBHC_CAL_IMPED_DET_PTR(cali) ( \
-	    (struct wcd9xxx_mbhc_imped_detect_cfg *) \
-	    (((void *)&WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali)[1]) + \
-	     (WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali)->num_btn * \
-	      (sizeof(WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_low[0]) + \
-	       sizeof(WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_high[0])))) \
-	)
-
-/* minimum size of calibration data assuming there is only one button and
- * one rload.
- */
-#define WCD9XXX_MBHC_CAL_MIN_SIZE ( \
-	    sizeof(struct wcd9xxx_mbhc_general_cfg) + \
-	    sizeof(struct wcd9xxx_mbhc_plug_detect_cfg) + \
-	    sizeof(struct wcd9xxx_mbhc_plug_type_cfg) + \
-	    sizeof(struct wcd9xxx_mbhc_btn_detect_cfg) + \
-	    sizeof(struct wcd9xxx_mbhc_imped_detect_cfg) + \
-	    (sizeof(u16) * 2) \
-	)
-
-#define WCD9XXX_MBHC_CAL_BTN_SZ(cfg_ptr) ( \
-	    sizeof(struct wcd9xxx_mbhc_btn_detect_cfg) + \
-	    (cfg_ptr->num_btn * (sizeof(cfg_ptr->_v_btn_low[0]) + \
-				 sizeof(cfg_ptr->_v_btn_high[0]))))
-
-#define WCD9XXX_MBHC_CAL_IMPED_MIN_SZ ( \
-	    sizeof(struct wcd9xxx_mbhc_imped_detect_cfg) + sizeof(u16) * 2)
-
-#define WCD9XXX_MBHC_CAL_IMPED_SZ(cfg_ptr) ( \
-	    sizeof(struct wcd9xxx_mbhc_imped_detect_cfg) + \
-	    (cfg_ptr->_n_rload * \
-	     (sizeof(cfg_ptr->_rload[0]) + sizeof(cfg_ptr->_alpha[0]))))
-
-int wcd9xxx_mbhc_set_keycode(struct wcd9xxx_mbhc *mbhc);
-int wcd9xxx_mbhc_start(struct wcd9xxx_mbhc *mbhc,
-		       struct wcd9xxx_mbhc_config *mbhc_cfg);
-void wcd9xxx_mbhc_stop(struct wcd9xxx_mbhc *mbhc);
-int wcd9xxx_mbhc_init(struct wcd9xxx_mbhc *mbhc, struct wcd9xxx_resmgr *resmgr,
-		      struct snd_soc_codec *codec,
-		      int (*micbias_enable_cb)(struct snd_soc_codec*,  bool,
-						enum wcd9xxx_micbias_num),
-		      const struct wcd9xxx_mbhc_cb *mbhc_cb,
-		      const struct wcd9xxx_mbhc_intr *mbhc_cdc_intr_ids,
-		      int rco_clk_rate,
-		      bool impedance_det_en);
-void wcd9xxx_mbhc_deinit(struct wcd9xxx_mbhc *mbhc);
-void *wcd9xxx_mbhc_cal_btn_det_mp(
-			    const struct wcd9xxx_mbhc_btn_detect_cfg *btn_det,
-			    const enum wcd9xxx_mbhc_btn_det_mem mem);
-int wcd9xxx_mbhc_get_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
-			       uint32_t *zr);
-#endif /* __WCD9XXX_MBHC_H__ */
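
As a usage illustration of the calibration layout removed above: a caller sizes the packed blob
with WCD9XXX_MBHC_CAL_SIZE() and then locates each section through the *_PTR() macros, setting
num_btn before dereferencing the impedance section, since that section's offset depends on it.
This is only a hedged sketch assuming the header above is available; the helper name and the
button/rload counts passed in are made up for illustration.

#include <linux/slab.h>
#include "wcd9xxx-mbhc.h"

/* Hypothetical helper: allocate a calibration blob and tag its sections. */
static void *example_alloc_mbhc_cal(u8 num_btn, u8 n_rload)
{
	void *cal = kzalloc(WCD9XXX_MBHC_CAL_SIZE(num_btn, n_rload),
			    GFP_KERNEL);

	if (!cal)
		return NULL;

	/* num_btn must be set first; the impedance section offset uses it. */
	WCD9XXX_MBHC_CAL_BTN_DET_PTR(cal)->num_btn = num_btn;
	WCD9XXX_MBHC_CAL_IMPED_DET_PTR(cal)->_n_rload = n_rload;

	return cal;
}
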
diff --git a/sound/soc/codecs/wcd9xxx-resmgr-v2.c b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
index 825aaee..feef0a4 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr-v2.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
@@ -108,6 +108,7 @@
 	}
 	return resmgr->clk_type;
 }
+EXPORT_SYMBOL(wcd_resmgr_get_clk_type);
 
 static void wcd_resmgr_cdc_specific_get_clk(struct wcd9xxx_resmgr_v2 *resmgr,
 						int clk_users)
@@ -123,6 +124,10 @@
 	}
 }
 
+/*
+ * wcd_resmgr_post_ssr_v2
+ * @resmgr: handle to struct wcd9xxx_resmgr_v2
+ */
 void wcd_resmgr_post_ssr_v2(struct wcd9xxx_resmgr_v2 *resmgr)
 {
 	int old_bg_audio_users;
@@ -157,7 +162,7 @@
 
 	WCD9XXX_V2_BG_CLK_UNLOCK(resmgr);
 }
-
+EXPORT_SYMBOL(wcd_resmgr_post_ssr_v2);
 
 /*
  * wcd_resmgr_enable_master_bias: enable codec master bias
@@ -190,6 +195,7 @@
 	mutex_unlock(&resmgr->master_bias_lock);
 	return 0;
 }
+EXPORT_SYMBOL(wcd_resmgr_enable_master_bias);
 
 /*
  * wcd_resmgr_disable_master_bias: disable codec master bias
@@ -213,6 +219,7 @@
 	mutex_unlock(&resmgr->master_bias_lock);
 	return 0;
 }
+EXPORT_SYMBOL(wcd_resmgr_disable_master_bias);
 
 static int wcd_resmgr_enable_clk_mclk(struct wcd9xxx_resmgr_v2 *resmgr)
 {
@@ -511,6 +518,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL(wcd_resmgr_enable_clk_block);
 
 void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
 					  int sido_src)
@@ -601,6 +609,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL(wcd_resmgr_disable_clk_block);
 
 /*
  * wcd_resmgr_init: initialize wcd resource manager
@@ -639,6 +648,7 @@
 
 	return resmgr;
 }
+EXPORT_SYMBOL(wcd_resmgr_init);
 
 /*
  * wcd_resmgr_remove: Clean-up wcd resource manager
@@ -649,6 +659,7 @@
 	mutex_destroy(&resmgr->master_bias_lock);
 	kfree(resmgr);
 }
+EXPORT_SYMBOL(wcd_resmgr_remove);
 
 /*
  * wcd_resmgr_post_init: post init call to assign codec handle
@@ -676,5 +687,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(wcd_resmgr_post_init);
+
 MODULE_DESCRIPTION("wcd9xxx resmgr v2 module");
 MODULE_LICENSE("GPL v2");
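
The EXPORT_SYMBOL additions above make the resmgr v2 entry points callable from separately
built codec modules. The sketch below shows such a consumer; it assumes both master-bias
helpers take only the resmgr handle, as their use elsewhere in this driver suggests, and the
function name and surrounding context are hypothetical, not part of this patch.

#include <linux/module.h>
#include "wcd9xxx-resmgr-v2.h"

/* Hypothetical consumer: vote the master bias on and off around codec work. */
static int example_codec_work(struct wcd9xxx_resmgr_v2 *resmgr)
{
	int ret = wcd_resmgr_enable_master_bias(resmgr);

	if (ret)
		return ret;

	/* codec-specific register writes would go here */

	return wcd_resmgr_disable_master_bias(resmgr);
}
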
diff --git a/sound/soc/codecs/wcd9xxx-resmgr.c b/sound/soc/codecs/wcd9xxx-resmgr.c
deleted file mode 100644
index 4b02652..0000000
--- a/sound/soc/codecs/wcd9xxx-resmgr.c
+++ /dev/null
@@ -1,1099 +0,0 @@
-/* Copyright (c) 2012-2014, 2016-2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/printk.h>
-#include <linux/ratelimit.h>
-#include <linux/debugfs.h>
-#include <linux/mfd/wcd9xxx/core.h>
-#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
-#include <uapi/linux/mfd/wcd9xxx/wcd9320_registers.h>
-#include <linux/mfd/wcd9xxx/wcd9330_registers.h>
-#include <linux/mfd/wcd9xxx/pdata.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/tlv.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/pm_runtime.h>
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include "wcd9xxx-resmgr.h"
-
-static char wcd9xxx_event_string[][64] = {
-	"WCD9XXX_EVENT_INVALID",
-
-	"WCD9XXX_EVENT_PRE_RCO_ON",
-	"WCD9XXX_EVENT_POST_RCO_ON",
-	"WCD9XXX_EVENT_PRE_RCO_OFF",
-	"WCD9XXX_EVENT_POST_RCO_OFF",
-
-	"WCD9XXX_EVENT_PRE_MCLK_ON",
-	"WCD9XXX_EVENT_POST_MCLK_ON",
-	"WCD9XXX_EVENT_PRE_MCLK_OFF",
-	"WCD9XXX_EVENT_POST_MCLK_OFF",
-
-	"WCD9XXX_EVENT_PRE_BG_OFF",
-	"WCD9XXX_EVENT_POST_BG_OFF",
-	"WCD9XXX_EVENT_PRE_BG_AUDIO_ON",
-	"WCD9XXX_EVENT_POST_BG_AUDIO_ON",
-	"WCD9XXX_EVENT_PRE_BG_MBHC_ON",
-	"WCD9XXX_EVENT_POST_BG_MBHC_ON",
-
-	"WCD9XXX_EVENT_PRE_MICBIAS_1_OFF",
-	"WCD9XXX_EVENT_POST_MICBIAS_1_OFF",
-	"WCD9XXX_EVENT_PRE_MICBIAS_2_OFF",
-	"WCD9XXX_EVENT_POST_MICBIAS_2_OFF",
-	"WCD9XXX_EVENT_PRE_MICBIAS_3_OFF",
-	"WCD9XXX_EVENT_POST_MICBIAS_3_OFF",
-	"WCD9XXX_EVENT_PRE_MICBIAS_4_OFF",
-	"WCD9XXX_EVENT_POST_MICBIAS_4_OFF",
-	"WCD9XXX_EVENT_PRE_MICBIAS_1_ON",
-	"WCD9XXX_EVENT_POST_MICBIAS_1_ON",
-	"WCD9XXX_EVENT_PRE_MICBIAS_2_ON",
-	"WCD9XXX_EVENT_POST_MICBIAS_2_ON",
-	"WCD9XXX_EVENT_PRE_MICBIAS_3_ON",
-	"WCD9XXX_EVENT_POST_MICBIAS_3_ON",
-	"WCD9XXX_EVENT_PRE_MICBIAS_4_ON",
-	"WCD9XXX_EVENT_POST_MICBIAS_4_ON",
-
-	"WCD9XXX_EVENT_PRE_CFILT_1_OFF",
-	"WCD9XXX_EVENT_POST_CFILT_1_OFF",
-	"WCD9XXX_EVENT_PRE_CFILT_2_OFF",
-	"WCD9XXX_EVENT_POST_CFILT_2_OFF",
-	"WCD9XXX_EVENT_PRE_CFILT_3_OFF",
-	"WCD9XXX_EVENT_POST_CFILT_3_OFF",
-	"WCD9XXX_EVENT_PRE_CFILT_1_ON",
-	"WCD9XXX_EVENT_POST_CFILT_1_ON",
-	"WCD9XXX_EVENT_PRE_CFILT_2_ON",
-	"WCD9XXX_EVENT_POST_CFILT_2_ON",
-	"WCD9XXX_EVENT_PRE_CFILT_3_ON",
-	"WCD9XXX_EVENT_POST_CFILT_3_ON",
-
-	"WCD9XXX_EVENT_PRE_HPHL_PA_ON",
-	"WCD9XXX_EVENT_POST_HPHL_PA_OFF",
-	"WCD9XXX_EVENT_PRE_HPHR_PA_ON",
-	"WCD9XXX_EVENT_POST_HPHR_PA_OFF",
-
-	"WCD9XXX_EVENT_POST_RESUME",
-
-	"WCD9XXX_EVENT_PRE_TX_3_ON",
-	"WCD9XXX_EVENT_POST_TX_3_OFF",
-
-	"WCD9XXX_EVENT_LAST",
-};
-
-#define WCD9XXX_RCO_CALIBRATION_RETRY_COUNT 5
-#define WCD9XXX_RCO_CALIBRATION_DELAY_US 5000
-#define WCD9XXX_USLEEP_RANGE_MARGIN_US 100
-#define WCD9XXX_RCO_CALIBRATION_DELAY_INC_US 1000
-
-struct wcd9xxx_resmgr_cond_entry {
-	unsigned short reg;
-	int shift;
-	bool invert;
-	enum wcd9xxx_resmgr_cond cond;
-	struct list_head list;
-};
-
-static enum wcd9xxx_clock_type wcd9xxx_save_clock(struct wcd9xxx_resmgr
-						  *resmgr);
-static void wcd9xxx_restore_clock(struct wcd9xxx_resmgr *resmgr,
-				  enum wcd9xxx_clock_type type);
-
-const char *wcd9xxx_get_event_string(enum wcd9xxx_notify_event type)
-{
-	return wcd9xxx_event_string[type];
-}
-
-void wcd9xxx_resmgr_notifier_call(struct wcd9xxx_resmgr *resmgr,
-				  const enum wcd9xxx_notify_event e)
-{
-	pr_debug("%s: notifier call event %d\n", __func__, e);
-	blocking_notifier_call_chain(&resmgr->notifier, e, resmgr);
-}
-
-static void wcd9xxx_disable_bg(struct wcd9xxx_resmgr *resmgr)
-{
-	/* Notify bg mode change */
-	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_BG_OFF);
-	/* Disable bg */
-	snd_soc_update_bits(resmgr->codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL,
-			    0x03, 0x00);
-	usleep_range(100, 110);
-	/* Notify bg mode change */
-	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_POST_BG_OFF);
-}
-
-/*
- * The bandgap should always be enabled in slow mode.
- * Fast mode doesn't need to be enabled here, as the fast-mode BG is driven
- * by the MBHC override.
- */
-static void wcd9xxx_enable_bg(struct wcd9xxx_resmgr *resmgr)
-{
-	struct snd_soc_codec *codec = resmgr->codec;
-
-	/* Enable BG in slow mode and precharge */
-	snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x80, 0x80);
-	snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x04, 0x04);
-	snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x01, 0x01);
-	usleep_range(1000, 1100);
-	snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x80, 0x00);
-}
-
-static void wcd9xxx_enable_bg_audio(struct wcd9xxx_resmgr *resmgr)
-{
-	/* Notify bandgap mode change */
-	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_BG_AUDIO_ON);
-	wcd9xxx_enable_bg(resmgr);
-	/* Notify bandgap mode change */
-	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_POST_BG_AUDIO_ON);
-}
-
-static void wcd9xxx_enable_bg_mbhc(struct wcd9xxx_resmgr *resmgr)
-{
-	struct snd_soc_codec *codec = resmgr->codec;
-
-	/* Notify bandgap mode change */
-	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_BG_MBHC_ON);
-
-	/*
-	 * mclk should be off or the clk buff source shouldn't be VBG.
-	 * Let's turn off mclk always
-	 */
-	WARN_ON(snd_soc_read(codec, WCD9XXX_A_CLK_BUFF_EN2) & (1 << 2));
-
-	wcd9xxx_enable_bg(resmgr);
-	/* Notify bandgap mode change */
-	wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_POST_BG_MBHC_ON);
-}
-
-static void wcd9xxx_disable_clock_block(struct wcd9xxx_resmgr *resmgr)
-{
-	struct snd_soc_codec *codec = resmgr->codec;
-
-	pr_debug("%s: enter\n", __func__);
-	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
-
-	/* Notify */
-	if (resmgr->clk_type == WCD9XXX_CLK_RCO)
-		wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_RCO_OFF);
-	else
-		wcd9xxx_resmgr_notifier_call(resmgr,
-					     WCD9XXX_EVENT_PRE_MCLK_OFF);
-
-	switch (resmgr->codec_type) {
-	case WCD9XXX_CDC_TYPE_TOMTOM:
-		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x04, 0x00);
-		usleep_range(50, 55);
-		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02, 0x02);
-		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x40, 0x40);
-		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x40, 0x00);
-		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x01, 0x00);
-		break;
-	default:
-		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x04, 0x00);
-		usleep_range(50, 55);
-		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02, 0x02);
-		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x05, 0x00);
-		break;
-	}
-	usleep_range(50, 55);
-	/* Notify */
-	if (resmgr->clk_type == WCD9XXX_CLK_RCO) {
-		wcd9xxx_resmgr_notifier_call(resmgr,
-					     WCD9XXX_EVENT_POST_RCO_OFF);
-	} else {
-		wcd9xxx_resmgr_notifier_call(resmgr,
-					     WCD9XXX_EVENT_POST_MCLK_OFF);
-	}
-	pr_debug("%s: leave\n", __func__);
-}
-
-static void wcd9xxx_resmgr_cdc_specific_get_clk(struct wcd9xxx_resmgr *resmgr,
-						int clk_users)
-{
-	/*
-	 * Caller of this function should have acquired
-	 * the BG_CLK lock
-	 */
-	WCD9XXX_BG_CLK_UNLOCK(resmgr);
-	if (clk_users) {
-		if (resmgr->resmgr_cb &&
-		    resmgr->resmgr_cb->cdc_rco_ctrl) {
-			while (clk_users--)
-				resmgr->resmgr_cb->cdc_rco_ctrl(resmgr->codec,
-								true);
-		}
-	}
-	/* Acquire BG_CLK lock before return */
-	WCD9XXX_BG_CLK_LOCK(resmgr);
-}
-
-void wcd9xxx_resmgr_post_ssr(struct wcd9xxx_resmgr *resmgr)
-{
-	int old_bg_audio_users, old_bg_mbhc_users;
-	int old_clk_rco_users, old_clk_mclk_users;
-
-	pr_debug("%s: enter\n", __func__);
-
-	WCD9XXX_BG_CLK_LOCK(resmgr);
-	old_bg_audio_users = resmgr->bg_audio_users;
-	old_bg_mbhc_users = resmgr->bg_mbhc_users;
-	old_clk_rco_users = resmgr->clk_rco_users;
-	old_clk_mclk_users = resmgr->clk_mclk_users;
-	resmgr->bg_audio_users = 0;
-	resmgr->bg_mbhc_users = 0;
-	resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF;
-	resmgr->clk_rco_users = 0;
-	resmgr->clk_mclk_users = 0;
-	resmgr->clk_type = WCD9XXX_CLK_OFF;
-
-	if (old_bg_audio_users) {
-		while (old_bg_audio_users--)
-			wcd9xxx_resmgr_get_bandgap(resmgr,
-						  WCD9XXX_BANDGAP_AUDIO_MODE);
-	}
-
-	if (old_bg_mbhc_users) {
-		while (old_bg_mbhc_users--)
-			wcd9xxx_resmgr_get_bandgap(resmgr,
-						  WCD9XXX_BANDGAP_MBHC_MODE);
-	}
-
-	if (old_clk_mclk_users) {
-		while (old_clk_mclk_users--)
-			wcd9xxx_resmgr_get_clk_block(resmgr, WCD9XXX_CLK_MCLK);
-	}
-
-	if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
-		wcd9xxx_resmgr_cdc_specific_get_clk(resmgr, old_clk_rco_users);
-	} else if (old_clk_rco_users) {
-		while (old_clk_rco_users--)
-			wcd9xxx_resmgr_get_clk_block(resmgr,
-					WCD9XXX_CLK_RCO);
-	}
-	WCD9XXX_BG_CLK_UNLOCK(resmgr);
-	pr_debug("%s: leave\n", __func__);
-}
-
-/*
- * wcd9xxx_resmgr_get_bandgap : Vote for bandgap ref
- * choice : WCD9XXX_BANDGAP_AUDIO_MODE, WCD9XXX_BANDGAP_MBHC_MODE
- */
-void wcd9xxx_resmgr_get_bandgap(struct wcd9xxx_resmgr *resmgr,
-				const enum wcd9xxx_bandgap_type choice)
-{
-	enum wcd9xxx_clock_type clock_save = WCD9XXX_CLK_OFF;
-
-	pr_debug("%s: enter, wants %d\n", __func__, choice);
-
-	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
-	switch (choice) {
-	case WCD9XXX_BANDGAP_AUDIO_MODE:
-		resmgr->bg_audio_users++;
-		if (resmgr->bg_audio_users == 1 && resmgr->bg_mbhc_users) {
-			/*
-			 * Current bg is MBHC mode, about to switch to
-			 * audio mode.
-			 */
-			WARN_ON(resmgr->bandgap_type !=
-				WCD9XXX_BANDGAP_MBHC_MODE);
-
-			/* BG mode can be changed only with clock off */
-			if (resmgr->codec_type != WCD9XXX_CDC_TYPE_TOMTOM)
-				clock_save = wcd9xxx_save_clock(resmgr);
-			/* Switch BG mode */
-			wcd9xxx_disable_bg(resmgr);
-			wcd9xxx_enable_bg_audio(resmgr);
-			/* restore clock */
-			if (resmgr->codec_type != WCD9XXX_CDC_TYPE_TOMTOM)
-				wcd9xxx_restore_clock(resmgr, clock_save);
-		} else if (resmgr->bg_audio_users == 1) {
-			/* currently off, just enable it */
-			WARN_ON(resmgr->bandgap_type != WCD9XXX_BANDGAP_OFF);
-			wcd9xxx_enable_bg_audio(resmgr);
-		}
-		resmgr->bandgap_type = WCD9XXX_BANDGAP_AUDIO_MODE;
-		break;
-	case WCD9XXX_BANDGAP_MBHC_MODE:
-		resmgr->bg_mbhc_users++;
-		if (resmgr->bandgap_type == WCD9XXX_BANDGAP_MBHC_MODE ||
-		    resmgr->bandgap_type == WCD9XXX_BANDGAP_AUDIO_MODE)
-			/* do nothing */
-			break;
-
-		/* bg mode can be changed only with clock off */
-		clock_save = wcd9xxx_save_clock(resmgr);
-		/* enable bg with MBHC mode */
-		wcd9xxx_enable_bg_mbhc(resmgr);
-		/* restore clock */
-		wcd9xxx_restore_clock(resmgr, clock_save);
-		/* save current mode */
-		resmgr->bandgap_type = WCD9XXX_BANDGAP_MBHC_MODE;
-		break;
-	default:
-		pr_err("%s: Error, Invalid bandgap settings\n", __func__);
-		break;
-	}
-
-	pr_debug("%s: bg users audio %d, mbhc %d\n", __func__,
-		 resmgr->bg_audio_users, resmgr->bg_mbhc_users);
-}
-
-/*
- * wcd9xxx_resmgr_put_bandgap : Unvote bandgap ref that has been voted
- * choice : WCD9XXX_BANDGAP_AUDIO_MODE, WCD9XXX_BANDGAP_MBHC_MODE
- */
-void wcd9xxx_resmgr_put_bandgap(struct wcd9xxx_resmgr *resmgr,
-				enum wcd9xxx_bandgap_type choice)
-{
-	enum wcd9xxx_clock_type clock_save;
-
-	pr_debug("%s: enter choice %d\n", __func__, choice);
-
-	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
-	switch (choice) {
-	case WCD9XXX_BANDGAP_AUDIO_MODE:
-		if (--resmgr->bg_audio_users == 0) {
-			if (resmgr->bg_mbhc_users) {
-				/* bg mode can be changed only with clock off */
-				clock_save = wcd9xxx_save_clock(resmgr);
-				/* switch to MBHC mode */
-				wcd9xxx_enable_bg_mbhc(resmgr);
-				/* restore clock */
-				wcd9xxx_restore_clock(resmgr, clock_save);
-				resmgr->bandgap_type =
-				    WCD9XXX_BANDGAP_MBHC_MODE;
-			} else {
-				/* turn off */
-				wcd9xxx_disable_bg(resmgr);
-				resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF;
-			}
-		}
-		break;
-	case WCD9XXX_BANDGAP_MBHC_MODE:
-		WARN(resmgr->bandgap_type == WCD9XXX_BANDGAP_OFF,
-		     "Unexpected bandgap type %d\n", resmgr->bandgap_type);
-		if (--resmgr->bg_mbhc_users == 0 &&
-		    resmgr->bandgap_type == WCD9XXX_BANDGAP_MBHC_MODE) {
-			wcd9xxx_disable_bg(resmgr);
-			resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF;
-		}
-		break;
-	default:
-		pr_err("%s: Error, Invalid bandgap settings\n", __func__);
-		break;
-	}
-
-	pr_debug("%s: bg users audio %d, mbhc %d\n", __func__,
-		 resmgr->bg_audio_users, resmgr->bg_mbhc_users);
-}
-
-void wcd9xxx_resmgr_enable_rx_bias(struct wcd9xxx_resmgr *resmgr, u32 enable)
-{
-	struct snd_soc_codec *codec = resmgr->codec;
-
-	if (enable) {
-		resmgr->rx_bias_count++;
-		if (resmgr->rx_bias_count == 1)
-			snd_soc_update_bits(codec, WCD9XXX_A_RX_COM_BIAS,
-					    0x80, 0x80);
-	} else {
-		resmgr->rx_bias_count--;
-		if (!resmgr->rx_bias_count)
-			snd_soc_update_bits(codec, WCD9XXX_A_RX_COM_BIAS,
-					    0x80, 0x00);
-	}
-}
-
-int wcd9xxx_resmgr_enable_config_mode(struct wcd9xxx_resmgr *resmgr, int enable)
-{
-	struct snd_soc_codec *codec = resmgr->codec;
-
-	pr_debug("%s: enable = %d\n", __func__, enable);
-	if (enable) {
-		snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x10, 0);
-		/* bandgap mode to fast */
-		if (resmgr->pdata->mclk_rate == WCD9XXX_MCLK_CLK_12P288MHZ)
-			/* Set current value to 200nA for 12.288MHz clock */
-			snd_soc_write(codec, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x37);
-		else
-			snd_soc_write(codec, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x17);
-
-		usleep_range(5, 10);
-		snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x80, 0x80);
-		snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_TEST, 0x80, 0x80);
-		usleep_range(10, 20);
-		snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_TEST, 0x80, 0);
-		usleep_range(10000, 10100);
-
-		if (resmgr->pdata->mclk_rate != WCD9XXX_MCLK_CLK_12P288MHZ)
-			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
-							0x08, 0x08);
-	} else {
-		snd_soc_update_bits(codec, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x1, 0);
-		snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x80, 0);
-	}
-
-	return 0;
-}
-
-static void wcd9xxx_enable_clock_block(struct wcd9xxx_resmgr *resmgr,
-				enum wcd9xxx_clock_config_mode config_mode)
-{
-	struct snd_soc_codec *codec = resmgr->codec;
-	unsigned long delay = WCD9XXX_RCO_CALIBRATION_DELAY_US;
-	int num_retry = 0;
-	unsigned int valr;
-	unsigned int valr1;
-	unsigned int valw[] = {0x01, 0x01, 0x10, 0x00};
-
-	pr_debug("%s: config_mode = %d\n", __func__, config_mode);
-
-	/* transit to RCO requires mclk off */
-	if (resmgr->codec_type != WCD9XXX_CDC_TYPE_TOMTOM)
-		WARN_ON(snd_soc_read(codec, WCD9XXX_A_CLK_BUFF_EN2) & (1 << 2));
-
-	if (config_mode == WCD9XXX_CFG_RCO) {
-		/* Notify */
-		wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_RCO_ON);
-		/* enable RCO and switch to it */
-		wcd9xxx_resmgr_enable_config_mode(resmgr, 1);
-		snd_soc_write(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02);
-		usleep_range(1000, 1100);
-	} else if (config_mode == WCD9XXX_CFG_CAL_RCO) {
-		snd_soc_update_bits(codec, TOMTOM_A_BIAS_OSC_BG_CTL,
-				    0x01, 0x01);
-		/* 1ms sleep required after BG enabled */
-		usleep_range(1000, 1100);
-
-		if (resmgr->pdata->mclk_rate == WCD9XXX_MCLK_CLK_12P288MHZ) {
-			/*
-			 * Set RCO clock rate as 12.288MHz rate explicitly
-			 * as the Qfuse values are incorrect for this rate
-			 */
-			snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL,
-					0x50, 0x50);
-		} else {
-			snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL,
-					0x18, 0x10);
-			valr = snd_soc_read(codec,
-					TOMTOM_A_QFUSE_DATA_OUT0) & (0x04);
-			valr1 = snd_soc_read(codec,
-					TOMTOM_A_QFUSE_DATA_OUT1) & (0x08);
-			valr = (valr >> 1) | (valr1 >> 3);
-			snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL, 0x60,
-					valw[valr] << 5);
-		}
-		snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL, 0x80, 0x80);
-
-		do {
-			snd_soc_update_bits(codec,
-					    TOMTOM_A_RCO_CALIBRATION_CTRL1,
-					    0x80, 0x80);
-			snd_soc_update_bits(codec,
-					    TOMTOM_A_RCO_CALIBRATION_CTRL1,
-					    0x80, 0x00);
-			/* RCO calibration takes approx. 5ms */
-			usleep_range(delay, delay +
-					    WCD9XXX_USLEEP_RANGE_MARGIN_US);
-			if (!(snd_soc_read(codec,
-				TOMTOM_A_RCO_CALIBRATION_RESULT1) & 0x10))
-				break;
-			if (num_retry >= 3) {
-				delay = delay +
-					WCD9XXX_RCO_CALIBRATION_DELAY_INC_US;
-			}
-		} while (num_retry++ < WCD9XXX_RCO_CALIBRATION_RETRY_COUNT);
-	} else {
-		/* Notify */
-		wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_MCLK_ON);
-		/* switch to MCLK */
-
-		switch (resmgr->codec_type) {
-		case WCD9XXX_CDC_TYPE_TOMTOM:
-			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
-					    0x08, 0x00);
-			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
-					    0x40, 0x40);
-			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
-					    0x40, 0x00);
-			/* clk source to ext clk and clk buff ref to VBG */
-			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
-					    0x0C, 0x04);
-			break;
-		default:
-			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
-					    0x08, 0x00);
-			/* if RCO is enabled, switch from it */
-			if (snd_soc_read(codec, WCD9XXX_A_RC_OSC_FREQ) & 0x80) {
-				snd_soc_write(codec, WCD9XXX_A_CLK_BUFF_EN2,
-					      0x02);
-				wcd9xxx_resmgr_enable_config_mode(resmgr, 0);
-			}
-			/* clk source to ext clk and clk buff ref to VBG */
-			snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
-					    0x0C, 0x04);
-			break;
-		}
-	}
-
-	if (config_mode != WCD9XXX_CFG_CAL_RCO) {
-		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
-				    0x01, 0x01);
-		/*
-		 * sleep required by codec hardware to
-		 * enable clock buffer
-		 */
-		usleep_range(1000, 1200);
-		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2,
-				    0x02, 0x00);
-		/* on MCLK */
-		snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2,
-				    0x04, 0x04);
-		snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_MCLK_CTL,
-				    0x01, 0x01);
-	}
-	usleep_range(50, 55);
-
-	/* Notify */
-	if (config_mode == WCD9XXX_CFG_RCO)
-		wcd9xxx_resmgr_notifier_call(resmgr,
-					     WCD9XXX_EVENT_POST_RCO_ON);
-	else if (config_mode == WCD9XXX_CFG_MCLK)
-		wcd9xxx_resmgr_notifier_call(resmgr,
-					     WCD9XXX_EVENT_POST_MCLK_ON);
-}
-
-/*
- * disable clock and return previous clock state
- */
-static enum wcd9xxx_clock_type wcd9xxx_save_clock(struct wcd9xxx_resmgr *resmgr)
-{
-	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
-	if (resmgr->clk_type != WCD9XXX_CLK_OFF)
-		wcd9xxx_disable_clock_block(resmgr);
-	return resmgr->clk_type != WCD9XXX_CLK_OFF;
-}
-
-static void wcd9xxx_restore_clock(struct wcd9xxx_resmgr *resmgr,
-				  enum wcd9xxx_clock_type type)
-{
-	if (type != WCD9XXX_CLK_OFF)
-		wcd9xxx_enable_clock_block(resmgr, type == WCD9XXX_CLK_RCO);
-}
-
-void wcd9xxx_resmgr_get_clk_block(struct wcd9xxx_resmgr *resmgr,
-				  enum wcd9xxx_clock_type type)
-{
-	struct snd_soc_codec *codec = resmgr->codec;
-
-	pr_debug("%s: current %d, requested %d, rco_users %d, mclk_users %d\n",
-		 __func__, resmgr->clk_type, type,
-		 resmgr->clk_rco_users, resmgr->clk_mclk_users);
-	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
-	switch (type) {
-	case WCD9XXX_CLK_RCO:
-		if (++resmgr->clk_rco_users == 1 &&
-		    resmgr->clk_type == WCD9XXX_CLK_OFF) {
-			/* enable RCO and switch to it */
-			wcd9xxx_enable_clock_block(resmgr, WCD9XXX_CFG_RCO);
-			resmgr->clk_type = WCD9XXX_CLK_RCO;
-		} else if (resmgr->clk_rco_users == 1 &&
-			   resmgr->clk_type == WCD9XXX_CLK_MCLK &&
-			   resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
-			/*
-			 * Enable RCO but do not switch CLK MUX to RCO
-			 * unless ext_clk_users is 1, which indicates
-			 * EXT CLK is enabled for RCO calibration
-			 */
-			wcd9xxx_enable_clock_block(resmgr, WCD9XXX_CFG_CAL_RCO);
-			if (resmgr->ext_clk_users == 1) {
-				/* Notify */
-				wcd9xxx_resmgr_notifier_call(resmgr,
-						WCD9XXX_EVENT_PRE_RCO_ON);
-				/* CLK MUX to RCO */
-				if (resmgr->pdata->mclk_rate !=
-						WCD9XXX_MCLK_CLK_12P288MHZ)
-					snd_soc_update_bits(codec,
-						WCD9XXX_A_CLK_BUFF_EN1,
-						0x08, 0x08);
-				resmgr->clk_type = WCD9XXX_CLK_RCO;
-				wcd9xxx_resmgr_notifier_call(resmgr,
-						WCD9XXX_EVENT_POST_RCO_ON);
-			}
-		}
-		break;
-	case WCD9XXX_CLK_MCLK:
-		if (++resmgr->clk_mclk_users == 1 &&
-		    resmgr->clk_type == WCD9XXX_CLK_OFF) {
-			/* switch to MCLK */
-			wcd9xxx_enable_clock_block(resmgr, WCD9XXX_CFG_MCLK);
-			resmgr->clk_type = WCD9XXX_CLK_MCLK;
-		} else if (resmgr->clk_mclk_users == 1 &&
-			   resmgr->clk_type == WCD9XXX_CLK_RCO) {
-			/* RCO to MCLK switch, with RCO still powered on */
-			if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
-				wcd9xxx_resmgr_notifier_call(resmgr,
-						WCD9XXX_EVENT_PRE_MCLK_ON);
-				snd_soc_update_bits(codec,
-						WCD9XXX_A_BIAS_CENTRAL_BG_CTL,
-						0x40, 0x00);
-				/* Enable clock buffer */
-				snd_soc_update_bits(codec,
-						WCD9XXX_A_CLK_BUFF_EN1,
-						0x01, 0x01);
-				snd_soc_update_bits(codec,
-						WCD9XXX_A_CLK_BUFF_EN1,
-						0x08, 0x00);
-				wcd9xxx_resmgr_notifier_call(resmgr,
-						WCD9XXX_EVENT_POST_MCLK_ON);
-			} else {
-				/* if RCO is enabled, switch from it */
-				WARN_ON(!(snd_soc_read(resmgr->codec,
-					WCD9XXX_A_RC_OSC_FREQ) & 0x80));
-				/* disable clock block */
-				wcd9xxx_disable_clock_block(resmgr);
-				/* switch to MCLK */
-				wcd9xxx_enable_clock_block(resmgr,
-							   WCD9XXX_CFG_MCLK);
-			}
-			resmgr->clk_type = WCD9XXX_CLK_MCLK;
-		}
-		break;
-	default:
-		pr_err("%s: Error, Invalid clock get request %d\n", __func__,
-		       type);
-		break;
-	}
-	pr_debug("%s: leave\n", __func__);
-}
-
-void wcd9xxx_resmgr_put_clk_block(struct wcd9xxx_resmgr *resmgr,
-				  enum wcd9xxx_clock_type type)
-{
-	struct snd_soc_codec *codec = resmgr->codec;
-
-	pr_debug("%s: current %d, put %d\n", __func__, resmgr->clk_type, type);
-
-	WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
-	switch (type) {
-	case WCD9XXX_CLK_RCO:
-		if (--resmgr->clk_rco_users == 0 &&
-		    resmgr->clk_type == WCD9XXX_CLK_RCO) {
-			wcd9xxx_disable_clock_block(resmgr);
-			if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
-				/* Powerdown RCO */
-				snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL,
-						    0x80, 0x00);
-				snd_soc_update_bits(codec,
-						    TOMTOM_A_BIAS_OSC_BG_CTL,
-						    0x01, 0x00);
-			} else {
-				/* if RCO is enabled, switch from it */
-				if (snd_soc_read(resmgr->codec,
-						 WCD9XXX_A_RC_OSC_FREQ)
-						 & 0x80) {
-					snd_soc_write(resmgr->codec,
-						WCD9XXX_A_CLK_BUFF_EN2,
-						0x02);
-					wcd9xxx_resmgr_enable_config_mode(
-								resmgr,	0);
-				}
-			}
-			resmgr->clk_type = WCD9XXX_CLK_OFF;
-		}
-		break;
-	case WCD9XXX_CLK_MCLK:
-		if (--resmgr->clk_mclk_users == 0 &&
-		    resmgr->clk_rco_users == 0) {
-			wcd9xxx_disable_clock_block(resmgr);
-
-			if ((resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) &&
-			    (snd_soc_read(codec, TOMTOM_A_RCO_CTRL) & 0x80)) {
-				/* powerdown RCO*/
-				snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL,
-						    0x80, 0x00);
-				snd_soc_update_bits(codec,
-						    TOMTOM_A_BIAS_OSC_BG_CTL,
-						    0x01, 0x00);
-			}
-			resmgr->clk_type = WCD9XXX_CLK_OFF;
-		} else if (resmgr->clk_mclk_users == 0 &&
-			   resmgr->clk_rco_users) {
-			if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
-				if (!(snd_soc_read(codec, TOMTOM_A_RCO_CTRL) &
-				      0x80)) {
-					dev_dbg(codec->dev, "%s: Enabling RCO\n",
-						__func__);
-					wcd9xxx_enable_clock_block(resmgr,
-							WCD9XXX_CFG_CAL_RCO);
-					snd_soc_update_bits(codec,
-							WCD9XXX_A_CLK_BUFF_EN1,
-							0x01, 0x00);
-				} else {
-					wcd9xxx_resmgr_notifier_call(resmgr,
-						WCD9XXX_EVENT_PRE_MCLK_OFF);
-					snd_soc_update_bits(codec,
-							WCD9XXX_A_CLK_BUFF_EN1,
-							0x08, 0x08);
-					snd_soc_update_bits(codec,
-							WCD9XXX_A_CLK_BUFF_EN1,
-							0x01, 0x00);
-					wcd9xxx_resmgr_notifier_call(resmgr,
-						WCD9XXX_EVENT_POST_MCLK_OFF);
-					/* CLK Mux changed to RCO, notify that
-					 * RCO is ON
-					 */
-					wcd9xxx_resmgr_notifier_call(resmgr,
-						WCD9XXX_EVENT_POST_RCO_ON);
-				}
-			} else {
-				/* disable clock */
-				wcd9xxx_disable_clock_block(resmgr);
-				/* switch to RCO */
-				wcd9xxx_enable_clock_block(resmgr,
-							WCD9XXX_CFG_RCO);
-			}
-			resmgr->clk_type = WCD9XXX_CLK_RCO;
-		}
-		break;
-	default:
-		pr_err("%s: Error, Invalid clock get request %d\n", __func__,
-		       type);
-		break;
-	}
-	WARN_ON(resmgr->clk_rco_users < 0);
-	WARN_ON(resmgr->clk_mclk_users < 0);
-
-	pr_debug("%s: new rco_users %d, mclk_users %d\n", __func__,
-		 resmgr->clk_rco_users, resmgr->clk_mclk_users);
-}
-
-/*
- * wcd9xxx_resmgr_get_clk_type()
- * Returns clk type that is currently enabled
- */
-int wcd9xxx_resmgr_get_clk_type(struct wcd9xxx_resmgr *resmgr)
-{
-	return resmgr->clk_type;
-}
-
-static void wcd9xxx_resmgr_update_cfilt_usage(struct wcd9xxx_resmgr *resmgr,
-					      enum wcd9xxx_cfilt_sel cfilt_sel,
-					      bool inc)
-{
-	u16 micb_cfilt_reg;
-	enum wcd9xxx_notify_event e_pre_on, e_post_off;
-	struct snd_soc_codec *codec = resmgr->codec;
-
-	switch (cfilt_sel) {
-	case WCD9XXX_CFILT1_SEL:
-		micb_cfilt_reg = WCD9XXX_A_MICB_CFILT_1_CTL;
-		e_pre_on = WCD9XXX_EVENT_PRE_CFILT_1_ON;
-		e_post_off = WCD9XXX_EVENT_POST_CFILT_1_OFF;
-		break;
-	case WCD9XXX_CFILT2_SEL:
-		micb_cfilt_reg = WCD9XXX_A_MICB_CFILT_2_CTL;
-		e_pre_on = WCD9XXX_EVENT_PRE_CFILT_2_ON;
-		e_post_off = WCD9XXX_EVENT_POST_CFILT_2_OFF;
-		break;
-	case WCD9XXX_CFILT3_SEL:
-		micb_cfilt_reg = WCD9XXX_A_MICB_CFILT_3_CTL;
-		e_pre_on = WCD9XXX_EVENT_PRE_CFILT_3_ON;
-		e_post_off = WCD9XXX_EVENT_POST_CFILT_3_OFF;
-		break;
-	default:
-		WARN(1, "Invalid CFILT selection %d\n", cfilt_sel);
-		return; /* should not happen */
-	}
-
-	if (inc) {
-		if ((resmgr->cfilt_users[cfilt_sel]++) == 0) {
-			/* Notify */
-			wcd9xxx_resmgr_notifier_call(resmgr, e_pre_on);
-			/* Enable CFILT */
-			snd_soc_update_bits(codec, micb_cfilt_reg, 0x80, 0x80);
-		}
-	} else {
-		/*
-		 * If the use count is non-zero, decrement it; once it
-		 * reaches zero, disable the cfilt
-		 */
-		WARN(resmgr->cfilt_users[cfilt_sel] == 0,
-		     "Invalid CFILT use count 0\n");
-		if ((--resmgr->cfilt_users[cfilt_sel]) == 0) {
-			/* Disable CFILT */
-			snd_soc_update_bits(codec, micb_cfilt_reg, 0x80, 0);
-			/* Notify MBHC so MBHC can switch CFILT to fast mode */
-			wcd9xxx_resmgr_notifier_call(resmgr, e_post_off);
-		}
-	}
-}
-
-void wcd9xxx_resmgr_cfilt_get(struct wcd9xxx_resmgr *resmgr,
-			      enum wcd9xxx_cfilt_sel cfilt_sel)
-{
-	return wcd9xxx_resmgr_update_cfilt_usage(resmgr, cfilt_sel, true);
-}
-
-void wcd9xxx_resmgr_cfilt_put(struct wcd9xxx_resmgr *resmgr,
-			      enum wcd9xxx_cfilt_sel cfilt_sel)
-{
-	return wcd9xxx_resmgr_update_cfilt_usage(resmgr, cfilt_sel, false);
-}
-
-int wcd9xxx_resmgr_get_k_val(struct wcd9xxx_resmgr *resmgr,
-			     unsigned int cfilt_mv)
-{
-	int rc = -EINVAL;
-	unsigned int ldoh_v = resmgr->micbias_pdata->ldoh_v;
-	unsigned int min_mv, max_mv;
-
-	switch (ldoh_v) {
-	case WCD9XXX_LDOH_1P95_V:
-		min_mv = 160;
-		max_mv = 1800;
-		break;
-	case WCD9XXX_LDOH_2P35_V:
-		min_mv = 200;
-		max_mv = 2200;
-		break;
-	case WCD9XXX_LDOH_2P75_V:
-		min_mv = 240;
-		max_mv = 2600;
-		break;
-	case WCD9XXX_LDOH_3P0_V:
-		min_mv = 260;
-		max_mv = 2875;
-		break;
-	default:
-		goto done;
-	}
-
-	if (cfilt_mv < min_mv || cfilt_mv > max_mv)
-		goto done;
-
-	for (rc = 4; rc <= 44; rc++) {
-		min_mv = max_mv * (rc) / 44;
-		if (min_mv >= cfilt_mv) {
-			rc -= 4;
-			break;
-		}
-	}
-done:
-	return rc;
-}
-
-static void wcd9xxx_resmgr_cond_trigger_cond(struct wcd9xxx_resmgr *resmgr,
-					     enum wcd9xxx_resmgr_cond cond)
-{
-	struct list_head *l;
-	struct wcd9xxx_resmgr_cond_entry *e;
-	bool set;
-
-	pr_debug("%s: enter\n", __func__);
-	/* update bit if cond isn't available or cond is set */
-	set = !test_bit(cond, &resmgr->cond_avail_flags) ||
-	      !!test_bit(cond, &resmgr->cond_flags);
-	list_for_each(l, &resmgr->update_bit_cond_h) {
-		e = list_entry(l, struct wcd9xxx_resmgr_cond_entry, list);
-		if (e->cond == cond)
-			snd_soc_update_bits(resmgr->codec, e->reg,
-					    1 << e->shift,
-					    (set ? !e->invert : e->invert)
-					    << e->shift);
-	}
-	pr_debug("%s: leave\n", __func__);
-}
-
-/*
- * wcd9xxx_regmgr_cond_register : notify resmgr that the conditions in
- *				  condbits are now available.
- * condbits : contains bitmask of enum wcd9xxx_resmgr_cond
- */
-void wcd9xxx_regmgr_cond_register(struct wcd9xxx_resmgr *resmgr,
-				  unsigned long condbits)
-{
-	unsigned int cond;
-
-	for_each_set_bit(cond, &condbits, BITS_PER_BYTE * sizeof(condbits)) {
-		mutex_lock(&resmgr->update_bit_cond_lock);
-		WARN(test_bit(cond, &resmgr->cond_avail_flags),
-		     "Condition 0x%0x is already registered\n", cond);
-		set_bit(cond, &resmgr->cond_avail_flags);
-		wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
-		mutex_unlock(&resmgr->update_bit_cond_lock);
-		pr_debug("%s: Condition 0x%x is registered\n", __func__, cond);
-	}
-}
-
-void wcd9xxx_regmgr_cond_deregister(struct wcd9xxx_resmgr *resmgr,
-				    unsigned long condbits)
-{
-	unsigned int cond;
-
-	for_each_set_bit(cond, &condbits, BITS_PER_BYTE * sizeof(condbits)) {
-		mutex_lock(&resmgr->update_bit_cond_lock);
-		WARN(!test_bit(cond, &resmgr->cond_avail_flags),
-		     "Condition 0x%0x isn't registered\n", cond);
-		clear_bit(cond, &resmgr->cond_avail_flags);
-		wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
-		mutex_unlock(&resmgr->update_bit_cond_lock);
-		pr_debug("%s: Condition 0x%x is deregistered\n", __func__,
-			 cond);
-	}
-}
-
-void wcd9xxx_resmgr_cond_update_cond(struct wcd9xxx_resmgr *resmgr,
-				     enum wcd9xxx_resmgr_cond cond, bool set)
-{
-	mutex_lock(&resmgr->update_bit_cond_lock);
-	if ((set && !test_and_set_bit(cond, &resmgr->cond_flags)) ||
-	    (!set && test_and_clear_bit(cond, &resmgr->cond_flags))) {
-		pr_debug("%s: Resource %d condition changed to %s\n", __func__,
-			 cond, set ? "set" : "clear");
-		wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
-	}
-	mutex_unlock(&resmgr->update_bit_cond_lock);
-}
-
-int wcd9xxx_resmgr_add_cond_update_bits(struct wcd9xxx_resmgr *resmgr,
-					enum wcd9xxx_resmgr_cond cond,
-					unsigned short reg, int shift,
-					bool invert)
-{
-	struct wcd9xxx_resmgr_cond_entry *entry;
-
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry)
-		return -ENOMEM;
-
-	entry->cond = cond;
-	entry->reg = reg;
-	entry->shift = shift;
-	entry->invert = invert;
-
-	mutex_lock(&resmgr->update_bit_cond_lock);
-	list_add_tail(&entry->list, &resmgr->update_bit_cond_h);
-
-	wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
-	mutex_unlock(&resmgr->update_bit_cond_lock);
-
-	return 0;
-}
-
-/*
- * wcd9xxx_resmgr_rm_cond_update_bits :
- * Clear bit and remove from the conditional bit update list
- */
-int wcd9xxx_resmgr_rm_cond_update_bits(struct wcd9xxx_resmgr *resmgr,
-				       enum wcd9xxx_resmgr_cond cond,
-				       unsigned short reg, int shift,
-				       bool invert)
-{
-	struct list_head *l, *next;
-	struct wcd9xxx_resmgr_cond_entry *e = NULL;
-
-	pr_debug("%s: enter\n", __func__);
-	mutex_lock(&resmgr->update_bit_cond_lock);
-	list_for_each_safe(l, next, &resmgr->update_bit_cond_h) {
-		e = list_entry(l, struct wcd9xxx_resmgr_cond_entry, list);
-		if (e->reg == reg && e->shift == shift && e->invert == invert) {
-			snd_soc_update_bits(resmgr->codec, e->reg,
-					    1 << e->shift,
-					    e->invert << e->shift);
-			list_del(&e->list);
-			mutex_unlock(&resmgr->update_bit_cond_lock);
-			kfree(e);
-			return 0;
-		}
-	}
-	mutex_unlock(&resmgr->update_bit_cond_lock);
-	pr_err("%s: Cannot find update bit entry reg 0x%x, shift %d\n",
-	       __func__, e ? e->reg : 0, e ? e->shift : 0);
-
-	return -EINVAL;
-}
-
-int wcd9xxx_resmgr_register_notifier(struct wcd9xxx_resmgr *resmgr,
-				     struct notifier_block *nblock)
-{
-	return blocking_notifier_chain_register(&resmgr->notifier, nblock);
-}
-
-int wcd9xxx_resmgr_unregister_notifier(struct wcd9xxx_resmgr *resmgr,
-				       struct notifier_block *nblock)
-{
-	return blocking_notifier_chain_unregister(&resmgr->notifier, nblock);
-}
-
-int wcd9xxx_resmgr_init(struct wcd9xxx_resmgr *resmgr,
-			struct snd_soc_codec *codec,
-			struct wcd9xxx_core_resource *core_res,
-			struct wcd9xxx_pdata *pdata,
-			struct wcd9xxx_micbias_setting *micbias_pdata,
-			struct wcd9xxx_reg_address *reg_addr,
-			const struct wcd9xxx_resmgr_cb *resmgr_cb,
-			enum wcd9xxx_cdc_type cdc_type)
-{
-	WARN(ARRAY_SIZE(wcd9xxx_event_string) != WCD9XXX_EVENT_LAST + 1,
-	     "Event string table isn't up to date! %zd != %d\n",
-	     ARRAY_SIZE(wcd9xxx_event_string), WCD9XXX_EVENT_LAST + 1);
-
-	resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF;
-	resmgr->codec = codec;
-	resmgr->codec_type = cdc_type;
-	/* This gives access of core handle to lock/unlock suspend */
-	resmgr->core_res = core_res;
-	resmgr->pdata = pdata;
-	resmgr->micbias_pdata = micbias_pdata;
-	resmgr->reg_addr = reg_addr;
-	resmgr->resmgr_cb = resmgr_cb;
-
-	INIT_LIST_HEAD(&resmgr->update_bit_cond_h);
-
-	BLOCKING_INIT_NOTIFIER_HEAD(&resmgr->notifier);
-
-	mutex_init(&resmgr->codec_resource_lock);
-	mutex_init(&resmgr->codec_bg_clk_lock);
-	mutex_init(&resmgr->update_bit_cond_lock);
-
-	return 0;
-}
-
-void wcd9xxx_resmgr_deinit(struct wcd9xxx_resmgr *resmgr)
-{
-	mutex_destroy(&resmgr->update_bit_cond_lock);
-	mutex_destroy(&resmgr->codec_bg_clk_lock);
-	mutex_destroy(&resmgr->codec_resource_lock);
-}
-
-void wcd9xxx_resmgr_bcl_lock(struct wcd9xxx_resmgr *resmgr)
-{
-	mutex_lock(&resmgr->codec_resource_lock);
-}
-
-void wcd9xxx_resmgr_bcl_unlock(struct wcd9xxx_resmgr *resmgr)
-{
-	mutex_unlock(&resmgr->codec_resource_lock);
-}
-
-MODULE_DESCRIPTION("wcd9xxx resmgr module");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/wcd9xxx-resmgr.h b/sound/soc/codecs/wcd9xxx-resmgr.h
deleted file mode 100644
index e35d616..0000000
--- a/sound/soc/codecs/wcd9xxx-resmgr.h
+++ /dev/null
@@ -1,280 +0,0 @@
-/* Copyright (c) 2012-2014, 2016 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#ifndef __WCD9XXX_COMMON_H__
-#define __WCD9XXX_COMMON_H__
-
-#include <linux/notifier.h>
-#include <linux/mfd/wcd9xxx/core.h>
-#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
-
-enum wcd9xxx_bandgap_type {
-	WCD9XXX_BANDGAP_OFF,
-	WCD9XXX_BANDGAP_AUDIO_MODE,
-	WCD9XXX_BANDGAP_MBHC_MODE,
-};
-
-enum wcd9xxx_cdc_type {
-	WCD9XXX_CDC_TYPE_INVALID = 0,
-	WCD9XXX_CDC_TYPE_TAIKO,
-	WCD9XXX_CDC_TYPE_TAPAN,
-	WCD9XXX_CDC_TYPE_HELICON,
-	WCD9XXX_CDC_TYPE_TOMTOM,
-};
-
-enum wcd9xxx_clock_type {
-	WCD9XXX_CLK_OFF,
-	WCD9XXX_CLK_RCO,
-	WCD9XXX_CLK_MCLK,
-};
-
-enum wcd9xxx_clock_config_mode {
-	WCD9XXX_CFG_MCLK = 0,
-	WCD9XXX_CFG_RCO,
-	WCD9XXX_CFG_CAL_RCO,
-};
-
-enum wcd9xxx_cfilt_sel {
-	WCD9XXX_CFILT1_SEL,
-	WCD9XXX_CFILT2_SEL,
-	WCD9XXX_CFILT3_SEL,
-	WCD9XXX_NUM_OF_CFILT,
-};
-
-struct wcd9xxx_reg_address {
-	u16 micb_4_ctl;
-	u16 micb_4_int_rbias;
-	u16 micb_4_mbhc;
-};
-
-enum wcd9xxx_notify_event {
-	WCD9XXX_EVENT_INVALID,
-
-	WCD9XXX_EVENT_PRE_RCO_ON,
-	WCD9XXX_EVENT_POST_RCO_ON,
-	WCD9XXX_EVENT_PRE_RCO_OFF,
-	WCD9XXX_EVENT_POST_RCO_OFF,
-
-	WCD9XXX_EVENT_PRE_MCLK_ON,
-	WCD9XXX_EVENT_POST_MCLK_ON,
-	WCD9XXX_EVENT_PRE_MCLK_OFF,
-	WCD9XXX_EVENT_POST_MCLK_OFF,
-
-	WCD9XXX_EVENT_PRE_BG_OFF,
-	WCD9XXX_EVENT_POST_BG_OFF,
-	WCD9XXX_EVENT_PRE_BG_AUDIO_ON,
-	WCD9XXX_EVENT_POST_BG_AUDIO_ON,
-	WCD9XXX_EVENT_PRE_BG_MBHC_ON,
-	WCD9XXX_EVENT_POST_BG_MBHC_ON,
-
-	WCD9XXX_EVENT_PRE_MICBIAS_1_OFF,
-	WCD9XXX_EVENT_POST_MICBIAS_1_OFF,
-	WCD9XXX_EVENT_PRE_MICBIAS_2_OFF,
-	WCD9XXX_EVENT_POST_MICBIAS_2_OFF,
-	WCD9XXX_EVENT_PRE_MICBIAS_3_OFF,
-	WCD9XXX_EVENT_POST_MICBIAS_3_OFF,
-	WCD9XXX_EVENT_PRE_MICBIAS_4_OFF,
-	WCD9XXX_EVENT_POST_MICBIAS_4_OFF,
-	WCD9XXX_EVENT_PRE_MICBIAS_1_ON,
-	WCD9XXX_EVENT_POST_MICBIAS_1_ON,
-	WCD9XXX_EVENT_PRE_MICBIAS_2_ON,
-	WCD9XXX_EVENT_POST_MICBIAS_2_ON,
-	WCD9XXX_EVENT_PRE_MICBIAS_3_ON,
-	WCD9XXX_EVENT_POST_MICBIAS_3_ON,
-	WCD9XXX_EVENT_PRE_MICBIAS_4_ON,
-	WCD9XXX_EVENT_POST_MICBIAS_4_ON,
-
-	WCD9XXX_EVENT_PRE_CFILT_1_OFF,
-	WCD9XXX_EVENT_POST_CFILT_1_OFF,
-	WCD9XXX_EVENT_PRE_CFILT_2_OFF,
-	WCD9XXX_EVENT_POST_CFILT_2_OFF,
-	WCD9XXX_EVENT_PRE_CFILT_3_OFF,
-	WCD9XXX_EVENT_POST_CFILT_3_OFF,
-	WCD9XXX_EVENT_PRE_CFILT_1_ON,
-	WCD9XXX_EVENT_POST_CFILT_1_ON,
-	WCD9XXX_EVENT_PRE_CFILT_2_ON,
-	WCD9XXX_EVENT_POST_CFILT_2_ON,
-	WCD9XXX_EVENT_PRE_CFILT_3_ON,
-	WCD9XXX_EVENT_POST_CFILT_3_ON,
-
-	WCD9XXX_EVENT_PRE_HPHL_PA_ON,
-	WCD9XXX_EVENT_POST_HPHL_PA_OFF,
-	WCD9XXX_EVENT_PRE_HPHR_PA_ON,
-	WCD9XXX_EVENT_POST_HPHR_PA_OFF,
-
-	WCD9XXX_EVENT_POST_RESUME,
-
-	WCD9XXX_EVENT_PRE_TX_3_ON,
-	WCD9XXX_EVENT_POST_TX_3_OFF,
-
-	WCD9XXX_EVENT_LAST,
-};
-
-struct wcd9xxx_resmgr_cb {
-	int (*cdc_rco_ctrl)(struct snd_soc_codec *, bool);
-};
-
-struct wcd9xxx_resmgr {
-	struct snd_soc_codec *codec;
-	struct wcd9xxx_core_resource *core_res;
-
-	u32 rx_bias_count;
-
-	/*
-	 * bandgap_type, bg_audio_users and bg_mbhc_users have to be
-	 * referred/manipulated after acquiring codec_bg_clk_lock mutex
-	 */
-	enum wcd9xxx_bandgap_type bandgap_type;
-	u16 bg_audio_users;
-	u16 bg_mbhc_users;
-
-	/*
-	 * clk_type, clk_rco_users and clk_mclk_users have to be
-	 * referred/manipulated after acquiring codec_bg_clk_lock mutex
-	 */
-	enum wcd9xxx_clock_type clk_type;
-	u16 clk_rco_users;
-	u16 clk_mclk_users;
-	u16 ext_clk_users;
-
-	/* cfilt users per cfilts */
-	u16 cfilt_users[WCD9XXX_NUM_OF_CFILT];
-
-	struct wcd9xxx_reg_address *reg_addr;
-
-	struct wcd9xxx_pdata *pdata;
-
-	struct wcd9xxx_micbias_setting *micbias_pdata;
-
-	struct blocking_notifier_head notifier;
-	/* Notifier needs mbhc pointer with resmgr */
-	struct wcd9xxx_mbhc *mbhc;
-
-	unsigned long cond_flags;
-	unsigned long cond_avail_flags;
-	struct list_head update_bit_cond_h;
-	struct mutex update_bit_cond_lock;
-
-	/*
-	 * Currently only used for MBHC: it serializes the MBHC threaded
-	 * irq handlers and prevents races between DAPM and MBHC. It can
-	 * also serve as a general lock to protect codec resources
-	 */
-	struct mutex codec_resource_lock;
-	struct mutex codec_bg_clk_lock;
-
-	enum wcd9xxx_cdc_type codec_type;
-
-	const struct wcd9xxx_resmgr_cb *resmgr_cb;
-};
-
-int wcd9xxx_resmgr_init(struct wcd9xxx_resmgr *resmgr,
-			struct snd_soc_codec *codec,
-			struct wcd9xxx_core_resource *core_res,
-			struct wcd9xxx_pdata *pdata,
-			struct wcd9xxx_micbias_setting *micbias_pdata,
-			struct wcd9xxx_reg_address *reg_addr,
-			const struct wcd9xxx_resmgr_cb *resmgr_cb,
-			enum wcd9xxx_cdc_type cdc_type);
-void wcd9xxx_resmgr_deinit(struct wcd9xxx_resmgr *resmgr);
-
-int wcd9xxx_resmgr_enable_config_mode(struct wcd9xxx_resmgr *resmgr,
-				int enable);
-
-void wcd9xxx_resmgr_enable_rx_bias(struct wcd9xxx_resmgr *resmgr, u32 enable);
-void wcd9xxx_resmgr_get_clk_block(struct wcd9xxx_resmgr *resmgr,
-				  enum wcd9xxx_clock_type type);
-void wcd9xxx_resmgr_put_clk_block(struct wcd9xxx_resmgr *resmgr,
-				  enum wcd9xxx_clock_type type);
-void wcd9xxx_resmgr_get_bandgap(struct wcd9xxx_resmgr *resmgr,
-				const enum wcd9xxx_bandgap_type choice);
-void wcd9xxx_resmgr_put_bandgap(struct wcd9xxx_resmgr *resmgr,
-				enum wcd9xxx_bandgap_type choice);
-void wcd9xxx_resmgr_cfilt_get(struct wcd9xxx_resmgr *resmgr,
-			      enum wcd9xxx_cfilt_sel cfilt_sel);
-void wcd9xxx_resmgr_cfilt_put(struct wcd9xxx_resmgr *resmgr,
-			      enum wcd9xxx_cfilt_sel cfilt_sel);
-int wcd9xxx_resmgr_get_clk_type(struct wcd9xxx_resmgr *resmgr);
-
-void wcd9xxx_resmgr_bcl_lock(struct wcd9xxx_resmgr *resmgr);
-void wcd9xxx_resmgr_post_ssr(struct wcd9xxx_resmgr *resmgr);
-#define WCD9XXX_BCL_LOCK(resmgr)			\
-{							\
-	pr_debug("%s: Acquiring BCL\n", __func__);	\
-	wcd9xxx_resmgr_bcl_lock(resmgr);			\
-	pr_debug("%s: Acquiring BCL done\n", __func__);	\
-}
-
-void wcd9xxx_resmgr_bcl_unlock(struct wcd9xxx_resmgr *resmgr);
-#define WCD9XXX_BCL_UNLOCK(resmgr)			\
-{							\
-	pr_debug("%s: Release BCL\n", __func__);	\
-	wcd9xxx_resmgr_bcl_unlock(resmgr);			\
-}
-
-#define WCD9XXX_BCL_ASSERT_LOCKED(resmgr)		\
-{							\
-	WARN_ONCE(!mutex_is_locked(&resmgr->codec_resource_lock), \
-		  "%s: BCL should have been acquired\n", __func__); \
-}
-
-#define WCD9XXX_BG_CLK_LOCK(resmgr)			\
-{							\
-	struct wcd9xxx_resmgr *__resmgr = resmgr;	\
-	pr_debug("%s: Acquiring BG_CLK\n", __func__);	\
-	mutex_lock(&__resmgr->codec_bg_clk_lock);	\
-	pr_debug("%s: Acquiring BG_CLK done\n", __func__);	\
-}
-
-#define WCD9XXX_BG_CLK_UNLOCK(resmgr)			\
-{							\
-	struct wcd9xxx_resmgr *__resmgr = resmgr;	\
-	pr_debug("%s: Releasing BG_CLK\n", __func__);	\
-	mutex_unlock(&__resmgr->codec_bg_clk_lock);	\
-}
-
-#define WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr)		\
-{							\
-	WARN_ONCE(!mutex_is_locked(&resmgr->codec_bg_clk_lock), \
-		  "%s: BG_CLK lock should have been acquired\n", __func__); \
-}
-
-const char *wcd9xxx_get_event_string(enum wcd9xxx_notify_event type);
-int wcd9xxx_resmgr_get_k_val(struct wcd9xxx_resmgr *resmgr,
-			     unsigned int cfilt_mv);
-int wcd9xxx_resmgr_register_notifier(struct wcd9xxx_resmgr *resmgr,
-				     struct notifier_block *nblock);
-int wcd9xxx_resmgr_unregister_notifier(struct wcd9xxx_resmgr *resmgr,
-				       struct notifier_block *nblock);
-void wcd9xxx_resmgr_notifier_call(struct wcd9xxx_resmgr *resmgr,
-				  const enum wcd9xxx_notify_event e);
-
-enum wcd9xxx_resmgr_cond {
-	WCD9XXX_COND_HPH = 0x01, /* Headphone */
-	WCD9XXX_COND_HPH_MIC = 0x02, /* Microphone on the headset */
-};
-void wcd9xxx_regmgr_cond_register(struct wcd9xxx_resmgr *resmgr,
-				  unsigned long condbits);
-void wcd9xxx_regmgr_cond_deregister(struct wcd9xxx_resmgr *resmgr,
-				    unsigned long condbits);
-int wcd9xxx_resmgr_rm_cond_update_bits(struct wcd9xxx_resmgr *resmgr,
-				       enum wcd9xxx_resmgr_cond cond,
-				       unsigned short reg, int shift,
-				       bool invert);
-int wcd9xxx_resmgr_add_cond_update_bits(struct wcd9xxx_resmgr *resmgr,
-					enum wcd9xxx_resmgr_cond cond,
-					unsigned short reg, int shift,
-					bool invert);
-void wcd9xxx_resmgr_cond_update_cond(struct wcd9xxx_resmgr *resmgr,
-				     enum wcd9xxx_resmgr_cond cond, bool set);
-
-#endif /* __WCD9XXX_COMMON_H__ */
diff --git a/sound/soc/codecs/wcd9xxx-soc-init.c b/sound/soc/codecs/wcd9xxx-soc-init.c
new file mode 100644
index 0000000..fa8abb7
--- /dev/null
+++ b/sound/soc/codecs/wcd9xxx-soc-init.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <sound/wcd-dsp-mgr.h>
+#include "audio-ext-clk-up.h"
+
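+/*
+ * Initialize the WDSP manager first and then the external audio reference
+ * clock platform driver; unwind the WDSP manager if clock init fails.
+ */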
+static int __init wcd9xxx_soc_init(void)
+{
+	int ret = 0;
+
+	ret = wcd_dsp_mgr_init();
+	if (!ret) {
+		ret = audio_ref_clk_platform_init();
+		if (ret) {
+			pr_err("%s: init extclk fail: %d\n", __func__, ret);
+			wcd_dsp_mgr_exit();
+		}
+	} else {
+		pr_err("%s: init dsp mgr fail: %d\n", __func__, ret);
+	}
+
+	return ret;
+}
+module_init(wcd9xxx_soc_init);
+
+static void __exit wcd9xxx_soc_exit(void)
+{
+	audio_ref_clk_platform_exit();
+	wcd_dsp_mgr_exit();
+}
+module_exit(wcd9xxx_soc_exit);
+
+MODULE_DESCRIPTION("WCD9XXX CODEC soc init driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/wcd_cpe_core.c b/sound/soc/codecs/wcd_cpe_core.c
index 153cc2e..f2a20d51 100644
--- a/sound/soc/codecs/wcd_cpe_core.c
+++ b/sound/soc/codecs/wcd_cpe_core.c
@@ -648,6 +648,7 @@
 done:
 	return core;
 }
+EXPORT_SYMBOL(wcd_cpe_get_core_handle);
 
 /*
  * svass_engine_irq: threaded interrupt handler for svass engine irq
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index ef493a8..77aea10 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -1365,6 +1365,7 @@
 		/* Retry after 1 msec delay */
 		usleep_range(1000, 1100);
 	}
+	pdev->dev_num = devnum;
 	regcache_mark_dirty(wsa881x->regmap);
 	regcache_sync(wsa881x->regmap);
 	return 0;
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index 18585749..c557ae0 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -192,8 +192,21 @@
 	 the machine driver and the corresponding
 	 DAI-links
 
-config SND_SOC_MSM8998
+config SND_SOC_MACHINE_MSM8998
 	tristate "SoC Machine driver for MSM8998 boards"
+	select SND_SOC_WSA881X
+	select SND_SOC_WCD9335
+	select SND_SOC_WCD934X
+	select SND_SOC_CPE
+
+	help
+	To enable the machine driver and the
+	corresponding DAI-links on MSM8998.
+	All platform-specific audio modules are
+	enabled here.
+
+config SND_SOC_MSM8998
+	tristate "Sound SoC drivers to interface with DSP"
 	depends on ARCH_QCOM
 	select SND_SOC_COMPRESS
 	select SND_SOC_QDSP6V2
@@ -205,13 +218,9 @@
 	select MSM_QDSP6_PDR
 	select MSM_QDSP6_NOTIFIER
 	select MSM_QDSP6V2_CODECS
-	select SND_SOC_WCD9335
-	select SND_SOC_WCD934X
-	select SND_SOC_WSA881X
 	select SND_SOC_MSM_HDMI_CODEC_RX
 	select DTS_SRS_TM
 	select QTI_PP
-	select SND_SOC_CPE
 	select MSM_ULTRASOUND
 	select DOLBY_LICENSE
 	select SND_HWDEP
@@ -235,6 +244,17 @@
 	 the machine driver and the corresponding
 	 DAI-links
 
+config SND_SOC_MACHINE_SDM845
+	tristate "SoC Machine driver for SDM845 boards"
+	select SND_SOC_WSA881X
+	select SND_SOC_WCD934X
+
+	help
+	To enable the machine driver and the
+	corresponding DAI-links on SDM845.
+	All platform-specific audio modules are
+	enabled here.
+
 config SND_SOC_SDM845
 	tristate "SoC Machine driver for SDM845 boards"
 	depends on ARCH_QCOM
@@ -248,8 +268,6 @@
 	select MSM_QDSP6_PDR
 	select MSM_QDSP6_NOTIFIER
 	select MSM_QDSP6V2_CODECS
-	select SND_SOC_WCD934X
-	select SND_SOC_WSA881X
 	select DTS_SRS_TM
 	select QTI_PP
 	select MSM_ULTRASOUND
diff --git a/sound/soc/msm/Makefile b/sound/soc/msm/Makefile
index 5105cd9..caf8843 100644
--- a/sound/soc/msm/Makefile
+++ b/sound/soc/msm/Makefile
@@ -18,7 +18,7 @@
 
 # for MSM8998 sound card driver
 snd-soc-msm8998-objs := msm8998.o
-obj-$(CONFIG_SND_SOC_MSM8998) += snd-soc-msm8998.o
+obj-$(CONFIG_SND_SOC_MACHINE_MSM8998) += snd-soc-msm8998.o
 
 # for SDM660 sound card driver
 snd-soc-sdm660-common-objs := sdm660-common.o
@@ -36,4 +36,4 @@
 
 # for SDM845 sound card driver
 snd-soc-sdm845-objs := sdm845.o
-obj-$(CONFIG_SND_SOC_SDM845) += snd-soc-sdm845.o
+obj-$(CONFIG_SND_SOC_MACHINE_SDM845) += snd-soc-sdm845.o
diff --git a/sound/soc/msm/qdsp6v2/audio_cal_utils.c b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
index f5c6d6f..7e69a7f 100644
--- a/sound/soc/msm/qdsp6v2/audio_cal_utils.c
+++ b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
@@ -356,6 +356,15 @@
 	return cal_type;
 }
 
+/**
+ * cal_utils_create_cal_types
+ *
+ * @num_cal_types: number of types
+ * @cal_type: pointer to the cal types pointer
+ * @info: pointer to info
+ *
+ * Returns 0 on success, -EINVAL otherwise
+ */
 int cal_utils_create_cal_types(int num_cal_types,
 			struct cal_type_data **cal_type,
 			struct cal_type_info *info)
@@ -411,6 +420,7 @@
 done:
 	return ret;
 }
+EXPORT_SYMBOL(cal_utils_create_cal_types);
 
 static void delete_cal_block(struct cal_block_data *cal_block)
 {
@@ -497,6 +507,13 @@
 	return;
 }
 
+/**
+ * cal_utils_get_only_cal_block
+ *
+ * @cal_type: pointer to the cal type
+ *
+ * Returns cal_block structure
+ */
 struct cal_block_data *cal_utils_get_only_cal_block(
 			struct cal_type_data *cal_type)
 {
@@ -516,7 +533,16 @@
 done:
 	return cal_block;
 }
+EXPORT_SYMBOL(cal_utils_get_only_cal_block);
 
+/**
+ * cal_utils_match_buf_num
+ *
+ * @cal_block: pointer to cal block struct
+ * @user_data: pointer to user data
+ *
+ * Returns true on match
+ */
 bool cal_utils_match_buf_num(struct cal_block_data *cal_block,
 					void *user_data)
 {
@@ -528,6 +554,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL(cal_utils_match_buf_num);
 
 static struct cal_block_data *get_matching_cal_block(
 					struct cal_type_data *cal_type,
@@ -759,6 +786,17 @@
 	return ret;
 }
 
+/**
+ * cal_utils_alloc_cal
+ *
+ * @data_size: size of data to allocate
+ * @data: data pointer
+ * @cal_type: pointer to the cal type
+ * @client_info_size: client info size
+ * @client_info: pointer to client info
+ *
+ * Returns 0 on success, appropriate error code otherwise
+ */
 int cal_utils_alloc_cal(size_t data_size, void *data,
 			struct cal_type_data *cal_type,
 			size_t client_info_size, void *client_info)
@@ -827,7 +865,17 @@
 done:
 	return ret;
 }
+EXPORT_SYMBOL(cal_utils_alloc_cal);
 
+/**
+ * cal_utils_dealloc_cal
+ *
+ * @data_size: size of the data to deallocate
+ * @data: data pointer
+ * @cal_type: pointer to the cal type
+ *
+ * Returns 0 on success, appropriate error code otherwise
+ */
 int cal_utils_dealloc_cal(size_t data_size, void *data,
 			struct cal_type_data *cal_type)
 {
@@ -887,7 +935,19 @@
 done:
 	return ret;
 }
+EXPORT_SYMBOL(cal_utils_dealloc_cal);
 
+/**
+ * cal_utils_set_cal
+ *
+ * @data_size: size of the cal data to set
+ * @data: data pointer
+ * @cal_type: pointer to the cal type
+ * @client_info_size: client info size
+ * @client_info: pointer to client info
+ *
+ * Returns 0 on success, appropriate error code otherwise
+ */
 int cal_utils_set_cal(size_t data_size, void *data,
 			struct cal_type_data *cal_type,
 			size_t client_info_size, void *client_info)
@@ -967,3 +1027,4 @@
 done:
 	return ret;
 }
+EXPORT_SYMBOL(cal_utils_set_cal);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 019cbae..d67296f 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -10605,10 +10605,60 @@
 	return 0;
 }
 
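+/*
+ * Apply the requested gain to every active COPP whose app type matches,
+ * across all active backends of the selected direction (RX or TX).
+ */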
+static int msm_routing_put_app_type_gain_control(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	int j, fe_id, be_id, port_type;
+	int ret = 0;
+	unsigned long copp;
+	struct msm_pcm_routing_bdai_data *bedai;
+	int dir = ucontrol->value.integer.value[0] ? SESSION_TYPE_TX :
+						     SESSION_TYPE_RX;
+	int app_type = ucontrol->value.integer.value[1];
+	int gain = (ucontrol->value.integer.value[2] +
+		    ucontrol->value.integer.value[3])/2;
+
+	port_type = (dir == SESSION_TYPE_RX) ? MSM_AFE_PORT_TYPE_RX :
+					       MSM_AFE_PORT_TYPE_TX;
+
+	mutex_lock(&routing_lock);
+	for (be_id = 0; be_id < MSM_BACKEND_DAI_MAX; be_id++) {
+		if (is_be_dai_extproc(be_id))
+			continue;
+
+		bedai = &msm_bedais[be_id];
+		if (afe_get_port_type(bedai->port_id) != port_type)
+			continue;
+
+		if (!bedai->active)
+			continue;
+
+		for (fe_id = 0; fe_id < MSM_FRONTEND_DAI_MAX; fe_id++) {
+			if (!test_bit(fe_id, &bedai->fe_sessions[0]))
+				continue;
+
+			if (app_type !=
+			    fe_dai_app_type_cfg[fe_id][dir][be_id].app_type)
+				continue;
+
+			copp = session_copp_map[fe_id][dir][be_id];
+			for (j = 0; j < MAX_COPPS_PER_PORT; j++) {
+				if (!test_bit(j, &copp))
+					continue;
+				ret |= adm_set_volume(bedai->port_id, j, gain);
+			}
+		}
+	}
+	mutex_unlock(&routing_lock);
+	return ret ? -EINVAL : 0;
+}
+
 static const struct snd_kcontrol_new app_type_cfg_controls[] = {
 	SOC_SINGLE_MULTI_EXT("App Type Config", SND_SOC_NOPM, 0,
 	0xFFFFFFFF, 0, 128, msm_routing_get_app_type_cfg_control,
 	msm_routing_put_app_type_cfg_control),
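+	/* App Type Gain: direction (0 = RX), app type, two gains (averaged) */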
+	SOC_SINGLE_MULTI_EXT("App Type Gain", SND_SOC_NOPM, 0,
+	0x2000, 0, 4, NULL, msm_routing_put_app_type_gain_control)
 };
 
 static int msm_routing_get_lsm_app_type_cfg_control(
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index ebb8eff..e1ce947 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -2125,6 +2125,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL(afe_set_config);
 
 /*
  * afe_clear_config - If SSR happens ADSP loses AFE configs, let AFE driver know
@@ -2135,6 +2136,7 @@
 {
 	clear_bit(config, &afe_configured_cmd);
 }
+EXPORT_SYMBOL(afe_clear_config);
 
 bool afe_has_config(enum afe_config_type config)
 {
@@ -5749,6 +5751,14 @@
 	return ret;
 }
 
+/**
+ * afe_set_lpass_clk_cfg - Set AFE clk config
+ *
+ * @index: port index
+ * @cfg: pointer to clk set struct
+ *
+ * Returns 0 on success, appropriate error code otherwise
+ */
 int afe_set_lpass_clk_cfg(int index, struct afe_clk_set *cfg)
 {
 	struct afe_lpass_clk_config_command_v2 clk_cfg;
@@ -5829,7 +5839,16 @@
 	mutex_unlock(&this_afe.afe_cmd_lock);
 	return ret;
 }
+EXPORT_SYMBOL(afe_set_lpass_clk_cfg);
 
+/**
+ * afe_set_lpass_clock_v2 - Enable AFE lpass clock
+ *
+ * @port_id: AFE port id
+ * @cfg: pointer to clk set struct
+ *
+ * Returns 0 on success, appropriate error code otherwise
+ */
 int afe_set_lpass_clock_v2(u16 port_id, struct afe_clk_set *cfg)
 {
 	int index = 0;
@@ -5855,6 +5874,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL(afe_set_lpass_clock_v2);
 
 int afe_set_lpass_internal_digital_codec_clock(u16 port_id,
 			struct afe_digital_clk_cfg *cfg)
diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c
index f6675a2..3aaaa35 100644
--- a/sound/soc/msm/qdsp6v2/q6core.c
+++ b/sound/soc/msm/qdsp6v2/q6core.c
@@ -381,6 +381,11 @@
 	return rc;
 }
 
+/**
+ * q6core_is_adsp_ready - check adsp ready status
+ *
+ * Returns true if the ADSP is ready, false otherwise
+ */
 bool q6core_is_adsp_ready(void)
 {
 	int rc = 0;
@@ -419,7 +424,7 @@
 	mutex_unlock(&(q6core_lcl.cmd_lock));
 	return ret;
 }
-
+EXPORT_SYMBOL(q6core_is_adsp_ready);
 
 static int q6core_map_memory_regions(phys_addr_t *buf_add, uint32_t mempool_id,
 			uint32_t *bufsz, uint32_t bufcnt, uint32_t *map_handle)
diff --git a/sound/soc/msm/sdm845.c b/sound/soc/msm/sdm845.c
index e699760..d3c4e05 100644
--- a/sound/soc/msm/sdm845.c
+++ b/sound/soc/msm/sdm845.c
@@ -493,6 +493,8 @@
 static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_chs, mi2s_ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_rx_chs, mi2s_ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_chs, mi2s_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(mi2s_rx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(mi2s_tx_format, bit_format_text);
 static SOC_ENUM_SINGLE_EXT_DECL(hifi_function, hifi_text);
 
 static struct platform_device *spdev;
@@ -2250,6 +2252,54 @@
 	return sample_rate;
 }
 
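+/* Map the mixer enum value to an ALSA PCM format; defaults to S16_LE. */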
+static int mi2s_get_format(int value)
+{
+	int format;
+
+	switch (value) {
+	case 0:
+		format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	case 1:
+		format = SNDRV_PCM_FORMAT_S24_LE;
+		break;
+	case 2:
+		format = SNDRV_PCM_FORMAT_S24_3LE;
+		break;
+	case 3:
+		format = SNDRV_PCM_FORMAT_S32_LE;
+		break;
+	default:
+		format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	}
+	return format;
+}
+
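+/* Map an ALSA PCM format back to its mixer enum value (default 0). */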
+static int mi2s_get_format_value(int format)
+{
+	int value;
+
+	switch (format) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		value = 0;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		value = 1;
+		break;
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		value = 2;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		value = 3;
+		break;
+	default:
+		value = 0;
+		break;
+	}
+	return value;
+}
+
 static int mi2s_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
 				   struct snd_ctl_elem_value *ucontrol)
 {
@@ -2382,6 +2432,78 @@
 	return 1;
 }
 
+static int msm_mi2s_rx_format_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ucontrol->value.enumerated.item[0] =
+		mi2s_get_format_value(mi2s_rx_cfg[idx].bit_format);
+
+	pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+		idx, mi2s_rx_cfg[idx].bit_format,
+		ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int msm_mi2s_rx_format_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	mi2s_rx_cfg[idx].bit_format =
+		mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+		  idx, mi2s_rx_cfg[idx].bit_format,
+		  ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int msm_mi2s_tx_format_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ucontrol->value.enumerated.item[0] =
+		mi2s_get_format_value(mi2s_tx_cfg[idx].bit_format);
+
+	pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+		idx, mi2s_tx_cfg[idx].bit_format,
+		ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int msm_mi2s_tx_format_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	mi2s_tx_cfg[idx].bit_format =
+		mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+		  idx, mi2s_tx_cfg[idx].bit_format,
+		  ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
 static int msm_hifi_ctrl(struct snd_soc_codec *codec)
 {
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
@@ -2627,6 +2749,22 @@
 			msm_mi2s_rx_ch_get, msm_mi2s_rx_ch_put),
 	SOC_ENUM_EXT("QUAT_MI2S_TX Channels", quat_mi2s_tx_chs,
 			msm_mi2s_tx_ch_get, msm_mi2s_tx_ch_put),
+	SOC_ENUM_EXT("PRIM_MI2S_RX Format", mi2s_rx_format,
+			msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+	SOC_ENUM_EXT("PRIM_MI2S_TX Format", mi2s_tx_format,
+			msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+	SOC_ENUM_EXT("SEC_MI2S_RX Format", mi2s_rx_format,
+			msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+	SOC_ENUM_EXT("SEC_MI2S_TX Format", mi2s_tx_format,
+			msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+	SOC_ENUM_EXT("TERT_MI2S_RX Format", mi2s_rx_format,
+			msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+	SOC_ENUM_EXT("TERT_MI2S_TX Format", mi2s_tx_format,
+			msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+	SOC_ENUM_EXT("QUAT_MI2S_RX Format", mi2s_rx_format,
+			msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+	SOC_ENUM_EXT("QUAT_MI2S_TX Format", mi2s_tx_format,
+			msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
 	SOC_ENUM_EXT("HiFi Function", hifi_function, msm_hifi_get,
 			msm_hifi_put),
 };
@@ -3052,48 +3190,64 @@
 		break;
 
 	case MSM_BACKEND_DAI_PRI_MI2S_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_rx_cfg[PRIM_MI2S].bit_format);
 		rate->min = rate->max = mi2s_rx_cfg[PRIM_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_rx_cfg[PRIM_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_PRI_MI2S_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_tx_cfg[PRIM_MI2S].bit_format);
 		rate->min = rate->max = mi2s_tx_cfg[PRIM_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_tx_cfg[PRIM_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_SECONDARY_MI2S_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_rx_cfg[SEC_MI2S].bit_format);
 		rate->min = rate->max = mi2s_rx_cfg[SEC_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_rx_cfg[SEC_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_SECONDARY_MI2S_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_tx_cfg[SEC_MI2S].bit_format);
 		rate->min = rate->max = mi2s_tx_cfg[SEC_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_tx_cfg[SEC_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_TERTIARY_MI2S_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_rx_cfg[TERT_MI2S].bit_format);
 		rate->min = rate->max = mi2s_rx_cfg[TERT_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_rx_cfg[TERT_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_TERTIARY_MI2S_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_tx_cfg[TERT_MI2S].bit_format);
 		rate->min = rate->max = mi2s_tx_cfg[TERT_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_tx_cfg[TERT_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_QUATERNARY_MI2S_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_rx_cfg[QUAT_MI2S].bit_format);
 		rate->min = rate->max = mi2s_rx_cfg[QUAT_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_rx_cfg[QUAT_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_QUATERNARY_MI2S_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_tx_cfg[QUAT_MI2S].bit_format);
 		rate->min = rate->max = mi2s_tx_cfg[QUAT_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_tx_cfg[QUAT_MI2S].channels;
@@ -3855,6 +4009,7 @@
 	u32 bit_per_sample;
 
 	switch (bit_format) {
+	case SNDRV_PCM_FORMAT_S32_LE:
 	case SNDRV_PCM_FORMAT_S24_3LE:
 	case SNDRV_PCM_FORMAT_S24_LE:
 		bit_per_sample = 32;