Merge "Bluetooth: Initiate dedicated bonding if pin/key missing on remote device" into msm-3.0
diff --git a/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt b/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
new file mode 100644
index 0000000..786635f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
@@ -0,0 +1,153 @@
+Qualcomm RPM Regulators
+
+rpm-regulator-smd is a regulator driver which supports regulators inside of
+PMICs which are controlled by the RPM processor.  Communication with the RPM
+processor takes place over SMD.
+
+Required structure:
+- RPM regulators must be described in two levels of device nodes.  The first
+	level describes the interface with the RPM.  The second level describes
+	properties of one regulator framework interface (of potentially many) to
+	the regulator.
+
+[First Level Nodes]
+
+Required properties:
+- compatible:          Must be "qcom,rpm-regulator-smd-resource"
+- qcom,resource-name:  Resource name string for this regulator to be used in RPM
+			transactions.  Length is 4 characters max.
+- qcom,resource-id:    Resource instance ID for this regulator to be used in RPM
+			transactions.
+- qcom,regulator-type: Type of this regulator.  Supported values are:
+				0 = LDO
+				1 = SMPS
+				2 = VS
+				3 = NCP
+
+Optional properties:
+- qcom,allow-atomic:   Flag specifying if atomic access is allowed for this
+			regulator.  Supported values are:
+				0 or not present = mutex locks used
+				1 = spinlocks used
+- qcom,enable-time:    Time in us to delay after enabling the regulator
+- qcom,hpm-min-load:   Load current in uA which corresponds to the minimum load
+			which requires the regulator to be in high power mode.
+
+[Second Level Nodes]
+
+Required properties:
+- compatible:          Must be "qcom,rpm-regulator-smd"
+- regulator-name:      A string used as a descriptive name for regulator outputs
+- qcom,set:            Specifies which sets requests made with this
+			regulator interface should be sent to.  Regulator
+			requests sent in the active set take effect immediately.
+			Requests sent in the sleep set take effect when the Apps
+			processor transitions into RPM assisted power collapse.
+			Supported values are:
+				1 = Active set only
+				2 = Sleep set only
+				3 = Both active and sleep sets
+
+Optional properties:
+- parent-supply:               phandle to the parent supply/regulator node
+- qcom,system-load:            Load in uA present on regulator that is not
+				captured by any consumer request
+
+The following properties specify initial values for parameters to be sent to the
+RPM in regulator requests.
+- qcom,init-enable:            0 = regulator disabled
+			       1 = regulator enabled
+- qcom,init-voltage:           Voltage in uV
+- qcom,init-current:           Current in mA
+- qcom,init-ldo-mode:          Operating mode to be used with LDO regulators
+				Supported values are:
+					0 = mode determined by current requests
+					1 = force HPM (NPM)
+- qcom,init-smps-mode:         Operating mode to be used with SMPS regulators
+				Supported values are:
+					0 = auto; hardware determines mode
+					1 = mode determined by current requests
+					2 = force HPM (PWM)
+- qcom,init-pin-ctrl-enable:   Bit mask specifying which hardware pins should be
+				used to enable the regulator, if any; supported
+				bits are:
+					0 = ignore all hardware enable signals
+					BIT(0) = follow HW0_EN signal
+					BIT(1) = follow HW1_EN signal
+					BIT(2) = follow HW2_EN signal
+					BIT(3) = follow HW3_EN signal
+- qcom,init-pin-ctrl-mode:     Bit mask specifying which hardware pins should be
+				used to force the regulator into high power
+				mode, if any.  Supported bits are:
+					0 = ignore all hardware enable signals
+					BIT(0) = follow HW0_EN signal
+					BIT(1) = follow HW1_EN signal
+					BIT(2) = follow HW2_EN signal
+					BIT(3) = follow HW3_EN signal
+					BIT(4) = follow PMIC awake state
+- qcom,init-frequency:         Switching frequency in MHz for SMPS regulators.
+				Supported values are:
+					 0 = Don't care about frequency used
+					 1 = 19.20
+					 2 = 9.60
+					 3 = 6.40
+					 4 = 4.80
+					 5 = 3.84
+					 6 = 3.20
+					 7 = 2.74
+					 8 = 2.40
+					 9 = 2.13
+					10 = 1.92
+					11 = 1.75
+					12 = 1.60
+					13 = 1.48
+					14 = 1.37
+					15 = 1.28
+					16 = 1.20
+- qcom,init-head-room:         Voltage head room in uV required for the
+				regulator
+- qcom,init-quiet-mode:        Specify that quiet mode is needed for an SMPS
+				regulator in order to have lower output noise.
+				Supported values are:
+					0 = No quiet mode
+					1 = Quiet mode
+					2 = Super quiet mode
+- qcom,init-freq-reason:       Consumer requiring specified frequency for an
+				SMPS regulator.  Supported values are:
+					0 = None
+					1 = Bluetooth
+					2 = GPS
+					4 = WLAN
+					8 = WAN
+
+All properties specified within the core regulator framework can also be used in
+second level nodes.  These bindings can be found in:
+Documentation/devicetree/bindings/regulator/regulator.txt.
+
+Example:
+
+rpm-regulator-smpb1 {
+	qcom,resource-name = "smpb";
+	qcom,resource-id = <1>;
+	qcom,regulator-type = <1>;
+	qcom,hpm-min-load = <100000>;
+	compatible = "qcom,rpm-regulator-smd-resource";
+	status = "disabled";
+
+	pm8841_s1: regulator-s1 {
+		regulator-name = "8841_s1";
+		qcom,set = <3>;
+		regulator-min-microvolt = <900000>;
+		regulator-max-microvolt = <1150000>;
+		qcom,init-voltage = <1150000>;
+		compatible = "qcom,rpm-regulator-smd";
+	};
+	pm8841_s1_ao: regulator-s1-ao {
+		regulator-name = "8841_s1_ao";
+		qcom,set = <1>;
+		regulator-min-microvolt = <900000>;
+		regulator-max-microvolt = <1150000>;
+		compatible = "qcom,rpm-regulator-smd";
+	};
+};
diff --git a/Documentation/devicetree/bindings/iommu/msm_iommu.txt b/Documentation/devicetree/bindings/iommu/msm_iommu.txt
new file mode 100644
index 0000000..67933e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/msm_iommu.txt
@@ -0,0 +1,35 @@
+* Qualcomm MSM IOMMU
+
+Required properties:
+- compatible : one of:
+	- "qcom,msm-smmu-v2"
+- reg : offset and length of the register set for the device.
+
+- List of sub nodes, one for each of the translation context banks supported.
+  Each sub node has the following required properties:
+
+  - reg : offset and length of the register set for the context bank.
+  - interrupts : should contain the context bank interrupt.
+  - qcom,iommu-ctx-sids : List of stream identifiers associated with this
+    translation context.
+  - qcom,iommu-ctx-name : Name of the context bank
+
+Example:
+
+        qcom,iommu@fda64000 {
+                compatible = "qcom,msm-smmu-v2";
+                reg = <0xfda64000 0x10000>;
+
+                qcom,iommu-ctx@fda6c000 {
+                        reg = <0xfda6c000 0x1000>;
+                        interrupts = <0 70 0>;
+                        qcom,iommu-ctx-sids = <0 2>;
+                        qcom,iommu-ctx-name = "ctx_0";
+                };
+                qcom,iommu-ctx@fda6d000 {
+                        reg = <0xfda6d000 0x1000>;
+                        interrupts = <0 71 0>;
+                        qcom,iommu-ctx-sids = <1>;
+                        qcom,iommu-ctx-name = "ctx_1";
+                };
+        };
diff --git a/Documentation/devicetree/bindings/prng/msm-rng.txt b/Documentation/devicetree/bindings/prng/msm-rng.txt
new file mode 100644
index 0000000..3d55808
--- /dev/null
+++ b/Documentation/devicetree/bindings/prng/msm-rng.txt
@@ -0,0 +1,12 @@
+* RNG (Random Number Generator)
+
+Required properties:
+- compatible : Should be "qcom,msm-rng"
+- reg        : Offset and length of the register set for the device
+
+Example:
+
+        qcom,msm-rng@f9bff000 {
+                compatible = "qcom,msm-rng";
+                reg = <0xf9bff000 0x200>;
+        };
diff --git a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
new file mode 100644
index 0000000..cd7bdce
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
@@ -0,0 +1,21 @@
+Qualcomm Global Distributed Switch Controller (GDSC) Regulator Driver
+
+The GDSC driver, implemented under the regulator framework, is responsible for
+safely collapsing and restoring power to peripheral cores on chipsets like
+msm-copper for power savings.
+
+Required properties:
+ - compatible:      Must be "qcom,gdsc"
+ - regulator-name:  A string used as a descriptive name for regulator outputs
+ - reg:             The address of the GDSCR register
+
+Optional properties:
+ - parent-supply:   phandle to the parent supply/regulator node
+
+Example:
+	gdsc_oxili_gx: qcom,gdsc@fd8c4024 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_oxili_gx";
+		parent-supply = <&pm8841_s4>;
+		reg = <0xfd8c4024 0x4>;
+	};
diff --git a/Documentation/dvb/qcom-mpq.txt b/Documentation/dvb/qcom-mpq.txt
new file mode 100644
index 0000000..28f5d39
--- /dev/null
+++ b/Documentation/dvb/qcom-mpq.txt
@@ -0,0 +1,409 @@
+Introduction
+============
+MPQ DVB Adapter implements Digital Video Broadcasting devices according to the
+API and infrastructure defined by LinuxTV (linuxtv.org).
+
+The implemented devices are dvb/demux devices, dvb/dvr devices and
+dvb/video devices.
+
+These devices are used in Qualcomm's MPQ chipsets that support
+broadcast applications.
+
+dvb/demux is responsible for receiving a digital stream broadcast over
+the air from a hardware unit (TSPP - Transport Stream Packet Processor,
+or TSIF - Transport Stream Interface) and separating the stream into its
+various sub-streams, such as video, audio and auxiliary data.
+This separation operation is called demuxing.
+
+dvb/dvr is used in conjunction with dvb/demux to play back a digital
+stream from memory or to record a stream to memory.
+
+dvb/video handles the video decoding; it receives compressed video from
+dvb/demux through a stream-buffer interface and interacts with the
+existing HW video driver to perform the decoding.
+
+For more information on the TSIF interface, please refer to TSIF
+documentation under "Documentation/arm/msm/tsif.txt".
+For more information on the TSPP interface, please refer to TSPP
+documentation under "Documentation/arm/msm/tspp.txt".
+For more information on the DVB API definition, please refer to the DVB
+documentation under "Documentation/dvb/readme.txt".
+
+Hardware description
+====================
+dvb/demux, dvb/dvr and dvb/video do not interact with the hardware directly;
+the implementation of these drivers uses the kernel APIs of the TSPP,
+TSIF and video drivers.
+
+Software description
+====================
+
+Terminology
+-----------
+Stream: A stream is a TS packet source
+  - For example, MPEG2 Transport Stream from TSIF0
+Filter: Enables TS packet filtering and routing according to PID (packet ID)
+  - The decision regarding which PIDs in the stream will be routed
+    is made via filters; each demux open request corresponds to a filter.
+  - Filters can pass TS packets as-is (for recording), assemble them into
+    "other" PES packets (for PES packets read by client), assemble and send
+    them to decoder (for decoder PES), or assemble them into sections.
+Service: A service is a set of PIDs as defined in the service PMT.
+  Each service may be carried in a different transport stream or part of the
+  same transport stream. Processing a service means preparing the
+  data for display and/or for recording.
+
+Requirements
+-----------
+1. Demuxing from different sources:
+   - Live transport stream inputs (TSIF)
+   - Memory inputs
+2. Support different packet formats:
+   - 188-bytes transport packets
+   - 192-bytes transport packets
+3. PID filtering
+4. Output of the following data:
+   - Decoder PES: PES (video and/or audio) that can be directed to HW decoders
+     in tunneling mode (without interaction of user-space).
+   - Other PES: a non-decoder PES, such as subtitle, teletext. The consumer
+     of this data is user-space that reads the data through standard read
+     calls.
+   - Sections: Sections are used by user-space to acquire different kinds of
+     information such as channels list, program user guide, etc.
+   - Transport Stream Packets: Transport stream packets of specific PIDs as
+     they were received in the input stream. User-space can use these to
+     record specific services and/or to implement a time-shift buffer.
+   - PCR/STC: Pairs of PCR/STC can be used by user-space to perform
+     clock-recovery.
+   - Frame-indexing: For a recorded stream, the demux provides indexing of the
+     I-frames within the stream that can be used for trick-mode operations
+     while playing a recorded file.
+5. Support decryption of scrambled transport packets.
+6. Support recording of scrambled streams.
+7. Section filtering.
+
+Control path
+------------
+1. Client opens a demux device. An open request is made on the same demux
+   device for each filter.
+2. Client may configure the demux's internal ring-buffer size used to
+   hold the data for user-space (otherwise the default is used).
+3. Client configures the opened filter either to capture sections,
+   TS packets (for recording) or PES (decoder or non-decoder PES).
+   - The demux configures the underlying HW accordingly through the
+     TSPP or TSIF kernel APIs.
+   - The demux receives notification of new data from the underlying HW and
+     performs the demuxing operation based on the configuration.
+4. Client can then read the data received from the selected filter
+   (see the sketch following this list).
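+
+The following is a minimal user-space sketch of the control path above. It
+uses only the standard Linux DVB demux API (linux/dvb/dmx.h); the device
+index, PID value and buffer size are placeholder assumptions, not values
+mandated by this driver.
+
+   #include <fcntl.h>
+   #include <unistd.h>
+   #include <sys/ioctl.h>
+   #include <linux/dvb/dmx.h>
+
+   int demux_control_path_example(void)
+   {
+           struct dmx_pes_filter_params filter = {
+                   .pid      = 0x100,            /* placeholder PID */
+                   .input    = DMX_IN_FRONTEND,  /* live input */
+                   .output   = DMX_OUT_TAP,      /* non-decoder PES read by user-space */
+                   .pes_type = DMX_PES_OTHER,
+                   .flags    = DMX_IMMEDIATE_START,
+           };
+           char buf[4096];
+           int fd;
+
+           fd = open("/dev/dvb/adapter0/demux0", O_RDWR);   /* step 1 */
+           if (fd < 0)
+                   return -1;
+
+           ioctl(fd, DMX_SET_BUFFER_SIZE, 192 * 1024);      /* step 2 (optional) */
+           ioctl(fd, DMX_SET_PES_FILTER, &filter);          /* step 3 */
+
+           read(fd, buf, sizeof(buf));                      /* step 4 */
+           return fd;
+   }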
+
+Data path
+---------
+For each filter that is opened, the demux manages a circular buffer that
+holds the captured filter data; client read commands extract data from
+the relevant ring buffer. Data loss can occur if a client cannot keep up
+with the stream bandwidth.
+
+For PES data tunneled to decoder, demux manages a stream-buffer used to
+transfer the PES data to the decoder. The stream-buffer is built from
+two ring-buffers: One holding the PES payload (elementary stream) and
+the other holding PES parameters extracted from the PES header. The
+ring-buffer with PES parameters points to the location of the respective
+PES payload in the PES payload ring-buffer.
+
+To allow concurrent processing of multiple streams, multiple demux/dvr
+devices exist. Each demux device handles a single stream input. The
+number of demux devices is configurable depending on the required number
+of concurrently processed streams.
+
+The client configures each demux device with the stream to process;
+by default, all devices are configured to process a stream from memory.
+The default setting can be changed by issuing an ioctl that configures
+the demux source to either TSIF0 or TSIF1. A specific TSIF input may be
+processed by only one demux device at a time.
+
+Background Processing
+---------------------
+When demux receives notifications from underlying HW drivers about new
+data, it schedules work to a single-threaded workqueue to process the
+notification.
+
+The processing is the actual demuxing of the new data; it may sleep,
+as it takes a lock on the demux data structure that may be accessed by
+user-space in the meantime.
+
+A single-threaded workqueue exists for each live input (TSIF0 or TSIF1)
+to process the inputs in parallel.
+
+Dependencies
+------------
+The demux driver depends on the following kernel drivers and subsystems:
+1. TSIF driver: Used to receive TS packets from TSIF interface for
+   targets supporting TSIF only.
+2. TSPP driver: Used to receive TS packets and/or PES from TSPP
+   interface for targets supporting TSPP.
+3. TZ-COM: Used to communicate with TrustZone to handle scrambled
+   streams.
+4. ION: Used to allocate memory for buffers holding decoder-data in
+   case the data is tunneled between demux and decoders.
+   Also used to allocate memory for TSPP/TSIF output pipes.
+5. dvb-core: Existing Linux infrastructure used for implementation of
+   dvb devices.
+
+Design
+======
+
+Goals
+-----
+The demux driver is designed to:
+1. Fulfil the requirements listed above.
+2. Be able to work on different chipsets having different HW
+   capabilities. For example, some chipsets are equipped with TSIF only,
+   others are equipped with TSPP of different versions.
+
+Design Blocks
+-------------
+The demux implementation hooks into the existing Linux dvb-core
+infrastructure as follows:
+
+     +----------+  +------------------------------------------+
+     |          |  |             MPQ Demux Driver             |
+     |          |  | +----------+  +----------+  +----------+ |
+     |          |  | | MPQ DMX  |  | MPQ DMX  |  | MPQ DMX  | |
+     | QCOM MPQ |  | | TSIF     |  | TSPPv1   |  | TSPPv2   | |
+     |  Adapter |  | | Plugin   |  | Plugin   |  | Plugin   | |
+     |          |  | +----------+  +----------+  +----------+ |
+     |          |  | +--------------------------------------+ |
+     |          |  | |       MPQ Demux Common Services      | |
+     |          |  | +--------------------------------------+ |
+     +----------+  +------------------------------------------+
+     +--------------------------------------------------------+
+     |                    Linux DVB Core                      |
+     |     +----------+      +----------+       +----------+  |
+     |     |   DVB    |      | DVB DMX  |       |  DVB     |  |
+     |     |  Demux   |      |  Device  |       |  Device  |  |
+     |     +----------+      +----------+       +----------+  |
+     +--------------------------------------------------------+
+
+The newly developed code is the QCOM MPQ Adapter and the MPQ Demux driver
+with its various MPQ-DMX plugins.
+
+The QCOM MPQ Adapter registers a new DVB adapter with the Linux dvb-core.
+The MPQ DVB adapter is built as a separate kernel module. Using it,
+demux and video devices can register themselves with the adapter.
+
+MPQ-DMX plugins exist to hook into the dvb-core demux implementation
+depending on the HW capabilities. Only one of these plugins may be
+compiled and run on the target at a time.
+As the name of each plugin implies, one plugin implements demux
+functionality for targets supporting TSIF only, and the others
+implement plugins for targets supporting different versions of TSPP.
+
+A plugin implementation is not tied to a specific chipset, as
+different chipsets might have the same HW capabilities.
+
+The MPQ-DMX Plugin Common Services block provides common services that are
+used by all plugins, such as registration of demux devices with
+the dvb-core.
+
+The demux plugin is built as a separate kernel module. Each plugin
+hooks into the DVB-Demux by providing a set of pointers to functions
+required for DVB-Demux and dvb-core operation. The actual
+implementation of these functions differs between the plugins depending
+on the HW capabilities. The plugins may be viewed as "classes"
+inheriting from the DVB-Demux "class".
+
+Interface to TSPP/TSIF Drivers
+------------------------------
+Each demux plugin interacts with the kernel API of the relevant driver
+(either TSIF or TSPP) to receive TS packets or other kinds of data
+depending on the HW capabilities.
+
+The demux uses the kernel API of the TSIF and TSPP drivers and registers
+a callback that is triggered when new data is received. The callback
+schedules work to a single-threaded workqueue to process the data. The
+actual processing of the data depends on the HW capabilities.
+
+Interface to TZ-COM Driver
+--------------------------
+In cases where the HW does not support descrambling, the descrambling is
+performed by communicating with TrustZone (TZ) using the TZ-COM kernel API.
+
+The ION driver is used to allocate the input and output buffers provided to TZ.
+
+Interface to Decoders
+---------------------
+The interface to the decoders is done through a stream-buffer interface.
+For de-coupling and generality, the design avoids direct calls between
+dvb/demux and dvb/video; dvb/demux and dvb/video interact only with the
+stream-buffer API.
+
+Stream buffer is built of two ring-buffers, one holding the PES payload
+(the video elementary stream) and the other holding parameters from PES
+headers required by decoders.
+
+The separation into two ring-buffers allows locking the payload buffer
+as a secured buffer that only the decoder HW may access, while allowing
+the software to access the PES headers, which are not required to be
+secured. The payload buffer is locked when the data should be
+secured (a scrambled video stream, for example).
+
+The stream-buffer API makes use of the dvb ring-buffer implementation that
+is part of dvb-core.
+
+SMP/multi-core
+==============
+The driver is fully SMP aware.
+
+Interface
+=========
+
+User-space API
+--------------
+dvb/demux and dvb/dvr each expose a char device interface to user-space
+as defined by linuxtv.org. Extensions to this interface add new features
+required by MPQ use-cases. The extensions preserve backward compatibility
+with the API defined by linuxtv.org.
+
+The devices appear in file-system under:
+/dev/dvb/adapter0/demuxN
+/dev/dvb/adapter0/dvrN
+
+Where "N" ranges between 0 and the total number of demux devices defined.
+The default configuration is 4 devices.
+
+Extensions to this API (through new ioctls) exist to provide the
+following functionality (a usage sketch follows the list):
+
+1. DMX_SET_TS_PACKET_FORMAT: Set the transport stream TS packet format.
+   Configures whether the stream fed to the demux from memory uses TS packets
+   that are 188 bytes long, 192 bytes long, etc.
+   The default is 188 bytes to preserve backward compatibility.
+
+   Returns the following values:
+   0 in case of success.
+   -EINVAL if the parameter is invalid.
+   -EBUSY if demux is already running.
+
+2. DMX_SET_DECODER_BUFFER_SIZE: Set the decoder's buffer size.
+   For data tunneled to the decoder, the client can configure the size of the
+   buffer holding the PES payload.
+   The default is the fixed size value that exists in the current dvb-core, to
+   preserve backward compatibility.
+
+   Returns the following values:
+   0 in case of success.
+   -EINVAL if the parameter is invalid.
+   -EBUSY if demux is already running.
+
+3. DMX_SET_TS_OUT_FORMAT: Set the TS packet recording format.
+   Indicates whether the TS packets used for recording should be in 188- or
+   192-byte format. In the case of 192-byte output, a 4-byte zero timestamp
+   is attached to the original 188-byte packet.
+   The default is 188 to preserve backward compatibility.
+
+   Returns the following values:
+   0 in case of success.
+   -EINVAL if the parameter is invalid.
+   -EBUSY if demux is already running.
+
+4. Added support for mmap for direct access to input/output buffers.
+   The user can either use the original read/write syscalls or use mmap
+   on the specific file handle. Several ioctls are exposed so that the
+   user can find out the status of the buffers (DMX_GET_BUFFER_STATUS),
+   notify the demux when data is consumed (DMX_RELEASE_DATA), or notify
+   the dvr when data is fed (DMX_FEED_DATA).
+
+5. DMX_SET_PLAYBACK_MODE: Set the playback mode for memory input.
+   With memory input, contrary to live input, playback can be in pull mode:
+   if one of the output buffers is full, the demux stalls waiting for free
+   space, which causes data to accumulate in the DVR input buffer.
+
+   Returns the following values:
+   0 in case of success.
+   -EINVAL if the parameter is invalid.
+   -EBUSY if demux is already running.
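+
+A hedged sketch of using one of the extension ioctls above when feeding a
+stream from memory through the dvr device follows. The device path and the
+packet-size value are placeholders, and both the ioctl argument form and the
+fallback ioctl definition below are assumptions for illustration only; the
+authoritative definitions are in the MPQ demux headers.
+
+   #include <fcntl.h>
+   #include <sys/ioctl.h>
+   #include <linux/ioctl.h>
+
+   /* Placeholder fallback; the real definition is in the MPQ demux headers. */
+   #ifndef DMX_SET_TS_PACKET_FORMAT
+   #define DMX_SET_TS_PACKET_FORMAT _IOW('o', 61, int)
+   #endif
+
+   int dvr_playback_setup_example(void)
+   {
+           int fmt = 192;  /* assumed: plain integer selecting the packet size */
+           int fd = open("/dev/dvb/adapter0/dvr0", O_WRONLY);
+
+           if (fd < 0)
+                   return -1;
+
+           /* Must be issued before the demux/dvr starts running (-EBUSY otherwise). */
+           ioctl(fd, DMX_SET_TS_PACKET_FORMAT, &fmt);
+
+           /* 192-byte TS packets are then fed with write() or via mmap. */
+           return fd;
+   }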
+
+debugfs
+-------
+debugfs is used for debug purposes.
+
+A directory is created in debugfs for each demux device.
+
+Each directory includes several performance counters of the specific demux:
+Total demuxing time, total CRC time, HW notification rate, HW notification
+buffer size.
+
+
+Exported Kernel API
+-------------------
+MPQ adapter exports the following kernel API:
+1. Getter API for the registered MPQ adapter handle.
+   This is used by the demux plugins as well as the dvb/video implementation
+   to register their devices with that adapter.
+2. Stream buffer API: Used to tunnel the data between dvb/demux and
+   decoders. The API is used by dvb/demux and by decoders to write/read
+   tunneled data.
+3. Stream buffer interface registration: Used to register stream-buffer
+   interfaces. When the demux driver is asked to tunnel data to a decoder,
+   the demux allocates a stream-buffer to be shared between the demux and
+   the decoder. For the decoder to retrieve the info of the
+   stream-buffer it should connect to, a stream-buffer registration API
+   exists.
+   The demux registers the newly allocated stream-buffer handle with the
+   MPQ Adapter, and the decoder may query the registered interface through
+   the MPQ Adapter.
+
+Driver parameters
+=================
+There are three kernel modules required for DVB API operation:
+1. dvb-core.ko: This is an existing Linux module for dvb functionality.
+   The parameters for this module are the ones defined by linuxtv.org.
+   An additional parameter was added to specify whether to collect
+   performance debug information exposed through debugfs.
+   Parameter name: dvb_demux_performancecheck
+
+2. mpq-adapter.ko: MPQ DVB adapter module. It has a parameter to
+   specify the adapter number; the number (X) is the same as the one
+   that appears in /dev/dvb/adapterX. The default is 0.
+   Parameter name: adapter_nr
+
+3. mpq-dmx-hw-plugin.ko: Module for the demux HW plugin. Receives as a
+   parameter the number of required demux devices. The default is the
+   number specified in the kernel configuration.
+   Parameter name: mpq_demux_device_num
+
+Config options
+==============
+New kernel configuration options are available (through make menuconfig) to
+enable MPQ-based adapter functionality. The following configuration options
+exist:
+1. Control whether to enable QCOM MPQ DVB adapter (tri-state).
+   It depends on having dvb-core enabled.
+2. If MPQ adapter is enabled:
+   2.1. Control whether to enable MPQ dvb/demux (tri-state)
+   2.2. Control whether to enable MPQ dvb/video (tri-state)
+   2.3. If dvb/demux is enabled:
+        2.3.1. Configure the number of demux devices. Default is 4.
+        2.3.2. Select the desired demux plugin. Each plugin appears
+               in the list of options depending on whether the respective
+               driver (TSIF/TSPP) is enabled.
+
+Dependencies
+============
+1. The implementation depends on having dvb-core enabled.
+2. Each demux plugin depends on the relevant driver it uses being
+   enabled: the TSIF plugin depends on the TSIF driver, and the TSPP
+   plugins depend on the TSPP driver.
+3. There is no communication with other processors.
+
+User space utilities
+====================
+N/A
+
+Other
+=====
+N/A
+
+Known issues
+============
+N/A
diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt
index 8898a95..70fa101 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -8,6 +8,23 @@
 
 	force_ro		Enforce read-only access even if write protect switch is off.
 
+	num_wr_reqs_to_start_packing 	This attribute determines
+	the trigger for activating write packing when the write
+	packing control feature is enabled.
+
+	When the MMC manages to reach a point where num_wr_reqs_to_start_packing
+	write requests could be packed, it enables the write packing feature.
+	This allows us to start the write packing only when it is beneficial
+	and has minimal effect on the read latency.
+
+	The number of potential packed requests that will trigger the packing
+	can be configured via sysfs by writing the required value to:
+	/sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
+
+	The default value of num_wr_reqs_to_start_packing was determined by
+	running parallel lmdd write and lmdd read operations and calculating
+	the maximum number of packed write requests.
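+
+	A minimal sketch of setting the trigger from user space follows; the
+	block device name (mmcblk0) and the value 17 are placeholder
+	assumptions, not recommended settings.
+
+	#include <stdio.h>
+
+	int set_packing_trigger(void)
+	{
+		/* Placeholder device name and value for illustration only. */
+		FILE *f = fopen("/sys/block/mmcblk0/num_wr_reqs_to_start_packing", "w");
+
+		if (!f)
+			return -1;
+		fprintf(f, "%d\n", 17);
+		return fclose(f);
+	}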
+
 SD and MMC Device Attributes
 ============================
 
diff --git a/arch/arm/boot/dts/msm-gdsc.dtsi b/arch/arm/boot/dts/msm-gdsc.dtsi
new file mode 100644
index 0000000..f83fe76
--- /dev/null
+++ b/arch/arm/boot/dts/msm-gdsc.dtsi
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+	gdsc_venus: qcom,gdsc@fd8c1024 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_venus";
+		reg = <0xfd8c1024 0x4>;
+	};
+
+	gdsc_mdss: qcom,gdsc@fd8c2304 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_mdss";
+		reg = <0xfd8c2304 0x4>;
+	};
+
+	gdsc_jpeg: qcom,gdsc@fd8c35a4 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_jpeg";
+		reg = <0xfd8c35a4 0x4>;
+	};
+
+	gdsc_vfe: qcom,gdsc@fd8c36a4 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_vfe";
+		reg = <0xfd8c36a4 0x4>;
+	};
+
+	gdsc_oxili_gx: qcom,gdsc@fd8c4024 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_oxili_gx";
+		reg = <0xfd8c4024 0x4>;
+	};
+
+	gdsc_oxili_cx: qcom,gdsc@fd8c4034 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_oxili_cx";
+		reg = <0xfd8c4034 0x4>;
+	};
+
+	gdsc_usb_hsic: qcom,gdsc@fc400404 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_usb_hsic";
+		reg = <0xfc400404 0x4>;
+	};
+};
diff --git a/arch/arm/boot/dts/msm-pm8x41-rpm-regulator.dtsi b/arch/arm/boot/dts/msm-pm8x41-rpm-regulator.dtsi
new file mode 100644
index 0000000..019112a
--- /dev/null
+++ b/arch/arm/boot/dts/msm-pm8x41-rpm-regulator.dtsi
@@ -0,0 +1,587 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+	qcom,rpm-smd {
+		rpm-regulator-smpb1 {
+			qcom,resource-name = "smpb";
+			qcom,resource-id = <1>;
+			qcom,regulator-type = <1>;
+			qcom,hpm-min-load = <100000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-s1 {
+				regulator-name = "8841_s1";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-smpb2 {
+			qcom,resource-name = "smpb";
+			qcom,resource-id = <2>;
+			qcom,regulator-type = <1>;
+			qcom,hpm-min-load = <100000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-s2 {
+				regulator-name = "8841_s2";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-smpb3 {
+			qcom,resource-name = "smpb";
+			qcom,resource-id = <3>;
+			qcom,regulator-type = <1>;
+			qcom,hpm-min-load = <100000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-s3 {
+				regulator-name = "8841_s3";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-smpb4 {
+			qcom,resource-name = "smpb";
+			qcom,resource-id = <4>;
+			qcom,regulator-type = <1>;
+			qcom,hpm-min-load = <100000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-s4 {
+				regulator-name = "8841_s4";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-smpa1 {
+			qcom,resource-name = "smpa";
+			qcom,resource-id = <1>;
+			qcom,regulator-type = <1>;
+			qcom,hpm-min-load = <100000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-s1 {
+				regulator-name = "8941_s1";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-smpa2 {
+			qcom,resource-name = "smpa";
+			qcom,resource-id = <2>;
+			qcom,regulator-type = <1>;
+			qcom,hpm-min-load = <100000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-s2 {
+				regulator-name = "8941_s2";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-smpa3 {
+			qcom,resource-name = "smpa";
+			qcom,resource-id = <3>;
+			qcom,regulator-type = <1>;
+			qcom,hpm-min-load = <100000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-s3 {
+				regulator-name = "8941_s3";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa1 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <1>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l1 {
+				regulator-name = "8941_l1";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa2 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <2>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l2 {
+				regulator-name = "8941_l2";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa3 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <3>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l3 {
+				regulator-name = "8941_l3";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa4 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <4>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l4 {
+				regulator-name = "8941_l4";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa5 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <5>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l5 {
+				regulator-name = "8941_l5";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa6 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <6>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l6 {
+				regulator-name = "8941_l6";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa7 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <7>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l7 {
+				regulator-name = "8941_l7";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa8 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <8>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l8 {
+				regulator-name = "8941_l8";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa9 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <9>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l9 {
+				regulator-name = "8941_l9";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa10 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <10>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l10 {
+				regulator-name = "8941_l10";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa11 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <11>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l11 {
+				regulator-name = "8941_l11";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa12 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <12>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l12 {
+				regulator-name = "8941_l12";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa13 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <13>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l13 {
+				regulator-name = "8941_l13";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa14 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <14>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l14 {
+				regulator-name = "8941_l14";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa15 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <15>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l15 {
+				regulator-name = "8941_l15";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa16 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <16>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l16 {
+				regulator-name = "8941_l16";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa17 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <17>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l17 {
+				regulator-name = "8941_l17";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa18 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <18>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l18 {
+				regulator-name = "8941_l18";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa19 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <19>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l19 {
+				regulator-name = "8941_l19";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa20 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <20>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l20 {
+				regulator-name = "8941_l20";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa21 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <21>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l21 {
+				regulator-name = "8941_l21";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa22 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <22>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l22 {
+				regulator-name = "8941_l22";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa23 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <23>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l23 {
+				regulator-name = "8941_l23";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa24 {
+			qcom,resource-name = "ldoa";
+			qcom,resource-id = <24>;
+			qcom,regulator-type = <0>;
+			qcom,hpm-min-load = <10000>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-l24 {
+				regulator-name = "8941_l24";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		/* TODO: find out correct resource names for LVS vs MVS */
+		rpm-regulator-vsa1 {
+			qcom,resource-name = "vsa";
+			qcom,resource-id = <1>;
+			qcom,regulator-type = <2>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-lvs1 {
+				regulator-name = "8941_lvs1";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-vsa2 {
+			qcom,resource-name = "vsa";
+			qcom,resource-id = <2>;
+			qcom,regulator-type = <2>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-lvs2 {
+				regulator-name = "8941_lvs2";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-vsa3 {
+			qcom,resource-name = "vsa";
+			qcom,resource-id = <3>;
+			qcom,regulator-type = <2>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-lvs3 {
+				regulator-name = "8941_lvs3";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-vsa4 {
+			qcom,resource-name = "vsa";
+			qcom,resource-id = <4>;
+			qcom,regulator-type = <2>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-mvs1 {
+				regulator-name = "8941_mvs1";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-vsa5 {
+			qcom,resource-name = "vsa";
+			qcom,resource-id = <5>;
+			qcom,regulator-type = <2>;
+			compatible = "qcom,rpm-regulator-smd-resource";
+			status = "disabled";
+
+			regulator-mvs2 {
+				regulator-name = "8941_mvs2";
+				qcom,set = <3>;
+				status = "disabled";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/msm9625.dts b/arch/arm/boot/dts/msm9625.dts
new file mode 100644
index 0000000..d5aed00
--- /dev/null
+++ b/arch/arm/boot/dts/msm9625.dts
@@ -0,0 +1,47 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/include/ "skeleton.dtsi"
+
+/ {
+	model = "Qualcomm MSM 9625";
+	compatible = "qcom,msm9625";
+	interrupt-parent = <&intc>;
+
+	intc: interrupt-controller@F9000000 {
+		compatible = "qcom,msm-qgic2";
+		interrupt-controller;
+		#interrupt-cells = <3>;
+		reg = <0xF9000000 0x1000>,
+		      <0xF9002000 0x1000>;
+	};
+
+	msmgpio: gpio@fd510000 {
+		compatible = "qcom,msm-gpio";
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		reg = <0xfd510000 0x4000>;
+	};
+
+	timer {
+		compatible = "qcom,msm-qtimer", "arm,armv7-timer";
+		interrupts = <0 7 0>;
+		clock-frequency = <5000000>;
+	};
+
+	serial@f991f000 {
+		compatible = "qcom,msm-lsuart-v14";
+		reg = <0xf991f000 0x1000>;
+		interrupts = <0 109 0>;
+	};
+};
diff --git a/arch/arm/boot/dts/msmcopper-iommu.dtsi b/arch/arm/boot/dts/msmcopper-iommu.dtsi
new file mode 100644
index 0000000..e0ce8ac
--- /dev/null
+++ b/arch/arm/boot/dts/msmcopper-iommu.dtsi
@@ -0,0 +1,88 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+	jpeg: qcom,iommu@fda64000 {
+		compatible = "qcom,msm-smmu-v2";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		reg = <0xfda64000 0x10000>;
+
+		qcom,iommu-ctx@fda6c000 {
+			reg = <0xfda6c000 0x1000>;
+			interrupts = <0 69 0>;
+			qcom,iommu-ctx-sids = <0>;
+			qcom,iommu-ctx-name = "jpeg_enc0";
+		};
+		qcom,iommu-ctx@fda6d000 {
+			reg = <0xfda6d000 0x1000>;
+			interrupts = <0 70 0>;
+			qcom,iommu-ctx-sids = <1>;
+			qcom,iommu-ctx-name = "jpeg_enc1";
+		};
+		qcom,iommu-ctx@fda6e000 {
+			reg = <0xfda6e000 0x1000>;
+			interrupts = <0 71 0>;
+			qcom,iommu-ctx-sids = <2>;
+			qcom,iommu-ctx-name = "jpeg_dec";
+		};
+	};
+
+	mdp: qcom,iommu@fd928000 {
+		compatible = "qcom,msm-smmu-v2";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		reg = <0xfd928000 0x10000>;
+
+		qcom,iommu-ctx@fd930000 {
+			reg = <0xfd930000 0x1000>;
+			interrupts = <0 74 0>;
+			qcom,iommu-ctx-sids = <0>;
+			qcom,iommu-ctx-name = "mdp_0";
+		};
+		qcom,iommu-ctx@fd931000 {
+			reg = <0xfd931000 0x1000>;
+			interrupts = <0 75 0>;
+			qcom,iommu-ctx-sids = <1>;
+			qcom,iommu-ctx-name = "mdp_1";
+		};
+	};
+
+	venus: qcom,iommu@fdc84000 {
+		compatible = "qcom,msm-smmu-v2";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		reg = <0xfdc84000 0x10000>;
+
+		qcom,iommu-ctx@fdc8c000 {
+			reg = <0xfdc8c000 0x1000>;
+			interrupts = <0 43 0>;
+			qcom,iommu-ctx-sids = <0 1 2 3 4 5>;
+			qcom,iommu-ctx-name = "venus_ns";
+		};
+		qcom,iommu-ctx@fdc8d000 {
+			reg = <0xfdc8d000 0x1000>;
+			interrupts = <0 42 0>;
+			qcom,iommu-ctx-sids = <0x80 0x81 0x82 0x83 0x84 0x85>;
+			qcom,iommu-ctx-name = "venus_cp";
+		};
+		qcom,iommu-ctx@fdc8e000 {
+			reg = <0xfdc8e000 0x1000>;
+			interrupts = <0 41 0>;
+			qcom,iommu-ctx-sids = <0xc0 0xc6>;
+			qcom,iommu-ctx-name = "venus_fw";
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/msmcopper-rumi.dts b/arch/arm/boot/dts/msmcopper-rumi.dts
index d6e23ad..5bfb228 100644
--- a/arch/arm/boot/dts/msmcopper-rumi.dts
+++ b/arch/arm/boot/dts/msmcopper-rumi.dts
@@ -75,6 +75,23 @@
 		};
 	};
 
+	i2c@f9966000 {
+		status = "disable";
+	};
+
+	i2c@f9967000 {
+		cell-index = <0>;
+		compatible = "qcom,i2c-qup";
+		reg = <0xf9967000 0x1000>;
+		reg-names = "qup_phys_addr";
+		interrupts = <0 105 0>;
+		interrupt-names = "qup_err_intr";
+		qcom,i2c-bus-freq = <100000>;
+		qcom,i2c-src-freq = <24000000>;
+		gpios = <&msmgpio 83 0>, /* DAT  */
+			<&msmgpio 84 0>; /* CLK */
+	};
+
 	slim@fe12f000 {
 		status = "disable";
 	};
@@ -83,10 +100,6 @@
 		status = "disable";
 	};
 
-	i2c@f9966000 {
-		status = "disable";
-	};
-
 	qcom,ssusb@F9200000 {
 		status = "disable";
 	};
diff --git a/arch/arm/boot/dts/msmcopper.dtsi b/arch/arm/boot/dts/msmcopper.dtsi
index 2230e0e..60f59d3 100644
--- a/arch/arm/boot/dts/msmcopper.dtsi
+++ b/arch/arm/boot/dts/msmcopper.dtsi
@@ -12,10 +12,13 @@
 
 /include/ "skeleton.dtsi"
 /include/ "msmcopper_pm.dtsi"
+/include/ "msm-pm8x41-rpm-regulator.dtsi"
 /include/ "msm-pm8841.dtsi"
 /include/ "msm-pm8941.dtsi"
 /include/ "msmcopper-regulator.dtsi"
 /include/ "msmcopper-gpio.dtsi"
+/include/ "msmcopper-iommu.dtsi"
+/include/ "msm-gdsc.dtsi"
 
 / {
 	model = "Qualcomm MSM Copper";
@@ -130,6 +133,7 @@
 		qcom,bam-dma-res-pipes = <6>;
 	};
 
+
 	spi@f9924000 {
 		compatible = "qcom,spi-qup-v2";
 		reg = <0xf9924000 0x1000>;
@@ -273,6 +277,10 @@
 		qcom,dwc-usb3-msm-dbm-eps = <4>;
 	};
 
+	gdsc_oxili_gx: qcom,gdsc@fd8c4024 {
+		parent-supply = <&pm8841_s4>;
+	};
+
 	qcom,lpass@fe200000 {
 		compatible = "qcom,pil-q6v5-lpass";
 		reg = <0xfe200000 0x00100>,
@@ -300,4 +308,9 @@
 		rpm-channel-name = "rpm_requests";
 		rpm-channel-type = <15>; /* SMD_APPS_RPM */
 	};
+
+	qcom,msm-rng@f9bff000 {
+		compatible = "qcom,msm-rng";
+		reg = <0xf9bff000 0x200>;
+	};
 };
diff --git a/arch/arm/configs/msm-copper_defconfig b/arch/arm/configs/msm-copper_defconfig
index de469da..e9360d1 100644
--- a/arch/arm/configs/msm-copper_defconfig
+++ b/arch/arm/configs/msm-copper_defconfig
@@ -43,6 +43,7 @@
 CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
 # CONFIG_MSM_HW3D is not set
 CONFIG_MSM_PIL_LPASS_QDSP6V5=y
+CONFIG_MSM_PIL_PRONTO=y
 CONFIG_MSM_DIRECT_SCLK_ACCESS=y
 CONFIG_MSM_OCMEM=y
 CONFIG_NO_HZ=y
@@ -203,3 +204,5 @@
 CONFIG_CRC_CCITT=y
 CONFIG_LIBCRC32C=y
 CONFIG_MSM_TZ_LOG=y
+CONFIG_HW_RANDOM_MSM=y
+
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index c93598d..f524828 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -61,7 +61,9 @@
 # CONFIG_MSM_PROC_COMM is not set
 CONFIG_MSM_SMD=y
 CONFIG_MSM_SMD_PKG4=y
+CONFIG_MSM_PCIE=y
 CONFIG_MSM_BAM_DMUX=y
+CONFIG_MSM_RMNET_SMUX=y
 CONFIG_MSM_DSPS=y
 CONFIG_MSM_IPC_ROUTER=y
 CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
@@ -92,6 +94,8 @@
 CONFIG_MSM_DCVS=y
 CONFIG_MSM_HSIC_SYSMON=y
 CONFIG_STRICT_MEMORY_RWX=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_SMP=y
@@ -279,6 +283,9 @@
 CONFIG_INPUT_UINPUT=y
 CONFIG_INPUT_PMIC8XXX_PWRKEY=y
 # CONFIG_LEGACY_PTYS is not set
+CONFIG_N_SMUX=y
+CONFIG_N_SMUX_LOOPBACK=y
+CONFIG_SMUX_CTL=y
 CONFIG_SERIAL_MSM_HS=y
 CONFIG_SERIAL_MSM_HSL=y
 CONFIG_SERIAL_MSM_HSL_CONSOLE=y
@@ -302,6 +309,7 @@
 CONFIG_SMB349_CHARGER=y
 CONFIG_PM8921_CHARGER=y
 CONFIG_PM8921_BMS=y
+CONFIG_PM8921_BCL=y
 CONFIG_SENSORS_PM8XXX_ADC=y
 CONFIG_SENSORS_EPM_ADC=y
 CONFIG_THERMAL=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index 06211d5..1c2876d 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -60,7 +60,9 @@
 # CONFIG_MSM_PROC_COMM is not set
 CONFIG_MSM_SMD=y
 CONFIG_MSM_SMD_PKG4=y
+CONFIG_MSM_PCIE=y
 CONFIG_MSM_BAM_DMUX=y
+CONFIG_MSM_RMNET_SMUX=y
 CONFIG_MSM_DSPS=y
 CONFIG_MSM_IPC_ROUTER=y
 CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
@@ -99,6 +101,8 @@
 CONFIG_MSM_CACHE_DUMP_ON_PANIC=y
 CONFIG_MSM_HSIC_SYSMON=y
 CONFIG_STRICT_MEMORY_RWX=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_SMP=y
@@ -288,6 +292,9 @@
 CONFIG_INPUT_UINPUT=y
 CONFIG_INPUT_PMIC8XXX_PWRKEY=y
 # CONFIG_LEGACY_PTYS is not set
+CONFIG_N_SMUX=y
+CONFIG_N_SMUX_LOOPBACK=y
+CONFIG_SMUX_CTL=y
 CONFIG_SERIAL_MSM_HS=y
 CONFIG_SERIAL_MSM_HSL=y
 CONFIG_SERIAL_MSM_HSL_CONSOLE=y
@@ -311,6 +318,7 @@
 CONFIG_SMB349_CHARGER=y
 CONFIG_PM8921_CHARGER=y
 CONFIG_PM8921_BMS=y
+CONFIG_PM8921_BCL=y
 CONFIG_SENSORS_PM8XXX_ADC=y
 CONFIG_SENSORS_EPM_ADC=y
 CONFIG_THERMAL=y
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
new file mode 100644
index 0000000..89fb888
--- /dev/null
+++ b/arch/arm/configs/msm9625_defconfig
@@ -0,0 +1,107 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+# CONFIG_FAIR_GROUP_SCHED is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_ARCH_MSM=y
+CONFIG_ARCH_MSM9625=y
+# CONFIG_MSM_STACKED_MEMORY is not set
+CONFIG_CPU_HAS_L2_PMU=y
+# CONFIG_MSM_FIQ_SUPPORT is not set
+# CONFIG_MSM_PROC_COMM is not set
+CONFIG_MSM_DIRECT_SCLK_ACCESS=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_ARM_ARCH_TIMER=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_VMALLOC_RESERVE=0x19000000
+CONFIG_USE_OF=y
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_SUSPEND is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_MISC_DEVICES=y
+# CONFIG_ANDROID_PMEM is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=m
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM_HSL=y
+CONFIG_SERIAL_MSM_HSL_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+# CONFIG_HWMON is not set
+# CONFIG_MFD_SUPPORT is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_USER=y
+CONFIG_KEYS=y
+CONFIG_CRYPTO_AUTHENC=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+CONFIG_LIBCRC32C=y
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 423e71e..a24498c 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -94,6 +94,7 @@
 	select MSM_SECURE_IO
 	select MSM_DALRPC
 	select MSM_QDSP6_APR
+	select MSM_QDSP6_CODECS
 	select MSM_NATIVE_RESTART
 	select ARCH_INLINE_SPIN_TRYLOCK
 	select ARCH_INLINE_SPIN_TRYLOCK_BH
@@ -144,6 +145,7 @@
 	select MSM_RPM
 	select MSM_XO
 	select MSM_QDSP6_APR
+	select MSM_QDSP6_CODECS
 	select MSM_PIL
 	select MSM_AUDIO_QDSP6 if SND_SOC
 	select CPU_HAS_L2_PMU
@@ -155,7 +157,6 @@
 	select ARCH_POPULATES_NODE_MAP
 	select ARCH_SPARSEMEM_ENABLE
 	select ARCH_HAS_HOLES_MEMORYMODEL
-	select ENABLE_DMM
 	select MEMORY_HOTPLUG if ENABLE_DMM
 	select MEMORY_HOTREMOVE if ENABLE_DMM
 	select ARCH_ENABLE_MEMORY_HOTPLUG if ENABLE_DMM
@@ -186,6 +187,7 @@
 	select MSM_RPM
 	select MSM_XO
 	select MSM_QDSP6_APR
+	select MSM_QDSP6_CODECS
 	select MSM_PIL
 	select MSM_AUDIO_QDSP6 if SND_SOC
 	select CPU_HAS_L2_PMU
@@ -197,7 +199,6 @@
 	select ARCH_POPULATES_NODE_MAP
 	select ARCH_SPARSEMEM_ENABLE
 	select ARCH_HAS_HOLES_MEMORYMODEL
-	select ENABLE_DMM
 	select MEMORY_HOTPLUG if ENABLE_DMM
 	select MEMORY_HOTREMOVE if ENABLE_DMM
 	select ARCH_ENABLE_MEMORY_HOTPLUG if ENABLE_DMM
@@ -222,6 +223,7 @@
 	select MSM_REMOTE_SPINLOCK_SFPB
 	select MSM_PIL
 	select MSM_QDSP6_APR
+	select MSM_QDSP6_CODECS
 	select MSM_AUDIO_QDSP6 if SND_SOC
 	select MULTI_IRQ_HANDLER
 	select MSM_RPM
@@ -230,7 +232,6 @@
 	select MSM_PM8X60 if PM
 	select CPU_HAS_L2_PMU
 	select HOLES_IN_ZONE if SPARSEMEM
-	select ENABLE_DMM
 	select MEMORY_HOTPLUG if ENABLE_DMM
 	select MEMORY_HOTREMOVE if ENABLE_DMM
 	select ARCH_ENABLE_MEMORY_HOTPLUG if ENABLE_DMM
@@ -259,6 +260,7 @@
 	select MSM_L2_SPM
 	select MSM_PM8X60 if PM
 	select MSM_RPM_SMD
+	select REGULATOR
 
 config ARCH_FSM9XXX
 	bool "FSM9XXX"
@@ -310,6 +312,7 @@
 	select SMP
 	select MSM_SMP
 	select CPU_V7
+	select MSM_GPIOMUX
 	select MULTI_IRQ_HANDLER
 	select MSM_V2_TLMM
 
@@ -882,7 +885,7 @@
 	default "0x80200000" if ARCH_APQ8064
 	default "0x80200000" if ARCH_MSM8960
 	default "0x80200000" if ARCH_MSM8930
-	default "0x20200000" if ARCH_MSMCOPPER
+	default "0x00000000" if ARCH_MSMCOPPER
 	default "0x10000000" if ARCH_FSM9XXX
 	default "0x20200000" if ARCH_MSM9625
 	default "0x00200000" if !MSM_STACKED_MEMORY
@@ -944,14 +947,6 @@
 	help
 	  Say Y here if high speed MSM UART v1.4 is present.
 
-config DEBUG_MSM8930_UART
-	bool "Kernel low-level debugging messages via MSM 8930 UART"
-	depends on ARCH_MSM8930 && DEBUG_LL
-	select MSM_HAS_DEBUG_UART_HS
-	help
-	  Say Y here if you want the debug print routines to direct
-	  their output to the serial port on MSM 8930 devices.
-
 config MSM_DEBUG_UART_PHYS
 	hex
 	default 0xA9A00000 if (ARCH_MSM7X27 || ARCH_QSD8X50) && DEBUG_MSM_UART1
@@ -998,13 +993,28 @@
 
 	config DEBUG_MSM8960_UART
 		bool "Kernel low-level debugging messages via MSM 8960 UART"
-		depends on ARCH_MSM8960
-		select DEBUG_MSM8930_UART
+		depends on ARCH_MSM8960 && DEBUG_LL
 		select MSM_HAS_DEBUG_UART_HS
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the serial port on MSM 8960 devices.
 
+	config DEBUG_MSM8930_UART
+		bool "Kernel low-level debugging messages via MSM 8930 UART"
+		depends on ARCH_MSM8930 && DEBUG_LL
+		select MSM_HAS_DEBUG_UART_HS
+		help
+		  Say Y here if you want the debug print routines to direct
+		  their output to the serial port on MSM 8930 devices.
+
+	config DEBUG_APQ8064_UART
+		bool "Kernel low-level debugging messages via APQ 8064 UART"
+		depends on ARCH_APQ8064 && DEBUG_LL
+		select MSM_HAS_DEBUG_UART_HS
+		help
+		  Say Y here if you want the debug print routines to direct
+		  their output to the serial port on APQ 8064 devices.
+
 	config DEBUG_MSMCOPPER_UART
 		bool "Kernel low-level debugging messages via MSM Copper UART"
 		depends on ARCH_MSMCOPPER
@@ -1800,6 +1810,18 @@
 	  voltages and other parameters of the various power rails supplied
 	  by some Qualcomm PMICs.
 
+config MSM_RPM_REGULATOR_SMD
+	bool "SMD RPM regulator driver"
+	depends on REGULATOR
+	depends on OF
+	depends on MSM_RPM_SMD
+	help
+	  Compile in support for the SMD RPM regulator driver which is used for
+	  setting voltages and other parameters of the various power rails
+	  supplied by some Qualcomm PMICs.  The SMD RPM regulator driver should
+	  be used on systems that contain an RPM which communicates with the
+	  application processor over SMD.
+
 config MSM_PIL
 	bool "Peripheral image loading"
 	select FW_LOADER
@@ -1994,7 +2016,7 @@
 
 config MSM_IOMMU
 	bool "MSM IOMMU Support"
-	depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_APQ8064
+	depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_APQ8064 || ARCH_MSMCOPPER
 	select IOMMU_API
 	default n
 	help
@@ -2174,6 +2196,15 @@
 	  used by audio driver to configure QDSP6's
 	  ASM, ADM and AFE.
 
+config MSM_QDSP6_CODECS
+	bool "Audio Codecs on QDSP6 APR"
+	depends on MSM_SMD
+	default n
+	help
+	  Enable the audio codecs that use the APR IPC protocol for
+	  communication between the application processor and the QDSP6.
+	  APR is used by the audio driver to configure the QDSP6's
+	  ASM, ADM and AFE.
 
 config MSM_AUDIO_QDSP6
         bool "QDSP6 HW Audio support"
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 6a90d6d..3659055 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -107,11 +107,13 @@
 ifndef CONFIG_ARCH_MSM8X60
 ifndef CONFIG_ARCH_APQ8064
 ifndef CONFIG_ARCH_MSMCOPPER
+ifndef CONFIG_ARCH_MSM9625
 	obj-y += nand_partitions.o
 endif
 endif
 endif
 endif
+endif
 obj-$(CONFIG_MSM_SDIO_TTY) += sdio_tty.o
 obj-$(CONFIG_MSM_SMD_TTY) += smd_tty.o
 obj-$(CONFIG_MSM_SMD_QMI) += smd_qmi.o
@@ -186,6 +188,8 @@
 obj-$(CONFIG_ARCH_APQ8064) += rpm-regulator-8960.o
 endif
 
+obj-$(CONFIG_MSM_RPM_REGULATOR_SMD) += rpm-regulator-smd.o
+
 ifdef CONFIG_MSM_SUBSYSTEM_RESTART
 	obj-y += subsystem_notif.o
 	obj-y += subsystem_restart.o
@@ -281,6 +285,8 @@
 obj-$(CONFIG_ARCH_MSMCOPPER) += board-copper.o board-dt.o board-copper-regulator.o board-copper-gpiomux.o
 obj-$(CONFIG_ARCH_MSMCOPPER) += acpuclock-krait.o acpuclock-copper.o
 obj-$(CONFIG_ARCH_MSMCOPPER) += clock-local2.o clock-pll.o clock-copper.o
+obj-$(CONFIG_ARCH_MSMCOPPER) += gdsc.o
+obj-$(CONFIG_ARCH_MSM9625) += board-9625.o board-9625-gpiomux.o
 
 obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire.o board-sapphire-gpio.o
 obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire-keypad.o board-sapphire-panel.o
@@ -313,7 +319,10 @@
 obj-$(CONFIG_MSM_BUS_SCALING) += msm_bus/
 obj-$(CONFIG_MSM_BUSPM_DEV) += msm-buspm-dev.o
 
-obj-$(CONFIG_MSM_IOMMU)		+= iommu.o iommu_dev.o devices-iommu.o iommu_domains.o
+obj-$(CONFIG_MSM_IOMMU)	+= iommu.o iommu_dev.o devices-iommu.o iommu_domains.o
+ifdef CONFIG_OF
+obj-$(CONFIG_MSM_IOMMU)	+= iommu-v2.o iommu_dev-v2.o iommu_pagetable.o
+endif
 
 ifdef CONFIG_VCM
 obj-$(CONFIG_ARCH_MSM8X60) += board-msm8x60-vcm.o
@@ -328,6 +337,8 @@
 obj-$(CONFIG_ARCH_APQ8064) += gpiomux-v2.o gpiomux.o
 obj-$(CONFIG_ARCH_MSM9615) += gpiomux-v2.o gpiomux.o
 obj-$(CONFIG_ARCH_MSMCOPPER) += gpiomux-v2.o gpiomux.o
+obj-$(CONFIG_ARCH_MSM9625) += gpiomux-v2.o gpiomux.o
+
 
 ifdef CONFIG_FSM9XXX_TLMM
 obj-y   += gpio-fsm9xxx.o
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index 58c630e..bd8d153 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -46,11 +46,14 @@
    zreladdr-$(CONFIG_ARCH_APQ8064)	:= 0x80208000
 
 # MSMCOPPER
-   zreladdr-$(CONFIG_ARCH_MSMCOPPER)	:= 0x20208000
+   zreladdr-$(CONFIG_ARCH_MSMCOPPER)	:= 0x00008000
 
 # MSM9615
    zreladdr-$(CONFIG_ARCH_MSM9615)	:= 0x40808000
 
+# MSM9625
+   zreladdr-$(CONFIG_ARCH_MSM9625)	:= 0x20208000
+
 # FSM9XXX
    zreladdr-$(CONFIG_ARCH_FSM9XXX)	:= 0x10008000
 params_phys-$(CONFIG_ARCH_FSM9XXX)	:= 0x10000100
diff --git a/arch/arm/mach-msm/acpuclock-7627.c b/arch/arm/mach-msm/acpuclock-7627.c
index 99311d4..7c2c556 100644
--- a/arch/arm/mach-msm/acpuclock-7627.c
+++ b/arch/arm/mach-msm/acpuclock-7627.c
@@ -249,6 +249,35 @@
 	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
 };
 
+/* 8625 PLL4 @ 1152MHz with GSM capable modem */
+static struct clkctl_acpu_speed pll0_960_pll1_245_pll2_1200_pll4_1152[] = {
+	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 30720 },
+	{ 0, 61440, ACPU_PLL_1, 1, 3,  7680, 3, 1, 61440 },
+	{ 1, 122880, ACPU_PLL_1, 1, 1,  15360, 3, 2, 61440 },
+	{ 1, 245760, ACPU_PLL_1, 1, 0, 30720, 3, 3, 61440 },
+	{ 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 4, 122880 },
+	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
+	{ 0, 576000, ACPU_PLL_4, 6, 1, 72000, 3, 6, 160000 },
+	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
+	{ 1, 1152000, ACPU_PLL_4, 6, 0, 144000, 3, 7, 200000},
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+};
+
+/* 8625 PLL4 @ 1152MHz with CDMA capable modem */
+static struct clkctl_acpu_speed pll0_960_pll1_196_pll2_1200_pll4_1152[] = {
+	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 24576 },
+	{ 0, 65536, ACPU_PLL_1, 1, 3,  8192, 3, 1, 49152 },
+	{ 1, 98304, ACPU_PLL_1, 1, 1,  12288, 3, 2, 49152 },
+	{ 1, 196608, ACPU_PLL_1, 1, 0, 24576, 3, 3, 98304 },
+	{ 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 4, 122880 },
+	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
+	{ 0, 576000, ACPU_PLL_4, 6, 1, 72000, 3, 6, 160000 },
+	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
+	{ 1, 1152000, ACPU_PLL_4, 6, 0, 144000, 3, 7, 200000},
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+};
+
+
 /* 7625a PLL2 @ 1200MHz with GSM capable modem */
 static struct clkctl_acpu_speed pll0_960_pll1_245_pll2_1200_25a[] = {
 	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 30720 },
@@ -365,6 +394,8 @@
 	PLL_CONFIG(960, 589, 1200, 1008),
 	PLL_CONFIG(960, 245, 1200, 1209),
 	PLL_CONFIG(960, 196, 1200, 1209),
+	PLL_CONFIG(960, 245, 1200, 1152),
+	PLL_CONFIG(960, 196, 1200, 1152),
 	{ 0, 0, 0, 0, 0 }
 };
 
diff --git a/arch/arm/mach-msm/acpuclock-8960.c b/arch/arm/mach-msm/acpuclock-8960.c
index 6986a29..f467aba 100644
--- a/arch/arm/mach-msm/acpuclock-8960.c
+++ b/arch/arm/mach-msm/acpuclock-8960.c
@@ -513,15 +513,15 @@
 	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(7),  1100000 },
 	{ 0, {   972000, HFPLL, 1, 0, 0x24 }, L2(7),  1125000 },
 	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(7),  1125000 },
-	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1175000 },
-	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1175000 },
-	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1200000 },
-	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(16), 1200000 },
-	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(16), 1225000 },
-	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(16), 1225000 },
-	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(16), 1237500 },
-	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(16), 1237500 },
-	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(16), 1250000 },
+	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(19), 1175000 },
+	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(19), 1175000 },
+	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(19), 1200000 },
+	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(19), 1200000 },
+	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(19), 1225000 },
+	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(19), 1225000 },
+	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(19), 1237500 },
+	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(19), 1237500 },
+	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(19), 1250000 },
 	{ 0, { 0 } }
 };
 
@@ -540,15 +540,15 @@
 	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(7),  1050000 },
 	{ 0, {   972000, HFPLL, 1, 0, 0x24 }, L2(7),  1075000 },
 	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(7),  1075000 },
-	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1125000 },
-	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1125000 },
-	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1150000 },
-	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(16), 1150000 },
-	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(16), 1175000 },
-	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(16), 1175000 },
-	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(16), 1187500 },
-	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(16), 1187500 },
-	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(16), 1200000 },
+	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(19), 1125000 },
+	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(19), 1125000 },
+	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(19), 1150000 },
+	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(19), 1150000 },
+	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(19), 1175000 },
+	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(19), 1175000 },
+	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(19), 1187500 },
+	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(19), 1187500 },
+	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(19), 1200000 },
 	{ 0, { 0 } }
 };
 
@@ -567,15 +567,15 @@
 	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(7),  1000000 },
 	{ 0, {   972000, HFPLL, 1, 0, 0x24 }, L2(7),  1025000 },
 	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(7),  1025000 },
-	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1075000 },
-	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1075000 },
-	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1100000 },
-	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(16), 1100000 },
-	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(16), 1125000 },
-	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(16), 1125000 },
-	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(16), 1137500 },
-	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(16), 1137500 },
-	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(16), 1150000 },
+	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(19), 1075000 },
+	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(19), 1075000 },
+	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(19), 1100000 },
+	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(19), 1100000 },
+	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(19), 1125000 },
+	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(19), 1125000 },
+	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(19), 1137500 },
+	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(19), 1137500 },
+	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(19), 1150000 },
 	{ 0, { 0 } }
 };
 
diff --git a/arch/arm/mach-msm/acpuclock-8x60.c b/arch/arm/mach-msm/acpuclock-8x60.c
index 787483b..48efa18 100644
--- a/arch/arm/mach-msm/acpuclock-8x60.c
+++ b/arch/arm/mach-msm/acpuclock-8x60.c
@@ -217,40 +217,7 @@
 };
 
 /* SCPLL frequencies = 2 * 27 MHz * L_VAL */
-static struct clkctl_acpu_speed acpu_freq_tbl_slowest[] = {
-  { {1, 1},  192000,  ACPU_PLL_8, 3, 1, 0, 0,    L2(1),   800000, 0x03006000},
-  /* MAX_AXI row is used to source CPU cores and L2 from the AFAB clock. */
-  { {0, 0},  MAX_AXI, ACPU_AFAB,  1, 0, 0, 0,    L2(0),   825000, 0x03006000},
-  { {1, 1},  384000,  ACPU_PLL_8, 3, 0, 0, 0,    L2(1),   825000, 0x03006000},
-  { {1, 1},  432000,  ACPU_SCPLL, 0, 0, 1, 0x08, L2(1),   850000, 0x03006000},
-  { {1, 1},  486000,  ACPU_SCPLL, 0, 0, 1, 0x09, L2(2),   850000, 0x03006000},
-  { {1, 1},  540000,  ACPU_SCPLL, 0, 0, 1, 0x0A, L2(3),   875000, 0x03006000},
-  { {1, 1},  594000,  ACPU_SCPLL, 0, 0, 1, 0x0B, L2(4),   875000, 0x03006000},
-  { {1, 1},  648000,  ACPU_SCPLL, 0, 0, 1, 0x0C, L2(5),   900000, 0x03006000},
-  { {1, 1},  702000,  ACPU_SCPLL, 0, 0, 1, 0x0D, L2(6),   900000, 0x03006000},
-  { {1, 1},  756000,  ACPU_SCPLL, 0, 0, 1, 0x0E, L2(7),   925000, 0x03006000},
-  { {1, 1},  810000,  ACPU_SCPLL, 0, 0, 1, 0x0F, L2(8),   975000, 0x03006000},
-  { {1, 1},  864000,  ACPU_SCPLL, 0, 0, 1, 0x10, L2(9),   975000, 0x03006000},
-  { {1, 1},  918000,  ACPU_SCPLL, 0, 0, 1, 0x11, L2(10), 1000000, 0x03006000},
-  { {1, 1},  972000,  ACPU_SCPLL, 0, 0, 1, 0x12, L2(11), 1025000, 0x03006000},
-  { {1, 1}, 1026000,  ACPU_SCPLL, 0, 0, 1, 0x13, L2(12), 1025000, 0x03006000},
-  { {1, 1}, 1080000,  ACPU_SCPLL, 0, 0, 1, 0x14, L2(13), 1050000, 0x03006000},
-  { {1, 1}, 1134000,  ACPU_SCPLL, 0, 0, 1, 0x15, L2(14), 1075000, 0x03006000},
-  { {1, 1}, 1188000,  ACPU_SCPLL, 0, 0, 1, 0x16, L2(15), 1100000, 0x03006000},
-  { {1, 1}, 1242000,  ACPU_SCPLL, 0, 0, 1, 0x17, L2(16), 1125000, 0x03006000},
-  { {1, 1}, 1296000,  ACPU_SCPLL, 0, 0, 1, 0x18, L2(17), 1150000, 0x03006000},
-  { {1, 1}, 1350000,  ACPU_SCPLL, 0, 0, 1, 0x19, L2(18), 1175000, 0x03006000},
-  { {1, 1}, 1404000,  ACPU_SCPLL, 0, 0, 1, 0x1A, L2(19), 1200000, 0x03006000},
-  { {1, 1}, 1458000,  ACPU_SCPLL, 0, 0, 1, 0x1B, L2(19), 1225000, 0x03006000},
-  { {1, 1}, 1512000,  ACPU_SCPLL, 0, 0, 1, 0x1C, L2(19), 1250000, 0x03006000},
-  { {1, 1}, 1566000,  ACPU_SCPLL, 0, 0, 1, 0x1D, L2(19), 1275000, 0x03006000},
-  { {1, 1}, 1620000,  ACPU_SCPLL, 0, 0, 1, 0x1E, L2(19), 1300000, 0x03006000},
-  { {1, 1}, 1674000,  ACPU_SCPLL, 0, 0, 1, 0x1F, L2(19), 1325000, 0x03006000},
-  { {0, 0}, 0 },
-};
-
-/* SCPLL frequencies = 2 * 27 MHz * L_VAL */
-static struct clkctl_acpu_speed acpu_freq_tbl_slower[] = {
+static struct clkctl_acpu_speed acpu_freq_tbl_1512mhz_slow[] = {
   { {1, 1},  192000,  ACPU_PLL_8, 3, 1, 0, 0,    L2(1),   800000, 0x03006000},
   /* MAX_AXI row is used to source CPU cores and L2 from the AFAB clock. */
   { {0, 0},  MAX_AXI, ACPU_AFAB,  1, 0, 0, 0,    L2(0),   825000, 0x03006000},
@@ -276,47 +243,11 @@
   { {1, 1}, 1404000,  ACPU_SCPLL, 0, 0, 1, 0x1A, L2(19), 1175000, 0x03006000},
   { {1, 1}, 1458000,  ACPU_SCPLL, 0, 0, 1, 0x1B, L2(19), 1200000, 0x03006000},
   { {1, 1}, 1512000,  ACPU_SCPLL, 0, 0, 1, 0x1C, L2(19), 1225000, 0x03006000},
-  { {1, 1}, 1566000,  ACPU_SCPLL, 0, 0, 1, 0x1D, L2(19), 1250000, 0x03006000},
-  { {1, 1}, 1620000,  ACPU_SCPLL, 0, 0, 1, 0x1E, L2(19), 1275000, 0x03006000},
-  { {1, 1}, 1674000,  ACPU_SCPLL, 0, 0, 1, 0x1F, L2(19), 1300000, 0x03006000},
   { {0, 0}, 0 },
 };
 
 /* SCPLL frequencies = 2 * 27 MHz * L_VAL */
-static struct clkctl_acpu_speed acpu_freq_tbl_slow[] = {
-  { {1, 1},  192000,  ACPU_PLL_8, 3, 1, 0, 0,    L2(1),   800000, 0x03006000},
-  /* MAX_AXI row is used to source CPU cores and L2 from the AFAB clock. */
-  { {0, 0},  MAX_AXI, ACPU_AFAB,  1, 0, 0, 0,    L2(0),   825000, 0x03006000},
-  { {1, 1},  384000,  ACPU_PLL_8, 3, 0, 0, 0,    L2(1),   825000, 0x03006000},
-  { {1, 1},  432000,  ACPU_SCPLL, 0, 0, 1, 0x08, L2(1),   850000, 0x03006000},
-  { {1, 1},  486000,  ACPU_SCPLL, 0, 0, 1, 0x09, L2(2),   850000, 0x03006000},
-  { {1, 1},  540000,  ACPU_SCPLL, 0, 0, 1, 0x0A, L2(3),   875000, 0x03006000},
-  { {1, 1},  594000,  ACPU_SCPLL, 0, 0, 1, 0x0B, L2(4),   875000, 0x03006000},
-  { {1, 1},  648000,  ACPU_SCPLL, 0, 0, 1, 0x0C, L2(5),   900000, 0x03006000},
-  { {1, 1},  702000,  ACPU_SCPLL, 0, 0, 1, 0x0D, L2(6),   900000, 0x03006000},
-  { {1, 1},  756000,  ACPU_SCPLL, 0, 0, 1, 0x0E, L2(7),   925000, 0x03006000},
-  { {1, 1},  810000,  ACPU_SCPLL, 0, 0, 1, 0x0F, L2(8),   975000, 0x03006000},
-  { {1, 1},  864000,  ACPU_SCPLL, 0, 0, 1, 0x10, L2(9),   975000, 0x03006000},
-  { {1, 1},  918000,  ACPU_SCPLL, 0, 0, 1, 0x11, L2(10), 1000000, 0x03006000},
-  { {1, 1},  972000,  ACPU_SCPLL, 0, 0, 1, 0x12, L2(11), 1025000, 0x03006000},
-  { {1, 1}, 1026000,  ACPU_SCPLL, 0, 0, 1, 0x13, L2(12), 1025000, 0x03006000},
-  { {1, 1}, 1080000,  ACPU_SCPLL, 0, 0, 1, 0x14, L2(13), 1050000, 0x03006000},
-  { {1, 1}, 1134000,  ACPU_SCPLL, 0, 0, 1, 0x15, L2(14), 1075000, 0x03006000},
-  { {1, 1}, 1188000,  ACPU_SCPLL, 0, 0, 1, 0x16, L2(15), 1100000, 0x03006000},
-  { {1, 1}, 1242000,  ACPU_SCPLL, 0, 0, 1, 0x17, L2(16), 1125000, 0x03006000},
-  { {1, 1}, 1296000,  ACPU_SCPLL, 0, 0, 1, 0x18, L2(17), 1150000, 0x03006000},
-  { {1, 1}, 1350000,  ACPU_SCPLL, 0, 0, 1, 0x19, L2(18), 1150000, 0x03006000},
-  { {1, 1}, 1404000,  ACPU_SCPLL, 0, 0, 1, 0x1A, L2(19), 1175000, 0x03006000},
-  { {1, 1}, 1458000,  ACPU_SCPLL, 0, 0, 1, 0x1B, L2(19), 1200000, 0x03006000},
-  { {1, 1}, 1512000,  ACPU_SCPLL, 0, 0, 1, 0x1C, L2(19), 1225000, 0x03006000},
-  { {1, 1}, 1566000,  ACPU_SCPLL, 0, 0, 1, 0x1D, L2(19), 1225000, 0x03006000},
-  { {1, 1}, 1620000,  ACPU_SCPLL, 0, 0, 1, 0x1E, L2(19), 1225000, 0x03006000},
-  { {1, 1}, 1674000,  ACPU_SCPLL, 0, 0, 1, 0x1F, L2(19), 1250000, 0x03006000},
-  { {0, 0}, 0 },
-};
-
-/* SCPLL frequencies = 2 * 27 MHz * L_VAL */
-static struct clkctl_acpu_speed acpu_freq_tbl_nom[] = {
+static struct clkctl_acpu_speed acpu_freq_tbl_1512mhz_nom[] = {
   { {1, 1},  192000,  ACPU_PLL_8, 3, 1, 0, 0,    L2(1),   800000, 0x03006000},
   /* MAX_AXI row is used to source CPU cores and L2 from the AFAB clock. */
   { {0, 0},  MAX_AXI, ACPU_AFAB,  1, 0, 0, 0,    L2(0),   825000, 0x03006000},
@@ -342,14 +273,11 @@
   { {1, 1}, 1404000,  ACPU_SCPLL, 0, 0, 1, 0x1A, L2(19), 1150000, 0x03006000},
   { {1, 1}, 1458000,  ACPU_SCPLL, 0, 0, 1, 0x1B, L2(19), 1150000, 0x03006000},
   { {1, 1}, 1512000,  ACPU_SCPLL, 0, 0, 1, 0x1C, L2(19), 1175000, 0x03006000},
-  { {1, 1}, 1566000,  ACPU_SCPLL, 0, 0, 1, 0x1D, L2(19), 1175000, 0x03006000},
-  { {1, 1}, 1620000,  ACPU_SCPLL, 0, 0, 1, 0x1E, L2(19), 1200000, 0x03006000},
-  { {1, 1}, 1674000,  ACPU_SCPLL, 0, 0, 1, 0x1F, L2(19), 1200000, 0x03006000},
   { {0, 0}, 0 },
 };
 
 /* SCPLL frequencies = 2 * 27 MHz * L_VAL */
-static struct clkctl_acpu_speed acpu_freq_tbl_fast[] = {
+static struct clkctl_acpu_speed acpu_freq_tbl_1512mhz_fast[] = {
   { {1, 1},  192000,  ACPU_PLL_8, 3, 1, 0, 0,    L2(1),   800000, 0x03006000},
   /* MAX_AXI row is used to source CPU cores and L2 from the AFAB clock. */
   { {0, 0},  MAX_AXI, ACPU_AFAB,  1, 0, 0, 0,    L2(0),   825000, 0x03006000},
@@ -375,13 +303,141 @@
   { {1, 1}, 1404000,  ACPU_SCPLL, 0, 0, 1, 0x1A, L2(19), 1100000, 0x03006000},
   { {1, 1}, 1458000,  ACPU_SCPLL, 0, 0, 1, 0x1B, L2(19), 1100000, 0x03006000},
   { {1, 1}, 1512000,  ACPU_SCPLL, 0, 0, 1, 0x1C, L2(19), 1125000, 0x03006000},
-  { {1, 1}, 1566000,  ACPU_SCPLL, 0, 0, 1, 0x1D, L2(19), 1125000, 0x03006000},
+  { {0, 0}, 0 },
+};
+
+/* SCPLL frequencies = 2 * 27 MHz * L_VAL */
+static struct clkctl_acpu_speed acpu_freq_tbl_1674mhz_slower[] = {
+  { {1, 1},  192000,  ACPU_PLL_8, 3, 1, 0, 0,    L2(1),   775000, 0x03006000},
+  /* MAX_AXI row is used to source CPU cores and L2 from the AFAB clock. */
+  { {0, 0},  MAX_AXI, ACPU_AFAB,  1, 0, 0, 0,    L2(0),   775000, 0x03006000},
+  { {1, 1},  384000,  ACPU_PLL_8, 3, 0, 0, 0,    L2(1),   775000, 0x03006000},
+  { {1, 1},  432000,  ACPU_SCPLL, 0, 0, 1, 0x08, L2(1),   775000, 0x03006000},
+  { {1, 1},  486000,  ACPU_SCPLL, 0, 0, 1, 0x09, L2(2),   775000, 0x03006000},
+  { {1, 1},  540000,  ACPU_SCPLL, 0, 0, 1, 0x0A, L2(3),   787500, 0x03006000},
+  { {1, 1},  594000,  ACPU_SCPLL, 0, 0, 1, 0x0B, L2(4),   800000, 0x03006000},
+  { {1, 1},  648000,  ACPU_SCPLL, 0, 0, 1, 0x0C, L2(5),   825000, 0x03006000},
+  { {1, 1},  702000,  ACPU_SCPLL, 0, 0, 1, 0x0D, L2(6),   837500, 0x03006000},
+  { {1, 1},  756000,  ACPU_SCPLL, 0, 0, 1, 0x0E, L2(7),   850000, 0x03006000},
+  { {1, 1},  810000,  ACPU_SCPLL, 0, 0, 1, 0x0F, L2(8),   875000, 0x03006000},
+  { {1, 1},  864000,  ACPU_SCPLL, 0, 0, 1, 0x10, L2(9),   900000, 0x03006000},
+  { {1, 1},  918000,  ACPU_SCPLL, 0, 0, 1, 0x11, L2(10),  912500, 0x03006000},
+  { {1, 1},  972000,  ACPU_SCPLL, 0, 0, 1, 0x12, L2(11),  937500, 0x03006000},
+  { {1, 1}, 1026000,  ACPU_SCPLL, 0, 0, 1, 0x13, L2(12),  962500, 0x03006000},
+  { {1, 1}, 1080000,  ACPU_SCPLL, 0, 0, 1, 0x14, L2(13),  987500, 0x03006000},
+  { {1, 1}, 1134000,  ACPU_SCPLL, 0, 0, 1, 0x15, L2(14), 1012500, 0x03006000},
+  { {1, 1}, 1188000,  ACPU_SCPLL, 0, 0, 1, 0x16, L2(15), 1025000, 0x03006000},
+  { {1, 1}, 1242000,  ACPU_SCPLL, 0, 0, 1, 0x17, L2(16), 1062500, 0x03006000},
+  { {1, 1}, 1296000,  ACPU_SCPLL, 0, 0, 1, 0x18, L2(17), 1087500, 0x03006000},
+  { {1, 1}, 1350000,  ACPU_SCPLL, 0, 0, 1, 0x19, L2(18), 1100000, 0x03006000},
+  { {1, 1}, 1404000,  ACPU_SCPLL, 0, 0, 1, 0x1A, L2(19), 1125000, 0x03006000},
+  { {1, 1}, 1458000,  ACPU_SCPLL, 0, 0, 1, 0x1B, L2(19), 1150000, 0x03006000},
+  { {1, 1}, 1512000,  ACPU_SCPLL, 0, 0, 1, 0x1C, L2(19), 1187500, 0x03006000},
+  { {1, 1}, 1566000,  ACPU_SCPLL, 0, 0, 1, 0x1D, L2(19), 1225000, 0x03006000},
+  { {1, 1}, 1620000,  ACPU_SCPLL, 0, 0, 1, 0x1E, L2(19), 1262500, 0x03006000},
+  { {1, 1}, 1674000,  ACPU_SCPLL, 0, 0, 1, 0x1F, L2(19), 1300000, 0x03006000},
+  { {0, 0}, 0 },
+};
+
+/* SCPLL frequencies = 2 * 27 MHz * L_VAL */
+static struct clkctl_acpu_speed acpu_freq_tbl_1674mhz_slow[] = {
+  { {1, 1},  192000,  ACPU_PLL_8, 3, 1, 0, 0,    L2(1),   775000, 0x03006000},
+  /* MAX_AXI row is used to source CPU cores and L2 from the AFAB clock. */
+  { {0, 0},  MAX_AXI, ACPU_AFAB,  1, 0, 0, 0,    L2(0),   775000, 0x03006000},
+  { {1, 1},  384000,  ACPU_PLL_8, 3, 0, 0, 0,    L2(1),   775000, 0x03006000},
+  { {1, 1},  432000,  ACPU_SCPLL, 0, 0, 1, 0x08, L2(1),   775000, 0x03006000},
+  { {1, 1},  486000,  ACPU_SCPLL, 0, 0, 1, 0x09, L2(2),   775000, 0x03006000},
+  { {1, 1},  540000,  ACPU_SCPLL, 0, 0, 1, 0x0A, L2(3),   787500, 0x03006000},
+  { {1, 1},  594000,  ACPU_SCPLL, 0, 0, 1, 0x0B, L2(4),   800000, 0x03006000},
+  { {1, 1},  648000,  ACPU_SCPLL, 0, 0, 1, 0x0C, L2(5),   825000, 0x03006000},
+  { {1, 1},  702000,  ACPU_SCPLL, 0, 0, 1, 0x0D, L2(6),   837500, 0x03006000},
+  { {1, 1},  756000,  ACPU_SCPLL, 0, 0, 1, 0x0E, L2(7),   850000, 0x03006000},
+  { {1, 1},  810000,  ACPU_SCPLL, 0, 0, 1, 0x0F, L2(8),   862500, 0x03006000},
+  { {1, 1},  864000,  ACPU_SCPLL, 0, 0, 1, 0x10, L2(9),   887500, 0x03006000},
+  { {1, 1},  918000,  ACPU_SCPLL, 0, 0, 1, 0x11, L2(10),  900000, 0x03006000},
+  { {1, 1},  972000,  ACPU_SCPLL, 0, 0, 1, 0x12, L2(11),  925000, 0x03006000},
+  { {1, 1}, 1026000,  ACPU_SCPLL, 0, 0, 1, 0x13, L2(12),  937500, 0x03006000},
+  { {1, 1}, 1080000,  ACPU_SCPLL, 0, 0, 1, 0x14, L2(13),  962500, 0x03006000},
+  { {1, 1}, 1134000,  ACPU_SCPLL, 0, 0, 1, 0x15, L2(14),  987500, 0x03006000},
+  { {1, 1}, 1188000,  ACPU_SCPLL, 0, 0, 1, 0x16, L2(15), 1000000, 0x03006000},
+  { {1, 1}, 1242000,  ACPU_SCPLL, 0, 0, 1, 0x17, L2(16), 1025000, 0x03006000},
+  { {1, 1}, 1296000,  ACPU_SCPLL, 0, 0, 1, 0x18, L2(17), 1050000, 0x03006000},
+  { {1, 1}, 1350000,  ACPU_SCPLL, 0, 0, 1, 0x19, L2(18), 1062500, 0x03006000},
+  { {1, 1}, 1404000,  ACPU_SCPLL, 0, 0, 1, 0x1A, L2(19), 1087500, 0x03006000},
+  { {1, 1}, 1458000,  ACPU_SCPLL, 0, 0, 1, 0x1B, L2(19), 1112500, 0x03006000},
+  { {1, 1}, 1512000,  ACPU_SCPLL, 0, 0, 1, 0x1C, L2(19), 1150000, 0x03006000},
+  { {1, 1}, 1566000,  ACPU_SCPLL, 0, 0, 1, 0x1D, L2(19), 1175000, 0x03006000},
+  { {1, 1}, 1620000,  ACPU_SCPLL, 0, 0, 1, 0x1E, L2(19), 1212500, 0x03006000},
+  { {1, 1}, 1674000,  ACPU_SCPLL, 0, 0, 1, 0x1F, L2(19), 1250000, 0x03006000},
+  { {0, 0}, 0 },
+};
+
+/* SCPLL frequencies = 2 * 27 MHz * L_VAL */
+static struct clkctl_acpu_speed acpu_freq_tbl_1674mhz_nom[] = {
+  { {1, 1},  192000,  ACPU_PLL_8, 3, 1, 0, 0,    L2(1),   775000, 0x03006000},
+  /* MAX_AXI row is used to source CPU cores and L2 from the AFAB clock. */
+  { {0, 0},  MAX_AXI, ACPU_AFAB,  1, 0, 0, 0,    L2(0),   775000, 0x03006000},
+  { {1, 1},  384000,  ACPU_PLL_8, 3, 0, 0, 0,    L2(1),   775000, 0x03006000},
+  { {1, 1},  432000,  ACPU_SCPLL, 0, 0, 1, 0x08, L2(1),   775000, 0x03006000},
+  { {1, 1},  486000,  ACPU_SCPLL, 0, 0, 1, 0x09, L2(2),   775000, 0x03006000},
+  { {1, 1},  540000,  ACPU_SCPLL, 0, 0, 1, 0x0A, L2(3),   787500, 0x03006000},
+  { {1, 1},  594000,  ACPU_SCPLL, 0, 0, 1, 0x0B, L2(4),   800000, 0x03006000},
+  { {1, 1},  648000,  ACPU_SCPLL, 0, 0, 1, 0x0C, L2(5),   812500, 0x03006000},
+  { {1, 1},  702000,  ACPU_SCPLL, 0, 0, 1, 0x0D, L2(6),   825000, 0x03006000},
+  { {1, 1},  756000,  ACPU_SCPLL, 0, 0, 1, 0x0E, L2(7),   837500, 0x03006000},
+  { {1, 1},  810000,  ACPU_SCPLL, 0, 0, 1, 0x0F, L2(8),   850000, 0x03006000},
+  { {1, 1},  864000,  ACPU_SCPLL, 0, 0, 1, 0x10, L2(9),   875000, 0x03006000},
+  { {1, 1},  918000,  ACPU_SCPLL, 0, 0, 1, 0x11, L2(10),  887500, 0x03006000},
+  { {1, 1},  972000,  ACPU_SCPLL, 0, 0, 1, 0x12, L2(11),  900000, 0x03006000},
+  { {1, 1}, 1026000,  ACPU_SCPLL, 0, 0, 1, 0x13, L2(12),  912500, 0x03006000},
+  { {1, 1}, 1080000,  ACPU_SCPLL, 0, 0, 1, 0x14, L2(13),  937500, 0x03006000},
+  { {1, 1}, 1134000,  ACPU_SCPLL, 0, 0, 1, 0x15, L2(14),  950000, 0x03006000},
+  { {1, 1}, 1188000,  ACPU_SCPLL, 0, 0, 1, 0x16, L2(15),  975000, 0x03006000},
+  { {1, 1}, 1242000,  ACPU_SCPLL, 0, 0, 1, 0x17, L2(16),  987500, 0x03006000},
+  { {1, 1}, 1296000,  ACPU_SCPLL, 0, 0, 1, 0x18, L2(17), 1012500, 0x03006000},
+  { {1, 1}, 1350000,  ACPU_SCPLL, 0, 0, 1, 0x19, L2(18), 1025000, 0x03006000},
+  { {1, 1}, 1404000,  ACPU_SCPLL, 0, 0, 1, 0x1A, L2(19), 1050000, 0x03006000},
+  { {1, 1}, 1458000,  ACPU_SCPLL, 0, 0, 1, 0x1B, L2(19), 1075000, 0x03006000},
+  { {1, 1}, 1512000,  ACPU_SCPLL, 0, 0, 1, 0x1C, L2(19), 1112500, 0x03006000},
+  { {1, 1}, 1566000,  ACPU_SCPLL, 0, 0, 1, 0x1D, L2(19), 1137500, 0x03006000},
+  { {1, 1}, 1620000,  ACPU_SCPLL, 0, 0, 1, 0x1E, L2(19), 1175000, 0x03006000},
+  { {1, 1}, 1674000,  ACPU_SCPLL, 0, 0, 1, 0x1F, L2(19), 1200000, 0x03006000},
+  { {0, 0}, 0 },
+};
+
+/* SCPLL frequencies = 2 * 27 MHz * L_VAL */
+static struct clkctl_acpu_speed acpu_freq_tbl_1674mhz_fast[] = {
+  { {1, 1},  192000,  ACPU_PLL_8, 3, 1, 0, 0,    L2(1),   775000, 0x03006000},
+  /* MAX_AXI row is used to source CPU cores and L2 from the AFAB clock. */
+  { {0, 0},  MAX_AXI, ACPU_AFAB,  1, 0, 0, 0,    L2(0),   775000, 0x03006000},
+  { {1, 1},  384000,  ACPU_PLL_8, 3, 0, 0, 0,    L2(1),   775000, 0x03006000},
+  { {1, 1},  432000,  ACPU_SCPLL, 0, 0, 1, 0x08, L2(1),   775000, 0x03006000},
+  { {1, 1},  486000,  ACPU_SCPLL, 0, 0, 1, 0x09, L2(2),   775000, 0x03006000},
+  { {1, 1},  540000,  ACPU_SCPLL, 0, 0, 1, 0x0A, L2(3),   775000, 0x03006000},
+  { {1, 1},  594000,  ACPU_SCPLL, 0, 0, 1, 0x0B, L2(4),   787500, 0x03006000},
+  { {1, 1},  648000,  ACPU_SCPLL, 0, 0, 1, 0x0C, L2(5),   800000, 0x03006000},
+  { {1, 1},  702000,  ACPU_SCPLL, 0, 0, 1, 0x0D, L2(6),   812500, 0x03006000},
+  { {1, 1},  756000,  ACPU_SCPLL, 0, 0, 1, 0x0E, L2(7),   825000, 0x03006000},
+  { {1, 1},  810000,  ACPU_SCPLL, 0, 0, 1, 0x0F, L2(8),   837500, 0x03006000},
+  { {1, 1},  864000,  ACPU_SCPLL, 0, 0, 1, 0x10, L2(9),   862500, 0x03006000},
+  { {1, 1},  918000,  ACPU_SCPLL, 0, 0, 1, 0x11, L2(10),  875000, 0x03006000},
+  { {1, 1},  972000,  ACPU_SCPLL, 0, 0, 1, 0x12, L2(11),  887500, 0x03006000},
+  { {1, 1}, 1026000,  ACPU_SCPLL, 0, 0, 1, 0x13, L2(12),  900000, 0x03006000},
+  { {1, 1}, 1080000,  ACPU_SCPLL, 0, 0, 1, 0x14, L2(13),  925000, 0x03006000},
+  { {1, 1}, 1134000,  ACPU_SCPLL, 0, 0, 1, 0x15, L2(14),  937500, 0x03006000},
+  { {1, 1}, 1188000,  ACPU_SCPLL, 0, 0, 1, 0x16, L2(15),  950000, 0x03006000},
+  { {1, 1}, 1242000,  ACPU_SCPLL, 0, 0, 1, 0x17, L2(16),  962500, 0x03006000},
+  { {1, 1}, 1296000,  ACPU_SCPLL, 0, 0, 1, 0x18, L2(17),  975000, 0x03006000},
+  { {1, 1}, 1350000,  ACPU_SCPLL, 0, 0, 1, 0x19, L2(18), 1000000, 0x03006000},
+  { {1, 1}, 1404000,  ACPU_SCPLL, 0, 0, 1, 0x1A, L2(19), 1025000, 0x03006000},
+  { {1, 1}, 1458000,  ACPU_SCPLL, 0, 0, 1, 0x1B, L2(19), 1050000, 0x03006000},
+  { {1, 1}, 1512000,  ACPU_SCPLL, 0, 0, 1, 0x1C, L2(19), 1075000, 0x03006000},
+  { {1, 1}, 1566000,  ACPU_SCPLL, 0, 0, 1, 0x1D, L2(19), 1100000, 0x03006000},
   { {1, 1}, 1620000,  ACPU_SCPLL, 0, 0, 1, 0x1E, L2(19), 1125000, 0x03006000},
   { {1, 1}, 1674000,  ACPU_SCPLL, 0, 0, 1, 0x1F, L2(19), 1150000, 0x03006000},
   { {0, 0}, 0 },
 };
 
-
 /* acpu_freq_tbl row to use when reconfiguring SC/L2 PLLs. */
 #define CAL_IDX 1
 
@@ -930,7 +986,7 @@
 
 static __init struct clkctl_acpu_speed *select_freq_plan(void)
 {
-	uint32_t pte_efuse, speed_bin, pvs, max_khz;
+	uint32_t pte_efuse, speed_bin, pvs;
 	struct clkctl_acpu_speed *f;
 
 	pte_efuse = readl_relaxed(QFPROM_PTE_EFUSE_ADDR);
@@ -944,67 +1000,55 @@
 		pvs = (pte_efuse >> 13) & 0x7;
 
 	if (speed_bin == 0x2) {
-		max_khz = 1674000;
 		switch (pvs) {
 		case 0x7:
-		case 0x5:
-			acpu_freq_tbl = acpu_freq_tbl_slowest;
-			pr_info("ACPU PVS: Slowest\n");
-			break;
 		case 0x4:
-			acpu_freq_tbl = acpu_freq_tbl_slower;
+			acpu_freq_tbl = acpu_freq_tbl_1674mhz_slower;
 			pr_info("ACPU PVS: Slower\n");
 			break;
 		case 0x0:
-			acpu_freq_tbl = acpu_freq_tbl_slow;
+			acpu_freq_tbl = acpu_freq_tbl_1674mhz_slow;
 			pr_info("ACPU PVS: Slow\n");
 			break;
 		case 0x1:
-			acpu_freq_tbl = acpu_freq_tbl_nom;
+			acpu_freq_tbl = acpu_freq_tbl_1674mhz_nom;
 			pr_info("ACPU PVS: Nominal\n");
 			break;
 		case 0x3:
-			acpu_freq_tbl = acpu_freq_tbl_fast;
+			acpu_freq_tbl = acpu_freq_tbl_1674mhz_fast;
 			pr_info("ACPU PVS: Fast\n");
 			break;
 		default:
-			acpu_freq_tbl = acpu_freq_tbl_slowest;
-			pr_warn("ACPU PVS: Unknown. Defaulting to slowest.\n");
+			acpu_freq_tbl = acpu_freq_tbl_1674mhz_slower;
+			pr_warn("ACPU PVS: Unknown. Defaulting to slower.\n");
 			break;
 		}
 	} else if (speed_bin == 0x1) {
-		max_khz = 1512000;
 		switch (pvs) {
 		case 0x0:
 		case 0x7:
-			acpu_freq_tbl = acpu_freq_tbl_slow;
+			acpu_freq_tbl = acpu_freq_tbl_1512mhz_slow;
 			pr_info("ACPU PVS: Slow\n");
 			break;
 		case 0x1:
-			acpu_freq_tbl = acpu_freq_tbl_nom;
+			acpu_freq_tbl = acpu_freq_tbl_1512mhz_nom;
 			pr_info("ACPU PVS: Nominal\n");
 			break;
 		case 0x3:
-			acpu_freq_tbl = acpu_freq_tbl_fast;
+			acpu_freq_tbl = acpu_freq_tbl_1512mhz_fast;
 			pr_info("ACPU PVS: Fast\n");
 			break;
 		default:
-			acpu_freq_tbl = acpu_freq_tbl_slow;
+			acpu_freq_tbl = acpu_freq_tbl_1512mhz_slow;
 			pr_warn("ACPU PVS: Unknown. Defaulting to slow.\n");
 			break;
 		}
 	} else {
-		max_khz = 1188000;
 		acpu_freq_tbl = acpu_freq_tbl_1188mhz;
 	}
 
-	/* Truncate the table based to max_khz. */
-	for (f = acpu_freq_tbl; f->acpuclk_khz != 0; f++) {
-		if (f->acpuclk_khz > max_khz) {
-			f->acpuclk_khz = 0;
-			break;
-		}
-	}
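+	/* Each table ends at its bin's max rate; walk to the last entry. */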
+	for (f = acpu_freq_tbl; f->acpuclk_khz != 0; f++)
+		;
 	f--;
 	pr_info("Max ACPU freq: %u KHz\n", f->acpuclk_khz);
 
diff --git a/arch/arm/mach-msm/board-8064-gpiomux.c b/arch/arm/mach-msm/board-8064-gpiomux.c
index e873498..8008cff 100644
--- a/arch/arm/mach-msm/board-8064-gpiomux.c
+++ b/arch/arm/mach-msm/board-8064-gpiomux.c
@@ -507,6 +507,19 @@
 	.dir = GPIOMUX_OUT_LOW,
 };
 
+static struct gpiomux_setting hsic_wakeup_act_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_8MA,
+	.pull = GPIOMUX_PULL_DOWN,
+	.dir = GPIOMUX_IN,
+};
+
+static struct gpiomux_setting hsic_wakeup_sus_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_DOWN,
+	.dir = GPIOMUX_IN,
+};
 
 static struct msm_gpiomux_config apq8064_hsic_configs[] = {
 	{
@@ -523,6 +536,13 @@
 			[GPIOMUX_SUSPENDED] = &hsic_sus_cfg,
 		},
 	},
+	{
+		.gpio = 47,              /* wake up */
+		.settings = {
+			[GPIOMUX_ACTIVE] = &hsic_wakeup_act_cfg,
+			[GPIOMUX_SUSPENDED] = &hsic_wakeup_sus_cfg,
+		},
+	},
 };
 #endif
 
@@ -777,7 +797,7 @@
 	.pull = GPIOMUX_PULL_DOWN,
 };
 
-static struct gpiomux_setting ap2mdm_pon_reset_n_cfg = {
+static struct gpiomux_setting ap2mdm_soft_reset_cfg = {
 	.func = GPIOMUX_FUNC_GPIO,
 	.drv = GPIOMUX_DRV_8MA,
 	.pull = GPIOMUX_PULL_DOWN,
@@ -818,11 +838,11 @@
 			[GPIOMUX_SUSPENDED] = &ap2mdm_cfg,
 		}
 	},
-	/* AP2MDM_PON_RESET_N */
+	/* AP2MDM_SOFT_RESET, aka AP2MDM_PON_RESET_N */
 	{
 		.gpio = 27,
 		.settings = {
-			[GPIOMUX_SUSPENDED] = &ap2mdm_pon_reset_n_cfg,
+			[GPIOMUX_SUSPENDED] = &ap2mdm_soft_reset_cfg,
 		}
 	},
 	/* AP2MDM_WAKEUP */
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 7ab3894..0234de6 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -91,6 +91,7 @@
 #endif
 
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
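+/* 128 KB hole; see MSM_ION_MM_FW_SIZE and the FIXED_MIDDLE secure region below */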
+#define HOLE_SIZE		0x20000
 #define MSM_PMEM_KERNEL_EBI1_SIZE  0x65000
 #ifdef CONFIG_MSM_IOMMU
 #define MSM_ION_MM_SIZE		0x3800000
@@ -103,7 +104,7 @@
 #define MSM_ION_QSECOM_SIZE	0x600000 /* (6MB) */
 #define MSM_ION_HEAP_NUM	8
 #endif
-#define MSM_ION_MM_FW_SIZE	0x200000 /* (2MB) */
+#define MSM_ION_MM_FW_SIZE	(0x200000 - HOLE_SIZE) /* (2MB - 128KB) */
 #define MSM_ION_MFC_SIZE	SZ_8K
 #define MSM_ION_AUDIO_SIZE	MSM_PMEM_AUDIO_SIZE
 #else
@@ -111,10 +112,11 @@
 #define MSM_ION_HEAP_NUM	1
 #endif
 
-#define APQ8064_FIXED_AREA_START 0xa0000000
+#define APQ8064_FIXED_AREA_START (0xa0000000 - (MSM_ION_MM_FW_SIZE + \
+							HOLE_SIZE))
 #define MAX_FIXED_AREA_SIZE	0x10000000
-#define MSM_MM_FW_SIZE		0x200000
-#define APQ8064_FW_START	(APQ8064_FIXED_AREA_START - MSM_MM_FW_SIZE)
+#define MSM_MM_FW_SIZE		(0x200000 - HOLE_SIZE)
+#define APQ8064_FW_START	APQ8064_FIXED_AREA_START
 
 /* PCIe power enable pmic gpio */
 #define PCIE_PWR_EN_PMIC_GPIO 13
@@ -505,19 +507,20 @@
 		return;
 
 	if (apq8064_fmem_pdata.size) {
-		apq8064_fmem_pdata.reserved_size_low = fixed_low_size;
+		apq8064_fmem_pdata.reserved_size_low = fixed_low_size +
+								HOLE_SIZE;
 		apq8064_fmem_pdata.reserved_size_high = fixed_high_size;
 	}
 
 	/* Since the fixed area may be carved out of lowmem,
 	 * make sure the length is a multiple of 1M.
 	 */
-	fixed_size = (fixed_size + MSM_MM_FW_SIZE + SECTION_SIZE - 1)
+	fixed_size = (fixed_size + HOLE_SIZE + SECTION_SIZE - 1)
 		& SECTION_MASK;
 	apq8064_reserve_fixed_area(fixed_size);
 
 	fixed_low_start = APQ8064_FIXED_AREA_START;
-	fixed_middle_start = fixed_low_start + fixed_low_size;
+	fixed_middle_start = fixed_low_start + fixed_low_size + HOLE_SIZE;
 	fixed_high_start = fixed_middle_start + fixed_middle_size;
 
 	for (i = 0; i < apq8064_ion_pdata.nr; ++i) {
@@ -525,11 +528,13 @@
 
 		if (heap->extra_data) {
 			int fixed_position = NOT_FIXED;
+			struct ion_cp_heap_pdata *pdata;
 
 			switch (heap->type) {
 			case ION_HEAP_TYPE_CP:
-				fixed_position = ((struct ion_cp_heap_pdata *)
-					heap->extra_data)->fixed_position;
+				pdata =
+				(struct ion_cp_heap_pdata *)heap->extra_data;
+				fixed_position = pdata->fixed_position;
 				break;
 			case ION_HEAP_TYPE_CARVEOUT:
 				fixed_position = ((struct ion_co_heap_pdata *)
@@ -545,6 +550,9 @@
 				break;
 			case FIXED_MIDDLE:
 				heap->base = fixed_middle_start;
+				pdata->secure_base = fixed_middle_start
+								- HOLE_SIZE;
+				pdata->secure_size = HOLE_SIZE + heap->size;
 				break;
 			case FIXED_HIGH:
 				heap->base = fixed_high_start;
@@ -562,6 +570,17 @@
 	apq8064_mdp_writeback(apq8064_reserve_table);
 }
 
+static void __init reserve_cache_dump_memory(void)
+{
+#ifdef CONFIG_MSM_CACHE_DUMP
+	unsigned int total;
+
+	total = apq8064_cache_dump_pdata.l1_size +
+		apq8064_cache_dump_pdata.l2_size;
+	apq8064_reserve_table[MEMTYPE_EBI1].size += total;
+#endif
+}
+
 static void __init apq8064_calculate_reserve_sizes(void)
 {
 	size_pmem_devices();
@@ -569,6 +588,7 @@
 	reserve_ion_memory();
 	reserve_mdp_memory();
 	reserve_rtb_memory();
+	reserve_cache_dump_memory();
 }
 
 static struct reserve_info apq8064_reserve_info __initdata = {
@@ -1225,7 +1245,7 @@
 	/* T6 Object */
 	0, 0, 0, 0, 0, 0,
 	/* T38 Object */
-	14, 1, 0, 22, 2, 12, 0, 0, 0, 0,
+	14, 2, 0, 24, 5, 12, 0, 0, 0, 0,
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1237,7 +1257,7 @@
 	/* T8 Object */
 	25, 0, 20, 20, 0, 0, 0, 0, 0, 0,
 	/* T9 Object */
-	131, 0, 0, 26, 42, 0, 32, 80, 2, 5,
+	139, 0, 0, 26, 42, 0, 32, 80, 2, 5,
 	0, 5, 5, 0, 10, 30, 10, 10, 255, 2,
 	85, 5, 0, 5, 9, 5, 12, 35, 70, 40,
 	20, 5, 0, 0, 0,
@@ -1247,13 +1267,13 @@
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	0, 0, 0, 0, 0, 0, 0, 0, 0,
 	/* T25 Object */
-	3, 0, 60, 115, 156, 99,
+	1, 0, 60, 115, 156, 99,
 	/* T27 Object */
 	0, 0, 0, 0, 0, 0, 0,
 	/* T40 Object */
 	0, 0, 0, 0, 0,
 	/* T42 Object */
-	2, 0, 255, 0, 255, 0, 0, 0, 0, 0,
+	0, 0, 255, 0, 255, 0, 0, 0, 0, 0,
 	/* T43 Object */
 	0, 0, 0, 0, 0, 0, 0, 64, 0, 8,
 	16,
@@ -1262,7 +1282,7 @@
 	/* T47 Object */
 	0, 0, 0, 0, 0, 0, 3, 64, 66, 0,
 	/* T48 Object */
-	31, 64, 64, 0, 0, 0, 0, 0, 0, 0,
+	1, 64, 64, 0, 0, 0, 0, 0, 0, 0,
 	32, 40, 0, 10, 10, 0, 0, 100, 10, 90,
 	0, 0, 0, 0, 0, 0, 0, 10, 1, 10,
 	52, 10, 12, 0, 33, 0, 1, 0, 0, 0,
@@ -1678,6 +1698,8 @@
 static struct mdm_platform_data mdm_platform_data = {
 	.mdm_version = "3.0",
 	.ramdump_delay_ms = 2000,
+	.early_power_on = 1,
+	.sfr_query = 1,
 	.peripheral_platform_device = &apq8064_device_hsic_host,
 };
 
@@ -1754,62 +1776,56 @@
 		MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
 		MSM_RPMRS_LIMITS(ON, ACTIVE, MAX, ACTIVE),
 		true,
-		100, 650, 801, 200,
+		1, 784, 180000, 100,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
 		MSM_RPMRS_LIMITS(ON, ACTIVE, MAX, ACTIVE),
 		true,
-		2000, 200, 576000, 2000,
+		1300, 228, 1200000, 2000,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(ON, GDHS, MAX, ACTIVE),
 		false,
-		8500, 51, 1122000, 8500,
+		2000, 138, 1208400, 3200,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
-		MSM_RPMRS_LIMITS(ON, HSFS_OPEN, MAX, ACTIVE),
-		false,
-		9000, 51, 1130300, 9000,
-	},
-	{
-		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(ON, HSFS_OPEN, ACTIVE, RET_HIGH),
 		false,
-		10000, 51, 1130300, 10000,
+		6000, 119, 1850300, 9000,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(OFF, GDHS, MAX, ACTIVE),
 		false,
-		12000, 14, 2205900, 12000,
+		9200, 68, 2839200, 16400,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(OFF, HSFS_OPEN, MAX, ACTIVE),
 		false,
-		18000, 12, 2364250, 18000,
+		10300, 63, 3128000, 18200,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(OFF, HSFS_OPEN, ACTIVE, RET_HIGH),
 		false,
-		23500, 10, 2667000, 23500,
+		18000, 10, 4602600, 27000,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(OFF, HSFS_OPEN, RET_HIGH, RET_LOW),
 		false,
-		29700, 5, 2867000, 30000,
+		20000, 2, 5752000, 32000,
 	},
 };
 
@@ -2201,6 +2217,7 @@
 	&msm8960_gemini_device,
 	&apq8064_iommu_domain_device,
 	&msm_tsens_device,
+	&apq8064_cache_dump_device,
 };
 
 static struct platform_device *sim_devices[] __initdata = {
@@ -3032,6 +3049,7 @@
 	.handle_irq = gic_handle_irq,
 	.timer = &msm_timer,
 	.init_machine = apq8064_cdp_init,
+	.init_early = apq8064_allocate_memory_regions,
 	.init_very_early = apq8064_early_reserve,
 MACHINE_END
 
@@ -3042,6 +3060,7 @@
 	.handle_irq = gic_handle_irq,
 	.timer = &msm_timer,
 	.init_machine = apq8064_cdp_init,
+	.init_early = apq8064_allocate_memory_regions,
 	.init_very_early = apq8064_early_reserve,
 MACHINE_END
 
diff --git a/arch/arm/mach-msm/board-8064.h b/arch/arm/mach-msm/board-8064.h
index 67e0e6f..c992865 100644
--- a/arch/arm/mach-msm/board-8064.h
+++ b/arch/arm/mach-msm/board-8064.h
@@ -20,6 +20,7 @@
 #include <mach/irqs.h>
 #include <mach/rpm-regulator.h>
 #include <mach/msm_rtb.h>
+#include <mach/msm_cache_dump.h>
 
 /* Macros assume PMIC GPIOs and MPPs start at 1 */
 #define PM8921_GPIO_BASE		NR_GPIO_IRQS
@@ -143,4 +144,5 @@
 };
 
 extern struct msm_rtb_platform_data apq8064_rtb_pdata;
+extern struct msm_cache_dump_platform_data apq8064_cache_dump_pdata;
 #endif
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index 0fff814..7fc5487 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -1650,16 +1650,16 @@
 	/* T6 Object */
 	 0, 0, 0, 0, 0, 0,
 	/* T38 Object */
-	 15, 2, 0, 15, 12, 11, 0, 0,
+	 15, 3, 0, 15, 12, 11, 0, 0,
 	/* T7 Object */
-	 48, 255, 25,
+	32, 16, 50,
 	/* T8 Object */
-	 27, 0, 5, 1, 0, 0, 8, 8, 0, 0,
+	 30, 0, 5, 1, 0, 0, 8, 8, 0, 0,
 	/* T9 Object */
-	 131, 0, 0, 19, 11, 0, 16, 35, 1, 3,
-	 10, 15, 1, 11, 4, 5, 40, 10, 43, 4,
-	 54, 2, 0, 0, 0, 0, 143, 40, 143, 80,
-	 18, 15, 50, 50, 2,
+	 131, 0, 0, 19, 11, 0, 16, 43, 2, 3,
+	 10, 7, 2, 0, 4, 5, 35, 10, 43, 4,
+	 54, 2, 15, 32, 38, 38, 143, 40, 143, 80,
+	 7, 9, 50, 50, 2,
 	/* T15 Object */
 	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	 0,
@@ -1679,13 +1679,13 @@
 	/* T42 Object */
 	 0, 0, 0, 0, 0, 0, 0, 0,
 	/* T46 Object */
-	 0, 3, 16, 48, 0, 0, 1, 0, 0,
+	 0, 3, 8, 16, 0, 0, 1, 0, 0,
 	/* T47 Object */
 	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	/* T48 Object */
-	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	 0, 0, 8, 0, 0, 0, 0, 0, 0, 0,
+	 0, 0, 0, 0, 0, 0, 0, 100, 4, 64,
+	 0, 0, 5, 42, 0, 0, 0, 0, 0, 0,
 	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	 0, 0, 0, 0,
@@ -2126,62 +2126,56 @@
 		MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
 		MSM_RPMRS_LIMITS(ON, ACTIVE, MAX, ACTIVE),
 		true,
-		100, 650, 801, 200,
+		1, 784, 180000, 100,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
 		MSM_RPMRS_LIMITS(ON, ACTIVE, MAX, ACTIVE),
 		true,
-		2000, 200, 576000, 2000,
+		1300, 228, 1200000, 2000,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(ON, GDHS, MAX, ACTIVE),
 		false,
-		8500, 51, 1122000, 8500,
+		2000, 138, 1208400, 3200,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
-		MSM_RPMRS_LIMITS(ON, HSFS_OPEN, MAX, ACTIVE),
-		false,
-		9000, 51, 1130300, 9000,
-	},
-	{
-		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(ON, HSFS_OPEN, ACTIVE, RET_HIGH),
 		false,
-		10000, 51, 1130300, 10000,
+		6000, 119, 1850300, 9000,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(OFF, GDHS, MAX, ACTIVE),
 		false,
-		12000, 14, 2205900, 12000,
+		9200, 68, 2839200, 16400,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(OFF, HSFS_OPEN, MAX, ACTIVE),
 		false,
-		18000, 12, 2364250, 18000,
+		10300, 63, 3128000, 18200,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(OFF, HSFS_OPEN, ACTIVE, RET_HIGH),
 		false,
-		23500, 10, 2667000, 23500,
+		18000, 10, 4602600, 27000,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(OFF, HSFS_OPEN, RET_HIGH, RET_LOW),
 		false,
-		29700, 5, 2867000, 30000,
+		20000, 2, 5752000, 32000,
 	},
 };
 
@@ -2195,17 +2189,17 @@
 		[MSM_RPMRS_VDD_MEM_MAX]		= 1150000,
 	},
 	.vdd_dig_levels = {
-		[MSM_RPMRS_VDD_DIG_RET_LOW]	= 500000,
-		[MSM_RPMRS_VDD_DIG_RET_HIGH]	= 750000,
-		[MSM_RPMRS_VDD_DIG_ACTIVE]	= 950000,
-		[MSM_RPMRS_VDD_DIG_MAX]		= 1150000,
+		[MSM_RPMRS_VDD_DIG_RET_LOW]	= 0,
+		[MSM_RPMRS_VDD_DIG_RET_HIGH]	= 0,
+		[MSM_RPMRS_VDD_DIG_ACTIVE]	= 1,
+		[MSM_RPMRS_VDD_DIG_MAX]		= 3,
 	},
 	.vdd_mask = 0x7FFFFF,
 	.rpmrs_target_id = {
 		[MSM_RPMRS_ID_PXO_CLK]		= MSM_RPM_ID_PXO_CLK,
 		[MSM_RPMRS_ID_L2_CACHE_CTL]	= MSM_RPM_ID_LAST,
-		[MSM_RPMRS_ID_VDD_DIG_0]	= MSM_RPM_ID_PM8038_S1_0,
-		[MSM_RPMRS_ID_VDD_DIG_1]	= MSM_RPM_ID_PM8038_S1_1,
+		[MSM_RPMRS_ID_VDD_DIG_0]	= MSM_RPM_ID_VOLTAGE_CORNER,
+		[MSM_RPMRS_ID_VDD_DIG_1]	= MSM_RPM_ID_LAST,
 		[MSM_RPMRS_ID_VDD_MEM_0]	= MSM_RPM_ID_PM8038_L24_0,
 		[MSM_RPMRS_ID_VDD_MEM_1]	= MSM_RPM_ID_PM8038_L24_1,
 		[MSM_RPMRS_ID_RPM_CTL]		= MSM_RPM_ID_RPM_CTL,
diff --git a/arch/arm/mach-msm/board-8960-gpiomux.c b/arch/arm/mach-msm/board-8960-gpiomux.c
index 1c6c600..94cafae 100644
--- a/arch/arm/mach-msm/board-8960-gpiomux.c
+++ b/arch/arm/mach-msm/board-8960-gpiomux.c
@@ -710,49 +710,56 @@
 	},
 };
 
-static struct msm_gpiomux_config mdm_configs[] __initdata = {
+static struct msm_gpiomux_config sglte_configs[] __initdata = {
 	/* AP2MDM_STATUS */
 	{
-		.gpio = 94,
+		.gpio = 77,
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &ap2mdm_cfg,
 		}
 	},
 	/* MDM2AP_STATUS */
 	{
-		.gpio = 69,
+		.gpio = 24,
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &mdm2ap_status_cfg,
 		}
 	},
 	/* MDM2AP_ERRFATAL */
 	{
-		.gpio = 70,
+		.gpio = 40,
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &mdm2ap_errfatal_cfg,
 		}
 	},
 	/* AP2MDM_ERRFATAL */
 	{
-		.gpio = 95,
+		.gpio = 80,
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &ap2mdm_cfg,
 		}
 	},
 	/* AP2MDM_KPDPWR_N */
 	{
-		.gpio = 81,
+		.gpio = 79,
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &ap2mdm_kpdpwr_n_cfg,
 		}
 	},
-	/* AP2MDM_PMIC_RESET_N */
+	/* AP2MDM_PMIC_PWR_EN */
 	{
-		.gpio = 80,
+		.gpio = 22,
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &ap2mdm_kpdpwr_n_cfg,
 		}
-	}
+	},
+	/* AP2MDM_SOFT_RESET */
+	{
+		.gpio = 78,
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &ap2mdm_cfg,
+		}
+	},
 };
 
 static struct msm_gpiomux_config msm8960_mdp_vsync_configs[] __initdata = {
@@ -948,13 +955,9 @@
 		msm_gpiomux_install(hap_lvl_shft_config,
 			ARRAY_SIZE(hap_lvl_shft_config));
 
-	if (PLATFORM_IS_CHARM25())
-		msm_gpiomux_install(mdm_configs,
-			ARRAY_SIZE(mdm_configs));
-
 #ifdef CONFIG_USB_EHCI_MSM_HSIC
 	if ((SOCINFO_VERSION_MAJOR(socinfo_get_version()) != 1) &&
-		(PLATFORM_IS_CHARM25() || machine_is_msm8960_liquid()))
+		machine_is_msm8960_liquid())
 		msm_gpiomux_install(msm8960_hsic_configs,
 			ARRAY_SIZE(msm8960_hsic_configs));
 
@@ -992,5 +995,10 @@
 	msm_gpiomux_install(msm8960_sdcc2_configs,
 		ARRAY_SIZE(msm8960_sdcc2_configs));
 #endif
+
+	if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE)
+		msm_gpiomux_install(sglte_configs,
+			ARRAY_SIZE(sglte_configs));
+
 	return 0;
 }
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index edb6f03..8291cc7 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -543,7 +543,7 @@
 	RPM_LDO(L22,	 0, 1, 0, 2750000, 2750000, NULL,      0, 0),
 	RPM_LDO(L23,	 1, 1, 1, 1800000, 1800000, "8921_s8", 10000, 10000),
 	RPM_LDO(L24,	 0, 1, 1,  750000, 1150000, "8921_s1", 10000, 10000),
-	RPM_LDO(L25,	 1, 1, 0, 1225000, 1225000, "8921_s1", 10000, 10000),
+	RPM_LDO(L25,	 1, 1, 0, 1250000, 1250000, "8921_s1", 10000, 10000),
 
 	/*	ID     a_on pd ss		    supply */
 	RPM_VS(LVS1,	 0, 1, 0,		    "8921_s4"),
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index c103fa8..79af1a7 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -701,31 +701,11 @@
 static void __init reserve_cache_dump_memory(void)
 {
 #ifdef CONFIG_MSM_CACHE_DUMP
-	unsigned int spare;
-	unsigned int l1_size;
-	unsigned int l2_size;
 	unsigned int total;
-	int ret;
 
-	ret = scm_call(L1C_SERVICE_ID, L1C_BUFFER_GET_SIZE_COMMAND_ID, &spare,
-		sizeof(spare), &l1_size, sizeof(l1_size));
-
-	if (ret)
-		/* Fall back to something reasonable here */
-		l1_size = L1_BUFFER_SIZE;
-
-	ret = scm_call(L1C_SERVICE_ID, L2C_BUFFER_GET_SIZE_COMMAND_ID, &spare,
-		sizeof(spare), &l2_size, sizeof(l2_size));
-
-	if (ret)
-		/* Fall back to something reasonable here */
-		l2_size = L2_BUFFER_SIZE;
-
-	total = l1_size + l2_size;
-
+	total = msm8960_cache_dump_pdata.l1_size +
+		msm8960_cache_dump_pdata.l2_size;
 	msm8960_reserve_table[MEMTYPE_EBI1].size += total;
-	msm8960_cache_dump_pdata.l1_size = l1_size;
-	msm8960_cache_dump_pdata.l2_size = l2_size;
 #endif
 }
 
@@ -915,13 +895,13 @@
 	{
 		.name = "VDDD_CDC_D",
 		.min_uV = 1225000,
-		.max_uV = 1225000,
+		.max_uV = 1250000,
 		.optimum_uA = WCD9XXX_VDDD_CDC_D_CUR_MAX,
 	},
 	{
 		.name = "CDC_VDDA_A_1P2V",
 		.min_uV = 1225000,
-		.max_uV = 1225000,
+		.max_uV = 1250000,
 		.optimum_uA = WCD9XXX_VDDD_CDC_A_CUR_MAX,
 	},
 	},
@@ -982,13 +962,13 @@
 	{
 		.name = "VDDD_CDC_D",
 		.min_uV = 1225000,
-		.max_uV = 1225000,
+		.max_uV = 1250000,
 		.optimum_uA = WCD9XXX_VDDD_CDC_D_CUR_MAX,
 	},
 	{
 		.name = "CDC_VDDA_A_1P2V",
 		.min_uV = 1225000,
-		.max_uV = 1225000,
+		.max_uV = 1250000,
 		.optimum_uA = WCD9XXX_VDDD_CDC_A_CUR_MAX,
 	},
 	},
@@ -1298,68 +1278,11 @@
 };
 #endif
 
-#define MDM2AP_ERRFATAL			70
-#define AP2MDM_ERRFATAL			95
-#define MDM2AP_STATUS			69
-#define AP2MDM_STATUS			94
-#define AP2MDM_PMIC_RESET_N		80
-#define AP2MDM_KPDPWR_N			81
-
-static struct resource mdm_resources[] = {
-	{
-		.start	= MDM2AP_ERRFATAL,
-		.end	= MDM2AP_ERRFATAL,
-		.name	= "MDM2AP_ERRFATAL",
-		.flags	= IORESOURCE_IO,
-	},
-	{
-		.start	= AP2MDM_ERRFATAL,
-		.end	= AP2MDM_ERRFATAL,
-		.name	= "AP2MDM_ERRFATAL",
-		.flags	= IORESOURCE_IO,
-	},
-	{
-		.start	= MDM2AP_STATUS,
-		.end	= MDM2AP_STATUS,
-		.name	= "MDM2AP_STATUS",
-		.flags	= IORESOURCE_IO,
-	},
-	{
-		.start	= AP2MDM_STATUS,
-		.end	= AP2MDM_STATUS,
-		.name	= "AP2MDM_STATUS",
-		.flags	= IORESOURCE_IO,
-	},
-	{
-		.start	= AP2MDM_PMIC_RESET_N,
-		.end	= AP2MDM_PMIC_RESET_N,
-		.name	= "AP2MDM_PMIC_RESET_N",
-		.flags	= IORESOURCE_IO,
-	},
-	{
-		.start	= AP2MDM_KPDPWR_N,
-		.end	= AP2MDM_KPDPWR_N,
-		.name	= "AP2MDM_KPDPWR_N",
-		.flags	= IORESOURCE_IO,
-	},
-};
-
-static struct mdm_platform_data mdm_platform_data = {
-	.mdm_version = "2.5",
-};
-
-static struct platform_device mdm_device = {
-	.name		= "mdm2_modem",
-	.id		= -1,
-	.num_resources	= ARRAY_SIZE(mdm_resources),
-	.resource	= mdm_resources,
-	.dev		= {
-		.platform_data = &mdm_platform_data,
-	},
-};
-
-static struct platform_device *mdm_devices[] __initdata = {
-	&mdm_device,
+static struct mdm_platform_data sglte_platform_data = {
+	.mdm_version = "4.0",
+	.ramdump_delay_ms = 1000,
+	.soft_reset_inverted = 1,
+	.peripheral_platform_device = NULL,
 };
 
 #define MSM_SHARED_RAM_PHYS 0x80000000
@@ -2034,7 +1957,7 @@
 	/* T6 Object */
 	0, 0, 0, 0, 0, 0,
 	/* T38 Object */
-	12, 2, 0, 17, 1, 12, 0, 0, 0, 0,
+	12, 3, 0, 24, 5, 12, 0, 0, 0, 0,
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -2046,7 +1969,7 @@
 	/* T8 Object */
 	25, 0, 20, 20, 0, 0, 20, 50, 0, 0,
 	/* T9 Object */
-	131, 0, 0, 26, 42, 0, 32, 80, 2, 5,
+	139, 0, 0, 26, 42, 0, 32, 80, 2, 5,
 	0, 5, 5, 0, 10, 30, 10, 10, 255, 2,
 	85, 5, 10, 10, 10, 10, 135, 55, 70, 40,
 	10, 5, 0, 0, 0,
@@ -2056,13 +1979,13 @@
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	0, 0, 0, 0, 0, 0, 0, 0, 0,
 	/* T25 Object */
-	3, 0, 60, 115, 156, 99,
+	1, 0, 60, 115, 156, 99,
 	/* T27 Object */
 	0, 0, 0, 0, 0, 0, 0,
 	/* T40 Object */
 	0, 0, 0, 0, 0,
 	/* T42 Object */
-	2, 0, 255, 0, 255, 0, 0, 0, 0, 0,
+	0, 0, 255, 0, 255, 0, 0, 0, 0, 0,
 	/* T43 Object */
 	0, 0, 0, 0, 0, 0, 0, 64, 0, 8,
 	16,
@@ -2071,7 +1994,7 @@
 	/* T47 Object */
 	0, 0, 0, 0, 0, 0, 3, 64, 66, 0,
 	/* T48 Object */
-	31, 64, 64, 0, 0, 0, 0, 0, 0, 0,
+	1, 64, 64, 0, 0, 0, 0, 0, 0, 0,
 	48, 40, 0, 10, 10, 0, 0, 100, 10, 80,
 	0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
 	52, 0, 12, 0, 17, 0, 1, 0, 0, 0,
@@ -2724,62 +2647,56 @@
 		MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
 		MSM_RPMRS_LIMITS(ON, ACTIVE, MAX, ACTIVE),
 		true,
-		100, 650, 801, 200,
+		1, 784, 180000, 100,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
 		MSM_RPMRS_LIMITS(ON, ACTIVE, MAX, ACTIVE),
 		true,
-		2000, 200, 576000, 2000,
+		1300, 228, 1200000, 2000,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(ON, GDHS, MAX, ACTIVE),
 		false,
-		8500, 51, 1122000, 8500,
+		2000, 138, 1208400, 3200,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
-		MSM_RPMRS_LIMITS(ON, HSFS_OPEN, MAX, ACTIVE),
-		false,
-		9000, 51, 1130300, 9000,
-	},
-	{
-		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(ON, HSFS_OPEN, ACTIVE, RET_HIGH),
 		false,
-		10000, 51, 1130300, 10000,
+		6000, 119, 1850300, 9000,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(OFF, GDHS, MAX, ACTIVE),
 		false,
-		12000, 14, 2205900, 12000,
+		9200, 68, 2839200, 16400,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(OFF, HSFS_OPEN, MAX, ACTIVE),
 		false,
-		18000, 12, 2364250, 18000,
+		10300, 63, 3128000, 18200,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(OFF, HSFS_OPEN, ACTIVE, RET_HIGH),
 		false,
-		23500, 10, 2667000, 23500,
+		18000, 10, 4602600, 27000,
 	},
 
 	{
 		MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
 		MSM_RPMRS_LIMITS(OFF, HSFS_OPEN, RET_HIGH, RET_LOW),
 		false,
-		29700, 5, 2867000, 30000,
+		20000, 2, 5752000, 32000,
 	},
 };
 
@@ -2893,7 +2810,7 @@
 	if (SOCINFO_VERSION_MAJOR(version) == 1)
 		return;
 
-	if (PLATFORM_IS_CHARM25() || machine_is_msm8960_liquid())
+	if (machine_is_msm8960_liquid())
 		platform_device_register(&msm_device_hsic_host);
 #endif
 }
@@ -3167,8 +3084,10 @@
 	change_memory_power = &msm8960_change_memory_power;
 	BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
 	msm_pm_init_sleep_status_data(&msm_pm_slp_sts_data);
-	if (PLATFORM_IS_CHARM25())
-		platform_add_devices(mdm_devices, ARRAY_SIZE(mdm_devices));
+	if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) {
+		mdm_sglte_device.dev.platform_data = &sglte_platform_data;
+		platform_device_register(&mdm_sglte_device);
+	}
 }
 
 MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
diff --git a/arch/arm/mach-msm/board-8960.h b/arch/arm/mach-msm/board-8960.h
index 925c5b4..261055e 100644
--- a/arch/arm/mach-msm/board-8960.h
+++ b/arch/arm/mach-msm/board-8960.h
@@ -86,11 +86,6 @@
 void msm8960_set_display_params(char *prim_panel, char *ext_panel);
 void msm8960_pm8921_gpio_mpp_init(void);
 void msm8960_mdp_writeback(struct memtype_reserve *reserve_table);
-#define PLATFORM_IS_CHARM25() \
-	(machine_is_msm8960_cdp() && \
-		(socinfo_get_platform_subtype() == 1) \
-	)
-
 #define MSM_8960_GSBI4_QUP_I2C_BUS_ID 4
 #define MSM_8960_GSBI3_QUP_I2C_BUS_ID 3
 #define MSM_8960_GSBI10_QUP_I2C_BUS_ID 10
diff --git a/arch/arm/mach-msm/board-9615.c b/arch/arm/mach-msm/board-9615.c
index 67697d2..9ad2c5e 100644
--- a/arch/arm/mach-msm/board-9615.c
+++ b/arch/arm/mach-msm/board-9615.c
@@ -851,9 +851,7 @@
 #ifdef CONFIG_LTC4088_CHARGER
 	&msm_device_charger,
 #endif
-#ifndef CONFIG_USB_CI13XXX_MSM_HSIC
 	&msm_device_otg,
-#endif
 	&msm_device_hsic_peripheral,
 	&msm_device_gadget_peripheral,
 	&msm_device_hsusb_host,
diff --git a/arch/arm/mach-msm/board-9625-gpiomux.c b/arch/arm/mach-msm/board-9625-gpiomux.c
new file mode 100644
index 0000000..e28c734
--- /dev/null
+++ b/arch/arm/mach-msm/board-9625-gpiomux.c
@@ -0,0 +1,53 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <mach/board.h>
+#include <mach/gpio.h>
+#include <mach/gpiomux.h>
+
+static struct gpiomux_setting gpio_uart_config = {
+	.func = GPIOMUX_FUNC_2,
+	.drv = GPIOMUX_DRV_16MA,
+	.pull = GPIOMUX_PULL_NONE,
+	.dir = GPIOMUX_OUT_HIGH,
+};
+
+static struct msm_gpiomux_config msm_blsp_configs[] __initdata = {
+	{
+		.gpio      = 45,	       /* BLSP1 UART TX */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gpio_uart_config,
+		},
+	},
+	{
+		.gpio      = 46,	       /* BLSP1 UART RX */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gpio_uart_config,
+		},
+	},
+};
+
+void __init msm9625_init_gpiomux(void)
+{
+	int rc;
+
+	rc = msm_gpiomux_init(NR_GPIO_IRQS);
+	if (rc) {
+		pr_err("msm9625_init_gpiomux failed %d\n", rc);
+		return;
+	}
+
+	msm_gpiomux_install(msm_blsp_configs, ARRAY_SIZE(msm_blsp_configs));
+}
diff --git a/arch/arm/mach-msm/board-9625.c b/arch/arm/mach-msm/board-9625.c
new file mode 100644
index 0000000..60dfe3c
--- /dev/null
+++ b/arch/arm/mach-msm/board-9625.c
@@ -0,0 +1,104 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/memory.h>
+#include <asm/mach/map.h>
+#include <asm/hardware/gic.h>
+#include <asm/arch_timer.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+#include <mach/socinfo.h>
+#include <mach/board.h>
+#include <mach/gpio.h>
+#include "clock.h"
+
+static struct clk_lookup msm_clocks_dummy[] = {
+	CLK_DUMMY("core_clk",   BLSP1_UART_CLK, "msm_serial_hsl.0", OFF),
+	CLK_DUMMY("iface_clk",  BLSP1_UART_CLK, "msm_serial_hsl.0", OFF),
+	CLK_DUMMY("phy_clk", NULL, "msm_otg", OFF),
+	CLK_DUMMY("core_clk", NULL, "msm_otg", OFF),
+	CLK_DUMMY("alt_core_clk", NULL, "msm_otg", OFF),
+	CLK_DUMMY("iface_clk", NULL, "msm_otg", OFF),
+	CLK_DUMMY("xo", NULL, "msm_otg", OFF),
+	CLK_DUMMY("dfab_clk",	DFAB_CLK,	NULL, 0),
+	CLK_DUMMY("dma_bam_pclk",	DMA_BAM_P_CLK,	NULL, 0),
+	CLK_DUMMY("mem_clk",	NULL,	NULL, 0),
+	CLK_DUMMY("core_clk",	NULL,	"spi_qsd.1",	OFF),
+	CLK_DUMMY("iface_clk",	NULL,	"spi_qsd.1",	OFF),
+	CLK_DUMMY("core_clk",	NULL,	"f9966000.i2c", 0),
+	CLK_DUMMY("iface_clk",	NULL,	"f9966000.i2c", 0),
+	CLK_DUMMY("core_clk",	NULL,	"fe12f000.slim",	OFF),
+};
+
+struct clock_init_data msm_dummy_clock_init_data __initdata = {
+	.table = msm_clocks_dummy,
+	.size = ARRAY_SIZE(msm_clocks_dummy),
+};
+
+static struct of_device_id irq_match[] __initdata  = {
+	{ .compatible = "qcom,msm-qgic2", .data = gic_of_init, },
+	{ .compatible = "qcom,msm-gpio", .data = msm_gpio_of_init, },
+	{}
+};
+
+static const char *msm9625_dt_match[] __initconst = {
+	"qcom,msm9625",
+	NULL
+};
+
+static struct of_dev_auxdata msm9625_auxdata_lookup[] __initdata = {
+	OF_DEV_AUXDATA("qcom,msm-lsuart-v14", 0xF991F000, \
+			"msm_serial_hsl.0", NULL),
+	{}
+};
+
+void __init msm9625_init_irq(void)
+{
+	of_irq_init(irq_match);
+}
+
+static void __init msm_dt_timer_init(void)
+{
+	arch_timer_of_register();
+}
+
+static struct sys_timer msm_dt_timer = {
+	.init = msm_dt_timer_init
+};
+
+void __init msm9625_init(void)
+{
+	if (socinfo_init() < 0)
+		pr_err("%s: socinfo_init() failed\n", __func__);
+	msm_clock_init(&msm_dummy_clock_init_data);
+	of_platform_populate(NULL, of_default_bus_match_table,
+			msm9625_auxdata_lookup, NULL);
+}
+
+DT_MACHINE_START(MSM_DT, "Qualcomm MSM (Flattened Device Tree)")
+	.map_io = msm_map_msm9625_io,
+	.init_irq = msm9625_init_irq,
+	.init_machine = msm9625_init,
+	.handle_irq = gic_handle_irq,
+	.timer = &msm_dt_timer,
+	.dt_compat = msm9625_dt_match,
+	.nr_irqs = -1,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-copper-gpiomux.c b/arch/arm/mach-msm/board-copper-gpiomux.c
index 0ea33c7..caba698 100644
--- a/arch/arm/mach-msm/board-copper-gpiomux.c
+++ b/arch/arm/mach-msm/board-copper-gpiomux.c
@@ -54,6 +54,12 @@
 	},
 };
 #endif
+static struct gpiomux_setting gpio_i2c_config = {
+	.func = GPIOMUX_FUNC_3,
+	.drv = GPIOMUX_DRV_8MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
 
 static struct msm_gpiomux_config msm_blsp_configs[] __initdata = {
 #if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE)
@@ -83,6 +89,18 @@
 	},
 #endif
 	{
+		.gpio      = 83,		/* BLSP11 QUP I2C_DAT */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gpio_i2c_config,
+		},
+	},
+	{
+		.gpio      = 84,		/* BLSP11 QUP I2C_CLK */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gpio_i2c_config,
+		},
+	},
+	{
 		.gpio      = 45,	       /* BLSP8 UART TX */
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &gpio_uart_config,
diff --git a/arch/arm/mach-msm/board-copper.c b/arch/arm/mach-msm/board-copper.c
index f5fe3d1..62bab86 100644
--- a/arch/arm/mach-msm/board-copper.c
+++ b/arch/arm/mach-msm/board-copper.c
@@ -40,6 +40,7 @@
 #include <mach/msm_memtypes.h>
 #include <mach/msm_smd.h>
 #include <mach/rpm-smd.h>
+#include <mach/rpm-regulator-smd.h>
 #include <mach/qpnp-int.h>
 #include <mach/socinfo.h>
 #include "clock.h"
@@ -392,7 +393,7 @@
 };
 
 #define SHARED_IMEM_TZ_BASE 0xFE805720
-static struct resource tzlog_resources[] = {
+static struct resource copper_tzlog_resources[] = {
 	{
 		.start = SHARED_IMEM_TZ_BASE,
 		.end = SHARED_IMEM_TZ_BASE + SZ_4K - 1,
@@ -400,30 +401,13 @@
 	},
 };
 
-struct platform_device apq_device_tz_log = {
+struct platform_device copper_device_tz_log = {
 	.name		= "tz_log",
 	.id		= 0,
-	.num_resources	= ARRAY_SIZE(tzlog_resources),
-	.resource	= tzlog_resources,
+	.num_resources	= ARRAY_SIZE(copper_tzlog_resources),
+	.resource	= copper_tzlog_resources,
 };
 
-#ifdef CONFIG_HW_RANDOM_MSM
-/* PRNG device */
-#define MSM_PRNG_PHYS  0xF9BFF000
-static struct resource rng_resources = {
-	.flags = IORESOURCE_MEM,
-	.start = MSM_PRNG_PHYS,
-	.end   = MSM_PRNG_PHYS + SZ_512 - 1,
-};
-
-struct platform_device msm8974_device_rng = {
-	.name          = "msm_rng",
-	.id            = 0,
-	.num_resources = 1,
-	.resource      = &rng_resources,
-};
-#endif
-
 
 void __init msm_copper_add_devices(void)
 {
@@ -434,10 +418,7 @@
 	platform_device_register(&android_usb_device);
 	platform_add_devices(msm_copper_stub_regulator_devices,
 					msm_copper_stub_regulator_devices_len);
-	platform_device_register(&apq_device_tz_log);
-#ifdef CONFIG_HW_RANDOM_MSM
-	platform_device_register(&msm8974_device_rng);
-#endif
+	platform_device_register(&copper_device_tz_log);
 }
 
 /*
@@ -450,6 +431,7 @@
 {
 	msm_smd_init();
 	msm_rpm_driver_init();
+	rpm_regulator_smd_driver_init();
 	msm_spm_device_init();
 	regulator_stub_init();
 }
@@ -515,6 +497,8 @@
 			"pil-q6v5-lpass", NULL),
 	OF_DEV_AUXDATA("qcom,pil-pronto", 0xFB21B000, \
 			"pil_pronto", NULL),
+	OF_DEV_AUXDATA("qcom,msm-rng", 0xF9BFF000, \
+			"msm_rng", NULL),
 	{}
 };
 
diff --git a/arch/arm/mach-msm/board-msm7627a-bt.c b/arch/arm/mach-msm/board-msm7627a-bt.c
index 75d5f15..4567c76 100644
--- a/arch/arm/mach-msm/board-msm7627a-bt.c
+++ b/arch/arm/mach-msm/board-msm7627a-bt.c
@@ -627,13 +627,16 @@
 
 	for (i = 0; i < ARRAY_SIZE(bt_vregs); i++) {
 		if (IS_ERR_OR_NULL(bt_vregs[i].reg)) {
-			rc = bt_vregs[i].reg ?
-				PTR_ERR(bt_vregs[i].reg) :
-				-ENODEV;
-			dev_err(&msm_bt_power_device.dev,
-				"%s: invalid regulator handle for %s: %d\n",
+			bt_vregs[i].reg =
+				regulator_get(&msm_bt_power_device.dev,
+						bt_vregs[i].name);
+			if (IS_ERR(bt_vregs[i].reg)) {
+				rc = PTR_ERR(bt_vregs[i].reg);
+				dev_err(&msm_bt_power_device.dev,
+					"%s: could not get regulator %s: %d\n",
 					__func__, bt_vregs[i].name, rc);
-			goto reg_disable;
+				goto reg_disable;
+			}
 		}
 
 		rc = on ? regulator_set_voltage(bt_vregs[i].reg,
@@ -687,6 +690,7 @@
 			i--;
 			regulator_disable(bt_vregs[i].reg);
 			regulator_put(bt_vregs[i].reg);
+			bt_vregs[i].reg = NULL;
 		}
 	}
 	return rc;
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index e172481..e2ec60f 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -106,6 +106,7 @@
 
 #include <linux/ion.h>
 #include <mach/ion.h>
+#include <mach/msm_rtb.h>
 
 #define MSM_SHARED_RAM_PHYS 0x40000000
 #define MDM2AP_SYNC 129
@@ -4860,6 +4861,29 @@
 	},
 };
 
+static struct msm_rtb_platform_data msm_rtb_pdata = {
+	.size = SZ_1M,
+};
+
+static int __init msm_rtb_set_buffer_size(char *p)
+{
+	int s;
+
+	s = memparse(p, NULL);
+	msm_rtb_pdata.size = ALIGN(s, SZ_4K);
+	return 0;
+}
+early_param("msm_rtb_size", msm_rtb_set_buffer_size);
+
+
+static struct platform_device msm_rtb_device = {
+	.name           = "msm_rtb",
+	.id             = -1,
+	.dev            = {
+		.platform_data = &msm_rtb_pdata,
+	},
+};
+
 static void pmic8058_xoadc_mpp_config(void)
 {
 	int rc, i;
@@ -5261,7 +5285,7 @@
 #endif
 	&msm8660_device_watchdog,
 	&msm_device_tz_log,
-
+	&msm_rtb_device,
 };
 
 #ifdef CONFIG_ION_MSM
@@ -5507,12 +5531,20 @@
 
 static void __init reserve_mdp_memory(void);
 
+static void __init reserve_rtb_memory(void)
+{
+#if defined(CONFIG_MSM_RTB)
+	msm8x60_reserve_table[MEMTYPE_EBI1].size += msm_rtb_pdata.size;
+#endif
+}
+
 static void __init msm8x60_calculate_reserve_sizes(void)
 {
 	size_pmem_devices();
 	reserve_pmem_memory();
 	reserve_ion_memory();
 	reserve_mdp_memory();
+	reserve_rtb_memory();
 }
 
 static int msm8x60_paddr_to_memtype(unsigned int paddr)
diff --git a/arch/arm/mach-msm/clock-7x30.c b/arch/arm/mach-msm/clock-7x30.c
index fcd6386..cc48fe7 100644
--- a/arch/arm/mach-msm/clock-7x30.c
+++ b/arch/arm/mach-msm/clock-7x30.c
@@ -221,9 +221,15 @@
 	pcom_xo_enable(PCOM_XO_TCXO, PCOM_XO_DISABLE);
 }
 
+static enum handoff xo_clk_handoff(struct clk *clk)
+{
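+	/* Assume the XO is already running at boot; hand it off as enabled. */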
+	return HANDOFF_ENABLED_CLK;
+}
+
 static struct clk_ops clk_ops_tcxo = {
 	.enable = tcxo_clk_enable,
 	.disable = tcxo_clk_disable,
+	.handoff = xo_clk_handoff,
 	.is_local = pcom_is_local,
 };
 
@@ -250,6 +256,7 @@
 static struct clk_ops clk_ops_lpxo = {
 	.enable = lpxo_clk_enable,
 	.disable = lpxo_clk_disable,
+	.handoff = xo_clk_handoff,
 	.is_local = pcom_is_local,
 };
 
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 3ee59b1..a1b9c1c 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -770,6 +770,8 @@
 	.b = {
 		.ctl_reg = MAXI_EN5_REG,
 		.en_mask = BIT(12),
+		.hwcg_reg = MAXI_EN5_REG,
+		.hwcg_mask = BIT(11),
 		.reset_reg = SW_RESET_AXI_REG,
 		.reset_mask = BIT(16),
 		.halt_reg = DBG_BUS_VEC_J_REG,
@@ -787,6 +789,8 @@
 	.b = {
 		.ctl_reg = MAXI_EN5_REG,
 		.en_mask = BIT(25),
+		.hwcg_reg = MAXI_EN5_REG,
+		.hwcg_mask = BIT(24),
 		.reset_reg = SW_RESET_AXI_REG,
 		.reset_mask = BIT(17),
 		.halt_reg = DBG_BUS_VEC_J_REG,
@@ -1176,6 +1180,8 @@
 	.b = {
 		.ctl_reg = AHB_EN3_REG,
 		.en_mask = BIT(1),
+		.hwcg_reg = AHB_EN3_REG,
+		.hwcg_mask = BIT(0),
 		.reset_reg = SW_RESET_AHB2_REG,
 		.reset_mask = BIT(2),
 		.halt_reg = DBG_BUS_VEC_J_REG,
@@ -5991,7 +5997,7 @@
 	 * the clock is halted. The sleep and wake-up delays are set to safe
 	 * values.
 	 */
-	if (cpu_is_msm8960()) {
+	if (cpu_is_msm8960() || cpu_is_apq8064()) {
 		rmwreg(0x44000000, AHB_EN_REG,  0x6C000103);
 		writel_relaxed(0x3C7097F9, AHB_EN2_REG);
 	} else {
@@ -5999,7 +6005,7 @@
 		writel_relaxed(0x000007F9, AHB_EN2_REG);
 	}
 	if (cpu_is_apq8064())
-		rmwreg(0x00000000, AHB_EN3_REG, 0x00000001);
+		rmwreg(0x00000001, AHB_EN3_REG, 0x00000001);
 
 	/* Deassert all locally-owned MM AHB resets. */
 	rmwreg(0, SW_RESET_AHB_REG, 0xFFF7DFFF);
@@ -6008,8 +6014,9 @@
 	/* Initialize MM AXI registers: Enable HW gating for all clocks that
 	 * support it. Also set FORCE_CORE_ON bits, and any sleep and wake-up
 	 * delays to safe values. */
-	if (cpu_is_msm8960() &&
-			SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 3) {
+	if ((cpu_is_msm8960() &&
+			SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 3) ||
+			cpu_is_apq8064()) {
 		rmwreg(0x0003AFF9, MAXI_EN_REG,  0x0803FFFF);
 		rmwreg(0x3A27FCFF, MAXI_EN2_REG, 0x3A3FFFFF);
 		rmwreg(0x0027FCFF, MAXI_EN4_REG, 0x017FFFFF);
@@ -6020,10 +6027,10 @@
 	}
 	rmwreg(0x0027FCFF, MAXI_EN3_REG, 0x003FFFFF);
 	if (cpu_is_apq8064())
-		rmwreg(0x009FE4FF, MAXI_EN5_REG, 0x01FFEFFF);
+		rmwreg(0x019FECFF, MAXI_EN5_REG, 0x01FFEFFF);
 	if (cpu_is_msm8930())
 		rmwreg(0x000004FF, MAXI_EN5_REG, 0x00000FFF);
-	if (cpu_is_msm8960())
+	if (cpu_is_msm8960() || cpu_is_apq8064())
 		rmwreg(0x00003C38, SAXI_EN_REG,  0x00003FFF);
 	else
 		rmwreg(0x000003C7, SAXI_EN_REG,  0x00003FFF);
diff --git a/arch/arm/mach-msm/clock-8x60.c b/arch/arm/mach-msm/clock-8x60.c
index 6972302..7aed579 100644
--- a/arch/arm/mach-msm/clock-8x60.c
+++ b/arch/arm/mach-msm/clock-8x60.c
@@ -363,11 +363,22 @@
 	return false;
 }
 
+static enum handoff pll4_clk_handoff(struct clk *clk)
+{
+	struct msm_rpm_iv_pair iv = { MSM_RPM_ID_PLL_4 };
+	int rc = msm_rpm_get_status(&iv, 1);
+	if (rc < 0 || !iv.value)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
 static struct clk_ops clk_ops_pll4 = {
 	.enable = pll4_clk_enable,
 	.disable = pll4_clk_disable,
 	.get_parent = pll4_clk_get_parent,
 	.is_local = pll4_clk_is_local,
+	.handoff = pll4_clk_handoff,
 };
 
 static struct fixed_clk pll4_clk = {
diff --git a/arch/arm/mach-msm/clock-copper.c b/arch/arm/mach-msm/clock-copper.c
index c0245a3..4d5f773 100644
--- a/arch/arm/mach-msm/clock-copper.c
+++ b/arch/arm/mach-msm/clock-copper.c
@@ -598,9 +598,16 @@
 	return;
 }
 
+static enum handoff cxo_clk_handoff(struct clk *clk)
+{
+	/* TODO: Remove from here once the rpm xo clock is ready. */
+	return HANDOFF_ENABLED_CLK;
+}
+
 static struct clk_ops clk_ops_cxo = {
 	.enable = cxo_clk_enable,
 	.disable = cxo_clk_disable,
+	.handoff = cxo_clk_handoff,
 };
 
 static struct fixed_clk cxo_clk_src = {
@@ -4716,6 +4723,7 @@
 static struct clk_lookup msm_clocks_copper[] = {
 	CLK_LOOKUP("xo",	cxo_clk_src.c,	"msm_otg"),
 	CLK_LOOKUP("xo",	cxo_clk_src.c,	"pil-q6v5-lpass"),
+	CLK_LOOKUP("xo",	cxo_clk_src.c,	"pil_pronto"),
 	CLK_LOOKUP("measure",	measure_clk.c,	"debug"),
 
 	CLK_LOOKUP("dma_bam_pclk", gcc_bam_dma_ahb_clk.c, "msm_sps"),
@@ -4846,7 +4854,10 @@
 	CLK_LOOKUP("core_clk", camss_jpeg_jpeg0_clk.c, ""),
 	CLK_LOOKUP("core_clk", camss_jpeg_jpeg1_clk.c, ""),
 	CLK_LOOKUP("core_clk", camss_jpeg_jpeg2_clk.c, ""),
-	CLK_LOOKUP("iface_clk", camss_jpeg_jpeg_ahb_clk.c, ""),
+	CLK_LOOKUP("iface_clk", camss_jpeg_jpeg_ahb_clk.c,
+						"fda64000.qcom,iommu"),
+	CLK_LOOKUP("core_clk", camss_jpeg_jpeg_axi_clk.c,
+						"fda64000.qcom,iommu"),
 	CLK_LOOKUP("bus_clk", camss_jpeg_jpeg_axi_clk.c, ""),
 	CLK_LOOKUP("bus_clk", camss_jpeg_jpeg_ocmemnoc_clk.c, ""),
 	CLK_LOOKUP("core_clk", camss_mclk0_clk.c, ""),
@@ -4867,11 +4878,15 @@
 	CLK_LOOKUP("iface_clk", camss_vfe_vfe_ahb_clk.c, ""),
 	CLK_LOOKUP("bus_clk", camss_vfe_vfe_axi_clk.c, ""),
 	CLK_LOOKUP("bus_clk", camss_vfe_vfe_ocmemnoc_clk.c, ""),
-	CLK_LOOKUP("iface_clk", mdss_ahb_clk.c, ""),
+	CLK_LOOKUP("iface_clk", mdss_ahb_clk.c, "fd928000.qcom,iommu"),
+	CLK_LOOKUP("core_clk", mdss_axi_clk.c, "fd928000.qcom,iommu"),
 	CLK_LOOKUP("bus_clk", mdss_axi_clk.c, ""),
 	CLK_LOOKUP("core_clk", oxili_gfx3d_clk.c, ""),
 	CLK_LOOKUP("iface_clk", oxilicx_ahb_clk.c, ""),
 	CLK_LOOKUP("bus_clk", oxilicx_axi_clk.c, ""),
+	CLK_LOOKUP("iface_clk", venus0_ahb_clk.c, "fdc84000.qcom,iommu"),
+	CLK_LOOKUP("core_clk", venus0_axi_clk.c, "fdc84000.qcom,iommu"),
+	CLK_LOOKUP("bus_clk", venus0_axi_clk.c, ""),
 
 	/* LPASS clocks */
 	CLK_LOOKUP("core_clk", audio_core_slimbus_core_clk.c, "fe12f000.slim"),
@@ -4910,7 +4925,7 @@
 	CLK_LOOKUP("bus_clk",  q6ss_ahb_lfabif_clk.c, "pil-q6v5-lpass"),
 	CLK_LOOKUP("mem_clk", gcc_boot_rom_ahb_clk.c, ""),
 	CLK_LOOKUP("bus_clk",  gcc_mss_cfg_ahb_clk.c, ""),
-	CLK_DUMMY("core_clk", PRNG_CLK , "msm_rng.0", OFF),
+	CLK_LOOKUP("core_clk", gcc_prng_ahb_clk.c, "msm_rng"),
 
 	/* TODO: Remove dummy clocks as soon as they become unnecessary */
 	CLK_DUMMY("phy_clk",       NULL,    "msm_otg", OFF),
diff --git a/arch/arm/mach-msm/clock-pcom.c b/arch/arm/mach-msm/clock-pcom.c
index 58cea99..658fa2a 100644
--- a/arch/arm/mach-msm/clock-pcom.c
+++ b/arch/arm/mach-msm/clock-pcom.c
@@ -175,6 +175,18 @@
 	return false;
 }
 
+static enum handoff pc_clk_handoff(struct clk *clk)
+{
+	/*
+	 * Handoff clock state only since querying and caching the rate here
+	 * would incur more overhead than it would ever save.
+	 */
+	if (pc_clk_is_enabled(clk))
+		return HANDOFF_ENABLED_CLK;
+
+	return HANDOFF_DISABLED_CLK;
+}
+
 struct clk_ops clk_ops_pcom = {
 	.enable = pc_clk_enable,
 	.disable = pc_clk_disable,
@@ -187,6 +199,7 @@
 	.is_enabled = pc_clk_is_enabled,
 	.round_rate = pc_clk_round_rate,
 	.is_local = pc_clk_is_local,
+	.handoff = pc_clk_handoff,
 };
 
 struct clk_ops clk_ops_pcom_ext_config = {
@@ -201,5 +214,6 @@
 	.is_enabled = pc_clk_is_enabled,
 	.round_rate = pc_clk_round_rate,
 	.is_local = pc_clk_is_local,
+	.handoff = pc_clk_handoff,
 };
 
diff --git a/arch/arm/mach-msm/clock-pll.c b/arch/arm/mach-msm/clock-pll.c
index ead4fcb..d839911 100644
--- a/arch/arm/mach-msm/clock-pll.c
+++ b/arch/arm/mach-msm/clock-pll.c
@@ -110,12 +110,22 @@
 	return !!(readl_relaxed(PLL_STATUS_REG(pll)) & pll->status_mask);
 }
 
+static enum handoff pll_vote_clk_handoff(struct clk *clk)
+{
+	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+	if (readl_relaxed(PLL_EN_REG(pll)) & pll->en_mask)
+		return HANDOFF_ENABLED_CLK;
+
+	return HANDOFF_DISABLED_CLK;
+}
+
 struct clk_ops clk_ops_pll_vote = {
 	.enable = pll_vote_clk_enable,
 	.disable = pll_vote_clk_disable,
 	.auto_off = pll_vote_clk_disable,
 	.is_enabled = pll_vote_clk_is_enabled,
 	.get_parent = pll_vote_clk_get_parent,
+	.handoff = pll_vote_clk_handoff,
 };
 
 static void __pll_clk_enable_reg(void __iomem *mode_reg)
@@ -181,6 +191,18 @@
 	spin_unlock_irqrestore(&pll_reg_lock, flags);
 }
 
+static enum handoff local_pll_clk_handoff(struct clk *clk)
+{
+	struct pll_clk *pll = to_pll_clk(clk);
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+	u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
+
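+	/*
+	 * Treat the PLL as enabled only if bypass is disabled, reset is
+	 * de-asserted and the output is enabled.
+	 */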
+	if ((mode & mask) == mask)
+		return HANDOFF_ENABLED_CLK;
+
+	return HANDOFF_DISABLED_CLK;
+}
+
 static struct clk *local_pll_clk_get_parent(struct clk *clk)
 {
 	struct pll_clk *pll = to_pll_clk(clk);
@@ -281,6 +303,7 @@
 	.enable = local_pll_clk_enable,
 	.disable = local_pll_clk_disable,
 	.auto_off = local_pll_clk_disable,
+	.handoff = local_pll_clk_handoff,
 	.get_parent = local_pll_clk_get_parent,
 };
 
@@ -297,6 +320,7 @@
 	{41, 800000000},
 	{50, 960000000},
 	{52, 1008000000},
+	{60, 1152000000},
 	{62, 1200000000},
 	{63, 1209600000},
 	{0, 0},
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index 41980b3..ef9b62a 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -42,6 +42,7 @@
 #include "rpm_log.h"
 #include <mach/mpm.h>
 #include <mach/iommu_domains.h>
+#include <mach/msm_cache_dump.h>
 
 /* Address of GSBI blocks */
 #define MSM_GSBI1_PHYS		0x12440000
@@ -771,10 +772,10 @@
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.start	= MSM_GPIO_TO_INT(88),
-		.end	= MSM_GPIO_TO_INT(88),
-		.name	= "wakeup_irq",
-		.flags	= IORESOURCE_IRQ,
+		.start	= 47,
+		.end	= 47,
+		.name	= "wakeup",
+		.flags	= IORESOURCE_IO,
 	},
 };
 
@@ -2140,11 +2141,12 @@
 };
 #endif
 
+/* AP2MDM_SOFT_RESET is implemented by the PON_RESET_N gpio */
 #define MDM2AP_ERRFATAL			19
 #define AP2MDM_ERRFATAL			18
 #define MDM2AP_STATUS			49
 #define AP2MDM_STATUS			48
-#define AP2MDM_PMIC_RESET_N		27
+#define AP2MDM_SOFT_RESET		27
 #define AP2MDM_WAKEUP			35
 
 static struct resource mdm_resources[] = {
@@ -2173,9 +2175,9 @@
 		.flags	= IORESOURCE_IO,
 	},
 	{
-		.start	= AP2MDM_PMIC_RESET_N,
-		.end	= AP2MDM_PMIC_RESET_N,
-		.name	= "AP2MDM_PMIC_RESET_N",
+		.start	= AP2MDM_SOFT_RESET,
+		.end	= AP2MDM_SOFT_RESET,
+		.name	= "AP2MDM_SOFT_RESET",
 		.flags	= IORESOURCE_IO,
 	},
 	{
@@ -2587,3 +2589,23 @@
 		.platform_data = &apq8064_rtb_pdata,
 	},
 };
+
+#define APQ8064_L1_SIZE  SZ_1M
+/*
+ * The actual L2 size is smaller but we need a larger buffer
+ * size to store other dump information
+ */
+#define APQ8064_L2_SIZE  SZ_8M
+
+struct msm_cache_dump_platform_data apq8064_cache_dump_pdata = {
+	.l2_size = APQ8064_L2_SIZE,
+	.l1_size = APQ8064_L1_SIZE,
+};
+
+struct platform_device apq8064_cache_dump_device = {
+	.name           = "msm_cache_dump",
+	.id             = -1,
+	.dev            = {
+		.platform_data = &apq8064_cache_dump_pdata,
+	},
+};
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index 4ad73f9..9de2213 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -126,6 +126,7 @@
 		MSM_RPM_MAP(8930, CXO_BUFFERS, CXO_BUFFERS, 1),
 		MSM_RPM_MAP(8930, USB_OTG_SWITCH, USB_OTG_SWITCH, 1),
 		MSM_RPM_MAP(8930, HDMI_SWITCH, HDMI_SWITCH, 1),
+		MSM_RPM_MAP(8930, DDR_DMM_0, DDR_DMM, 2),
 		MSM_RPM_MAP(8930, QDSS_CLK, QDSS_CLK, 1),
 		MSM_RPM_MAP(8930, VOLTAGE_CORNER, VOLTAGE_CORNER, 1),
 	},
@@ -230,6 +231,8 @@
 		MSM_RPM_STATUS_ID_MAP(8930, CXO_BUFFERS),
 		MSM_RPM_STATUS_ID_MAP(8930, USB_OTG_SWITCH),
 		MSM_RPM_STATUS_ID_MAP(8930, HDMI_SWITCH),
+		MSM_RPM_STATUS_ID_MAP(8930, DDR_DMM_0),
+		MSM_RPM_STATUS_ID_MAP(8930, DDR_DMM_1),
 		MSM_RPM_STATUS_ID_MAP(8930, QDSS_CLK),
 		MSM_RPM_STATUS_ID_MAP(8930, VOLTAGE_CORNER),
 	},
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index e474e36..be364e7 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -3631,9 +3631,16 @@
 	},
 };
 
+#define MSM_8960_L1_SIZE  SZ_1M
+/*
+ * The actual L2 size is smaller but we need a larger buffer
+ * size to store other dump information
+ */
+#define MSM_8960_L2_SIZE  SZ_4M
+
 struct msm_cache_dump_platform_data msm8960_cache_dump_pdata = {
-	.l2_size = L2_BUFFER_SIZE,
-	.l1_size = L1_BUFFER_SIZE,
+	.l2_size = MSM_8960_L2_SIZE,
+	.l1_size = MSM_8960_L1_SIZE,
 };
 
 struct platform_device msm8960_cache_dump_device = {
@@ -3643,3 +3650,63 @@
 		.platform_data = &msm8960_cache_dump_pdata,
 	},
 };
+
+#define MDM2AP_ERRFATAL			40
+#define AP2MDM_ERRFATAL			80
+#define MDM2AP_STATUS			24
+#define AP2MDM_STATUS			77
+#define AP2MDM_PMIC_PWR_EN		22
+#define AP2MDM_KPDPWR_N			79
+#define AP2MDM_SOFT_RESET		78
+
+static struct resource sglte_resources[] = {
+	{
+		.start	= MDM2AP_ERRFATAL,
+		.end	= MDM2AP_ERRFATAL,
+		.name	= "MDM2AP_ERRFATAL",
+		.flags	= IORESOURCE_IO,
+	},
+	{
+		.start	= AP2MDM_ERRFATAL,
+		.end	= AP2MDM_ERRFATAL,
+		.name	= "AP2MDM_ERRFATAL",
+		.flags	= IORESOURCE_IO,
+	},
+	{
+		.start	= MDM2AP_STATUS,
+		.end	= MDM2AP_STATUS,
+		.name	= "MDM2AP_STATUS",
+		.flags	= IORESOURCE_IO,
+	},
+	{
+		.start	= AP2MDM_STATUS,
+		.end	= AP2MDM_STATUS,
+		.name	= "AP2MDM_STATUS",
+		.flags	= IORESOURCE_IO,
+	},
+	{
+		.start	= AP2MDM_PMIC_PWR_EN,
+		.end	= AP2MDM_PMIC_PWR_EN,
+		.name	= "AP2MDM_PMIC_PWR_EN",
+		.flags	= IORESOURCE_IO,
+	},
+	{
+		.start	= AP2MDM_KPDPWR_N,
+		.end	= AP2MDM_KPDPWR_N,
+		.name	= "AP2MDM_KPDPWR_N",
+		.flags	= IORESOURCE_IO,
+	},
+	{
+		.start	= AP2MDM_SOFT_RESET,
+		.end	= AP2MDM_SOFT_RESET,
+		.name	= "AP2MDM_SOFT_RESET",
+		.flags	= IORESOURCE_IO,
+	},
+};
+
+struct platform_device mdm_sglte_device = {
+	.name		= "mdm2_modem",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(sglte_resources),
+	.resource	= sglte_resources,
+};
diff --git a/arch/arm/mach-msm/devices-iommu.c b/arch/arm/mach-msm/devices-iommu.c
index 2e0253b..f5676d3 100644
--- a/arch/arm/mach-msm/devices-iommu.c
+++ b/arch/arm/mach-msm/devices-iommu.c
@@ -987,8 +987,8 @@
 static int __init iommu_init(void)
 {
 	int ret;
-	if (!msm_soc_version_supports_iommu()) {
-		pr_err("IOMMU is not supported on this SoC version.\n");
+	if (!msm_soc_version_supports_iommu_v1()) {
+		pr_err("IOMMU v1 is not supported on this SoC version.\n");
 		return -ENODEV;
 	}
 
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index 100d99a..f8ab18a 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -396,7 +396,8 @@
 extern struct platform_device apq8064_rtb_device;
 
 extern struct platform_device msm8960_cache_dump_device;
+extern struct platform_device apq8064_cache_dump_device;
 
-extern struct platform_device apq_device_tz_log;
+extern struct platform_device copper_device_tz_log;
 
-extern struct platform_device msm8974_device_rng;
+extern struct platform_device mdm_sglte_device;
diff --git a/arch/arm/mach-msm/gdsc.c b/arch/arm/mach-msm/gdsc.c
new file mode 100644
index 0000000..a77ac24
--- /dev/null
+++ b/arch/arm/mach-msm/gdsc.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#define PWR_ON_MASK		BIT(31)
+#define EN_REST_WAIT_MASK	(0xF << 20)
+#define EN_FEW_WAIT_MASK	(0xF << 16)
+#define CLK_DIS_WAIT_MASK	(0xF << 12)
+#define SW_OVERRIDE_MASK	BIT(2)
+#define HW_CONTROL_MASK		BIT(1)
+#define SW_COLLAPSE_MASK	BIT(0)
+
+/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
+#define EN_REST_WAIT_VAL	(0x2 << 20)
+#define EN_FEW_WAIT_VAL		(0x2 << 16)
+#define CLK_DIS_WAIT_VAL	(0x2 << 12)
+
+#define TIMEOUT_US		10
+
+struct gdsc {
+	struct regulator_dev	*rdev;
+	struct regulator_desc	rdesc;
+	void __iomem		*gdscr;
+};
+
+static int gdsc_is_enabled(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+
+	return !!(readl_relaxed(sc->gdscr) & PWR_ON_MASK);
+}
+
+static int gdsc_enable(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+	int ret;
+
+	regval = readl_relaxed(sc->gdscr);
+	regval &= ~SW_COLLAPSE_MASK;
+	writel_relaxed(regval, sc->gdscr);
+
+	ret = readl_tight_poll_timeout(sc->gdscr, regval, regval & PWR_ON_MASK,
+				       TIMEOUT_US);
+	if (ret)
+		dev_err(&rdev->dev, "%s enable timed out\n", sc->rdesc.name);
+
+	return ret;
+}
+
+static int gdsc_disable(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+	int ret;
+
+	regval = readl_relaxed(sc->gdscr);
+	regval |= SW_COLLAPSE_MASK;
+	writel_relaxed(regval, sc->gdscr);
+
+	ret = readl_tight_poll_timeout(sc->gdscr, regval,
+				       !(regval & PWR_ON_MASK), TIMEOUT_US);
+	if (ret)
+		dev_err(&rdev->dev, "%s disable timed out\n", sc->rdesc.name);
+
+	return ret;
+}
+
+static struct regulator_ops gdsc_ops = {
+	.is_enabled = gdsc_is_enabled,
+	.enable = gdsc_enable,
+	.disable = gdsc_disable,
+};
+
+static int __devinit gdsc_probe(struct platform_device *pdev)
+{
+	static atomic_t gdsc_count = ATOMIC_INIT(-1);
+	struct regulator_init_data *init_data;
+	struct resource *res;
+	struct gdsc *sc;
+	uint32_t regval;
+	int ret;
+
+	sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
+	if (sc == NULL)
+		return -ENOMEM;
+
+	init_data = of_get_regulator_init_data(&pdev->dev);
+	if (init_data == NULL)
+		return -ENOMEM;
+
+	if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
+		init_data->supply_regulator = "parent";
+
+	ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
+				      &sc->rdesc.name);
+	if (ret)
+		return ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL)
+		return -EINVAL;
+	sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (sc->gdscr == NULL)
+		return -ENOMEM;
+
+	sc->rdesc.id = atomic_inc_return(&gdsc_count);
+	sc->rdesc.ops = &gdsc_ops;
+	sc->rdesc.type = REGULATOR_VOLTAGE;
+	sc->rdesc.owner = THIS_MODULE;
+	platform_set_drvdata(pdev, sc);
+
+	/*
+	 * Disable HW trigger: collapse/restore occur based on register writes.
+	 * Disable SW override: Use hardware state-machine for sequencing.
+	 */
+	regval = readl_relaxed(sc->gdscr);
+	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);
+
+	/* Configure wait time between states. */
+	regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
+	regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
+	writel_relaxed(regval, sc->gdscr);
+
+	sc->rdev = regulator_register(&sc->rdesc, &pdev->dev, init_data, sc,
+				      pdev->dev.of_node);
+	if (IS_ERR(sc->rdev)) {
+		dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
+			sc->rdesc.name);
+		return PTR_ERR(sc->rdev);
+	}
+
+	return 0;
+}
+
+static int __devexit gdsc_remove(struct platform_device *pdev)
+{
+	struct gdsc *sc = platform_get_drvdata(pdev);
+	regulator_unregister(sc->rdev);
+	return 0;
+}
+
+static struct of_device_id gdsc_match_table[] = {
+	{ .compatible = "qcom,gdsc" },
+	{}
+};
+
+static struct platform_driver gdsc_driver = {
+	.probe		= gdsc_probe,
+	.remove		= __devexit_p(gdsc_remove),
+	.driver		= {
+		.name		= "gdsc",
+		.of_match_table = gdsc_match_table,
+		.owner		= THIS_MODULE,
+	},
+};
+
+static int __init gdsc_init(void)
+{
+	return platform_driver_register(&gdsc_driver);
+}
+subsys_initcall(gdsc_init);
+
+static void __exit gdsc_exit(void)
+{
+	platform_driver_unregister(&gdsc_driver);
+}
+module_exit(gdsc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Copper GDSC power rail regulator driver");
diff --git a/arch/arm/mach-msm/gss-8064.c b/arch/arm/mach-msm/gss-8064.c
index 3c475d6..126f8e0 100644
--- a/arch/arm/mach-msm/gss-8064.c
+++ b/arch/arm/mach-msm/gss-8064.c
@@ -23,6 +23,7 @@
 #include <linux/fs.h>
 
 #include <mach/irqs.h>
+#include <mach/msm_smsm.h>
 #include <mach/scm.h>
 #include <mach/peripheral-loader.h>
 #include <mach/subsystem_restart.h>
@@ -42,6 +43,32 @@
 
 static int crash_shutdown;
 
+#define MAX_SSR_REASON_LEN 81U
+
+static void log_gss_sfr(void)
+{
+	u32 size;
+	char *smem_reason, reason[MAX_SSR_REASON_LEN];
+
+	smem_reason = smem_get_entry(SMEM_SSR_REASON_MSS0, &size);
+	if (!smem_reason || !size) {
+		pr_err("GSS subsystem failure reason: (unknown, smem_get_entry failed).\n");
+		return;
+	}
+	if (!smem_reason[0]) {
+		pr_err("GSS subsystem failure reason: (unknown, empty string found).\n");
+		return;
+	}
+
+	size = min(size, MAX_SSR_REASON_LEN-1);
+	memcpy(reason, smem_reason, size);
+	reason[size] = '\0';
+	pr_err("GSS subsystem failure reason: %s.\n", reason);
+
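+	/*
+	 * Clear the reason string in SMEM so a stale reason is not reported
+	 * on a later restart; wmb() orders the clear before continuing.
+	 */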
+	smem_reason[0] = '\0';
+	wmb();
+}
+
 static void gss_fatal_fn(struct work_struct *work)
 {
 	uint32_t panic_smsm_states = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
@@ -58,6 +85,7 @@
 		pr_err("GSS SMSM state changed to SMSM_RESET.\n"
 			"Probable err_fatal on the GSS. "
 			"Calling subsystem restart...\n");
+		log_gss_sfr();
 		subsystem_restart("gss");
 
 	} else if (gss_state & reset_smsm_states) {
@@ -68,6 +96,7 @@
 		kernel_restart(NULL);
 	} else {
 		/* TODO: Bus unlock code/sequence goes _here_ */
+		log_gss_sfr();
 		subsystem_restart("gss");
 	}
 }
@@ -84,6 +113,7 @@
 		pr_err("GSS SMSM state changed to SMSM_RESET.\n"
 			"Probable err_fatal on the GSS. "
 			"Calling subsystem restart...\n");
+		log_gss_sfr();
 		subsystem_restart("gss");
 	}
 }
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 17ac3ac..5e2eaf1 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -556,6 +556,7 @@
 void msm_map_fsm9xxx_io(void);
 void msm_map_copper_io(void);
 void msm_map_msm8625_io(void);
+void msm_map_msm9625_io(void);
 void msm_init_irq(void);
 void msm_copper_init_irq(void);
 void vic_handle_irq(struct pt_regs *regs);
diff --git a/arch/arm/mach-msm/include/mach/camera.h b/arch/arm/mach-msm/include/mach/camera.h
index ba5b8ac..47d9b5f 100644
--- a/arch/arm/mach-msm/include/mach/camera.h
+++ b/arch/arm/mach-msm/include/mach/camera.h
@@ -24,8 +24,8 @@
 
 #include <mach/board.h>
 #include <media/msm_camera.h>
-#include <mach/msm_subsystem_map.h>
 #include <linux/ion.h>
+#include <mach/iommu_domains.h>
 
 #define CONFIG_MSM_CAMERA_DEBUG
 #ifdef CONFIG_MSM_CAMERA_DEBUG
diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h
index a6f27d7..b57ae10 100644
--- a/arch/arm/mach-msm/include/mach/iommu.h
+++ b/arch/arm/mach-msm/include/mach/iommu.h
@@ -18,6 +18,7 @@
 #include <mach/socinfo.h>
 
 extern pgprot_t     pgprot_kernel;
+extern struct platform_device *msm_iommu_root_dev;
 
 /* Domain attributes */
 #define MSM_IOMMU_DOMAIN_PT_CACHEABLE	0x1
@@ -104,6 +105,7 @@
  * message and dump useful IOMMU registers.
  */
 irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id);
+irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id);
 
 #ifdef CONFIG_MSM_IOMMU
 /*
@@ -121,8 +123,17 @@
 
 #endif
 
-static inline int msm_soc_version_supports_iommu(void)
+static inline int msm_soc_version_supports_iommu_v1(void)
 {
+#ifdef CONFIG_OF
+	struct device_node *node;
+
+	node = of_find_compatible_node(NULL, NULL, "qcom,msm-smmu-v2");
+	if (node) {
+		of_node_put(node);
+		return 0;
+	}
+#endif
 	if (cpu_is_msm8960() &&
 	    SOCINFO_VERSION_MAJOR(socinfo_get_version()) < 2)
 		return 0;
diff --git a/arch/arm/mach-msm/include/mach/iommu_hw-v2.h b/arch/arm/mach-msm/include/mach/iommu_hw-v2.h
new file mode 100644
index 0000000..fac13b3
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/iommu_hw-v2.h
@@ -0,0 +1,2111 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_IOMMU_HW_V2_H
+#define __ARCH_ARM_MACH_MSM_IOMMU_HW_V2_H
+
+#define CTX_SHIFT  12
+#define CTX_OFFSET 0x8000
+
+#define MAX_NUM_SMR 128
+
+#define GET_GLOBAL_REG(reg, base) (readl_relaxed((base) + (reg)))
+#define GET_CTX_REG(reg, base, ctx) \
+	(readl_relaxed((base) + CTX_OFFSET + (reg) + ((ctx) << CTX_SHIFT)))
+
+#define SET_GLOBAL_REG(reg, base, val)	writel_relaxed((val), ((base) + (reg)))
+
+#define SET_CTX_REG(reg, base, ctx, val) \
+	writel_relaxed((val), \
+		((base) + CTX_OFFSET + (reg) + ((ctx) << CTX_SHIFT)))
+
+/* Wrappers for numbered registers */
+#define SET_GLOBAL_REG_N(b, n, r, v) SET_GLOBAL_REG((b), ((r) + (n << 2)), (v))
+#define GET_GLOBAL_REG_N(b, n, r)    GET_GLOBAL_REG((b), ((r) + (n << 2)))
+
+/* Field wrappers */
+#define GET_GLOBAL_FIELD(b, r, F) \
+	GET_FIELD(((b) + (r)), r##_##F##_MASK, r##_##F##_SHIFT)
+#define GET_CONTEXT_FIELD(b, c, r, F) \
+	GET_FIELD(((b) + CTX_OFFSET + (r) + ((c) << CTX_SHIFT)), \
+			r##_##F##_MASK, r##_##F##_SHIFT)
+
+#define SET_GLOBAL_FIELD(b, r, F, v) \
+	SET_FIELD(((b) + (r)), r##_##F##_MASK, r##_##F##_SHIFT, (v))
+#define SET_CONTEXT_FIELD(b, c, r, F, v) \
+	SET_FIELD(((b) + CTX_OFFSET + (r) + ((c) << CTX_SHIFT)), \
+			r##_##F##_MASK, r##_##F##_SHIFT, (v))
+
+/* Wrappers for numbered field registers */
+#define SET_GLOBAL_FIELD_N(b, n, r, F, v) \
+	SET_FIELD(((b) + ((n) << 2) + (r)), r##_##F##_MASK, r##_##F##_SHIFT, v)
+#define GET_GLOBAL_FIELD_N(b, n, r, F) \
+	GET_FIELD(((b) + ((n) << 2) + (r)), r##_##F##_MASK, r##_##F##_SHIFT)
+
+#define GET_FIELD(addr, mask, shift) ((readl_relaxed(addr) >> (shift)) & (mask))
+
+#define SET_FIELD(addr, mask, shift, v) \
+do { \
+	int t = readl_relaxed(addr); \
+	writel_relaxed((t & ~((mask) << (shift))) + (((v) & \
+			(mask)) << (shift)), addr); \
+} while (0)
+
+
+/* Global register space 0 setters / getters */
+#define SET_CR0(b, v)            SET_GLOBAL_REG(CR0, (b), (v))
+#define SET_SCR1(b, v)           SET_GLOBAL_REG(SCR1, (b), (v))
+#define SET_CR2(b, v)            SET_GLOBAL_REG(CR2, (b), (v))
+#define SET_ACR(b, v)            SET_GLOBAL_REG(ACR, (b), (v))
+#define SET_IDR0(b, N, v)        SET_GLOBAL_REG(IDR0, (b), (v))
+#define SET_IDR1(b, N, v)        SET_GLOBAL_REG(IDR1, (b), (v))
+#define SET_IDR2(b, N, v)        SET_GLOBAL_REG(IDR2, (b), (v))
+#define SET_IDR7(b, N, v)        SET_GLOBAL_REG(IDR7, (b), (v))
+#define SET_GFAR(b, v)           SET_GLOBAL_REG(GFAR, (b), (v))
+#define SET_GFSR(b, v)           SET_GLOBAL_REG(GFSR, (b), (v))
+#define SET_GFSRRESTORE(b, v)    SET_GLOBAL_REG(GFSRRESTORE, (b), (v))
+#define SET_GFSYNR0(b, v)        SET_GLOBAL_REG(GFSYNR0, (b), (v))
+#define SET_GFSYNR1(b, v)        SET_GLOBAL_REG(GFSYNR1, (b), (v))
+#define SET_GFSYNR2(b, v)        SET_GLOBAL_REG(GFSYNR2, (b), (v))
+#define SET_TLBIVMID(b, v)       SET_GLOBAL_REG(TLBIVMID, (b), (v))
+#define SET_TLBIALLNSNH(b, v)    SET_GLOBAL_REG(TLBIALLNSNH, (b), (v))
+#define SET_TLBIALLH(b, v)       SET_GLOBAL_REG(TLBIALLH, (b), (v))
+#define SET_TLBGSYNC(b, v)       SET_GLOBAL_REG(TLBGSYNC, (b), (v))
+#define SET_TLBGSTATUS(b, v)     SET_GLOBAL_REG(TLBSTATUS, (b), (v))
+#define SET_TLBIVAH(b, v)        SET_GLOBAL_REG(TLBIVAH, (b), (v))
+#define SET_GATS1UR(b, v)        SET_GLOBAL_REG(GATS1UR, (b), (v))
+#define SET_GATS1UW(b, v)        SET_GLOBAL_REG(GATS1UW, (b), (v))
+#define SET_GATS1PR(b, v)        SET_GLOBAL_REG(GATS1PR, (b), (v))
+#define SET_GATS1PW(b, v)        SET_GLOBAL_REG(GATS1PW, (b), (v))
+#define SET_GATS12UR(b, v)       SET_GLOBAL_REG(GATS12UR, (b), (v))
+#define SET_GATS12UW(b, v)       SET_GLOBAL_REG(GATS12UW, (b), (v))
+#define SET_GATS12PR(b, v)       SET_GLOBAL_REG(GATS12PR, (b), (v))
+#define SET_GATS12PW(b, v)       SET_GLOBAL_REG(GATS12PW, (b), (v))
+#define SET_GPAR(b, v)           SET_GLOBAL_REG(GPAR, (b), (v))
+#define SET_GATSR(b, v)          SET_GLOBAL_REG(GATSR, (b), (v))
+#define SET_NSCR0(b, v)          SET_GLOBAL_REG(NSCR0, (b), (v))
+#define SET_NSCR2(b, v)          SET_GLOBAL_REG(NSCR2, (b), (v))
+#define SET_NSACR(b, v)          SET_GLOBAL_REG(NSACR, (b), (v))
+#define SET_PMCR(b, v)           SET_GLOBAL_REG(PMCR, (b), (v))
+#define SET_SMR_N(b, N, v)       SET_GLOBAL_REG_N(SMR, N, (b), (v))
+#define SET_S2CR_N(b, N, v)      SET_GLOBAL_REG_N(S2CR, N, (b), (v))
+
+#define GET_CR0(b)               GET_GLOBAL_REG(CR0, (b))
+#define GET_SCR1(b)              GET_GLOBAL_REG(SCR1, (b))
+#define GET_CR2(b)               GET_GLOBAL_REG(CR2, (b))
+#define GET_ACR(b)               GET_GLOBAL_REG(ACR, (b))
+#define GET_IDR0(b, N)           GET_GLOBAL_REG(IDR0, (b))
+#define GET_IDR1(b, N)           GET_GLOBAL_REG(IDR1, (b))
+#define GET_IDR2(b, N)           GET_GLOBAL_REG(IDR2, (b))
+#define GET_IDR7(b, N)           GET_GLOBAL_REG(IDR7, (b))
+#define GET_GFAR(b)              GET_GLOBAL_REG(GFAR, (b))
+#define GET_GFSR(b)              GET_GLOBAL_REG(GFSR, (b))
+#define GET_GFSRRESTORE(b)       GET_GLOBAL_REG(GFSRRESTORE, (b))
+#define GET_GFSYNR0(b)           GET_GLOBAL_REG(GFSYNR0, (b))
+#define GET_GFSYNR1(b)           GET_GLOBAL_REG(GFSYNR1, (b))
+#define GET_GFSYNR2(b)           GET_GLOBAL_REG(GFSYNR2, (b))
+#define GET_TLBIVMID(b)          GET_GLOBAL_REG(TLBIVMID, (b))
+#define GET_TLBIALLNSNH(b)       GET_GLOBAL_REG(TLBIALLNSNH, (b))
+#define GET_TLBIALLH(b)          GET_GLOBAL_REG(TLBIALLH, (b))
+#define GET_TLBGSYNC(b)          GET_GLOBAL_REG(TLBGSYNC, (b))
+#define GET_TLBGSTATUS(b)        GET_GLOBAL_REG(TLBSTATUS, (b))
+#define GET_TLBIVAH(b)           GET_GLOBAL_REG(TLBIVAH, (b))
+#define GET_GATS1UR(b)           GET_GLOBAL_REG(GATS1UR, (b))
+#define GET_GATS1UW(b)           GET_GLOBAL_REG(GATS1UW, (b))
+#define GET_GATS1PR(b)           GET_GLOBAL_REG(GATS1PR, (b))
+#define GET_GATS1PW(b)           GET_GLOBAL_REG(GATS1PW, (b))
+#define GET_GATS12UR(b)          GET_GLOBAL_REG(GATS12UR, (b))
+#define GET_GATS12UW(b)          GET_GLOBAL_REG(GATS12UW, (b))
+#define GET_GATS12PR(b)          GET_GLOBAL_REG(GATS12PR, (b))
+#define GET_GATS12PW(b)          GET_GLOBAL_REG(GATS12PW, (b))
+#define GET_GPAR(b)              GET_GLOBAL_REG(GPAR, (b))
+#define GET_GATSR(b)             GET_GLOBAL_REG(GATSR, (b))
+#define GET_NSCR0(b)             GET_GLOBAL_REG(NSCR0, (b))
+#define GET_NSCR2(b)             GET_GLOBAL_REG(NSCR2, (b))
+#define GET_NSACR(b)             GET_GLOBAL_REG(NSACR, (b))
+#define GET_PMCR(b, v)           GET_GLOBAL_REG(PMCR, (b))
+#define GET_SMR_N(b, N)          GET_GLOBAL_REG_N(SMR, N, (b))
+#define GET_S2CR_N(b, N)         GET_GLOBAL_REG_N(S2CR, N, (b))
+
+/* Global register space 1 setters / getters */
+#define SET_CBAR_N(b, N, v)      SET_GLOBAL_REG_N(CBAR, N, (b), (v))
+#define SET_CBFRSYNRA_N(b, N, v) SET_GLOBAL_REG_N(CBFRSYNRA, N, (b), (v))
+
+#define GET_CBAR_N(b, N)         GET_GLOBAL_REG_N(CBAR, N, (b))
+#define GET_CBFRSYNRA_N(b, N)    GET_GLOBAL_REG_N(CBFRSYNRA, N, (b))
+
+/* Implementation defined register setters/getters */
+#define SET_PREDICTIONDIS0(b, v) SET_GLOBAL_REG(PREDICTIONDIS0, (b), (v))
+#define SET_PREDICTIONDIS1(b, v) SET_GLOBAL_REG(PREDICTIONDIS1, (b), (v))
+#define SET_S1L1BFBLP0(b, v)     SET_GLOBAL_REG(S1L1BFBLP0, (b), (v))
+
+/* SSD register setters/getters */
+#define SET_SSDR_N(b, N, v)      SET_GLOBAL_REG_N(SSDR_N, N, (b), (v))
+
+#define GET_SSDR_N(b, N)         GET_GLOBAL_REG_N(SSDR_N, N, (b))
+
+/* Context bank register setters/getters */
+#define SET_SCTLR(b, c, v)       SET_CTX_REG(CB_SCTLR, (b), (c), (v))
+#define SET_ACTLR(b, c, v)       SET_CTX_REG(CB_ACTLR, (b), (c), (v))
+#define SET_RESUME(b, c, v)      SET_CTX_REG(CB_RESUME, (b), (c), (v))
+#define SET_TTBR0(b, c, v)       SET_CTX_REG(CB_TTBR0, (b), (c), (v))
+#define SET_TTBR1(b, c, v)       SET_CTX_REG(CB_TTBR1, (b), (c), (v))
+#define SET_TTBCR(b, c, v)       SET_CTX_REG(CB_TTBCR, (b), (c), (v))
+#define SET_CONTEXTIDR(b, c, v)  SET_CTX_REG(CB_CONTEXTIDR, (b), (c), (v))
+#define SET_PRRR(b, c, v)        SET_CTX_REG(CB_PRRR, (b), (c), (v))
+#define SET_NMRR(b, c, v)        SET_CTX_REG(CB_NMRR, (b), (c), (v))
+#define SET_PAR(b, c, v)         SET_CTX_REG(CB_PAR, (b), (c), (v))
+#define SET_FSR(b, c, v)         SET_CTX_REG(CB_FSR, (b), (c), (v))
+#define SET_FSRRESTORE(b, c, v)  SET_CTX_REG(CB_FSRRESTORE, (b), (c), (v))
+#define SET_FAR(b, c, v)         SET_CTX_REG(CB_FAR, (b), (c), (v))
+#define SET_FSYNR0(b, c, v)      SET_CTX_REG(CB_FSYNR0, (b), (c), (v))
+#define SET_FSYNR1(b, c, v)      SET_CTX_REG(CB_FSYNR1, (b), (c), (v))
+#define SET_TLBIVA(b, c, v)      SET_CTX_REG(CB_TLBIVA, (b), (c), (v))
+#define SET_TLBIVAA(b, c, v)     SET_CTX_REG(CB_TLBIVAA, (b), (c), (v))
+#define SET_TLBIASID(b, c, v)    SET_CTX_REG(CB_TLBIASID, (b), (c), (v))
+#define SET_TLBIALL(b, c, v)     SET_CTX_REG(CB_TLBIALL, (b), (c), (v))
+#define SET_TLBIVAL(b, c, v)     SET_CTX_REG(CB_TLBIVAL, (b), (c), (v))
+#define SET_TLBIVAAL(b, c, v)    SET_CTX_REG(CB_TLBIVAAL, (b), (c), (v))
+#define SET_TLBSYNC(b, c, v)     SET_CTX_REG(CB_TLBSYNC, (b), (c), (v))
+#define SET_TLBSTATUS(b, c, v)   SET_CTX_REG(CB_TLBSTATUS, (b), (c), (v))
+#define SET_ATS1PR(b, c, v)      SET_CTX_REG(CB_ATS1PR, (b), (c), (v))
+#define SET_ATS1PW(b, c, v)      SET_CTX_REG(CB_ATS1PW, (b), (c), (v))
+#define SET_ATS1UR(b, c, v)      SET_CTX_REG(CB_ATS1UR, (b), (c), (v))
+#define SET_ATS1UW(b, c, v)      SET_CTX_REG(CB_ATS1UW, (b), (c), (v))
+#define SET_ATSR(b, c, v)        SET_CTX_REG(CB_ATSR, (b), (c), (v))
+
+#define GET_SCTLR(b, c)          GET_CTX_REG(CB_SCTLR, (b), (c))
+#define GET_ACTLR(b, c)          GET_CTX_REG(CB_ACTLR, (b), (c))
+#define GET_RESUME(b, c)         GET_CTX_REG(CB_RESUME, (b), (c))
+#define GET_TTBR0(b, c)          GET_CTX_REG(CB_TTBR0, (b), (c))
+#define GET_TTBR1(b, c)          GET_CTX_REG(CB_TTBR1, (b), (c))
+#define GET_TTBCR(b, c)          GET_CTX_REG(CB_TTBCR, (b), (c))
+#define GET_CONTEXTIDR(b, c)     GET_CTX_REG(CB_CONTEXTIDR, (b), (c))
+#define GET_PRRR(b, c)           GET_CTX_REG(CB_PRRR, (b), (c))
+#define GET_NMRR(b, c)           GET_CTX_REG(CB_NMRR, (b), (c))
+#define GET_PAR(b, c)            GET_CTX_REG(CB_PAR, (b), (c))
+#define GET_FSR(b, c)            GET_CTX_REG(CB_FSR, (b), (c))
+#define GET_FSRRESTORE(b, c)     GET_CTX_REG(CB_FSRRESTORE, (b), (c))
+#define GET_FAR(b, c)            GET_CTX_REG(CB_FAR, (b), (c))
+#define GET_FSYNR0(b, c)         GET_CTX_REG(CB_FSYNR0, (b), (c))
+#define GET_FSYNR1(b, c)         GET_CTX_REG(CB_FSYNR1, (b), (c))
+#define GET_TLBIVA(b, c)         GET_CTX_REG(CB_TLBIVA, (b), (c))
+#define GET_TLBIVAA(b, c)        GET_CTX_REG(CB_TLBIVAA, (b), (c))
+#define GET_TLBIASID(b, c)       GET_CTX_REG(CB_TLBIASID, (b), (c))
+#define GET_TLBIALL(b, c)        GET_CTX_REG(CB_TLBIALL, (b), (c))
+#define GET_TLBIVAL(b, c)        GET_CTX_REG(CB_TLBIVAL, (b), (c))
+#define GET_TLBIVAAL(b, c)       GET_CTX_REG(CB_TLBIVAAL, (b), (c))
+#define GET_TLBSYNC(b, c)        GET_CTX_REG(CB_TLBSYNC, (b), (c))
+#define GET_TLBSTATUS(b, c)      GET_CTX_REG(CB_TLBSTATUS, (b), (c))
+#define GET_ATS1PR(b, c)         GET_CTX_REG(CB_ATS1PR, (b), (c))
+#define GET_ATS1PW(b, c)         GET_CTX_REG(CB_ATS1PW, (b), (c))
+#define GET_ATS1UR(b, c)         GET_CTX_REG(CB_ATS1UR, (b), (c))
+#define GET_ATS1UW(b, c)         GET_CTX_REG(CB_ATS1UW, (b), (c))
+#define GET_ATSR(b, c)           GET_CTX_REG(CB_ATSR, (b), (c))
+
+/* Global Register field setters / getters */
+/* Configuration Register: CR0 */
+#define SET_CR0_NSCFG(b, v)        SET_GLOBAL_FIELD(b, CR0, NSCFG, v)
+#define SET_CR0_WACFG(b, v)        SET_GLOBAL_FIELD(b, CR0, WACFG, v)
+#define SET_CR0_RACFG(b, v)        SET_GLOBAL_FIELD(b, CR0, RACFG, v)
+#define SET_CR0_SHCFG(b, v)        SET_GLOBAL_FIELD(b, CR0, SHCFG, v)
+#define SET_CR0_SMCFCFG(b, v)      SET_GLOBAL_FIELD(b, CR0, SMCFCFG, v)
+#define SET_CR0_MTCFG(b, v)        SET_GLOBAL_FIELD(b, CR0, MTCFG, v)
+#define SET_CR0_BSU(b, v)          SET_GLOBAL_FIELD(b, CR0, BSU, v)
+#define SET_CR0_FB(b, v)           SET_GLOBAL_FIELD(b, CR0, FB, v)
+#define SET_CR0_PTM(b, v)          SET_GLOBAL_FIELD(b, CR0, PTM, v)
+#define SET_CR0_VMIDPNE(b, v)      SET_GLOBAL_FIELD(b, CR0, VMIDPNE, v)
+#define SET_CR0_USFCFG(b, v)       SET_GLOBAL_FIELD(b, CR0, USFCFG, v)
+#define SET_CR0_GSE(b, v)          SET_GLOBAL_FIELD(b, CR0, GSE, v)
+#define SET_CR0_STALLD(b, v)       SET_GLOBAL_FIELD(b, CR0, STALLD, v)
+#define SET_CR0_TRANSIENTCFG(b, v) SET_GLOBAL_FIELD(b, CR0, TRANSIENTCFG, v)
+#define SET_CR0_GCFGFIE(b, v)      SET_GLOBAL_FIELD(b, CR0, GCFGFIE, v)
+#define SET_CR0_GCFGFRE(b, v)      SET_GLOBAL_FIELD(b, CR0, GCFGFRE, v)
+#define SET_CR0_GFIE(b, v)         SET_GLOBAL_FIELD(b, CR0, GFIE, v)
+#define SET_CR0_GFRE(b, v)         SET_GLOBAL_FIELD(b, CR0, GFRE, v)
+#define SET_CR0_CLIENTPD(b, v)     SET_GLOBAL_FIELD(b, CR0, CLIENTPD, v)
+
+#define GET_CR0_NSCFG(b)           GET_GLOBAL_FIELD(b, CR0, NSCFG)
+#define GET_CR0_WACFG(b)           GET_GLOBAL_FIELD(b, CR0, WACFG)
+#define GET_CR0_RACFG(b)           GET_GLOBAL_FIELD(b, CR0, RACFG)
+#define GET_CR0_SHCFG(b)           GET_GLOBAL_FIELD(b, CR0, SHCFG)
+#define GET_CR0_SMCFCFG(b)         GET_GLOBAL_FIELD(b, CR0, SMCFCFG)
+#define GET_CR0_MTCFG(b)           GET_GLOBAL_FIELD(b, CR0, MTCFG)
+#define GET_CR0_BSU(b)             GET_GLOBAL_FIELD(b, CR0, BSU)
+#define GET_CR0_FB(b)              GET_GLOBAL_FIELD(b, CR0, FB)
+#define GET_CR0_PTM(b)             GET_GLOBAL_FIELD(b, CR0, PTM)
+#define GET_CR0_VMIDPNE(b)         GET_GLOBAL_FIELD(b, CR0, VMIDPNE)
+#define GET_CR0_USFCFG(b)          GET_GLOBAL_FIELD(b, CR0, USFCFG)
+#define GET_CR0_GSE(b)             GET_GLOBAL_FIELD(b, CR0, GSE)
+#define GET_CR0_STALLD(b)          GET_GLOBAL_FIELD(b, CR0, STALLD)
+#define GET_CR0_TRANSIENTCFG(b)    GET_GLOBAL_FIELD(b, CR0, TRANSIENTCFG)
+#define GET_CR0_GCFGFIE(b)         GET_GLOBAL_FIELD(b, CR0, GCFGFIE)
+#define GET_CR0_GCFGFRE(b)         GET_GLOBAL_FIELD(b, CR0, GCFGFRE)
+#define GET_CR0_GFIE(b)            GET_GLOBAL_FIELD(b, CR0, GFIE)
+#define GET_CR0_GFRE(b)            GET_GLOBAL_FIELD(b, CR0, GFRE)
+#define GET_CR0_CLIENTPD(b)        GET_GLOBAL_FIELD(b, CR0, CLIENTPD)
+
+/* Configuration Register: CR2 */
+#define SET_CR2_BPVMID(b, v)     SET_GLOBAL_FIELD(b, CR2, BPVMID, v)
+
+#define GET_CR2_BPVMID(b)        GET_GLOBAL_FIELD(b, CR2, BPVMID)
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define SET_GATS1PR_ADDR(b, v)   SET_GLOBAL_FIELD(b, GATS1PR, ADDR, v)
+#define SET_GATS1PR_NDX(b, v)    SET_GLOBAL_FIELD(b, GATS1PR, NDX, v)
+
+#define GET_GATS1PR_ADDR(b)      GET_GLOBAL_FIELD(b, GATS1PR, ADDR)
+#define GET_GATS1PR_NDX(b)       GET_GLOBAL_FIELD(b, GATS1PR, NDX)
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define SET_GATS1PW_ADDR(b, v)   SET_GLOBAL_FIELD(b, GATS1PW, ADDR, v)
+#define SET_GATS1PW_NDX(b, v)    SET_GLOBAL_FIELD(b, GATS1PW, NDX, v)
+
+#define GET_GATS1PW_ADDR(b)      GET_GLOBAL_FIELD(b, GATS1PW, ADDR)
+#define GET_GATS1PW_NDX(b)       GET_GLOBAL_FIELD(b, GATS1PW, NDX)
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define SET_GATS1UR_ADDR(b, v)   SET_GLOBAL_FIELD(b, GATS1UR, ADDR, v)
+#define SET_GATS1UR_NDX(b, v)    SET_GLOBAL_FIELD(b, GATS1UR, NDX, v)
+
+#define GET_GATS1UR_ADDR(b)      GET_GLOBAL_FIELD(b, GATS1UR, ADDR)
+#define GET_GATS1UR_NDX(b)       GET_GLOBAL_FIELD(b, GATS1UR, NDX)
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define SET_GATS1UW_ADDR(b, v)   SET_GLOBAL_FIELD(b, GATS1UW, ADDR, v)
+#define SET_GATS1UW_NDX(b, v)    SET_GLOBAL_FIELD(b, GATS1UW, NDX, v)
+
+#define GET_GATS1UW_ADDR(b)      GET_GLOBAL_FIELD(b, GATS1UW, ADDR)
+#define GET_GATS1UW_NDX(b)       GET_GLOBAL_FIELD(b, GATS1UW, NDX)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define SET_GATS12PR_ADDR(b, v)  SET_GLOBAL_FIELD(b, GATS12PR, ADDR, v)
+#define SET_GATS12PR_NDX(b, v)   SET_GLOBAL_FIELD(b, GATS12PR, NDX, v)
+
+#define GET_GATS12PR_ADDR(b)     GET_GLOBAL_FIELD(b, GATS12PR, ADDR)
+#define GET_GATS12PR_NDX(b)      GET_GLOBAL_FIELD(b, GATS12PR, NDX)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define SET_GATS12PW_ADDR(b, v)  SET_GLOBAL_FIELD(b, GATS12PW, ADDR, v)
+#define SET_GATS12PW_NDX(b, v)   SET_GLOBAL_FIELD(b, GATS12PW, NDX, v)
+
+#define GET_GATS12PW_ADDR(b)     GET_GLOBAL_FIELD(b, GATS12PW, ADDR)
+#define GET_GATS12PW_NDX(b)      GET_GLOBAL_FIELD(b, GATS12PW, NDX)
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define SET_GATS12UR_ADDR(b, v)  SET_GLOBAL_FIELD(b, GATS12UR, ADDR, v)
+#define SET_GATS12UR_NDX(b, v)   SET_GLOBAL_FIELD(b, GATS12UR, NDX, v)
+
+#define GET_GATS12UR_ADDR(b)     GET_GLOBAL_FIELD(b, GATS12UR, ADDR)
+#define GET_GATS12UR_NDX(b)      GET_GLOBAL_FIELD(b, GATS12UR, NDX)
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define SET_GATS12UW_ADDR(b, v)  SET_GLOBAL_FIELD(b, GATS12UW, ADDR, v)
+#define SET_GATS12UW_NDX(b, v)   SET_GLOBAL_FIELD(b, GATS12UW, NDX, v)
+
+#define GET_GATS12UW_ADDR(b)     GET_GLOBAL_FIELD(b, GATS12UW, ADDR)
+#define GET_GATS12UW_NDX(b)      GET_GLOBAL_FIELD(b, GATS12UW, NDX)
+
+/* Global Address Translation Status Register: GATSR */
+#define SET_GATSR_ACTIVE(b, v)   SET_GLOBAL_FIELD(b, GATSR, ACTIVE, v)
+
+#define GET_GATSR_ACTIVE(b)      GET_GLOBAL_FIELD(b, GATSR, ACTIVE)
+
+/* Global Fault Address Register: GFAR */
+#define SET_GFAR_FADDR(b, v)     SET_GLOBAL_FIELD(b, GFAR, FADDR, v)
+
+#define GET_GFAR_FADDR(b)        GET_GLOBAL_FIELD(b, GFAR, FADDR)
+
+/* Global Fault Status Register: GFSR */
+#define SET_GFSR_ICF(b, v)        SET_GLOBAL_FIELD(b, GFSR, ICF, v)
+#define SET_GFSR_USF(b, v)        SET_GLOBAL_FIELD(b, GFSR, USF, v)
+#define SET_GFSR_SMCF(b, v)       SET_GLOBAL_FIELD(b, GFSR, SMCF, v)
+#define SET_GFSR_UCBF(b, v)       SET_GLOBAL_FIELD(b, GFSR, UCBF, v)
+#define SET_GFSR_UCIF(b, v)       SET_GLOBAL_FIELD(b, GFSR, UCIF, v)
+#define SET_GFSR_CAF(b, v)        SET_GLOBAL_FIELD(b, GFSR, CAF, v)
+#define SET_GFSR_EF(b, v)         SET_GLOBAL_FIELD(b, GFSR, EF, v)
+#define SET_GFSR_PF(b, v)         SET_GLOBAL_FIELD(b, GFSR, PF, v)
+#define SET_GFSR_MULTI(b, v)      SET_GLOBAL_FIELD(b, GFSR, MULTI, v)
+
+#define GET_GFSR_ICF(b)           GET_GLOBAL_FIELD(b, GFSR, ICF)
+#define GET_GFSR_USF(b)           GET_GLOBAL_FIELD(b, GFSR, USF)
+#define GET_GFSR_SMCF(b)          GET_GLOBAL_FIELD(b, GFSR, SMCF)
+#define GET_GFSR_UCBF(b)          GET_GLOBAL_FIELD(b, GFSR, UCBF)
+#define GET_GFSR_UCIF(b)          GET_GLOBAL_FIELD(b, GFSR, UCIF)
+#define GET_GFSR_CAF(b)           GET_GLOBAL_FIELD(b, GFSR, CAF)
+#define GET_GFSR_EF(b)            GET_GLOBAL_FIELD(b, GFSR, EF)
+#define GET_GFSR_PF(b)            GET_GLOBAL_FIELD(b, GFSR, PF)
+#define GET_GFSR_MULTI(b)         GET_GLOBAL_FIELD(b, GFSR, MULTI)
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define SET_GFSYNR0_NESTED(b, v)  SET_GLOBAL_FIELD(b, GFSYNR0, NESTED, v)
+#define SET_GFSYNR0_WNR(b, v)     SET_GLOBAL_FIELD(b, GFSYNR0, WNR, v)
+#define SET_GFSYNR0_PNU(b, v)     SET_GLOBAL_FIELD(b, GFSYNR0, PNU, v)
+#define SET_GFSYNR0_IND(b, v)     SET_GLOBAL_FIELD(b, GFSYNR0, IND, v)
+#define SET_GFSYNR0_NSSTATE(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NSSTATE, v)
+#define SET_GFSYNR0_NSATTR(b, v)  SET_GLOBAL_FIELD(b, GFSYNR0, NSATTR, v)
+
+#define GET_GFSYNR0_NESTED(b)     GET_GLOBAL_FIELD(b, GFSYNR0, NESTED)
+#define GET_GFSYNR0_WNR(b)        GET_GLOBAL_FIELD(b, GFSYNR0, WNR)
+#define GET_GFSYNR0_PNU(b)        GET_GLOBAL_FIELD(b, GFSYNR0, PNU)
+#define GET_GFSYNR0_IND(b)        GET_GLOBAL_FIELD(b, GFSYNR0, IND)
+#define GET_GFSYNR0_NSSTATE(b)    GET_GLOBAL_FIELD(b, GFSYNR0, NSSTATE)
+#define GET_GFSYNR0_NSATTR(b)     GET_GLOBAL_FIELD(b, GFSYNR0, NSATTR)
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define SET_GFSYNR1_SID(b, v)     SET_GLOBAL_FIELD(b, GFSYNR1, SID, v)
+
+#define GET_GFSYNR1_SID(b)        GET_GLOBAL_FIELD(b, GFSYNR1, SID)
+
+/* Global Physical Address Register: GPAR */
+#define SET_GPAR_F(b, v)          SET_GLOBAL_FIELD(b, GPAR, F, v)
+#define SET_GPAR_SS(b, v)         SET_GLOBAL_FIELD(b, GPAR, SS, v)
+#define SET_GPAR_OUTER(b, v)      SET_GLOBAL_FIELD(b, GPAR, OUTER, v)
+#define SET_GPAR_INNER(b, v)      SET_GLOBAL_FIELD(b, GPAR, INNER, v)
+#define SET_GPAR_SH(b, v)         SET_GLOBAL_FIELD(b, GPAR, SH, v)
+#define SET_GPAR_NS(b, v)         SET_GLOBAL_FIELD(b, GPAR, NS, v)
+#define SET_GPAR_NOS(b, v)        SET_GLOBAL_FIELD(b, GPAR, NOS, v)
+#define SET_GPAR_PA(b, v)         SET_GLOBAL_FIELD(b, GPAR, PA, v)
+#define SET_GPAR_TF(b, v)         SET_GLOBAL_FIELD(b, GPAR, TF, v)
+#define SET_GPAR_AFF(b, v)        SET_GLOBAL_FIELD(b, GPAR, AFF, v)
+#define SET_GPAR_PF(b, v)         SET_GLOBAL_FIELD(b, GPAR, PF, v)
+#define SET_GPAR_EF(b, v)         SET_GLOBAL_FIELD(b, GPAR, EF, v)
+#define SET_GPAR_TLCMCF(b, v)     SET_GLOBAL_FIELD(b, GPAR, TLCMCF, v)
+#define SET_GPAR_TLBLKF(b, v)     SET_GLOBAL_FIELD(b, GPAR, TLBLKF, v)
+#define SET_GPAR_UCBF(b, v)       SET_GLOBAL_FIELD(b, GPAR, UCBF, v)
+
+#define GET_GPAR_F(b)             GET_GLOBAL_FIELD(b, GPAR, F)
+#define GET_GPAR_SS(b)            GET_GLOBAL_FIELD(b, GPAR, SS)
+#define GET_GPAR_OUTER(b)         GET_GLOBAL_FIELD(b, GPAR, OUTER)
+#define GET_GPAR_INNER(b)         GET_GLOBAL_FIELD(b, GPAR, INNER)
+#define GET_GPAR_SH(b)            GET_GLOBAL_FIELD(b, GPAR, SH)
+#define GET_GPAR_NS(b)            GET_GLOBAL_FIELD(b, GPAR, NS)
+#define GET_GPAR_NOS(b)           GET_GLOBAL_FIELD(b, GPAR, NOS)
+#define GET_GPAR_PA(b)            GET_GLOBAL_FIELD(b, GPAR, PA)
+#define GET_GPAR_TF(b)            GET_GLOBAL_FIELD(b, GPAR, TF)
+#define GET_GPAR_AFF(b)           GET_GLOBAL_FIELD(b, GPAR, AFF)
+#define GET_GPAR_PF(b)            GET_GLOBAL_FIELD(b, GPAR, PF)
+#define GET_GPAR_EF(b)            GET_GLOBAL_FIELD(b, GPAR, EF)
+#define GET_GPAR_TLCMCF(b)        GET_GLOBAL_FIELD(b, GPAR, TLCMCF)
+#define GET_GPAR_TLBLKF(b)        GET_GLOBAL_FIELD(b, GPAR, TLBLKF)
+#define GET_GPAR_UCBF(b)          GET_GLOBAL_FIELD(b, GPAR, UCBF)
+
+/* Identification Register: IDR0 */
+#define SET_IDR0_NUMSMRG(b, v)    SET_GLOBAL_FIELD(b, IDR0, NUMSMRG, v)
+#define SET_IDR0_NUMSIDB(b, v)    SET_GLOBAL_FIELD(b, IDR0, NUMSIDB, v)
+#define SET_IDR0_BTM(b, v)        SET_GLOBAL_FIELD(b, IDR0, BTM, v)
+#define SET_IDR0_CTTW(b, v)       SET_GLOBAL_FIELD(b, IDR0, CTTW, v)
+#define SET_IDR0_NUMIRPT(b, v)    SET_GLOBAL_FIELD(b, IDR0, NUMIRPT, v)
+#define SET_IDR0_PTFS(b, v)       SET_GLOBAL_FIELD(b, IDR0, PTFS, v)
+#define SET_IDR0_SMS(b, v)        SET_GLOBAL_FIELD(b, IDR0, SMS, v)
+#define SET_IDR0_NTS(b, v)        SET_GLOBAL_FIELD(b, IDR0, NTS, v)
+#define SET_IDR0_S2TS(b, v)       SET_GLOBAL_FIELD(b, IDR0, S2TS, v)
+#define SET_IDR0_S1TS(b, v)       SET_GLOBAL_FIELD(b, IDR0, S1TS, v)
+#define SET_IDR0_SES(b, v)        SET_GLOBAL_FIELD(b, IDR0, SES, v)
+
+#define GET_IDR0_NUMSMRG(b)       GET_GLOBAL_FIELD(b, IDR0, NUMSMRG)
+#define GET_IDR0_NUMSIDB(b)       GET_GLOBAL_FIELD(b, IDR0, NUMSIDB)
+#define GET_IDR0_BTM(b)           GET_GLOBAL_FIELD(b, IDR0, BTM)
+#define GET_IDR0_CTTW(b)          GET_GLOBAL_FIELD(b, IDR0, CTTW)
+#define GET_IDR0_NUMIRPT(b)       GET_GLOBAL_FIELD(b, IDR0, NUMIRPT)
+#define GET_IDR0_PTFS(b)          GET_GLOBAL_FIELD(b, IDR0, PTFS)
+#define GET_IDR0_SMS(b)           GET_GLOBAL_FIELD(b, IDR0, SMS)
+#define GET_IDR0_NTS(b)           GET_GLOBAL_FIELD(b, IDR0, NTS)
+#define GET_IDR0_S2TS(b)          GET_GLOBAL_FIELD(b, IDR0, S2TS)
+#define GET_IDR0_S1TS(b)          GET_GLOBAL_FIELD(b, IDR0, S1TS)
+#define GET_IDR0_SES(b)           GET_GLOBAL_FIELD(b, IDR0, SES)
+
+/* Identification Register: IDR1 */
+#define SET_IDR1_NUMCB(b, v)       SET_GLOBAL_FIELD(b, IDR1, NUMCB, v)
+#define SET_IDR1_NUMSSDNDXB(b, v)  SET_GLOBAL_FIELD(b, IDR1, NUMSSDNDXB, v)
+#define SET_IDR1_SSDTP(b, v)       SET_GLOBAL_FIELD(b, IDR1, SSDTP, v)
+#define SET_IDR1_SMCD(b, v)        SET_GLOBAL_FIELD(b, IDR1, SMCD, v)
+#define SET_IDR1_NUMS2CB(b, v)     SET_GLOBAL_FIELD(b, IDR1, NUMS2CB, v)
+#define SET_IDR1_NUMPAGENDXB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMPAGENDXB, v)
+#define SET_IDR1_PAGESIZE(b, v)    SET_GLOBAL_FIELD(b, IDR1, PAGESIZE, v)
+
+#define GET_IDR1_NUMCB(b)          GET_GLOBAL_FIELD(b, IDR1, NUMCB)
+#define GET_IDR1_NUMSSDNDXB(b)     GET_GLOBAL_FIELD(b, IDR1, NUMSSDNDXB)
+#define GET_IDR1_SSDTP(b)          GET_GLOBAL_FIELD(b, IDR1, SSDTP)
+#define GET_IDR1_SMCD(b)           GET_GLOBAL_FIELD(b, IDR1, SMCD)
+#define GET_IDR1_NUMS2CB(b)        GET_GLOBAL_FIELD(b, IDR1, NUMS2CB)
+#define GET_IDR1_NUMPAGENDXB(b)    GET_GLOBAL_FIELD(b, IDR1, NUMPAGENDXB)
+#define GET_IDR1_PAGESIZE(b)       GET_GLOBAL_FIELD(b, IDR1, PAGESIZE)
+
+/* Identification Register: IDR2 */
+#define SET_IDR2_IAS(b, v)       SET_GLOBAL_FIELD(b, IDR2, IAS, v)
+#define SET_IDR2_OAS(b, v)       SET_GLOBAL_FIELD(b, IDR2, OAS, v)
+
+#define GET_IDR2_IAS(b)          GET_GLOBAL_FIELD(b, IDR2, IAS)
+#define GET_IDR2_OAS(b)          GET_GLOBAL_FIELD(b, IDR2, OAS)
+
+/* Identification Register: IDR7 */
+#define SET_IDR7_MINOR(b, v)     SET_GLOBAL_FIELD(b, IDR7, MINOR, v)
+#define SET_IDR7_MAJOR(b, v)     SET_GLOBAL_FIELD(b, IDR7, MAJOR, v)
+
+#define GET_IDR7_MINOR(b)        GET_GLOBAL_FIELD(b, IDR7, MINOR)
+#define GET_IDR7_MAJOR(b)        GET_GLOBAL_FIELD(b, IDR7, MAJOR)
+
+/* Stream to Context Register: S2CR_N */
+#define SET_S2CR_CBNDX(b, n, v)   SET_GLOBAL_FIELD_N(b, n, S2CR, CBNDX, v)
+#define SET_S2CR_SHCFG(b, n, v)   SET_GLOBAL_FIELD_N(b, n, S2CR, SHCFG, v)
+#define SET_S2CR_MTCFG(b, n, v)   SET_GLOBAL_FIELD_N(b, n, S2CR, MTCFG, v)
+#define SET_S2CR_MEMATTR(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, MEMATTR, v)
+#define SET_S2CR_TYPE(b, n, v)    SET_GLOBAL_FIELD_N(b, n, S2CR, TYPE, v)
+#define SET_S2CR_NSCFG(b, n, v)   SET_GLOBAL_FIELD_N(b, n, S2CR, NSCFG, v)
+#define SET_S2CR_RACFG(b, n, v)   SET_GLOBAL_FIELD_N(b, n, S2CR, RACFG, v)
+#define SET_S2CR_WACFG(b, n, v)   SET_GLOBAL_FIELD_N(b, n, S2CR, WACFG, v)
+#define SET_S2CR_PRIVCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, PRIVCFG, v)
+#define SET_S2CR_INSTCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, INSTCFG, v)
+#define SET_S2CR_TRANSIENTCFG(b, n, v) \
+				SET_GLOBAL_FIELD_N(b, n, S2CR, TRANSIENTCFG, v)
+#define SET_S2CR_VMID(b, n, v)    SET_GLOBAL_FIELD_N(b, n, S2CR, VMID, v)
+#define SET_S2CR_BSU(b, n, v)     SET_GLOBAL_FIELD_N(b, n, S2CR, BSU, v)
+#define SET_S2CR_FB(b, n, v)      SET_GLOBAL_FIELD_N(b, n, S2CR, FB, v)
+
+#define GET_S2CR_CBNDX(b, n)      GET_GLOBAL_FIELD_N(b, n, S2CR, CBNDX)
+#define GET_S2CR_SHCFG(b, n)      GET_GLOBAL_FIELD_N(b, n, S2CR, SHCFG)
+#define GET_S2CR_MTCFG(b, n)      GET_GLOBAL_FIELD_N(b, n, S2CR, MTCFG)
+#define GET_S2CR_MEMATTR(b, n)    GET_GLOBAL_FIELD_N(b, n, S2CR, MEMATTR)
+#define GET_S2CR_TYPE(b, n)       GET_GLOBAL_FIELD_N(b, n, S2CR, TYPE)
+#define GET_S2CR_NSCFG(b, n)      GET_GLOBAL_FIELD_N(b, n, S2CR, NSCFG)
+#define GET_S2CR_RACFG(b, n)      GET_GLOBAL_FIELD_N(b, n, S2CR, RACFG)
+#define GET_S2CR_WACFG(b, n)      GET_GLOBAL_FIELD_N(b, n, S2CR, WACFG)
+#define GET_S2CR_PRIVCFG(b, n)    GET_GLOBAL_FIELD_N(b, n, S2CR, PRIVCFG)
+#define GET_S2CR_INSTCFG(b, n)    GET_GLOBAL_FIELD_N(b, n, S2CR, INSTCFG)
+#define GET_S2CR_TRANSIENTCFG(b, n) \
+				GET_GLOBAL_FIELD_N(b, n, S2CR, TRANSIENTCFG)
+#define GET_S2CR_VMID(b, n)       GET_GLOBAL_FIELD_N(b, n, S2CR, VMID)
+#define GET_S2CR_BSU(b, n)        GET_GLOBAL_FIELD_N(b, n, S2CR, BSU)
+#define GET_S2CR_FB(b, n)         GET_GLOBAL_FIELD_N(b, n, S2CR, FB)
+
+/* Stream Match Register: SMR_N */
+#define SET_SMR_ID(b, n, v)       SET_GLOBAL_FIELD_N(b, n, SMR, ID, v)
+#define SET_SMR_MASK(b, n, v)     SET_GLOBAL_FIELD_N(b, n, SMR, MASK, v)
+#define SET_SMR_VALID(b, n, v)    SET_GLOBAL_FIELD_N(b, n, SMR, VALID, v)
+
+#define GET_SMR_ID(b, n)          GET_GLOBAL_FIELD_N(b, n, SMR, ID)
+#define GET_SMR_MASK(b, n)        GET_GLOBAL_FIELD_N(b, n, SMR, MASK)
+#define GET_SMR_VALID(b, n)       GET_GLOBAL_FIELD_N(b, n, SMR, VALID)
+
+/* Global TLB Status: TLBGSTATUS */
+#define SET_TLBGSTATUS_GSACTIVE(b, v) \
+				SET_GLOBAL_FIELD(b, TLBGSTATUS, GSACTIVE, v)
+
+#define GET_TLBGSTATUS_GSACTIVE(b)    \
+				GET_GLOBAL_FIELD(b, TLBGSTATUS, GSACTIVE)
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define SET_TLBIVAH_ADDR(b, v)  SET_GLOBAL_FIELD(b, TLBIVAH, ADDR, v)
+
+#define GET_TLBIVAH_ADDR(b)     GET_GLOBAL_FIELD(b, TLBIVAH, ADDR)
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define SET_TLBIVMID_VMID(b, v) SET_GLOBAL_FIELD(b, TLBIVMID, VMID, v)
+
+#define GET_TLBIVMID_VMID(b)    GET_GLOBAL_FIELD(b, TLBIVMID, VMID)
+
+/* Global Register Space 1 Field setters/getters */
+/* Context Bank Attribute Register: CBAR_N */
+#define SET_CBAR_VMID(b, n, v)     SET_GLOBAL_FIELD_N(b, n, CBAR, VMID, v)
+#define SET_CBAR_CBNDX(b, n, v)    SET_GLOBAL_FIELD_N(b, n, CBAR, CBNDX, v)
+#define SET_CBAR_BPSHCFG(b, n, v)  SET_GLOBAL_FIELD_N(b, n, CBAR, BPSHCFG, v)
+#define SET_CBAR_HYPC(b, n, v)     SET_GLOBAL_FIELD_N(b, n, CBAR, HYPC, v)
+#define SET_CBAR_FB(b, n, v)       SET_GLOBAL_FIELD_N(b, n, CBAR, FB, v)
+#define SET_CBAR_MEMATTR(b, n, v)  SET_GLOBAL_FIELD_N(b, n, CBAR, MEMATTR, v)
+#define SET_CBAR_TYPE(b, n, v)     SET_GLOBAL_FIELD_N(b, n, CBAR, TYPE, v)
+#define SET_CBAR_BSU(b, n, v)      SET_GLOBAL_FIELD_N(b, n, CBAR, BSU, v)
+#define SET_CBAR_RACFG(b, n, v)    SET_GLOBAL_FIELD_N(b, n, CBAR, RACFG, v)
+#define SET_CBAR_WACFG(b, n, v)    SET_GLOBAL_FIELD_N(b, n, CBAR, WACFG, v)
+#define SET_CBAR_IRPTNDX(b, n, v)  SET_GLOBAL_FIELD_N(b, n, CBAR, IRPTNDX, v)
+
+#define GET_CBAR_VMID(b, n)        GET_GLOBAL_FIELD_N(b, n, CBAR, VMID)
+#define GET_CBAR_CBNDX(b, n)       GET_GLOBAL_FIELD_N(b, n, CBAR, CBNDX)
+#define GET_CBAR_BPSHCFG(b, n)     GET_GLOBAL_FIELD_N(b, n, CBAR, BPSHCFG)
+#define GET_CBAR_HYPC(b, n)        GET_GLOBAL_FIELD_N(b, n, CBAR, HYPC)
+#define GET_CBAR_FB(b, n)          GET_GLOBAL_FIELD_N(b, n, CBAR, FB)
+#define GET_CBAR_MEMATTR(b, n)     GET_GLOBAL_FIELD_N(b, n, CBAR, MEMATTR)
+#define GET_CBAR_TYPE(b, n)        GET_GLOBAL_FIELD_N(b, n, CBAR, TYPE)
+#define GET_CBAR_BSU(b, n)         GET_GLOBAL_FIELD_N(b, n, CBAR, BSU)
+#define GET_CBAR_RACFG(b, n)       GET_GLOBAL_FIELD_N(b, n, CBAR, RACFG)
+#define GET_CBAR_WACFG(b, n)       GET_GLOBAL_FIELD_N(b, n, CBAR, WACFG)
+#define GET_CBAR_IRPTNDX(b, n)     GET_GLOBAL_FIELD_N(b, n, CBAR, IRPTNDX)
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA_N */
+#define SET_CBFRSYNRA_SID(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID, v)
+
+#define GET_CBFRSYNRA_SID(b, n)    GET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID)
+
+/* Stage 1 Context Bank Format Fields */
+#define SET_CB_ACTLR_REQPRIORITY(b, c, v) \
+		SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY, v)
+#define SET_CB_ACTLR_REQPRIORITYCFG(b, c, v) \
+		SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITYCFG, v)
+#define SET_CB_ACTLR_PRIVCFG(b, c, v) \
+		SET_CONTEXT_FIELD(b, c, CB_ACTLR, PRIVCFG, v)
+#define SET_CB_ACTLR_BPRCOSH(b, c, v) \
+		SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCOSH, v)
+#define SET_CB_ACTLR_BPRCISH(b, c, v) \
+		SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCISH, v)
+#define SET_CB_ACTLR_BPRCNSH(b, c, v) \
+		SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCNSH, v)
+
+#define GET_CB_ACTLR_REQPRIORITY(b, c) \
+		GET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY)
+#define GET_CB_ACTLR_REQPRIORITYCFG(b, c) \
+		GET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITYCFG)
+#define GET_CB_ACTLR_PRIVCFG(b, c)  GET_CONTEXT_FIELD(b, c, CB_ACTLR, PRIVCFG)
+#define GET_CB_ACTLR_BPRCOSH(b, c)  GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCOSH)
+#define GET_CB_ACTLR_BPRCISH(b, c)  GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCISH)
+#define GET_CB_ACTLR_BPRCNSH(b, c)  GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCNSH)
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define SET_CB_ATS1PR_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1PR, ADDR, v)
+
+#define GET_CB_ATS1PR_ADDR(b, c)    GET_CONTEXT_FIELD(b, c, CB_ATS1PR, ADDR)
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define SET_CB_ATS1PW_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1PW, ADDR, v)
+
+#define GET_CB_ATS1PW_ADDR(b, c)    GET_CONTEXT_FIELD(b, c, CB_ATS1PW, ADDR)
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define SET_CB_ATS1UR_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1UR, ADDR, v)
+
+#define GET_CB_ATS1UR_ADDR(b, c)    GET_CONTEXT_FIELD(b, c, CB_ATS1UR, ADDR)
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define SET_CB_ATS1UW_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1UW, ADDR, v)
+
+#define GET_CB_ATS1UW_ADDR(b, c)    GET_CONTEXT_FIELD(b, c, CB_ATS1UW, ADDR)
+
+/* Address Translation Status Register: CB_ATSR */
+#define SET_CB_ATSR_ACTIVE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATSR, ACTIVE, v)
+
+#define GET_CB_ATSR_ACTIVE(b, c)    GET_CONTEXT_FIELD(b, c, CB_ATSR, ACTIVE)
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define SET_CB_CONTEXTIDR_ASID(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, ASID, v)
+#define SET_CB_CONTEXTIDR_PROCID(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, PROCID, v)
+
+#define GET_CB_CONTEXTIDR_ASID(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, ASID)
+#define GET_CB_CONTEXTIDR_PROCID(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, PROCID)
+
+/* Fault Address Register: CB_FAR */
+#define SET_CB_FAR_FADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FAR, FADDR, v)
+
+#define GET_CB_FAR_FADDR(b, c)    GET_CONTEXT_FIELD(b, c, CB_FAR, FADDR)
+
+/* Fault Status Register: CB_FSR */
+#define SET_CB_FSR_TF(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_FSR, TF, v)
+#define SET_CB_FSR_AFF(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_FSR, AFF, v)
+#define SET_CB_FSR_PF(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_FSR, PF, v)
+#define SET_CB_FSR_EF(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_FSR, EF, v)
+#define SET_CB_FSR_TLBMCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TLBMCF, v)
+#define SET_CB_FSR_TLBLKF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TLBLKF, v)
+#define SET_CB_FSR_SS(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_FSR, SS, v)
+#define SET_CB_FSR_MULTI(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_FSR, MULTI, v)
+
+#define GET_CB_FSR_TF(b, c)        GET_CONTEXT_FIELD(b, c, CB_FSR, TF)
+#define GET_CB_FSR_AFF(b, c)       GET_CONTEXT_FIELD(b, c, CB_FSR, AFF)
+#define GET_CB_FSR_PF(b, c)        GET_CONTEXT_FIELD(b, c, CB_FSR, PF)
+#define GET_CB_FSR_EF(b, c)        GET_CONTEXT_FIELD(b, c, CB_FSR, EF)
+#define GET_CB_FSR_TLBMCF(b, c)    GET_CONTEXT_FIELD(b, c, CB_FSR, TLBMCF)
+#define GET_CB_FSR_TLBLKF(b, c)    GET_CONTEXT_FIELD(b, c, CB_FSR, TLBLKF)
+#define GET_CB_FSR_SS(b, c)        GET_CONTEXT_FIELD(b, c, CB_FSR, SS)
+#define GET_CB_FSR_MULTI(b, c)     GET_CONTEXT_FIELD(b, c, CB_FSR, MULTI)
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define SET_CB_FSYNR0_PLVL(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PLVL, v)
+#define SET_CB_FSYNR0_S1PTWF(b, c, v) \
+				SET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1PTWF, v)
+#define SET_CB_FSYNR0_WNR(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_FSYNR0, WNR, v)
+#define SET_CB_FSYNR0_PNU(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PNU, v)
+#define SET_CB_FSYNR0_IND(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_FSYNR0, IND, v)
+#define SET_CB_FSYNR0_NSSTATE(b, c, v) \
+				SET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSSTATE, v)
+#define SET_CB_FSYNR0_NSATTR(b, c, v) \
+				SET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSATTR, v)
+#define SET_CB_FSYNR0_ATOF(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_FSYNR0, ATOF, v)
+#define SET_CB_FSYNR0_PTWF(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PTWF, v)
+#define SET_CB_FSYNR0_AFR(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_FSYNR0, AFR, v)
+#define SET_CB_FSYNR0_S1CBNDX(b, c, v) \
+				SET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1CBNDX, v)
+
+#define GET_CB_FSYNR0_PLVL(b, c)    GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PLVL)
+#define GET_CB_FSYNR0_S1PTWF(b, c)    \
+				GET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1PTWF)
+#define GET_CB_FSYNR0_WNR(b, c)     GET_CONTEXT_FIELD(b, c, CB_FSYNR0, WNR)
+#define GET_CB_FSYNR0_PNU(b, c)     GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PNU)
+#define GET_CB_FSYNR0_IND(b, c)     GET_CONTEXT_FIELD(b, c, CB_FSYNR0, IND)
+#define GET_CB_FSYNR0_NSSTATE(b, c)    \
+				GET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSSTATE)
+#define GET_CB_FSYNR0_NSATTR(b, c)    \
+				GET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSATTR)
+#define GET_CB_FSYNR0_ATOF(b, c)     GET_CONTEXT_FIELD(b, c, CB_FSYNR0, ATOF)
+#define GET_CB_FSYNR0_PTWF(b, c)     GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PTWF)
+#define GET_CB_FSYNR0_AFR(b, c)      GET_CONTEXT_FIELD(b, c, CB_FSYNR0, AFR)
+#define GET_CB_FSYNR0_S1CBNDX(b, c)    \
+				GET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1CBNDX)
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define SET_CB_NMRR_IR0(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, IR0, v)
+#define SET_CB_NMRR_IR1(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, IR1, v)
+#define SET_CB_NMRR_IR2(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, IR2, v)
+#define SET_CB_NMRR_IR3(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, IR3, v)
+#define SET_CB_NMRR_IR4(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, IR4, v)
+#define SET_CB_NMRR_IR5(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, IR5, v)
+#define SET_CB_NMRR_IR6(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, IR6, v)
+#define SET_CB_NMRR_IR7(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, IR7, v)
+#define SET_CB_NMRR_OR0(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, OR0, v)
+#define SET_CB_NMRR_OR1(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, OR1, v)
+#define SET_CB_NMRR_OR2(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, OR2, v)
+#define SET_CB_NMRR_OR3(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, OR3, v)
+#define SET_CB_NMRR_OR4(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, OR4, v)
+#define SET_CB_NMRR_OR5(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, OR5, v)
+#define SET_CB_NMRR_OR6(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, OR6, v)
+#define SET_CB_NMRR_OR7(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, OR7, v)
+
+#define GET_CB_NMRR_IR0(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, IR0)
+#define GET_CB_NMRR_IR1(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, IR1)
+#define GET_CB_NMRR_IR2(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, IR2)
+#define GET_CB_NMRR_IR3(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, IR3)
+#define GET_CB_NMRR_IR4(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, IR4)
+#define GET_CB_NMRR_IR5(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, IR5)
+#define GET_CB_NMRR_IR6(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, IR6)
+#define GET_CB_NMRR_IR7(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, IR7)
+#define GET_CB_NMRR_OR0(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, OR0)
+#define GET_CB_NMRR_OR1(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, OR1)
+#define GET_CB_NMRR_OR2(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, OR2)
+#define GET_CB_NMRR_OR3(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, OR3)
+#define GET_CB_NMRR_OR4(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, OR4)
+#define GET_CB_NMRR_OR5(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, OR5)
+#define GET_CB_NMRR_OR6(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, OR6)
+#define GET_CB_NMRR_OR7(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, OR7)
+
+/* Physical Address Register: CB_PAR */
+#define SET_CB_PAR_F(b, c, v)       SET_CONTEXT_FIELD(b, c, CB_PAR, F, v)
+#define SET_CB_PAR_SS(b, c, v)      SET_CONTEXT_FIELD(b, c, CB_PAR, SS, v)
+#define SET_CB_PAR_OUTER(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PAR, OUTER, v)
+#define SET_CB_PAR_INNER(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PAR, INNER, v)
+#define SET_CB_PAR_SH(b, c, v)      SET_CONTEXT_FIELD(b, c, CB_PAR, SH, v)
+#define SET_CB_PAR_NS(b, c, v)      SET_CONTEXT_FIELD(b, c, CB_PAR, NS, v)
+#define SET_CB_PAR_NOS(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_PAR, NOS, v)
+#define SET_CB_PAR_PA(b, c, v)      SET_CONTEXT_FIELD(b, c, CB_PAR, PA, v)
+#define SET_CB_PAR_TF(b, c, v)      SET_CONTEXT_FIELD(b, c, CB_PAR, TF, v)
+#define SET_CB_PAR_AFF(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_PAR, AFF, v)
+#define SET_CB_PAR_PF(b, c, v)      SET_CONTEXT_FIELD(b, c, CB_PAR, PF, v)
+#define SET_CB_PAR_TLBMCF(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_PAR, TLBMCF, v)
+#define SET_CB_PAR_TLBLKF(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_PAR, TLBLKF, v)
+#define SET_CB_PAR_ATOT(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PAR, ATOT, v)
+#define SET_CB_PAR_PLVL(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PAR, PLVL, v)
+#define SET_CB_PAR_STAGE(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PAR, STAGE, v)
+
+#define GET_CB_PAR_F(b, c)          GET_CONTEXT_FIELD(b, c, CB_PAR, F)
+#define GET_CB_PAR_SS(b, c)         GET_CONTEXT_FIELD(b, c, CB_PAR, SS)
+#define GET_CB_PAR_OUTER(b, c)      GET_CONTEXT_FIELD(b, c, CB_PAR, OUTER)
+#define GET_CB_PAR_INNER(b, c)      GET_CONTEXT_FIELD(b, c, CB_PAR, INNER)
+#define GET_CB_PAR_SH(b, c)         GET_CONTEXT_FIELD(b, c, CB_PAR, SH)
+#define GET_CB_PAR_NS(b, c)         GET_CONTEXT_FIELD(b, c, CB_PAR, NS)
+#define GET_CB_PAR_NOS(b, c)        GET_CONTEXT_FIELD(b, c, CB_PAR, NOS)
+#define GET_CB_PAR_PA(b, c)         GET_CONTEXT_FIELD(b, c, CB_PAR, PA)
+#define GET_CB_PAR_TF(b, c)         GET_CONTEXT_FIELD(b, c, CB_PAR, TF)
+#define GET_CB_PAR_AFF(b, c)        GET_CONTEXT_FIELD(b, c, CB_PAR, AFF)
+#define GET_CB_PAR_PF(b, c)         GET_CONTEXT_FIELD(b, c, CB_PAR, PF)
+#define GET_CB_PAR_TLBMCF(b, c)     GET_CONTEXT_FIELD(b, c, CB_PAR, TLBMCF)
+#define GET_CB_PAR_TLBLKF(b, c)     GET_CONTEXT_FIELD(b, c, CB_PAR, TLBLKF)
+#define GET_CB_PAR_ATOT(b, c)       GET_CONTEXT_FIELD(b, c, CB_PAR, ATOT)
+#define GET_CB_PAR_PLVL(b, c)       GET_CONTEXT_FIELD(b, c, CB_PAR, PLVL)
+#define GET_CB_PAR_STAGE(b, c)      GET_CONTEXT_FIELD(b, c, CB_PAR, STAGE)
+
+/* Primary Region Remap Register: CB_PRRR */
+#define SET_CB_PRRR_TR0(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, TR0, v)
+#define SET_CB_PRRR_TR1(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, TR1, v)
+#define SET_CB_PRRR_TR2(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, TR2, v)
+#define SET_CB_PRRR_TR3(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, TR3, v)
+#define SET_CB_PRRR_TR4(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, TR4, v)
+#define SET_CB_PRRR_TR5(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, TR5, v)
+#define SET_CB_PRRR_TR6(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, TR6, v)
+#define SET_CB_PRRR_TR7(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, TR7, v)
+#define SET_CB_PRRR_DS0(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, DS0, v)
+#define SET_CB_PRRR_DS1(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, DS1, v)
+#define SET_CB_PRRR_NS0(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, NS0, v)
+#define SET_CB_PRRR_NS1(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, NS1, v)
+#define SET_CB_PRRR_NOS0(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS0, v)
+#define SET_CB_PRRR_NOS1(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS1, v)
+#define SET_CB_PRRR_NOS2(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS2, v)
+#define SET_CB_PRRR_NOS3(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS3, v)
+#define SET_CB_PRRR_NOS4(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS4, v)
+#define SET_CB_PRRR_NOS5(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS5, v)
+#define SET_CB_PRRR_NOS6(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS6, v)
+#define SET_CB_PRRR_NOS7(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS7, v)
+
+#define GET_CB_PRRR_TR0(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, TR0)
+#define GET_CB_PRRR_TR1(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, TR1)
+#define GET_CB_PRRR_TR2(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, TR2)
+#define GET_CB_PRRR_TR3(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, TR3)
+#define GET_CB_PRRR_TR4(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, TR4)
+#define GET_CB_PRRR_TR5(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, TR5)
+#define GET_CB_PRRR_TR6(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, TR6)
+#define GET_CB_PRRR_TR7(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, TR7)
+#define GET_CB_PRRR_DS0(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, DS0)
+#define GET_CB_PRRR_DS1(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, DS1)
+#define GET_CB_PRRR_NS0(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, NS0)
+#define GET_CB_PRRR_NS1(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, NS1)
+#define GET_CB_PRRR_NOS0(b, c)      GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS0)
+#define GET_CB_PRRR_NOS1(b, c)      GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS1)
+#define GET_CB_PRRR_NOS2(b, c)      GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS2)
+#define GET_CB_PRRR_NOS3(b, c)      GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS3)
+#define GET_CB_PRRR_NOS4(b, c)      GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS4)
+#define GET_CB_PRRR_NOS5(b, c)      GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS5)
+#define GET_CB_PRRR_NOS6(b, c)      GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS6)
+#define GET_CB_PRRR_NOS7(b, c)      GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS7)
+
+/* Transaction Resume: CB_RESUME */
+#define SET_CB_RESUME_TNR(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_RESUME, TNR, v)
+
+#define GET_CB_RESUME_TNR(b, c)     GET_CONTEXT_FIELD(b, c, CB_RESUME, TNR)
+
+/* System Control Register: CB_SCTLR */
+#define SET_CB_SCTLR_M(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_SCTLR, M, v)
+#define SET_CB_SCTLR_TRE(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_SCTLR, TRE, v)
+#define SET_CB_SCTLR_AFE(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_SCTLR, AFE, v)
+#define SET_CB_SCTLR_AFFD(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_SCTLR, AFFD, v)
+#define SET_CB_SCTLR_E(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_SCTLR, E, v)
+#define SET_CB_SCTLR_CFRE(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFRE, v)
+#define SET_CB_SCTLR_CFIE(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFIE, v)
+#define SET_CB_SCTLR_CFCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFCFG, v)
+#define SET_CB_SCTLR_HUPCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, HUPCF, v)
+#define SET_CB_SCTLR_WXN(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_SCTLR, WXN, v)
+#define SET_CB_SCTLR_UWXN(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_SCTLR, UWXN, v)
+#define SET_CB_SCTLR_ASIDPNE(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_SCTLR, ASIDPNE, v)
+#define SET_CB_SCTLR_TRANSIENTCFG(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_SCTLR, TRANSIENTCFG, v)
+#define SET_CB_SCTLR_MEMATTR(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_SCTLR, MEMATTR, v)
+#define SET_CB_SCTLR_MTCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, MTCFG, v)
+#define SET_CB_SCTLR_SHCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, SHCFG, v)
+#define SET_CB_SCTLR_RACFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, RACFG, v)
+#define SET_CB_SCTLR_WACFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, WACFG, v)
+#define SET_CB_SCTLR_NSCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, NSCFG, v)
+
+#define GET_CB_SCTLR_M(b, c)        GET_CONTEXT_FIELD(b, c, CB_SCTLR, M)
+#define GET_CB_SCTLR_TRE(b, c)      GET_CONTEXT_FIELD(b, c, CB_SCTLR, TRE)
+#define GET_CB_SCTLR_AFE(b, c)      GET_CONTEXT_FIELD(b, c, CB_SCTLR, AFE)
+#define GET_CB_SCTLR_AFFD(b, c)     GET_CONTEXT_FIELD(b, c, CB_SCTLR, AFFD)
+#define GET_CB_SCTLR_E(b, c)        GET_CONTEXT_FIELD(b, c, CB_SCTLR, E)
+#define GET_CB_SCTLR_CFRE(b, c)     GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFRE)
+#define GET_CB_SCTLR_CFIE(b, c)     GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFIE)
+#define GET_CB_SCTLR_CFCFG(b, c)    GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFCFG)
+#define GET_CB_SCTLR_HUPCF(b, c)    GET_CONTEXT_FIELD(b, c, CB_SCTLR, HUPCF)
+#define GET_CB_SCTLR_WXN(b, c)      GET_CONTEXT_FIELD(b, c, CB_SCTLR, WXN)
+#define GET_CB_SCTLR_UWXN(b, c)     GET_CONTEXT_FIELD(b, c, CB_SCTLR, UWXN)
+#define GET_CB_SCTLR_ASIDPNE(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_SCTLR, ASIDPNE)
+#define GET_CB_SCTLR_TRANSIENTCFG(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_SCTLR, TRANSIENTCFG)
+#define GET_CB_SCTLR_MEMATTR(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_SCTLR, MEMATTR)
+#define GET_CB_SCTLR_MTCFG(b, c)    GET_CONTEXT_FIELD(b, c, CB_SCTLR, MTCFG)
+#define GET_CB_SCTLR_SHCFG(b, c)    GET_CONTEXT_FIELD(b, c, CB_SCTLR, SHCFG)
+#define GET_CB_SCTLR_RACFG(b, c)    GET_CONTEXT_FIELD(b, c, CB_SCTLR, RACFG)
+#define GET_CB_SCTLR_WACFG(b, c)    GET_CONTEXT_FIELD(b, c, CB_SCTLR, WACFG)
+#define GET_CB_SCTLR_NSCFG(b, c)    GET_CONTEXT_FIELD(b, c, CB_SCTLR, NSCFG)
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define SET_CB_TLBIASID_ASID(b, c, v) \
+				SET_CONTEXT_FIELD(b, c, CB_TLBIASID, ASID, v)
+
+#define GET_CB_TLBIASID_ASID(b, c)    \
+				GET_CONTEXT_FIELD(b, c, CB_TLBIASID, ASID)
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define SET_CB_TLBIVA_ASID(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVA, ASID, v)
+#define SET_CB_TLBIVA_VA(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TLBIVA, VA, v)
+
+#define GET_CB_TLBIVA_ASID(b, c)    GET_CONTEXT_FIELD(b, c, CB_TLBIVA, ASID)
+#define GET_CB_TLBIVA_VA(b, c)      GET_CONTEXT_FIELD(b, c, CB_TLBIVA, VA)
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define SET_CB_TLBIVAA_VA(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_TLBIVAA, VA, v)
+
+#define GET_CB_TLBIVAA_VA(b, c)     GET_CONTEXT_FIELD(b, c, CB_TLBIVAA, VA)
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define SET_CB_TLBIVAAL_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAAL, VA, v)
+
+#define GET_CB_TLBIVAAL_VA(b, c)    GET_CONTEXT_FIELD(b, c, CB_TLBIVAAL, VA)
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define SET_CB_TLBIVAL_ASID(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_TLBIVAL, ASID, v)
+#define SET_CB_TLBIVAL_VA(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TLBIVAL, VA, v)
+
+#define GET_CB_TLBIVAL_ASID(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_TLBIVAL, ASID)
+#define GET_CB_TLBIVAL_VA(b, c)      GET_CONTEXT_FIELD(b, c, CB_TLBIVAL, VA)
+
+/* TLB Status: CB_TLBSTATUS */
+#define SET_CB_TLBSTATUS_SACTIVE(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_TLBSTATUS, SACTIVE, v)
+
+#define GET_CB_TLBSTATUS_SACTIVE(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_TLBSTATUS, SACTIVE)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define SET_CB_TTBCR_T0SZ(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ, v)
+#define SET_CB_TTBCR_PD0(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_TTBCR, PD0, v)
+#define SET_CB_TTBCR_PD1(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_TTBCR, PD1, v)
+#define SET_CB_TTBCR_NSCFG0(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0, v)
+#define SET_CB_TTBCR_NSCFG1(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1, v)
+#define SET_CB_TTBCR_EAE(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_TTBCR, EAE, v)
+
+#define GET_CB_TTBCR_T0SZ(b, c)      GET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ)
+#define GET_CB_TTBCR_PD0(b, c)       GET_CONTEXT_FIELD(b, c, CB_TTBCR, PD0)
+#define GET_CB_TTBCR_PD1(b, c)       GET_CONTEXT_FIELD(b, c, CB_TTBCR, PD1)
+#define GET_CB_TTBCR_NSCFG0(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0)
+#define GET_CB_TTBCR_NSCFG1(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1)
+#define GET_CB_TTBCR_EAE(b, c)       GET_CONTEXT_FIELD(b, c, CB_TTBCR, EAE)
+
+/* Translation Table Base Register 0: CB_TTBR0 */
+#define SET_CB_TTBR0_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN1, v)
+#define SET_CB_TTBR0_S(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_TTBR0, S, v)
+#define SET_CB_TTBR0_RGN(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TTBR0, RGN, v)
+#define SET_CB_TTBR0_NOS(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TTBR0, NOS, v)
+#define SET_CB_TTBR0_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN0, v)
+#define SET_CB_TTBR0_ADDR(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_TTBR0, ADDR, v)
+
+#define GET_CB_TTBR0_IRGN1(b, c)    GET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN1)
+#define GET_CB_TTBR0_S(b, c)        GET_CONTEXT_FIELD(b, c, CB_TTBR0, S)
+#define GET_CB_TTBR0_RGN(b, c)      GET_CONTEXT_FIELD(b, c, CB_TTBR0, RGN)
+#define GET_CB_TTBR0_NOS(b, c)      GET_CONTEXT_FIELD(b, c, CB_TTBR0, NOS)
+#define GET_CB_TTBR0_IRGN0(b, c)    GET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN0)
+#define GET_CB_TTBR0_ADDR(b, c)     GET_CONTEXT_FIELD(b, c, CB_TTBR0, ADDR)
+
+/* Translation Table Base Register 1: CB_TTBR1 */
+#define SET_CB_TTBR1_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN1, v)
+#define SET_CB_TTBR1_S(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_TTBR1, S, v)
+#define SET_CB_TTBR1_RGN(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TTBR1, RGN, v)
+#define SET_CB_TTBR1_NOS(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TTBR1, NOS, v)
+#define SET_CB_TTBR1_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN0, v)
+#define SET_CB_TTBR1_ADDR(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_TTBR1, ADDR, v)
+
+#define GET_CB_TTBR1_IRGN1(b, c)    GET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN1)
+#define GET_CB_TTBR1_S(b, c)        GET_CONTEXT_FIELD(b, c, CB_TTBR1, S)
+#define GET_CB_TTBR1_RGN(b, c)      GET_CONTEXT_FIELD(b, c, CB_TTBR1, RGN)
+#define GET_CB_TTBR1_NOS(b, c)      GET_CONTEXT_FIELD(b, c, CB_TTBR1, NOS)
+#define GET_CB_TTBR1_IRGN0(b, c)    GET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN0)
+#define GET_CB_TTBR1_ADDR(b, c)     GET_CONTEXT_FIELD(b, c, CB_TTBR1, ADDR)
+
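+/*
+ * Illustrative usage sketch only (not part of the register map).  The
+ * context bank accessors above take the IOMMU register base and a context
+ * bank index; they are assumed to expand through the SET_CONTEXT_FIELD and
+ * GET_CONTEXT_FIELD helpers defined earlier in this header.  For example,
+ * a fault handler might read the fault syndrome and faulting address with:
+ *
+ *	if (GET_CB_FSR_TF(base, ctx))
+ *		faddr = GET_CB_FAR_FADDR(base, ctx);
+ */
+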
+/* Global Register Space 0 */
+#define CR0		(0x0000)
+#define SCR1		(0x0004)
+#define CR2		(0x0008)
+#define ACR		(0x0010)
+#define IDR0		(0x0020)
+#define IDR1		(0x0024)
+#define IDR2		(0x0028)
+#define IDR7		(0x003C)
+#define GFAR		(0x0040)
+#define GFSR		(0x0044)
+#define GFSRRESTORE	(0x004C)
+#define GFSYNR0		(0x0050)
+#define GFSYNR1		(0x0054)
+#define GFSYNR2		(0x0058)
+#define TLBIVMID	(0x0064)
+#define TLBIALLNSNH	(0x0068)
+#define TLBIALLH	(0x006C)
+#define TLBGSYNC	(0x0070)
+#define TLBGSTATUS	(0x0074)
+#define TLBIVAH		(0x0078)
+#define GATS1UR		(0x0100)
+#define GATS1UW		(0x0108)
+#define GATS1PR		(0x0110)
+#define GATS1PW		(0x0118)
+#define GATS12UR	(0x0120)
+#define GATS12UW	(0x0128)
+#define GATS12PR	(0x0130)
+#define GATS12PW	(0x0138)
+#define GPAR		(0x0180)
+#define GATSR		(0x0188)
+#define NSCR0		(0x0400)
+#define NSCR2		(0x0408)
+#define NSACR		(0x0410)
+#define SMR		(0x0800)
+#define S2CR		(0x0C00)
+
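+/*
+ * SMR and S2CR are the bases of register arrays: the _N accessors above
+ * touch one 32-bit entry per stream mapping group, so entry n is assumed
+ * to live at SMR + (n << 2) and S2CR + (n << 2) respectively.
+ */
+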
+/* Global Register Space 1 */
+#define CBAR		(0x1000)
+#define CBFRSYNRA	(0x1400)
+
+/* Implementation defined Register Space */
+#define PREDICTIONDIS0	(0x204C)
+#define PREDICTIONDIS1	(0x2050)
+#define S1L1BFBLP0	(0x215C)
+
+/* Performance Monitoring Register Space */
+#define PMEVCNTR_N	(0x3000)
+#define PMEVTYPER_N	(0x3400)
+#define PMCGCR_N	(0x3800)
+#define PMCGSMR_N	(0x3A00)
+#define PMCNTENSET_N	(0x3C00)
+#define PMCNTENCLR_N	(0x3C20)
+#define PMINTENSET_N	(0x3C40)
+#define PMINTENCLR_N	(0x3C60)
+#define PMOVSCLR_N	(0x3C80)
+#define PMOVSSET_N	(0x3CC0)
+#define PMCFGR		(0x3E00)
+#define PMCR		(0x3E04)
+#define PMCEID0		(0x3E20)
+#define PMCEID1		(0x3E24)
+#define PMAUTHSTATUS	(0x3FB8)
+#define PMDEVTYPE	(0x3FCC)
+
+/* Secure Status Determination Address Space */
+#define SSDR_N		(0x4000)
+
+/* Stage 1 Context Bank Format */
+#define CB_SCTLR	(0x000)
+#define CB_ACTLR	(0x004)
+#define CB_RESUME	(0x008)
+#define CB_TTBR0	(0x020)
+#define CB_TTBR1	(0x028)
+#define CB_TTBCR	(0x030)
+#define CB_CONTEXTIDR	(0x034)
+#define CB_PRRR		(0x038)
+#define CB_NMRR		(0x03C)
+#define CB_PAR		(0x050)
+#define CB_FSR		(0x058)
+#define CB_FSRRESTORE	(0x05C)
+#define CB_FAR		(0x060)
+#define CB_FSYNR0	(0x068)
+#define CB_FSYNR1	(0x06C)
+#define CB_TLBIVA	(0x600)
+#define CB_TLBIVAA	(0x608)
+#define CB_TLBIASID	(0x610)
+#define CB_TLBIALL	(0x618)
+#define CB_TLBIVAL	(0x620)
+#define CB_TLBIVAAL	(0x628)
+#define CB_TLBSYNC	(0x7F0)
+#define CB_TLBSTATUS	(0x7F4)
+#define CB_ATS1PR	(0x800)
+#define CB_ATS1PW	(0x808)
+#define CB_ATS1UR	(0x810)
+#define CB_ATS1UW	(0x818)
+#define CB_ATSR		(0x8F0)
+#define CB_PMXEVCNTR_N	(0xE00)
+#define CB_PMXEVTYPER_N	(0xE80)
+#define CB_PMCFGR	(0xF00)
+#define CB_PMCR		(0xF04)
+#define CB_PMCEID0	(0xF20)
+#define CB_PMCEID1	(0xF24)
+#define CB_PMCNTENSET	(0xF40)
+#define CB_PMCNTENCLR	(0xF44)
+#define CB_PMINTENSET	(0xF48)
+#define CB_PMINTENCLR	(0xF4C)
+#define CB_PMOVSCLR	(0xF50)
+#define CB_PMOVSSET	(0xF58)
+#define CB_PMAUTHSTATUS	(0xFB8)
+
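+/*
+ * The CB_* offsets above are relative to the start of an individual
+ * context bank's register page; the context bank index passed to
+ * SET_CONTEXT_FIELD and GET_CONTEXT_FIELD is assumed to select that page
+ * before the offset is applied.
+ */
+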
+/* Global Register Fields */
+/* Configuration Register: CR0 */
+#define CR0_NSCFG         (CR0_NSCFG_MASK         << CR0_NSCFG_SHIFT)
+#define CR0_WACFG         (CR0_WACFG_MASK         << CR0_WACFG_SHIFT)
+#define CR0_RACFG         (CR0_RACFG_MASK         << CR0_RACFG_SHIFT)
+#define CR0_SHCFG         (CR0_SHCFG_MASK         << CR0_SHCFG_SHIFT)
+#define CR0_SMCFCFG       (CR0_SMCFCFG_MASK       << CR0_SMCFCFG_SHIFT)
+#define CR0_MTCFG         (CR0_MTCFG_MASK         << CR0_MTCFG_SHIFT)
+#define CR0_MEMATTR       (CR0_MEMATTR_MASK       << CR0_MEMATTR_SHIFT)
+#define CR0_BSU           (CR0_BSU_MASK           << CR0_BSU_SHIFT)
+#define CR0_FB            (CR0_FB_MASK            << CR0_FB_SHIFT)
+#define CR0_PTM           (CR0_PTM_MASK           << CR0_PTM_SHIFT)
+#define CR0_VMIDPNE       (CR0_VMIDPNE_MASK       << CR0_VMIDPNE_SHIFT)
+#define CR0_USFCFG        (CR0_USFCFG_MASK        << CR0_USFCFG_SHIFT)
+#define CR0_GSE           (CR0_GSE_MASK           << CR0_GSE_SHIFT)
+#define CR0_STALLD        (CR0_STALLD_MASK        << CR0_STALLD_SHIFT)
+#define CR0_TRANSIENTCFG  (CR0_TRANSIENTCFG_MASK  << CR0_TRANSIENTCFG_SHIFT)
+#define CR0_GCFGFIE       (CR0_GCFGFIE_MASK       << CR0_GCFGFIE_SHIFT)
+#define CR0_GCFGFRE       (CR0_GCFGFRE_MASK       << CR0_GCFGFRE_SHIFT)
+#define CR0_GFIE          (CR0_GFIE_MASK          << CR0_GFIE_SHIFT)
+#define CR0_GFRE          (CR0_GFRE_MASK          << CR0_GFRE_SHIFT)
+#define CR0_CLIENTPD      (CR0_CLIENTPD_MASK      << CR0_CLIENTPD_SHIFT)
+
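+/*
+ * Each of these field macros is simply the field's unshifted mask moved to
+ * its bit position; e.g. CR0_CLIENTPD above expands to
+ * (CR0_CLIENTPD_MASK << CR0_CLIENTPD_SHIFT), and with CR0_CLIENTPD_MASK
+ * defined as 0x01 further down it is a single-bit mask that can be ORed
+ * into or tested against the raw register value.
+ */
+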
+/* Configuration Register: CR2 */
+#define CR2_BPVMID        (CR2_BPVMID_MASK << CR2_BPVMID_SHIFT)
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR  (GATS1PR_ADDR_MASK  << GATS1PR_ADDR_SHIFT)
+#define GATS1PR_NDX   (GATS1PR_NDX_MASK   << GATS1PR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR  (GATS1PW_ADDR_MASK  << GATS1PW_ADDR_SHIFT)
+#define GATS1PW_NDX   (GATS1PW_NDX_MASK   << GATS1PW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR  (GATS1UR_ADDR_MASK  << GATS1UR_ADDR_SHIFT)
+#define GATS1UR_NDX   (GATS1UR_NDX_MASK   << GATS1UR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR  (GATS1UW_ADDR_MASK  << GATS1UW_ADDR_SHIFT)
+#define GATS1UW_NDX   (GATS1UW_NDX_MASK   << GATS1UW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR (GATS12PR_ADDR_MASK << GATS12PR_ADDR_SHIFT)
+#define GATS12PR_NDX  (GATS12PR_NDX_MASK  << GATS12PR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR (GATS12PW_ADDR_MASK << GATS12PW_ADDR_SHIFT)
+#define GATS12PW_NDX  (GATS12PW_NDX_MASK  << GATS12PW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR (GATS12UR_ADDR_MASK << GATS12UR_ADDR_SHIFT)
+#define GATS12UR_NDX  (GATS12UR_NDX_MASK  << GATS12UR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR (GATS12UW_ADDR_MASK << GATS12UW_ADDR_SHIFT)
+#define GATS12UW_NDX  (GATS12UW_NDX_MASK  << GATS12UW_NDX_SHIFT)
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE  (GATSR_ACTIVE_MASK  << GATSR_ACTIVE_SHIFT)
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR    (GFAR_FADDR_MASK << GFAR_FADDR_SHIFT)
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF      (GFSR_ICF_MASK   << GFSR_ICF_SHIFT)
+#define GFSR_USF      (GFSR_USF_MASK   << GFSR_USF_SHIFT)
+#define GFSR_SMCF     (GFSR_SMCF_MASK  << GFSR_SMCF_SHIFT)
+#define GFSR_UCBF     (GFSR_UCBF_MASK  << GFSR_UCBF_SHIFT)
+#define GFSR_UCIF     (GFSR_UCIF_MASK  << GFSR_UCIF_SHIFT)
+#define GFSR_CAF      (GFSR_CAF_MASK   << GFSR_CAF_SHIFT)
+#define GFSR_EF       (GFSR_EF_MASK    << GFSR_EF_SHIFT)
+#define GFSR_PF       (GFSR_PF_MASK    << GFSR_PF_SHIFT)
+#define GFSR_MULTI    (GFSR_MULTI_MASK << GFSR_MULTI_SHIFT)
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED  (GFSYNR0_NESTED_MASK  << GFSYNR0_NESTED_SHIFT)
+#define GFSYNR0_WNR     (GFSYNR0_WNR_MASK     << GFSYNR0_WNR_SHIFT)
+#define GFSYNR0_PNU     (GFSYNR0_PNU_MASK     << GFSYNR0_PNU_SHIFT)
+#define GFSYNR0_IND     (GFSYNR0_IND_MASK     << GFSYNR0_IND_SHIFT)
+#define GFSYNR0_NSSTATE (GFSYNR0_NSSTATE_MASK << GFSYNR0_NSSTATE_SHIFT)
+#define GFSYNR0_NSATTR  (GFSYNR0_NSATTR_MASK  << GFSYNR0_NSATTR_SHIFT)
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID     (GFSYNR1_SID_MASK     << GFSYNR1_SID_SHIFT)
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F          (GPAR_F_MASK      << GPAR_F_SHIFT)
+#define GPAR_SS         (GPAR_SS_MASK     << GPAR_SS_SHIFT)
+#define GPAR_OUTER      (GPAR_OUTER_MASK  << GPAR_OUTER_SHIFT)
+#define GPAR_INNER      (GPAR_INNER_MASK  << GPAR_INNER_SHIFT)
+#define GPAR_SH         (GPAR_SH_MASK     << GPAR_SH_SHIFT)
+#define GPAR_NS         (GPAR_NS_MASK     << GPAR_NS_SHIFT)
+#define GPAR_NOS        (GPAR_NOS_MASK    << GPAR_NOS_SHIFT)
+#define GPAR_PA         (GPAR_PA_MASK     << GPAR_PA_SHIFT)
+#define GPAR_TF         (GPAR_TF_MASK     << GPAR_TF_SHIFT)
+#define GPAR_AFF        (GPAR_AFF_MASK    << GPAR_AFF_SHIFT)
+#define GPAR_PF         (GPAR_PF_MASK     << GPAR_PF_SHIFT)
+#define GPAR_EF         (GPAR_EF_MASK     << GPAR_EF_SHIFT)
+#define GPAR_TLBMCF     (GPAR_TLBMCF_MASK << GPAR_TLBMCF_SHIFT)
+#define GPAR_TLBLKF     (GPAR_TLBLKF_MASK << GPAR_TLBLKF_SHIFT)
+#define GPAR_UCBF       (GPAR_UCBF_MASK   << GPAR_UCBF_SHIFT)
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG    (IDR0_NUMSMRG_MASK  << IDR0_NUMSMRG_SHIFT)
+#define IDR0_NUMSIDB    (IDR0_NUMSIDB_MASK  << IDR0_NUMSIDB_SHIFT)
+#define IDR0_BTM        (IDR0_BTM_MASK      << IDR0_BTM_SHIFT)
+#define IDR0_CTTW       (IDR0_CTTW_MASK     << IDR0_CTTW_SHIFT)
+#define IDR0_NUMIRPT    (IDR0_NUMIRPT_MASK  << IDR0_NUMIRPT_SHIFT)
+#define IDR0_PTFS       (IDR0_PTFS_MASK     << IDR0_PTFS_SHIFT)
+#define IDR0_SMS        (IDR0_SMS_MASK      << IDR0_SMS_SHIFT)
+#define IDR0_NTS        (IDR0_NTS_MASK      << IDR0_NTS_SHIFT)
+#define IDR0_S2TS       (IDR0_S2TS_MASK     << IDR0_S2TS_SHIFT)
+#define IDR0_S1TS       (IDR0_S1TS_MASK     << IDR0_S1TS_SHIFT)
+#define IDR0_SES        (IDR0_SES_MASK      << IDR0_SES_SHIFT)
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB       (IDR1_NUMCB_MASK       << IDR1_NUMCB_SHIFT)
+#define IDR1_NUMSSDNDXB  (IDR1_NUMSSDNDXB_MASK  << IDR1_NUMSSDNDXB_SHIFT)
+#define IDR1_SSDTP       (IDR1_SSDTP_MASK       << IDR1_SSDTP_SHIFT)
+#define IDR1_SMCD        (IDR1_SMCD_MASK        << IDR1_SMCD_SHIFT)
+#define IDR1_NUMS2CB     (IDR1_NUMS2CB_MASK     << IDR1_NUMS2CB_SHIFT)
+#define IDR1_NUMPAGENDXB (IDR1_NUMPAGENDXB_MASK << IDR1_NUMPAGENDXB_SHIFT)
+#define IDR1_PAGESIZE    (IDR1_PAGESIZE_MASK    << IDR1_PAGESIZE_SHIFT)
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS         (IDR2_IAS_MASK << IDR2_IAS_SHIFT)
+#define IDR2_OAS         (IDR2_OAS_MASK << IDR2_OAS_SHIFT)
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR       (IDR7_MINOR_MASK << IDR7_MINOR_SHIFT)
+#define IDR7_MAJOR       (IDR7_MAJOR_MASK << IDR7_MAJOR_SHIFT)
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX        (S2CR_CBNDX_MASK         << S2CR_CBNDX_SHIFT)
+#define S2CR_SHCFG        (S2CR_SHCFG_MASK         << S2CR_SHCFG_SHIFT)
+#define S2CR_MTCFG        (S2CR_MTCFG_MASK         << S2CR_MTCFG_SHIFT)
+#define S2CR_MEMATTR      (S2CR_MEMATTR_MASK       << S2CR_MEMATTR_SHIFT)
+#define S2CR_TYPE         (S2CR_TYPE_MASK          << S2CR_TYPE_SHIFT)
+#define S2CR_NSCFG        (S2CR_NSCFG_MASK         << S2CR_NSCFG_SHIFT)
+#define S2CR_RACFG        (S2CR_RACFG_MASK         << S2CR_RACFG_SHIFT)
+#define S2CR_WACFG        (S2CR_WACFG_MASK         << S2CR_WACFG_SHIFT)
+#define S2CR_PRIVCFG      (S2CR_PRIVCFG_MASK       << S2CR_PRIVCFG_SHIFT)
+#define S2CR_INSTCFG      (S2CR_INSTCFG_MASK       << S2CR_INSTCFG_SHIFT)
+#define S2CR_TRANSIENTCFG (S2CR_TRANSIENTCFG_MASK  << S2CR_TRANSIENTCFG_SHIFT)
+#define S2CR_VMID         (S2CR_VMID_MASK          << S2CR_VMID_SHIFT)
+#define S2CR_BSU          (S2CR_BSU_MASK           << S2CR_BSU_SHIFT)
+#define S2CR_FB           (S2CR_FB_MASK            << S2CR_FB_SHIFT)
+
+/* Stream Match Register: SMR */
+#define SMR_ID            (SMR_ID_MASK    << SMR_ID_SHIFT)
+#define SMR_MASK          (SMR_MASK_MASK  << SMR_MASK_SHIFT)
+#define SMR_VALID         (SMR_VALID_MASK << SMR_VALID_SHIFT)
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE (TLBGSTATUS_GSACTIVE_MASK << \
+					TLBGSTATUS_GSACTIVE_SHIFT)
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR  (TLBIVAH_ADDR_MASK << TLBIVAH_ADDR_SHIFT)
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID (TLBIVMID_VMID_MASK << TLBIVMID_VMID_SHIFT)
+
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID       (CBAR_VMID_MASK    << CBAR_VMID_SHIFT)
+#define CBAR_CBNDX      (CBAR_CBNDX_MASK   << CBAR_CBNDX_SHIFT)
+#define CBAR_BPSHCFG    (CBAR_BPSHCFG_MASK << CBAR_BPSHCFG_SHIFT)
+#define CBAR_HYPC       (CBAR_HYPC_MASK    << CBAR_HYPC_SHIFT)
+#define CBAR_FB         (CBAR_FB_MASK      << CBAR_FB_SHIFT)
+#define CBAR_MEMATTR    (CBAR_MEMATTR_MASK << CBAR_MEMATTR_SHIFT)
+#define CBAR_TYPE       (CBAR_TYPE_MASK    << CBAR_TYPE_SHIFT)
+#define CBAR_BSU        (CBAR_BSU_MASK     << CBAR_BSU_SHIFT)
+#define CBAR_RACFG      (CBAR_RACFG_MASK   << CBAR_RACFG_SHIFT)
+#define CBAR_WACFG      (CBAR_WACFG_MASK   << CBAR_WACFG_SHIFT)
+#define CBAR_IRPTNDX    (CBAR_IRPTNDX_MASK << CBAR_IRPTNDX_SHIFT)
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID   (CBFRSYNRA_SID_MASK << CBFRSYNRA_SID_SHIFT)
+
+/* Performance Monitoring Register Fields */
+
+/* Stage 1 Context Bank Format Fields */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY \
+		(CB_ACTLR_REQPRIORITY_MASK << CB_ACTLR_REQPRIORITY_SHIFT)
+#define CB_ACTLR_REQPRIORITYCFG \
+		(CB_ACTLR_REQPRIORITYCFG_MASK << CB_ACTLR_REQPRIORITYCFG_SHIFT)
+#define CB_ACTLR_PRIVCFG (CB_ACTLR_PRIVCFG_MASK << CB_ACTLR_PRIVCFG_SHIFT)
+#define CB_ACTLR_BPRCOSH (CB_ACTLR_BPRCOSH_MASK << CB_ACTLR_BPRCOSH_SHIFT)
+#define CB_ACTLR_BPRCISH (CB_ACTLR_BPRCISH_MASK << CB_ACTLR_BPRCISH_SHIFT)
+#define CB_ACTLR_BPRCNSH (CB_ACTLR_BPRCNSH_MASK << CB_ACTLR_BPRCNSH_SHIFT)
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR  (CB_ATS1PR_ADDR_MASK << CB_ATS1PR_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR  (CB_ATS1PW_ADDR_MASK << CB_ATS1PW_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR  (CB_ATS1UR_ADDR_MASK << CB_ATS1UR_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR  (CB_ATS1UW_ADDR_MASK << CB_ATS1UW_ADDR_SHIFT)
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE  (CB_ATSR_ACTIVE_MASK << CB_ATSR_ACTIVE_SHIFT)
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID    (CB_CONTEXTIDR_ASID_MASK << \
+				CB_CONTEXTIDR_ASID_SHIFT)
+#define CB_CONTEXTIDR_PROCID  (CB_CONTEXTIDR_PROCID_MASK << \
+				CB_CONTEXTIDR_PROCID_SHIFT)
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR  (CB_FAR_FADDR_MASK << CB_FAR_FADDR_SHIFT)
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF     (CB_FSR_TF_MASK     << CB_FSR_TF_SHIFT)
+#define CB_FSR_AFF    (CB_FSR_AFF_MASK    << CB_FSR_AFF_SHIFT)
+#define CB_FSR_PF     (CB_FSR_PF_MASK     << CB_FSR_PF_SHIFT)
+#define CB_FSR_EF     (CB_FSR_EF_MASK     << CB_FSR_EF_SHIFT)
+#define CB_FSR_TLBMCF (CB_FSR_TLBMCF_MASK << CB_FSR_TLBMCF_SHIFT)
+#define CB_FSR_TLBLKF (CB_FSR_TLBLKF_MASK << CB_FSR_TLBLKF_SHIFT)
+#define CB_FSR_SS     (CB_FSR_SS_MASK     << CB_FSR_SS_SHIFT)
+#define CB_FSR_MULTI  (CB_FSR_MULTI_MASK  << CB_FSR_MULTI_SHIFT)
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL     (CB_FSYNR0_PLVL_MASK    << CB_FSYNR0_PLVL_SHIFT)
+#define CB_FSYNR0_S1PTWF   (CB_FSYNR0_S1PTWF_MASK  << CB_FSYNR0_S1PTWF_SHIFT)
+#define CB_FSYNR0_WNR      (CB_FSYNR0_WNR_MASK     << CB_FSYNR0_WNR_SHIFT)
+#define CB_FSYNR0_PNU      (CB_FSYNR0_PNU_MASK     << CB_FSYNR0_PNU_SHIFT)
+#define CB_FSYNR0_IND      (CB_FSYNR0_IND_MASK     << CB_FSYNR0_IND_SHIFT)
+#define CB_FSYNR0_NSSTATE  (CB_FSYNR0_NSSTATE_MASK << CB_FSYNR0_NSSTATE_SHIFT)
+#define CB_FSYNR0_NSATTR   (CB_FSYNR0_NSATTR_MASK  << CB_FSYNR0_NSATTR_SHIFT)
+#define CB_FSYNR0_ATOF     (CB_FSYNR0_ATOF_MASK    << CB_FSYNR0_ATOF_SHIFT)
+#define CB_FSYNR0_PTWF     (CB_FSYNR0_PTWF_MASK    << CB_FSYNR0_PTWF_SHIFT)
+#define CB_FSYNR0_AFR      (CB_FSYNR0_AFR_MASK     << CB_FSYNR0_AFR_SHIFT)
+#define CB_FSYNR0_S1CBNDX  (CB_FSYNR0_S1CBNDX_MASK << CB_FSYNR0_S1CBNDX_SHIFT)
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0        (CB_NMRR_IR0_MASK   << CB_NMRR_IR0_SHIFT)
+#define CB_NMRR_IR1        (CB_NMRR_IR1_MASK   << CB_NMRR_IR1_SHIFT)
+#define CB_NMRR_IR2        (CB_NMRR_IR2_MASK   << CB_NMRR_IR2_SHIFT)
+#define CB_NMRR_IR3        (CB_NMRR_IR3_MASK   << CB_NMRR_IR3_SHIFT)
+#define CB_NMRR_IR4        (CB_NMRR_IR4_MASK   << CB_NMRR_IR4_SHIFT)
+#define CB_NMRR_IR5        (CB_NMRR_IR5_MASK   << CB_NMRR_IR5_SHIFT)
+#define CB_NMRR_IR6        (CB_NMRR_IR6_MASK   << CB_NMRR_IR6_SHIFT)
+#define CB_NMRR_IR7        (CB_NMRR_IR7_MASK   << CB_NMRR_IR7_SHIFT)
+#define CB_NMRR_OR0        (CB_NMRR_OR0_MASK   << CB_NMRR_OR0_SHIFT)
+#define CB_NMRR_OR1        (CB_NMRR_OR1_MASK   << CB_NMRR_OR1_SHIFT)
+#define CB_NMRR_OR2        (CB_NMRR_OR2_MASK   << CB_NMRR_OR2_SHIFT)
+#define CB_NMRR_OR3        (CB_NMRR_OR3_MASK   << CB_NMRR_OR3_SHIFT)
+#define CB_NMRR_OR4        (CB_NMRR_OR4_MASK   << CB_NMRR_OR4_SHIFT)
+#define CB_NMRR_OR5        (CB_NMRR_OR5_MASK   << CB_NMRR_OR5_SHIFT)
+#define CB_NMRR_OR6        (CB_NMRR_OR6_MASK   << CB_NMRR_OR6_SHIFT)
+#define CB_NMRR_OR7        (CB_NMRR_OR7_MASK   << CB_NMRR_OR7_SHIFT)
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F           (CB_PAR_F_MASK      << CB_PAR_F_SHIFT)
+#define CB_PAR_SS          (CB_PAR_SS_MASK     << CB_PAR_SS_SHIFT)
+#define CB_PAR_OUTER       (CB_PAR_OUTER_MASK  << CB_PAR_OUTER_SHIFT)
+#define CB_PAR_INNER       (CB_PAR_INNER_MASK  << CB_PAR_INNER_SHIFT)
+#define CB_PAR_SH          (CB_PAR_SH_MASK     << CB_PAR_SH_SHIFT)
+#define CB_PAR_NS          (CB_PAR_NS_MASK     << CB_PAR_NS_SHIFT)
+#define CB_PAR_NOS         (CB_PAR_NOS_MASK    << CB_PAR_NOS_SHIFT)
+#define CB_PAR_PA          (CB_PAR_PA_MASK     << CB_PAR_PA_SHIFT)
+#define CB_PAR_TF          (CB_PAR_TF_MASK     << CB_PAR_TF_SHIFT)
+#define CB_PAR_AFF         (CB_PAR_AFF_MASK    << CB_PAR_AFF_SHIFT)
+#define CB_PAR_PF          (CB_PAR_PF_MASK     << CB_PAR_PF_SHIFT)
+#define CB_PAR_TLBMCF      (CB_PAR_TLBMCF_MASK << CB_PAR_TLBMCF_SHIFT)
+#define CB_PAR_TLBLKF      (CB_PAR_TLBLKF_MASK << CB_PAR_TLBLKF_SHIFT)
+#define CB_PAR_ATOT        (CB_PAR_ATOT_MASK   << CB_PAR_ATOT_SHIFT)
+#define CB_PAR_PLVL        (CB_PAR_PLVL_MASK   << CB_PAR_PLVL_SHIFT)
+#define CB_PAR_STAGE       (CB_PAR_STAGE_MASK  << CB_PAR_STAGE_SHIFT)
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0        (CB_PRRR_TR0_MASK   << CB_PRRR_TR0_SHIFT)
+#define CB_PRRR_TR1        (CB_PRRR_TR1_MASK   << CB_PRRR_TR1_SHIFT)
+#define CB_PRRR_TR2        (CB_PRRR_TR2_MASK   << CB_PRRR_TR2_SHIFT)
+#define CB_PRRR_TR3        (CB_PRRR_TR3_MASK   << CB_PRRR_TR3_SHIFT)
+#define CB_PRRR_TR4        (CB_PRRR_TR4_MASK   << CB_PRRR_TR4_SHIFT)
+#define CB_PRRR_TR5        (CB_PRRR_TR5_MASK   << CB_PRRR_TR5_SHIFT)
+#define CB_PRRR_TR6        (CB_PRRR_TR6_MASK   << CB_PRRR_TR6_SHIFT)
+#define CB_PRRR_TR7        (CB_PRRR_TR7_MASK   << CB_PRRR_TR7_SHIFT)
+#define CB_PRRR_DS0        (CB_PRRR_DS0_MASK   << CB_PRRR_DS0_SHIFT)
+#define CB_PRRR_DS1        (CB_PRRR_DS1_MASK   << CB_PRRR_DS1_SHIFT)
+#define CB_PRRR_NS0        (CB_PRRR_NS0_MASK   << CB_PRRR_NS0_SHIFT)
+#define CB_PRRR_NS1        (CB_PRRR_NS1_MASK   << CB_PRRR_NS1_SHIFT)
+#define CB_PRRR_NOS0       (CB_PRRR_NOS0_MASK  << CB_PRRR_NOS0_SHIFT)
+#define CB_PRRR_NOS1       (CB_PRRR_NOS1_MASK  << CB_PRRR_NOS1_SHIFT)
+#define CB_PRRR_NOS2       (CB_PRRR_NOS2_MASK  << CB_PRRR_NOS2_SHIFT)
+#define CB_PRRR_NOS3       (CB_PRRR_NOS3_MASK  << CB_PRRR_NOS3_SHIFT)
+#define CB_PRRR_NOS4       (CB_PRRR_NOS4_MASK  << CB_PRRR_NOS4_SHIFT)
+#define CB_PRRR_NOS5       (CB_PRRR_NOS5_MASK  << CB_PRRR_NOS5_SHIFT)
+#define CB_PRRR_NOS6       (CB_PRRR_NOS6_MASK  << CB_PRRR_NOS6_SHIFT)
+#define CB_PRRR_NOS7       (CB_PRRR_NOS7_MASK  << CB_PRRR_NOS7_SHIFT)
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR      (CB_RESUME_TNR_MASK << CB_RESUME_TNR_SHIFT)
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M           (CB_SCTLR_M_MASK       << CB_SCTLR_M_SHIFT)
+#define CB_SCTLR_TRE         (CB_SCTLR_TRE_MASK     << CB_SCTLR_TRE_SHIFT)
+#define CB_SCTLR_AFE         (CB_SCTLR_AFE_MASK     << CB_SCTLR_AFE_SHIFT)
+#define CB_SCTLR_AFFD        (CB_SCTLR_AFFD_MASK    << CB_SCTLR_AFFD_SHIFT)
+#define CB_SCTLR_E           (CB_SCTLR_E_MASK       << CB_SCTLR_E_SHIFT)
+#define CB_SCTLR_CFRE        (CB_SCTLR_CFRE_MASK    << CB_SCTLR_CFRE_SHIFT)
+#define CB_SCTLR_CFIE        (CB_SCTLR_CFIE_MASK    << CB_SCTLR_CFIE_SHIFT)
+#define CB_SCTLR_CFCFG       (CB_SCTLR_CFCFG_MASK   << CB_SCTLR_CFCFG_SHIFT)
+#define CB_SCTLR_HUPCF       (CB_SCTLR_HUPCF_MASK   << CB_SCTLR_HUPCF_SHIFT)
+#define CB_SCTLR_WXN         (CB_SCTLR_WXN_MASK     << CB_SCTLR_WXN_SHIFT)
+#define CB_SCTLR_UWXN        (CB_SCTLR_UWXN_MASK    << CB_SCTLR_UWXN_SHIFT)
+#define CB_SCTLR_ASIDPNE     (CB_SCTLR_ASIDPNE_MASK << CB_SCTLR_ASIDPNE_SHIFT)
+#define CB_SCTLR_TRANSIENTCFG (CB_SCTLR_TRANSIENTCFG_MASK << \
+						CB_SCTLR_TRANSIENTCFG_SHIFT)
+#define CB_SCTLR_MEMATTR     (CB_SCTLR_MEMATTR_MASK << CB_SCTLR_MEMATTR_SHIFT)
+#define CB_SCTLR_MTCFG       (CB_SCTLR_MTCFG_MASK   << CB_SCTLR_MTCFG_SHIFT)
+#define CB_SCTLR_SHCFG       (CB_SCTLR_SHCFG_MASK   << CB_SCTLR_SHCFG_SHIFT)
+#define CB_SCTLR_RACFG       (CB_SCTLR_RACFG_MASK   << CB_SCTLR_RACFG_SHIFT)
+#define CB_SCTLR_WACFG       (CB_SCTLR_WACFG_MASK   << CB_SCTLR_WACFG_SHIFT)
+#define CB_SCTLR_NSCFG       (CB_SCTLR_NSCFG_MASK   << CB_SCTLR_NSCFG_SHIFT)
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID     (CB_TLBIASID_ASID_MASK << CB_TLBIASID_ASID_SHIFT)
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID       (CB_TLBIVA_ASID_MASK   << CB_TLBIVA_ASID_SHIFT)
+#define CB_TLBIVA_VA         (CB_TLBIVA_VA_MASK     << CB_TLBIVA_VA_SHIFT)
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA        (CB_TLBIVAA_VA_MASK    << CB_TLBIVAA_VA_SHIFT)
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA       (CB_TLBIVAAL_VA_MASK   << CB_TLBIVAAL_VA_SHIFT)
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID      (CB_TLBIVAL_ASID_MASK  << CB_TLBIVAL_ASID_SHIFT)
+#define CB_TLBIVAL_VA        (CB_TLBIVAL_VA_MASK    << CB_TLBIVAL_VA_SHIFT)
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE (CB_TLBSTATUS_SACTIVE_MASK << \
+						CB_TLBSTATUS_SACTIVE_SHIFT)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ        (CB_TTBCR_T0SZ_MASK    << CB_TTBCR_T0SZ_SHIFT)
+#define CB_TTBCR_PD0         (CB_TTBCR_PD0_MASK     << CB_TTBCR_PD0_SHIFT)
+#define CB_TTBCR_PD1         (CB_TTBCR_PD1_MASK     << CB_TTBCR_PD1_SHIFT)
+#define CB_TTBCR_NSCFG0      (CB_TTBCR_NSCFG0_MASK  << CB_TTBCR_NSCFG0_SHIFT)
+#define CB_TTBCR_NSCFG1      (CB_TTBCR_NSCFG1_MASK  << CB_TTBCR_NSCFG1_SHIFT)
+#define CB_TTBCR_EAE         (CB_TTBCR_EAE_MASK     << CB_TTBCR_EAE_SHIFT)
+
+/* Translation Table Base Register 0: CB_TTBR0 */
+#define CB_TTBR0_IRGN1       (CB_TTBR0_IRGN1_MASK   << CB_TTBR0_IRGN1_SHIFT)
+#define CB_TTBR0_S           (CB_TTBR0_S_MASK       << CB_TTBR0_S_SHIFT)
+#define CB_TTBR0_RGN         (CB_TTBR0_RGN_MASK     << CB_TTBR0_RGN_SHIFT)
+#define CB_TTBR0_NOS         (CB_TTBR0_NOS_MASK     << CB_TTBR0_NOS_SHIFT)
+#define CB_TTBR0_IRGN0       (CB_TTBR0_IRGN0_MASK   << CB_TTBR0_IRGN0_SHIFT)
+#define CB_TTBR0_ADDR        (CB_TTBR0_ADDR_MASK    << CB_TTBR0_ADDR_SHIFT)
+
+/* Translation Table Base Register 1: CB_TTBR1 */
+#define CB_TTBR1_IRGN1       (CB_TTBR1_IRGN1_MASK   << CB_TTBR1_IRGN1_SHIFT)
+#define CB_TTBR1_S           (CB_TTBR1_S_MASK       << CB_TTBR1_S_SHIFT)
+#define CB_TTBR1_RGN         (CB_TTBR1_RGN_MASK     << CB_TTBR1_RGN_SHIFT)
+#define CB_TTBR1_NOS         (CB_TTBR1_NOS_MASK     << CB_TTBR1_NOS_SHIFT)
+#define CB_TTBR1_IRGN0       (CB_TTBR1_IRGN0_MASK   << CB_TTBR1_IRGN0_SHIFT)
+#define CB_TTBR1_ADDR        (CB_TTBR1_ADDR_MASK    << CB_TTBR1_ADDR_SHIFT)
+
+/* Global Register Masks */
+/* Configuration Register 0 */
+#define CR0_NSCFG_MASK          0x03
+#define CR0_WACFG_MASK          0x03
+#define CR0_RACFG_MASK          0x03
+#define CR0_SHCFG_MASK          0x03
+#define CR0_SMCFCFG_MASK        0x01
+#define CR0_MTCFG_MASK          0x01
+#define CR0_MEMATTR_MASK        0x0F
+#define CR0_BSU_MASK            0x03
+#define CR0_FB_MASK             0x01
+#define CR0_PTM_MASK            0x01
+#define CR0_VMIDPNE_MASK        0x01
+#define CR0_USFCFG_MASK         0x01
+#define CR0_GSE_MASK            0x01
+#define CR0_STALLD_MASK         0x01
+#define CR0_TRANSIENTCFG_MASK   0x03
+#define CR0_GCFGFIE_MASK        0x01
+#define CR0_GCFGFRE_MASK        0x01
+#define CR0_GFIE_MASK           0x01
+#define CR0_GFRE_MASK           0x01
+#define CR0_CLIENTPD_MASK       0x01
+
+/* Configuration Register 2 */
+#define CR2_BPVMID_MASK         0xFF
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR_MASK       0xFFFFF
+#define GATS1PR_NDX_MASK        0xFF
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR_MASK       0xFFFFF
+#define GATS1PW_NDX_MASK        0xFF
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR_MASK       0xFFFFF
+#define GATS1UR_NDX_MASK        0xFF
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR_MASK       0xFFFFF
+#define GATS1UW_NDX_MASK        0xFF
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR_MASK      0xFFFFF
+#define GATS12PR_NDX_MASK       0xFF
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR_MASK      0xFFFFF
+#define GATS12PW_NDX_MASK       0xFF
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR_MASK      0xFFFFF
+#define GATS12UR_NDX_MASK       0xFF
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR_MASK      0xFFFFF
+#define GATS12UW_NDX_MASK       0xFF
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE_MASK       0x01
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR_MASK         0xFFFFFFFF
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF_MASK           0x01
+#define GFSR_USF_MASK           0x01
+#define GFSR_SMCF_MASK          0x01
+#define GFSR_UCBF_MASK          0x01
+#define GFSR_UCIF_MASK          0x01
+#define GFSR_CAF_MASK           0x01
+#define GFSR_EF_MASK            0x01
+#define GFSR_PF_MASK            0x01
+#define GFSR_MULTI_MASK         0x01
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED_MASK     0x01
+#define GFSYNR0_WNR_MASK        0x01
+#define GFSYNR0_PNU_MASK        0x01
+#define GFSYNR0_IND_MASK        0x01
+#define GFSYNR0_NSSTATE_MASK    0x01
+#define GFSYNR0_NSATTR_MASK     0x01
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID_MASK        0x7FFF
+#define GFSYNR1_SSD_IDX_MASK    0x7FFF
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F_MASK             0x01
+#define GPAR_SS_MASK            0x01
+#define GPAR_OUTER_MASK         0x03
+#define GPAR_INNER_MASK         0x03
+#define GPAR_SH_MASK            0x01
+#define GPAR_NS_MASK            0x01
+#define GPAR_NOS_MASK           0x01
+#define GPAR_PA_MASK            0xFFFFF
+#define GPAR_TF_MASK            0x01
+#define GPAR_AFF_MASK           0x01
+#define GPAR_PF_MASK            0x01
+#define GPAR_EF_MASK            0x01
+#define GPAR_TLBMCF_MASK        0x01
+#define GPAR_TLBLKF_MASK        0x01
+#define GPAR_UCBF_MASK          0x01
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG_MASK       0xFF
+#define IDR0_NUMSIDB_MASK       0x0F
+#define IDR0_BTM_MASK           0x01
+#define IDR0_CTTW_MASK          0x01
+#define IDR0_NUMIRPT_MASK       0xFF
+#define IDR0_PTFS_MASK          0x01
+#define IDR0_SMS_MASK           0x01
+#define IDR0_NTS_MASK           0x01
+#define IDR0_S2TS_MASK          0x01
+#define IDR0_S1TS_MASK          0x01
+#define IDR0_SES_MASK           0x01
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB_MASK         0xFF
+#define IDR1_NUMSSDNDXB_MASK    0x0F
+#define IDR1_SSDTP_MASK         0x01
+#define IDR1_SMCD_MASK          0x01
+#define IDR1_NUMS2CB_MASK       0xFF
+#define IDR1_NUMPAGENDXB_MASK   0x07
+#define IDR1_PAGESIZE_MASK      0x01
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS_MASK           0x0F
+#define IDR2_OAS_MASK           0x0F
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR_MASK         0x0F
+#define IDR7_MAJOR_MASK         0x0F
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX_MASK         0xFF
+#define S2CR_SHCFG_MASK         0x03
+#define S2CR_MTCFG_MASK         0x01
+#define S2CR_MEMATTR_MASK       0x0F
+#define S2CR_TYPE_MASK          0x03
+#define S2CR_NSCFG_MASK         0x03
+#define S2CR_RACFG_MASK         0x03
+#define S2CR_WACFG_MASK         0x03
+#define S2CR_PRIVCFG_MASK       0x03
+#define S2CR_INSTCFG_MASK       0x03
+#define S2CR_TRANSIENTCFG_MASK  0x03
+#define S2CR_VMID_MASK          0xFF
+#define S2CR_BSU_MASK           0x03
+#define S2CR_FB_MASK            0x01
+
+/* Stream Match Register: SMR */
+#define SMR_ID_MASK             0x7FFF
+#define SMR_MASK_MASK           0x7FFF
+#define SMR_VALID_MASK          0x01
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE_MASK 0x01
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR_MASK       0xFFFFF
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID_MASK      0xFF
+
+/* Global Register Space 1 Mask */
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID_MASK          0xFF
+#define CBAR_CBNDX_MASK         0x03
+#define CBAR_BPSHCFG_MASK       0x03
+#define CBAR_HYPC_MASK          0x01
+#define CBAR_FB_MASK            0x01
+#define CBAR_MEMATTR_MASK       0x0F
+#define CBAR_TYPE_MASK          0x03
+#define CBAR_BSU_MASK           0x03
+#define CBAR_RACFG_MASK         0x03
+#define CBAR_WACFG_MASK         0x03
+#define CBAR_IRPTNDX_MASK       0xFF
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID_MASK      0x7FFF
+
+/* Stage 1 Context Bank Format Masks */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY_MASK    0x3
+#define CB_ACTLR_REQPRIORITYCFG_MASK 0x1
+#define CB_ACTLR_PRIVCFG_MASK        0x3
+#define CB_ACTLR_BPRCOSH_MASK        0x1
+#define CB_ACTLR_BPRCISH_MASK        0x1
+#define CB_ACTLR_BPRCNSH_MASK        0x1
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR_MASK     0xFFFFF
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR_MASK     0xFFFFF
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR_MASK     0xFFFFF
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR_MASK     0xFFFFF
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE_MASK     0x01
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID_MASK   0xFF
+#define CB_CONTEXTIDR_PROCID_MASK 0xFFFFFF
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR_MASK       0xFFFFFFFF
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF_MASK          0x01
+#define CB_FSR_AFF_MASK         0x01
+#define CB_FSR_PF_MASK          0x01
+#define CB_FSR_EF_MASK          0x01
+#define CB_FSR_TLBMCF_MASK      0x01
+#define CB_FSR_TLBLKF_MASK      0x01
+#define CB_FSR_SS_MASK          0x01
+#define CB_FSR_MULTI_MASK       0x01
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL_MASK     0x03
+#define CB_FSYNR0_S1PTWF_MASK   0x01
+#define CB_FSYNR0_WNR_MASK      0x01
+#define CB_FSYNR0_PNU_MASK      0x01
+#define CB_FSYNR0_IND_MASK      0x01
+#define CB_FSYNR0_NSSTATE_MASK  0x01
+#define CB_FSYNR0_NSATTR_MASK   0x01
+#define CB_FSYNR0_ATOF_MASK     0x01
+#define CB_FSYNR0_PTWF_MASK     0x01
+#define CB_FSYNR0_AFR_MASK      0x01
+#define CB_FSYNR0_S1CBNDX_MASK  0xFF
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0_MASK        0x03
+#define CB_NMRR_IR1_MASK        0x03
+#define CB_NMRR_IR2_MASK        0x03
+#define CB_NMRR_IR3_MASK        0x03
+#define CB_NMRR_IR4_MASK        0x03
+#define CB_NMRR_IR5_MASK        0x03
+#define CB_NMRR_IR6_MASK        0x03
+#define CB_NMRR_IR7_MASK        0x03
+#define CB_NMRR_OR0_MASK        0x03
+#define CB_NMRR_OR1_MASK        0x03
+#define CB_NMRR_OR2_MASK        0x03
+#define CB_NMRR_OR3_MASK        0x03
+#define CB_NMRR_OR4_MASK        0x03
+#define CB_NMRR_OR5_MASK        0x03
+#define CB_NMRR_OR6_MASK        0x03
+#define CB_NMRR_OR7_MASK        0x03
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F_MASK           0x01
+#define CB_PAR_SS_MASK          0x01
+#define CB_PAR_OUTER_MASK       0x03
+#define CB_PAR_INNER_MASK       0x07
+#define CB_PAR_SH_MASK          0x01
+#define CB_PAR_NS_MASK          0x01
+#define CB_PAR_NOS_MASK         0x01
+#define CB_PAR_PA_MASK          0xFFFFF
+#define CB_PAR_TF_MASK          0x01
+#define CB_PAR_AFF_MASK         0x01
+#define CB_PAR_PF_MASK          0x01
+#define CB_PAR_TLBMCF_MASK      0x01
+#define CB_PAR_TLBLKF_MASK      0x01
+#define CB_PAR_ATOT_MASK        0x01
+#define CB_PAR_PLVL_MASK        0x03
+#define CB_PAR_STAGE_MASK       0x01
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0_MASK        0x03
+#define CB_PRRR_TR1_MASK        0x03
+#define CB_PRRR_TR2_MASK        0x03
+#define CB_PRRR_TR3_MASK        0x03
+#define CB_PRRR_TR4_MASK        0x03
+#define CB_PRRR_TR5_MASK        0x03
+#define CB_PRRR_TR6_MASK        0x03
+#define CB_PRRR_TR7_MASK        0x03
+#define CB_PRRR_DS0_MASK        0x01
+#define CB_PRRR_DS1_MASK        0x01
+#define CB_PRRR_NS0_MASK        0x01
+#define CB_PRRR_NS1_MASK        0x01
+#define CB_PRRR_NOS0_MASK       0x01
+#define CB_PRRR_NOS1_MASK       0x01
+#define CB_PRRR_NOS2_MASK       0x01
+#define CB_PRRR_NOS3_MASK       0x01
+#define CB_PRRR_NOS4_MASK       0x01
+#define CB_PRRR_NOS5_MASK       0x01
+#define CB_PRRR_NOS6_MASK       0x01
+#define CB_PRRR_NOS7_MASK       0x01
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR_MASK      0x01
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M_MASK            0x01
+#define CB_SCTLR_TRE_MASK          0x01
+#define CB_SCTLR_AFE_MASK          0x01
+#define CB_SCTLR_AFFD_MASK         0x01
+#define CB_SCTLR_E_MASK            0x01
+#define CB_SCTLR_CFRE_MASK         0x01
+#define CB_SCTLR_CFIE_MASK         0x01
+#define CB_SCTLR_CFCFG_MASK        0x01
+#define CB_SCTLR_HUPCF_MASK        0x01
+#define CB_SCTLR_WXN_MASK          0x01
+#define CB_SCTLR_UWXN_MASK         0x01
+#define CB_SCTLR_ASIDPNE_MASK      0x01
+#define CB_SCTLR_TRANSIENTCFG_MASK 0x03
+#define CB_SCTLR_MEMATTR_MASK      0x0F
+#define CB_SCTLR_MTCFG_MASK        0x01
+#define CB_SCTLR_SHCFG_MASK        0x03
+#define CB_SCTLR_RACFG_MASK        0x03
+#define CB_SCTLR_WACFG_MASK        0x03
+#define CB_SCTLR_NSCFG_MASK        0x03
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID_MASK      0xFF
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID_MASK        0xFF
+#define CB_TLBIVA_VA_MASK          0xFFFFF
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA_MASK         0xFFFFF
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA_MASK        0xFFFFF
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID_MASK       0xFF
+#define CB_TLBIVAL_VA_MASK         0xFFFFF
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE_MASK  0x01
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ_MASK         0x07
+#define CB_TTBCR_PD0_MASK          0x01
+#define CB_TTBCR_PD1_MASK          0x01
+#define CB_TTBCR_NSCFG0_MASK       0x01
+#define CB_TTBCR_NSCFG1_MASK       0x01
+#define CB_TTBCR_EAE_MASK          0x01
+
+/* Translation Table Base Register 0/1: CB_TTBR */
+#define CB_TTBR0_IRGN1_MASK        0x01
+#define CB_TTBR0_S_MASK            0x01
+#define CB_TTBR0_RGN_MASK          0x01
+#define CB_TTBR0_NOS_MASK          0x01
+#define CB_TTBR0_IRGN0_MASK        0x01
+#define CB_TTBR0_ADDR_MASK         0xFFFFFF
+
+#define CB_TTBR1_IRGN1_MASK        0x01
+#define CB_TTBR1_S_MASK            0x01
+#define CB_TTBR1_RGN_MASK          0x01
+#define CB_TTBR1_NOS_MASK          0x01
+#define CB_TTBR1_IRGN0_MASK        0x01
+#define CB_TTBR1_ADDR_MASK         0xFFFFFF
+
+/* Global Register Shifts */
+/* Configuration Register: CR0 */
+#define CR0_NSCFG_SHIFT            28
+#define CR0_WACFG_SHIFT            26
+#define CR0_RACFG_SHIFT            24
+#define CR0_SHCFG_SHIFT            22
+#define CR0_SMCFCFG_SHIFT          21
+#define CR0_MTCFG_SHIFT            20
+#define CR0_MEMATTR_SHIFT          16
+#define CR0_BSU_SHIFT              14
+#define CR0_FB_SHIFT               13
+#define CR0_PTM_SHIFT              12
+#define CR0_VMIDPNE_SHIFT          11
+#define CR0_USFCFG_SHIFT           10
+#define CR0_GSE_SHIFT              9
+#define CR0_STALLD_SHIFT           8
+#define CR0_TRANSIENTCFG_SHIFT     6
+#define CR0_GCFGFIE_SHIFT          5
+#define CR0_GCFGFRE_SHIFT          4
+#define CR0_GFIE_SHIFT             2
+#define CR0_GFRE_SHIFT             1
+#define CR0_CLIENTPD_SHIFT         0
+
+/* Configuration Register: CR2 */
+#define CR2_BPVMID_SHIFT           0
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR_SHIFT         12
+#define GATS1PR_NDX_SHIFT          0
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR_SHIFT         12
+#define GATS1PW_NDX_SHIFT          0
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR_SHIFT         12
+#define GATS1UR_NDX_SHIFT          0
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR_SHIFT         12
+#define GATS1UW_NDX_SHIFT          0
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR_SHIFT        12
+#define GATS12PR_NDX_SHIFT         0
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR_SHIFT        12
+#define GATS12PW_NDX_SHIFT         0
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR_SHIFT        12
+#define GATS12UR_NDX_SHIFT         0
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR_SHIFT        12
+#define GATS12UW_NDX_SHIFT         0
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE_SHIFT         0
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR_SHIFT           0
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF_SHIFT             0
+#define GFSR_USF_SHIFT             1
+#define GFSR_SMCF_SHIFT            2
+#define GFSR_UCBF_SHIFT            3
+#define GFSR_UCIF_SHIFT            4
+#define GFSR_CAF_SHIFT             5
+#define GFSR_EF_SHIFT              6
+#define GFSR_PF_SHIFT              7
+#define GFSR_MULTI_SHIFT           31
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED_SHIFT       0
+#define GFSYNR0_WNR_SHIFT          1
+#define GFSYNR0_PNU_SHIFT          2
+#define GFSYNR0_IND_SHIFT          3
+#define GFSYNR0_NSSTATE_SHIFT      4
+#define GFSYNR0_NSATTR_SHIFT       5
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID_SHIFT          0
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F_SHIFT               0
+#define GPAR_SS_SHIFT              1
+#define GPAR_OUTER_SHIFT           2
+#define GPAR_INNER_SHIFT           4
+#define GPAR_SH_SHIFT              7
+#define GPAR_NS_SHIFT              9
+#define GPAR_NOS_SHIFT             10
+#define GPAR_PA_SHIFT              12
+#define GPAR_TF_SHIFT              1
+#define GPAR_AFF_SHIFT             2
+#define GPAR_PF_SHIFT              3
+#define GPAR_EF_SHIFT              4
+#define GPAR_TLBMCF_SHIFT          5
+#define GPAR_TLBLKF_SHIFT          6
+#define GPAR_UCBF_SHIFT            30
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG_SHIFT         0
+#define IDR0_NUMSIDB_SHIFT         9
+#define IDR0_BTM_SHIFT             13
+#define IDR0_CTTW_SHIFT            14
+#define IDR0_NUMIRPT_SHIFT         16
+#define IDR0_PTFS_SHIFT            24
+#define IDR0_SMS_SHIFT             27
+#define IDR0_NTS_SHIFT             28
+#define IDR0_S2TS_SHIFT            29
+#define IDR0_S1TS_SHIFT            30
+#define IDR0_SES_SHIFT             31
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB_SHIFT           0
+#define IDR1_NUMSSDNDXB_SHIFT      8
+#define IDR1_SSDTP_SHIFT           12
+#define IDR1_SMCD_SHIFT            15
+#define IDR1_NUMS2CB_SHIFT         16
+#define IDR1_NUMPAGENDXB_SHIFT     28
+#define IDR1_PAGESIZE_SHIFT        31
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS_SHIFT             0
+#define IDR2_OAS_SHIFT             4
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR_SHIFT           0
+#define IDR7_MAJOR_SHIFT           4
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX_SHIFT           0
+#define S2CR_SHCFG_SHIFT           8
+#define S2CR_MTCFG_SHIFT           11
+#define S2CR_MEMATTR_SHIFT         12
+#define S2CR_TYPE_SHIFT            16
+#define S2CR_NSCFG_SHIFT           18
+#define S2CR_RACFG_SHIFT           20
+#define S2CR_WACFG_SHIFT           22
+#define S2CR_PRIVCFG_SHIFT         24
+#define S2CR_INSTCFG_SHIFT         26
+#define S2CR_TRANSIENTCFG_SHIFT    28
+#define S2CR_VMID_SHIFT            0
+#define S2CR_BSU_SHIFT             24
+#define S2CR_FB_SHIFT              26
+
+/* Stream Match Register: SMR */
+#define SMR_ID_SHIFT               0
+#define SMR_MASK_SHIFT             16
+#define SMR_VALID_SHIFT            31
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE_SHIFT  0
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR_SHIFT         12
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID_SHIFT        0
+
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID_SHIFT            0
+#define CBAR_CBNDX_SHIFT           8
+#define CBAR_BPSHCFG_SHIFT         8
+#define CBAR_HYPC_SHIFT            10
+#define CBAR_FB_SHIFT              11
+#define CBAR_MEMATTR_SHIFT         12
+#define CBAR_TYPE_SHIFT            16
+#define CBAR_BSU_SHIFT             18
+#define CBAR_RACFG_SHIFT           20
+#define CBAR_WACFG_SHIFT           22
+#define CBAR_IRPTNDX_SHIFT         24
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID_SHIFT        0
+
+/* Stage 1 Context Bank Format Shifts */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY_SHIFT     0
+#define CB_ACTLR_REQPRIORITYCFG_SHIFT  4
+#define CB_ACTLR_PRIVCFG_SHIFT         8
+#define CB_ACTLR_BPRCOSH_SHIFT         28
+#define CB_ACTLR_BPRCISH_SHIFT         29
+#define CB_ACTLR_BPRCNSH_SHIFT         30
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR_SHIFT       12
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR_SHIFT       12
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR_SHIFT       12
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR_SHIFT       12
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE_SHIFT       0
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID_SHIFT   0
+#define CB_CONTEXTIDR_PROCID_SHIFT 8
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR_SHIFT         0
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF_SHIFT            1
+#define CB_FSR_AFF_SHIFT           2
+#define CB_FSR_PF_SHIFT            3
+#define CB_FSR_EF_SHIFT            4
+#define CB_FSR_TLBMCF_SHIFT        5
+#define CB_FSR_TLBLKF_SHIFT        6
+#define CB_FSR_SS_SHIFT            30
+#define CB_FSR_MULTI_SHIFT         31
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL_SHIFT       0
+#define CB_FSYNR0_S1PTWF_SHIFT     3
+#define CB_FSYNR0_WNR_SHIFT        4
+#define CB_FSYNR0_PNU_SHIFT        5
+#define CB_FSYNR0_IND_SHIFT        6
+#define CB_FSYNR0_NSSTATE_SHIFT    7
+#define CB_FSYNR0_NSATTR_SHIFT     8
+#define CB_FSYNR0_ATOF_SHIFT       9
+#define CB_FSYNR0_PTWF_SHIFT       10
+#define CB_FSYNR0_AFR_SHIFT        11
+#define CB_FSYNR0_S1CBNDX_SHIFT    16
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0_SHIFT          0
+#define CB_NMRR_IR1_SHIFT          2
+#define CB_NMRR_IR2_SHIFT          4
+#define CB_NMRR_IR3_SHIFT          6
+#define CB_NMRR_IR4_SHIFT          8
+#define CB_NMRR_IR5_SHIFT          10
+#define CB_NMRR_IR6_SHIFT          12
+#define CB_NMRR_IR7_SHIFT          14
+#define CB_NMRR_OR0_SHIFT          16
+#define CB_NMRR_OR1_SHIFT          18
+#define CB_NMRR_OR2_SHIFT          20
+#define CB_NMRR_OR3_SHIFT          22
+#define CB_NMRR_OR4_SHIFT          24
+#define CB_NMRR_OR5_SHIFT          26
+#define CB_NMRR_OR6_SHIFT          28
+#define CB_NMRR_OR7_SHIFT          30
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F_SHIFT             0
+#define CB_PAR_SS_SHIFT            1
+#define CB_PAR_OUTER_SHIFT         2
+#define CB_PAR_INNER_SHIFT         4
+#define CB_PAR_SH_SHIFT            7
+#define CB_PAR_NS_SHIFT            9
+#define CB_PAR_NOS_SHIFT           10
+#define CB_PAR_PA_SHIFT            12
+#define CB_PAR_TF_SHIFT            1
+#define CB_PAR_AFF_SHIFT           2
+#define CB_PAR_PF_SHIFT            3
+#define CB_PAR_TLBMCF_SHIFT        5
+#define CB_PAR_TLBLKF_SHIFT        6
+#define CB_PAR_ATOT_SHIFT          31
+#define CB_PAR_PLVL_SHIFT          0
+#define CB_PAR_STAGE_SHIFT         3
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0_SHIFT          0
+#define CB_PRRR_TR1_SHIFT          2
+#define CB_PRRR_TR2_SHIFT          4
+#define CB_PRRR_TR3_SHIFT          6
+#define CB_PRRR_TR4_SHIFT          8
+#define CB_PRRR_TR5_SHIFT          10
+#define CB_PRRR_TR6_SHIFT          12
+#define CB_PRRR_TR7_SHIFT          14
+#define CB_PRRR_DS0_SHIFT          16
+#define CB_PRRR_DS1_SHIFT          17
+#define CB_PRRR_NS0_SHIFT          18
+#define CB_PRRR_NS1_SHIFT          19
+#define CB_PRRR_NOS0_SHIFT         24
+#define CB_PRRR_NOS1_SHIFT         25
+#define CB_PRRR_NOS2_SHIFT         26
+#define CB_PRRR_NOS3_SHIFT         27
+#define CB_PRRR_NOS4_SHIFT         28
+#define CB_PRRR_NOS5_SHIFT         29
+#define CB_PRRR_NOS6_SHIFT         30
+#define CB_PRRR_NOS7_SHIFT         31
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR_SHIFT        0
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M_SHIFT            0
+#define CB_SCTLR_TRE_SHIFT          1
+#define CB_SCTLR_AFE_SHIFT          2
+#define CB_SCTLR_AFFD_SHIFT         3
+#define CB_SCTLR_E_SHIFT            4
+#define CB_SCTLR_CFRE_SHIFT         5
+#define CB_SCTLR_CFIE_SHIFT         6
+#define CB_SCTLR_CFCFG_SHIFT        7
+#define CB_SCTLR_HUPCF_SHIFT        8
+#define CB_SCTLR_WXN_SHIFT          9
+#define CB_SCTLR_UWXN_SHIFT         10
+#define CB_SCTLR_ASIDPNE_SHIFT      12
+#define CB_SCTLR_TRANSIENTCFG_SHIFT 14
+#define CB_SCTLR_MEMATTR_SHIFT      16
+#define CB_SCTLR_MTCFG_SHIFT        20
+#define CB_SCTLR_SHCFG_SHIFT        22
+#define CB_SCTLR_RACFG_SHIFT        24
+#define CB_SCTLR_WACFG_SHIFT        26
+#define CB_SCTLR_NSCFG_SHIFT        28
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID_SHIFT      0
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID_SHIFT        0
+#define CB_TLBIVA_VA_SHIFT          12
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA_SHIFT         12
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA_SHIFT        12
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID_SHIFT       0
+#define CB_TLBIVAL_VA_SHIFT         12
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE_SHIFT  0
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ_SHIFT         0
+#define CB_TTBCR_PD0_SHIFT          4
+#define CB_TTBCR_PD1_SHIFT          5
+#define CB_TTBCR_NSCFG0_SHIFT       14
+#define CB_TTBCR_NSCFG1_SHIFT       30
+#define CB_TTBCR_EAE_SHIFT          31
+
+/* Translation Table Base Register 0/1: CB_TTBR */
+#define CB_TTBR0_IRGN1_SHIFT        0
+#define CB_TTBR0_S_SHIFT            1
+#define CB_TTBR0_RGN_SHIFT          3
+#define CB_TTBR0_NOS_SHIFT          5
+#define CB_TTBR0_IRGN0_SHIFT        6
+#define CB_TTBR0_ADDR_SHIFT         14
+
+#define CB_TTBR1_IRGN1_SHIFT        0
+#define CB_TTBR1_S_SHIFT            1
+#define CB_TTBR1_RGN_SHIFT          3
+#define CB_TTBR1_NOS_SHIFT          5
+#define CB_TTBR1_IRGN0_SHIFT        6
+#define CB_TTBR1_ADDR_SHIFT         14
+
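+/*
+ * Illustrative sketch only (not part of the driver): every field above is
+ * described by a <FIELD>_MASK giving its width and a <FIELD>_SHIFT giving
+ * its bit position, so a generic accessor pair would look like:
+ *
+ *	#define GET_FIELD(reg_val, field) \
+ *		(((reg_val) >> field##_SHIFT) & field##_MASK)
+ *	#define SET_FIELD(reg_val, field, v) \
+ *		(((reg_val) & ~(field##_MASK << field##_SHIFT)) | \
+ *		 (((v) & field##_MASK) << field##_SHIFT))
+ *
+ * The IOMMU driver uses the SET_*()/GET_*() helpers built on top of these
+ * definitions rather than the macros sketched here.
+ */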
+#endif
diff --git a/arch/arm/mach-msm/include/mach/mdm2.h b/arch/arm/mach-msm/include/mach/mdm2.h
index 78ca88f..997b3be 100644
--- a/arch/arm/mach-msm/include/mach/mdm2.h
+++ b/arch/arm/mach-msm/include/mach/mdm2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,9 @@
 struct mdm_platform_data {
 	char *mdm_version;
 	int ramdump_delay_ms;
+	int soft_reset_inverted;
+	int early_power_on;
+	int sfr_query;
 	struct platform_device *peripheral_platform_device;
 };
 
diff --git a/arch/arm/mach-msm/include/mach/msm_bus_board.h b/arch/arm/mach-msm/include/mach/msm_bus_board.h
index f62bc86..956d44e 100644
--- a/arch/arm/mach-msm/include/mach/msm_bus_board.h
+++ b/arch/arm/mach-msm/include/mach/msm_bus_board.h
@@ -37,6 +37,8 @@
 	const unsigned int ntieredslaves;
 	bool il_flag;
 	const struct msm_bus_board_algorithm *board_algo;
+	int hw_sel;
+	void *hw_data;
 };
 
 enum msm_bus_bw_tier_type {
@@ -154,6 +156,16 @@
 	MSM_BUS_FAB_CPSS_FPB = 4096,
 };
 
+enum msm_bus_fab_noc_bimc_type {
+	MSM_BUS_FAB_BIMC = 0,
+	MSM_BUS_FAB_SYS_NOC = 1024,
+	MSM_BUS_FAB_MMSS_NOC = 2048,
+	MSM_BUS_FAB_OCMEM_NOC = 3072,
+	MSM_BUS_FAB_PERIPH_NOC = 4096,
+	MSM_BUS_FAB_CONFIG_NOC = 5120,
+	MSM_BUS_FAB_OCMEM_VNOC = 6144,
+};
+
 enum msm_bus_fabric_master_type {
 	MSM_BUS_MASTER_FIRST = 1,
 	MSM_BUS_MASTER_AMPSS_M0 = 1,
@@ -212,7 +224,50 @@
 	MSM_BUS_MASTER_GRAPHICS_3D_PORT1,
 	MSM_BUS_MASTER_VIDEO_ENC,
 	MSM_BUS_MASTER_VIDEO_DEC,
-	MSM_BUS_MASTER_LAST = MSM_BUS_MASTER_VIDEO_DEC,
+
+	MSM_BUS_MASTER_LPASS_AHB,
+	MSM_BUS_MASTER_QDSS_BAM,
+	MSM_BUS_MASTER_SNOC_CFG,
+	MSM_BUS_MASTER_CRYPTO_CORE0,
+	MSM_BUS_MASTER_CRYPTO_CORE1,
+	MSM_BUS_MASTER_MSS_NAV,
+	MSM_BUS_MASTER_OCMEM_DMA,
+	MSM_BUS_MASTER_WCSS,
+	MSM_BUS_MASTER_QDSS_ETR,
+	MSM_BUS_MASTER_USB3,
+
+	MSM_BUS_MASTER_JPEG,
+	MSM_BUS_MASTER_VIDEO_P0,
+	MSM_BUS_MASTER_VIDEO_P1,
+
+	MSM_BUS_MASTER_MSS_PROC,
+	MSM_BUS_MASTER_JPEG_OCMEM,
+	MSM_BUS_MASTER_MDP_OCMEM,
+	MSM_BUS_MASTER_VIDEO_P0_OCMEM,
+	MSM_BUS_MASTER_VIDEO_P1_OCMEM,
+	MSM_BUS_MASTER_VFE_OCMEM,
+	MSM_BUS_MASTER_CNOC_ONOC_CFG,
+	MSM_BUS_MASTER_RPM_INST,
+	MSM_BUS_MASTER_RPM_DATA,
+	MSM_BUS_MASTER_RPM_SYS,
+	MSM_BUS_MASTER_DEHR,
+	MSM_BUS_MASTER_QDSS_DAP,
+	MSM_BUS_MASTER_TIC,
+
+	MSM_BUS_MASTER_SDCC_1,
+	MSM_BUS_MASTER_SDCC_3,
+	MSM_BUS_MASTER_SDCC_4,
+	MSM_BUS_MASTER_SDCC_2,
+	MSM_BUS_MASTER_TSIF,
+	MSM_BUS_MASTER_BAM_DMA,
+	MSM_BUS_MASTER_BLSP_2,
+	MSM_BUS_MASTER_USB_HSIC,
+	MSM_BUS_MASTER_BLSP_1,
+	MSM_BUS_MASTER_USB_HS,
+	MSM_BUS_MASTER_PNOC_CFG,
+	MSM_BUS_MASTER_V_OCMEM_GFX3D,
+
+	MSM_BUS_MASTER_LAST = MSM_BUS_MASTER_V_OCMEM_GFX3D,
 
 	MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM =
 		MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB,
@@ -297,7 +352,77 @@
 	MSM_BUS_SLAVE_MSM_PRNG,
 	MSM_BUS_SLAVE_GSS,
 	MSM_BUS_SLAVE_SATA,
-	MSM_BUS_SLAVE_LAST = MSM_BUS_SLAVE_MSM_PRNG,
+
+	MSM_BUS_SLAVE_USB3,
+	MSM_BUS_SLAVE_WCSS,
+	MSM_BUS_SLAVE_OCIMEM,
+	MSM_BUS_SLAVE_SNOC_OCMEM,
+	MSM_BUS_SLAVE_SERVICE_SNOC,
+	MSM_BUS_SLAVE_QDSS_STM,
+
+	MSM_BUS_SLAVE_CAMERA_CFG,
+	MSM_BUS_SLAVE_DISPLAY_CFG,
+	MSM_BUS_SLAVE_OCMEM_CFG,
+	MSM_BUS_SLAVE_CPR_CFG,
+	MSM_BUS_SLAVE_CPR_XPU_CFG,
+	MSM_BUS_SLAVE_MISC_CFG,
+	MSM_BUS_SLAVE_MISC_XPU_CFG,
+	MSM_BUS_SLAVE_VENUS_CFG,
+	MSM_BUS_SLAVE_MISC_VENUS_CFG,
+	MSM_BUS_SLAVE_GRAPHICS_3D_CFG,
+	MSM_BUS_SLAVE_MMSS_CLK_CFG,
+	MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG,
+	MSM_BUS_SLAVE_MNOC_MPU_CFG,
+	MSM_BUS_SLAVE_ONOC_MPU_CFG,
+	MSM_BUS_SLAVE_SERVICE_MNOC,
+
+	MSM_BUS_SLAVE_OCMEM,
+	MSM_BUS_SLAVE_SERVICE_ONOC,
+
+	MSM_BUS_SLAVE_SDCC_1,
+	MSM_BUS_SLAVE_SDCC_3,
+	MSM_BUS_SLAVE_SDCC_2,
+	MSM_BUS_SLAVE_SDCC_4,
+	MSM_BUS_SLAVE_BAM_DMA,
+	MSM_BUS_SLAVE_BLSP_2,
+	MSM_BUS_SLAVE_USB_HSIC,
+	MSM_BUS_SLAVE_BLSP_1,
+	MSM_BUS_SLAVE_USB_HS,
+	MSM_BUS_SLAVE_PDM,
+	MSM_BUS_SLAVE_PERIPH_APU_CFG,
+	MSM_BUS_SLAVE_PNOC_MPU_CFG,
+	MSM_BUS_SLAVE_PRNG,
+	MSM_BUS_SLAVE_SERVICE_PNOC,
+
+	MSM_BUS_SLAVE_CLK_CTL,
+	MSM_BUS_SLAVE_CNOC_MSS,
+	MSM_BUS_SLAVE_SECURITY,
+	MSM_BUS_SLAVE_TCSR,
+	MSM_BUS_SLAVE_TLMM,
+	MSM_BUS_SLAVE_CRYPTO_0_CFG,
+	MSM_BUS_SLAVE_CRYPTO_1_CFG,
+	MSM_BUS_SLAVE_IMEM_CFG,
+	MSM_BUS_SLAVE_MESSAGE_RAM,
+	MSM_BUS_SLAVE_BIMC_CFG,
+	MSM_BUS_SLAVE_BOOT_ROM,
+	MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG,
+	MSM_BUS_SLAVE_PMIC_ARB,
+	MSM_BUS_SLAVE_SPDM_WRAPPER,
+	MSM_BUS_SLAVE_DEHR_CFG,
+	MSM_BUS_SLAVE_QDSS_CFG,
+	MSM_BUS_SLAVE_RBCPR_CFG,
+	MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG,
+	MSM_BUS_SLAVE_SNOC_MPU_CFG,
+	MSM_BUS_SLAVE_CNOC_ONOC_CFG,
+	MSM_BUS_SLAVE_CNOC_MNOC_CFG,
+	MSM_BUS_SLAVE_PNOC_CFG,
+	MSM_BUS_SLAVE_SNOC_CFG,
+	MSM_BUS_SLAVE_EBI1_DLL_CFG,
+	MSM_BUS_SLAVE_PHY_APU_CFG,
+	MSM_BUS_SLAVE_EBI1_PHY_CFG,
+	MSM_BUS_SLAVE_SERVICE_CNOC,
+
+	MSM_BUS_SLAVE_LAST = MSM_BUS_SLAVE_SERVICE_CNOC,
 
 	MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM =
 		MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB,
diff --git a/arch/arm/mach-msm/include/mach/msm_cache_dump.h b/arch/arm/mach-msm/include/mach/msm_cache_dump.h
index 6e4f628..80f4159 100644
--- a/arch/arm/mach-msm/include/mach/msm_cache_dump.h
+++ b/arch/arm/mach-msm/include/mach/msm_cache_dump.h
@@ -57,9 +57,6 @@
 	unsigned int l2_size;
 };
 
-#define L1_BUFFER_SIZE	SZ_1M
-#define L2_BUFFER_SIZE	SZ_4M
-
 #define CACHE_BUFFER_DUMP_SIZE (L1_BUFFER_SIZE + L2_BUFFER_SIZE)
 
 #define L1C_SERVICE_ID 3
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8064.h b/arch/arm/mach-msm/include/mach/msm_iomap-8064.h
index 96bc35e..10e2b74 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8064.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8064.h
@@ -106,4 +106,9 @@
 #define APQ8064_HDMI_PHYS		0x04A00000
 #define APQ8064_HDMI_SIZE		SZ_4K
 
+#ifdef CONFIG_DEBUG_APQ8064_UART
+#define MSM_DEBUG_UART_BASE		IOMEM(0xFA740000)
+#define MSM_DEBUG_UART_PHYS		0x16640000
+#endif
+
 #endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-copper.h b/arch/arm/mach-msm/include/mach/msm_iomap-copper.h
index b560276..441f82a 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-copper.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-copper.h
@@ -23,7 +23,7 @@
  *
  */
 
-#define COPPER_MSM_SHARED_RAM_PHYS	0x18D00000
+#define COPPER_MSM_SHARED_RAM_PHYS	0x0FA00000
 
 #define COPPER_QGIC_DIST_PHYS	0xF9000000
 #define COPPER_QGIC_DIST_SIZE	SZ_4K
diff --git a/arch/arm/mach-msm/include/mach/msm_smsm.h b/arch/arm/mach-msm/include/mach/msm_smsm.h
index 5b15340..0fc3a2b 100644
--- a/arch/arm/mach-msm/include/mach/msm_smsm.h
+++ b/arch/arm/mach-msm/include/mach/msm_smsm.h
@@ -58,6 +58,7 @@
 #define SMSM_TIMEWAIT          0x00000400
 #define SMSM_TIMEINIT          0x00000800
 #define SMSM_PWRC_EARLY_EXIT   0x00001000
+#define SMSM_LTE_COEX_AWAKE    0x00001000
 #define SMSM_WFPI              0x00002000
 #define SMSM_SLEEP             0x00004000
 #define SMSM_SLEEPEXIT         0x00008000
diff --git a/arch/arm/mach-msm/include/mach/rpm-8930.h b/arch/arm/mach-msm/include/mach/rpm-8930.h
index 0211b67..5ec3a74 100644
--- a/arch/arm/mach-msm/include/mach/rpm-8930.h
+++ b/arch/arm/mach-msm/include/mach/rpm-8930.h
@@ -100,6 +100,7 @@
 	MSM_RPM_8930_SEL_CXO_BUFFERS				= 81,
 	MSM_RPM_8930_SEL_USB_OTG_SWITCH				= 82,
 	MSM_RPM_8930_SEL_HDMI_SWITCH				= 83,
+	MSM_RPM_8930_SEL_DDR_DMM				= 84,
 	MSM_RPM_8930_SEL_VOLTAGE_CORNER				= 87,
 	MSM_RPM_8930_SEL_LAST = MSM_RPM_8930_SEL_VOLTAGE_CORNER,
 };
@@ -239,8 +240,10 @@
 	MSM_RPM_8930_ID_CXO_BUFFERS	= 164,
 	MSM_RPM_8930_ID_USB_OTG_SWITCH	= 165,
 	MSM_RPM_8930_ID_HDMI_SWITCH	= 166,
-	MSM_RPM_8930_ID_QDSS_CLK	= 167,
-	MSM_RPM_8930_ID_VOLTAGE_CORNER	= 168,
+	MSM_RPM_8930_ID_DDR_DMM_0		= 167,
+	MSM_RPM_8930_ID_DDR_DMM_1		= 168,
+	MSM_RPM_8930_ID_QDSS_CLK	= 168,
+	MSM_RPM_8930_ID_VOLTAGE_CORNER	= 169,
 	MSM_RPM_8930_ID_LAST = MSM_RPM_8930_ID_VOLTAGE_CORNER,
 };
 
@@ -350,8 +353,10 @@
 	MSM_RPM_8930_STATUS_ID_CXO_BUFFERS			= 105,
 	MSM_RPM_8930_STATUS_ID_USB_OTG_SWITCH			= 106,
 	MSM_RPM_8930_STATUS_ID_HDMI_SWITCH			= 107,
-	MSM_RPM_8930_STATUS_ID_QDSS_CLK				= 108,
-	MSM_RPM_8930_STATUS_ID_VOLTAGE_CORNER			= 109,
+	MSM_RPM_8930_STATUS_ID_DDR_DMM_0			= 108,
+	MSM_RPM_8930_STATUS_ID_DDR_DMM_1			= 109,
+	MSM_RPM_8930_STATUS_ID_QDSS_CLK				= 110,
+	MSM_RPM_8930_STATUS_ID_VOLTAGE_CORNER			= 111,
 	MSM_RPM_8930_STATUS_ID_LAST = MSM_RPM_8930_STATUS_ID_VOLTAGE_CORNER,
 };
 
diff --git a/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h b/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
new file mode 100644
index 0000000..2eb59f5
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_INCLUDE_MACH_RPM_REGULATOR_SMD_H
+#define __ARCH_ARM_MACH_MSM_INCLUDE_MACH_RPM_REGULATOR_SMD_H
+
+#include <linux/device.h>
+
+struct rpm_regulator;
+
+#if defined(CONFIG_MSM_RPM_REGULATOR_SMD) || defined(CONFIG_MSM_RPM_REGULATOR)
+
+struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply);
+
+void rpm_regulator_put(struct rpm_regulator *regulator);
+
+int rpm_regulator_enable(struct rpm_regulator *regulator);
+
+int rpm_regulator_disable(struct rpm_regulator *regulator);
+
+int rpm_regulator_set_voltage(struct rpm_regulator *regulator, int min_uV,
+			      int max_uV);
+
+int __init rpm_regulator_smd_driver_init(void);
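+
+/*
+ * Minimal consumer usage sketch (illustrative only; "my-supply" and the
+ * voltage range are hypothetical and must match the board's second-level
+ * regulator device tree node and constraints; error handling of the
+ * individual calls is omitted):
+ *
+ *	struct rpm_regulator *reg = rpm_regulator_get(dev, "my-supply");
+ *	if (!IS_ERR_OR_NULL(reg)) {
+ *		rpm_regulator_set_voltage(reg, 1800000, 1800000);
+ *		rpm_regulator_enable(reg);
+ *		...
+ *		rpm_regulator_disable(reg);
+ *		rpm_regulator_put(reg);
+ *	}
+ */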
+
+#else
+
+static inline struct rpm_regulator *rpm_regulator_get(struct device *dev,
+					const char *supply) { return NULL; }
+
+static inline void rpm_regulator_put(struct rpm_regulator *regulator) { }
+
+static inline int rpm_regulator_enable(struct rpm_regulator *regulator)
+			{ return 0; }
+
+static inline int rpm_regulator_disable(struct rpm_regulator *regulator)
+			{ return 0; }
+
+static inline int rpm_regulator_set_voltage(struct rpm_regulator *regulator,
+					int min_uV, int max_uV) { return 0; }
+
+static inline int __init rpm_regulator_smd_driver_init(void) { return 0; }
+
+#endif /* CONFIG_MSM_RPM_REGULATOR_SMD */
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/rpm-regulator.h b/arch/arm/mach-msm/include/mach/rpm-regulator.h
index f9fc487..a010257 100644
--- a/arch/arm/mach-msm/include/mach/rpm-regulator.h
+++ b/arch/arm/mach-msm/include/mach/rpm-regulator.h
@@ -81,6 +81,20 @@
 };
 
 /**
+ * enum rpm_vreg_voter - RPM regulator voter IDs for private APIs
+ */
+enum rpm_vreg_voter {
+	RPM_VREG_VOTER_REG_FRAMEWORK,	/* for internal use only */
+	RPM_VREG_VOTER1,		/* for use by the acpu-clock driver */
+	RPM_VREG_VOTER2,		/* for use by the acpu-clock driver */
+	RPM_VREG_VOTER3,		/* for use by other drivers */
+	RPM_VREG_VOTER4,		/* for use by the acpu-clock driver */
+	RPM_VREG_VOTER5,		/* for use by the acpu-clock driver */
+	RPM_VREG_VOTER6,		/* for use by the acpu-clock driver */
+	RPM_VREG_VOTER_COUNT,
+};
+
+/**
  * struct rpm_regulator_init_data - RPM regulator initialization data
  * @init_data:		regulator constraints
  * @id:			regulator id; from enum rpm_vreg_id
@@ -132,28 +146,27 @@
 };
 
 /**
- * struct rpm_regulator_platform_data - RPM regulator platform data
+ * struct rpm_regulator_consumer_mapping - mapping used by private consumers
  */
-struct rpm_regulator_platform_data {
-	struct rpm_regulator_init_data	*init_data;
-	int				num_regulators;
-	enum rpm_vreg_version		version;
-	int				vreg_id_vdd_mem;
-	int				vreg_id_vdd_dig;
+struct rpm_regulator_consumer_mapping {
+	const char		*dev_name;
+	const char		*supply;
+	int			vreg_id;
+	enum rpm_vreg_voter	voter;
+	int			sleep_also;
 };
 
 /**
- * enum rpm_vreg_voter - RPM regulator voter IDs for private APIs
+ * struct rpm_regulator_platform_data - RPM regulator platform data
  */
-enum rpm_vreg_voter {
-	RPM_VREG_VOTER_REG_FRAMEWORK,	/* for internal use only */
-	RPM_VREG_VOTER1,		/* for use by the acpu-clock driver */
-	RPM_VREG_VOTER2,		/* for use by the acpu-clock driver */
-	RPM_VREG_VOTER3,		/* for use by other drivers */
-	RPM_VREG_VOTER4,		/* for use by the acpu-clock driver */
-	RPM_VREG_VOTER5,		/* for use by the acpu-clock driver */
-	RPM_VREG_VOTER6,		/* for use by the acpu-clock driver */
-	RPM_VREG_VOTER_COUNT,
+struct rpm_regulator_platform_data {
+	struct rpm_regulator_init_data		*init_data;
+	int					num_regulators;
+	enum rpm_vreg_version			version;
+	int					vreg_id_vdd_mem;
+	int					vreg_id_vdd_dig;
+	struct rpm_regulator_consumer_mapping	*consumer_map;
+	int					consumer_map_len;
 };
 
 #ifdef CONFIG_MSM_RPM_REGULATOR
diff --git a/arch/arm/mach-msm/include/mach/usb_gadget_xport.h b/arch/arm/mach-msm/include/mach/usb_gadget_xport.h
index d8a3e60..be11989 100644
--- a/arch/arm/mach-msm/include/mach/usb_gadget_xport.h
+++ b/arch/arm/mach-msm/include/mach/usb_gadget_xport.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,7 @@
 	USB_GADGET_XPORT_BAM,
 	USB_GADGET_XPORT_BAM2BAM,
 	USB_GADGET_XPORT_HSIC,
+	USB_GADGET_XPORT_HSUART,
 	USB_GADGET_XPORT_NONE,
 };
 
@@ -42,6 +43,8 @@
 		return "BAM2BAM";
 	case USB_GADGET_XPORT_HSIC:
 		return "HSIC";
+	case USB_GADGET_XPORT_HSUART:
+		return "HSUART";
 	case USB_GADGET_XPORT_NONE:
 		return "NONE";
 	default:
@@ -63,6 +66,8 @@
 		return USB_GADGET_XPORT_BAM2BAM;
 	if (!strncasecmp("HSIC", name, XPORT_STR_LEN))
 		return USB_GADGET_XPORT_HSIC;
+	if (!strncasecmp("HSUART", name, XPORT_STR_LEN))
+		return USB_GADGET_XPORT_HSUART;
 	if (!strncasecmp("", name, XPORT_STR_LEN))
 		return USB_GADGET_XPORT_NONE;
 
@@ -79,6 +84,11 @@
 #define NUM_PORTS (NUM_RMNET_HSIC_PORTS \
 	+ NUM_DUN_HSIC_PORTS)
 
+#define NUM_RMNET_HSUART_PORTS 1
+#define NUM_DUN_HSUART_PORTS 1
+#define NUM_HSUART_PORTS (NUM_RMNET_HSUART_PORTS \
+	+ NUM_DUN_HSUART_PORTS)
+
 int ghsic_ctrl_connect(void *, int);
 void ghsic_ctrl_disconnect(void *, int);
 int ghsic_ctrl_setup(unsigned int, enum gadget_type);
@@ -86,4 +96,10 @@
 void ghsic_data_disconnect(void *, int);
 int ghsic_data_setup(unsigned int, enum gadget_type);
 
+int ghsuart_ctrl_connect(void *, int);
+void ghsuart_ctrl_disconnect(void *, int);
+int ghsuart_ctrl_setup(unsigned int, enum gadget_type);
+int ghsuart_data_connect(void *, int);
+void ghsuart_data_disconnect(void *, int);
+int ghsuart_data_setup(unsigned int, enum gadget_type);
 #endif
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 59d3a96..2a0d34a 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -281,6 +281,9 @@
 	},
 	MSM_CHIP_DEVICE(QFPROM, APQ8064),
 	MSM_CHIP_DEVICE(SIC_NON_SECURE, APQ8064),
+#ifdef CONFIG_DEBUG_APQ8064_UART
+	MSM_DEVICE(DEBUG_UART),
+#endif
 };
 
 void __init msm_map_apq8064_io(void)
diff --git a/arch/arm/mach-msm/iommu-v2.c b/arch/arm/mach-msm/iommu-v2.c
new file mode 100644
index 0000000..41f5043
--- /dev/null
+++ b/arch/arm/mach-msm/iommu-v2.c
@@ -0,0 +1,570 @@
+/* Copyright (c) 2012 Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/clk.h>
+#include <linux/scatterlist.h>
+
+#include <asm/sizes.h>
+
+#include <mach/iommu_hw-v2.h>
+#include <mach/iommu.h>
+
+#include "iommu_pagetable.h"
+
+static DEFINE_MUTEX(msm_iommu_lock);
+
+struct msm_priv {
+	struct iommu_pt pt;
+	struct list_head list_attached;
+};
+
+static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
+{
+	int ret;
+
+	ret = clk_prepare_enable(drvdata->pclk);
+	if (ret)
+		goto fail;
+
+	if (drvdata->clk) {
+		ret = clk_prepare_enable(drvdata->clk);
+		if (ret)
+			clk_disable_unprepare(drvdata->pclk);
+	}
+fail:
+	return ret;
+}
+
+static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
+{
+	if (drvdata->clk)
+		clk_disable_unprepare(drvdata->clk);
+	clk_disable_unprepare(drvdata->pclk);
+}
+
+static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
+{
+	struct msm_priv *priv = domain->priv;
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	int asid;
+
+	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
+		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
+
+		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+		BUG_ON(!iommu_drvdata);
+
+		asid = GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base,
+					   ctx_drvdata->num);
+
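+		/*
+		 * CB_TLBIVA takes the page-aligned VA in its upper bits
+		 * (CB_TLBIVA_VA_SHIFT) and the ASID in its low bits, so the
+		 * two values can simply be OR'd together.
+		 */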
+		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
+			   asid | (va & CB_TLBIVA_VA));
+		mb();
+	}
+
+	return 0;
+}
+
+static int __flush_iotlb(struct iommu_domain *domain)
+{
+	struct msm_priv *priv = domain->priv;
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	int asid;
+
+	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
+		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
+
+		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+		BUG_ON(!iommu_drvdata);
+
+		asid = GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base,
+					   ctx_drvdata->num);
+
+		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
+		mb();
+	}
+
+	return 0;
+}
+
+static void __reset_context(void __iomem *base, int ctx)
+{
+	SET_ACTLR(base, ctx, 0);
+	SET_FAR(base, ctx, 0);
+	SET_FSRRESTORE(base, ctx, 0);
+	SET_NMRR(base, ctx, 0);
+	SET_PAR(base, ctx, 0);
+	SET_PRRR(base, ctx, 0);
+	SET_SCTLR(base, ctx, 0);
+	SET_TLBIALL(base, ctx, 0);
+	SET_TTBCR(base, ctx, 0);
+	SET_TTBR0(base, ctx, 0);
+	SET_TTBR1(base, ctx, 0);
+	mb();
+}
+
+static void __program_context(void __iomem *base, int ctx, int ncb,
+				phys_addr_t pgtable, int redirect)
+{
+	unsigned int prrr, nmrr;
+	unsigned int pn;
+	int i, j, found;
+
+	__reset_context(base, ctx);
+
+	pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
+	SET_TTBCR(base, ctx, 0);
+	SET_CB_TTBR0_ADDR(base, ctx, pn);
+
+	/* Enable context fault interrupt */
+	SET_CB_SCTLR_CFIE(base, ctx, 1);
+
+	/* Redirect all cacheable requests to L2 slave port. */
+	SET_CB_ACTLR_BPRCISH(base, ctx, 1);
+	SET_CB_ACTLR_BPRCOSH(base, ctx, 1);
+	SET_CB_ACTLR_BPRCNSH(base, ctx, 1);
+
+	/* Turn on TEX Remap */
+	SET_CB_SCTLR_TRE(base, ctx, 1);
+
+	/* Enable private ASID namespace */
+	SET_CB_SCTLR_ASIDPNE(base, ctx, 1);
+
+	/* Set TEX remap attributes */
+	RCP15_PRRR(prrr);
+	RCP15_NMRR(nmrr);
+	SET_PRRR(base, ctx, prrr);
+	SET_NMRR(base, ctx, nmrr);
+
+	/* Configure page tables as inner-cacheable and shareable to reduce
+	 * the TLB miss penalty.
+	 */
+	if (redirect) {
+		SET_CB_TTBR0_S(base, ctx, 1);
+		SET_CB_TTBR0_NOS(base, ctx, 1);
+		SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */
+		SET_CB_TTBR0_IRGN0(base, ctx, 1);
+		SET_CB_TTBR0_RGN(base, ctx, 1);   /* WB, WA */
+	}
+
+	/* Find if this page table is used elsewhere, and re-use ASID */
+	found = 0;
+	for (i = 0; i < ncb; i++)
+		if ((GET_CB_TTBR0_ADDR(base, i) == pn) && (i != ctx)) {
+			SET_CB_CONTEXTIDR_ASID(base, ctx,
+					GET_CB_CONTEXTIDR_ASID(base, i));
+			found = 1;
+			break;
+		}
+
+	/* If page table is new, find an unused ASID */
+	if (!found) {
+		for (i = 0; i < ncb; i++) {
+			found = 0;
+			for (j = 0; j < ncb; j++) {
+				if (GET_CB_CONTEXTIDR_ASID(base, j) == i &&
+				    j != ctx)
+					found = 1;
+			}
+
+			if (!found) {
+				SET_CB_CONTEXTIDR_ASID(base, ctx, i);
+				break;
+			}
+		}
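+		/*
+		 * ncb candidate ASIDs were tried and at most ncb - 1 other
+		 * contexts can hold one, so a free ASID must have been
+		 * found; 'found' still being set here indicates a bug.
+		 */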
+		BUG_ON(found);
+	}
+
+	/* Enable the MMU */
+	SET_CB_SCTLR_M(base, ctx, 1);
+	mb();
+}
+
+static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
+{
+	struct msm_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		goto fail_nomem;
+
+#ifdef CONFIG_IOMMU_PGTABLES_L2
+	priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
+#endif
+
+	INIT_LIST_HEAD(&priv->list_attached);
+	if (msm_iommu_pagetable_alloc(&priv->pt))
+		goto fail_nomem;
+
+	domain->priv = priv;
+	return 0;
+
+fail_nomem:
+	kfree(priv);
+	return -ENOMEM;
+}
+
+static void msm_iommu_domain_destroy(struct iommu_domain *domain)
+{
+	struct msm_priv *priv;
+
+	mutex_lock(&msm_iommu_lock);
+	priv = domain->priv;
+	domain->priv = NULL;
+
+	if (priv)
+		msm_iommu_pagetable_free(&priv->pt);
+
+	kfree(priv);
+	mutex_unlock(&msm_iommu_lock);
+}
+
+static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+	struct msm_priv *priv;
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	struct msm_iommu_ctx_drvdata *tmp_drvdata;
+	int ret = 0;
+
+	mutex_lock(&msm_iommu_lock);
+
+	priv = domain->priv;
+	if (!priv || !dev) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	iommu_drvdata = dev_get_drvdata(dev->parent);
+	ctx_drvdata = dev_get_drvdata(dev);
+	if (!iommu_drvdata || !ctx_drvdata) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (!list_empty(&ctx_drvdata->attached_elm)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
+		if (tmp_drvdata == ctx_drvdata) {
+			ret = -EBUSY;
+			goto fail;
+		}
+
+	ret = __enable_clocks(iommu_drvdata);
+	if (ret)
+		goto fail;
+
+	__program_context(iommu_drvdata->base, ctx_drvdata->num,
+		iommu_drvdata->ncb, __pa(priv->pt.fl_table),
+		priv->pt.redirect);
+
+	__disable_clocks(iommu_drvdata);
+	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
+	ctx_drvdata->attached_domain = domain;
+
+fail:
+	mutex_unlock(&msm_iommu_lock);
+	return ret;
+}
+
+static void msm_iommu_detach_dev(struct iommu_domain *domain,
+				 struct device *dev)
+{
+	struct msm_priv *priv;
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	int ret;
+
+	mutex_lock(&msm_iommu_lock);
+	priv = domain->priv;
+	if (!priv || !dev)
+		goto fail;
+
+	iommu_drvdata = dev_get_drvdata(dev->parent);
+	ctx_drvdata = dev_get_drvdata(dev);
+	if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
+		goto fail;
+
+	ret = __enable_clocks(iommu_drvdata);
+	if (ret)
+		goto fail;
+
+	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
+		GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_drvdata->num));
+
+	__reset_context(iommu_drvdata->base, ctx_drvdata->num);
+	__disable_clocks(iommu_drvdata);
+	list_del_init(&ctx_drvdata->attached_elm);
+	ctx_drvdata->attached_domain = NULL;
+
+fail:
+	mutex_unlock(&msm_iommu_lock);
+}
+
+static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
+			 phys_addr_t pa, int order, int prot)
+{
+	struct msm_priv *priv;
+	int ret = 0;
+
+	mutex_lock(&msm_iommu_lock);
+
+	priv = domain->priv;
+	if (!priv) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	ret = msm_iommu_pagetable_map(&priv->pt, va, pa, order, prot);
+	if (ret)
+		goto fail;
+
+	ret = __flush_iotlb_va(domain, va);
+fail:
+	mutex_unlock(&msm_iommu_lock);
+	return ret;
+}
+
+static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+			    int order)
+{
+	struct msm_priv *priv;
+	int ret = 0;
+
+	mutex_lock(&msm_iommu_lock);
+
+	priv = domain->priv;
+	if (!priv) {
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	ret = msm_iommu_pagetable_unmap(&priv->pt, va, order);
+	if (ret < 0)
+		goto fail;
+
+	ret = __flush_iotlb_va(domain, va);
+fail:
+	mutex_unlock(&msm_iommu_lock);
+	return ret;
+}
+
+static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
+			       struct scatterlist *sg, unsigned int len,
+			       int prot)
+{
+	int ret;
+	struct msm_priv *priv;
+
+	mutex_lock(&msm_iommu_lock);
+
+	priv = domain->priv;
+	if (!priv) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	ret = msm_iommu_pagetable_map_range(&priv->pt, va, sg, len, prot);
+	if (ret)
+		goto fail;
+
+	__flush_iotlb(domain);
+fail:
+	mutex_unlock(&msm_iommu_lock);
+	return ret;
+}
+
+
+static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
+				 unsigned int len)
+{
+	struct msm_priv *priv;
+
+	mutex_lock(&msm_iommu_lock);
+
+	priv = domain->priv;
+	msm_iommu_pagetable_unmap_range(&priv->pt, va, len);
+
+	__flush_iotlb(domain);
+	mutex_unlock(&msm_iommu_lock);
+	return 0;
+}
+
+static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
+					  unsigned long va)
+{
+	struct msm_priv *priv;
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	unsigned int par;
+	void __iomem *base;
+	phys_addr_t pa = 0;
+	int ctx;
+
+	mutex_lock(&msm_iommu_lock);
+
+	priv = domain->priv;
+	if (list_empty(&priv->list_attached))
+		goto fail;
+
+	ctx_drvdata = list_entry(priv->list_attached.next,
+				 struct msm_iommu_ctx_drvdata, attached_elm);
+	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+
+	base = iommu_drvdata->base;
+	ctx = ctx_drvdata->num;
+
+	SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
+	mb();
+	while (GET_CB_ATSR_ACTIVE(base, ctx))
+		cpu_relax();
+
+	par = GET_PAR(base, ctx);
+	if (par & CB_PAR_F) {
+		pa = 0;
+	} else {
+		/* We are dealing with a supersection */
+		if (par & CB_PAR_SS)
+			pa = (par & 0xFF000000) | (va & 0x00FFFFFF);
+		else /* Upper 20 bits from PAR, lower 12 from VA */
+			pa = (par & 0xFFFFF000) | (va & 0x00000FFF);
+	}
+
+fail:
+	mutex_unlock(&msm_iommu_lock);
+	return pa;
+}
+
+static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
+				    unsigned long cap)
+{
+	return 0;
+}
+
+static void print_ctx_regs(void __iomem *base, int ctx, unsigned int fsr)
+{
+	pr_err("FAR    = %08x    PAR    = %08x\n",
+		 GET_FAR(base, ctx), GET_PAR(base, ctx));
+	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
+			(fsr & 0x02) ? "TF " : "",
+			(fsr & 0x04) ? "AFF " : "",
+			(fsr & 0x08) ? "PF " : "",
+			(fsr & 0x10) ? "EF " : "",
+			(fsr & 0x20) ? "TLBMCF " : "",
+			(fsr & 0x40) ? "TLBLKF " : "",
+			(fsr & 0x80) ? "MHF " : "",
+			(fsr & 0x40000000) ? "SS " : "",
+			(fsr & 0x80000000) ? "MULTI " : "");
+
+	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
+		 GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
+	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
+		 GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
+	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
+		 GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
+	pr_err("PRRR   = %08x    NMRR   = %08x\n",
+		 GET_PRRR(base, ctx), GET_NMRR(base, ctx));
+}
+
+irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id)
+{
+	struct platform_device *pdev = dev_id;
+	struct msm_iommu_drvdata *drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	unsigned int fsr;
+	int ret = IRQ_NONE;
+
+	mutex_lock(&msm_iommu_lock);
+
+	BUG_ON(!pdev);
+
+	drvdata = dev_get_drvdata(pdev->dev.parent);
+	BUG_ON(!drvdata);
+
+	ctx_drvdata = dev_get_drvdata(&pdev->dev);
+	BUG_ON(!ctx_drvdata);
+
+	fsr = GET_FSR(drvdata->base, ctx_drvdata->num);
+	if (fsr) {
+		if (!ctx_drvdata->attached_domain) {
+			pr_err("Bad domain in interrupt handler\n");
+			ret = -ENOSYS;
+		} else
+			ret = report_iommu_fault(ctx_drvdata->attached_domain,
+				&ctx_drvdata->pdev->dev,
+				GET_FAR(drvdata->base, ctx_drvdata->num), 0);
+
+		if (ret == -ENOSYS) {
+			pr_err("Unexpected IOMMU page fault!\n");
+			pr_err("name = %s\n", drvdata->name);
+			pr_err("context = %s (%d)\n", ctx_drvdata->name,
+							ctx_drvdata->num);
+			pr_err("Interesting registers:\n");
+			print_ctx_regs(drvdata->base, ctx_drvdata->num, fsr);
+		}
+
+		SET_FSR(drvdata->base, ctx_drvdata->num, fsr);
+		ret = IRQ_HANDLED;
+	} else
+		ret = IRQ_NONE;
+
+	mutex_unlock(&msm_iommu_lock);
+	return ret;
+}
+
+static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
+{
+	struct msm_priv *priv = domain->priv;
+	return __pa(priv->pt.fl_table);
+}
+
+static struct iommu_ops msm_iommu_ops = {
+	.domain_init = msm_iommu_domain_init,
+	.domain_destroy = msm_iommu_domain_destroy,
+	.attach_dev = msm_iommu_attach_dev,
+	.detach_dev = msm_iommu_detach_dev,
+	.map = msm_iommu_map,
+	.unmap = msm_iommu_unmap,
+	.map_range = msm_iommu_map_range,
+	.unmap_range = msm_iommu_unmap_range,
+	.iova_to_phys = msm_iommu_iova_to_phys,
+	.domain_has_cap = msm_iommu_domain_has_cap,
+	.get_pt_base_addr = msm_iommu_get_pt_base_addr
+};
+
+static int __init msm_iommu_init(void)
+{
+	msm_iommu_pagetable_init();
+	register_iommu(&msm_iommu_ops);
+	return 0;
+}
+
+subsys_initcall(msm_iommu_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMMU v2 Driver");
diff --git a/arch/arm/mach-msm/iommu.c b/arch/arm/mach-msm/iommu.c
index 49a3e6f..89eef57 100644
--- a/arch/arm/mach-msm/iommu.c
+++ b/arch/arm/mach-msm/iommu.c
@@ -1083,7 +1083,7 @@
 
 static int __init msm_iommu_init(void)
 {
-	if (!msm_soc_version_supports_iommu())
+	if (!msm_soc_version_supports_iommu_v1())
 		return -ENODEV;
 
 	setup_iommu_tex_classes();
diff --git a/arch/arm/mach-msm/iommu_dev-v2.c b/arch/arm/mach-msm/iommu_dev-v2.c
new file mode 100644
index 0000000..e690ada
--- /dev/null
+++ b/arch/arm/mach-msm/iommu_dev-v2.c
@@ -0,0 +1,376 @@
+/* Copyright (c) 2012 Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/iommu.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+
+#include <mach/iommu_hw-v2.h>
+#include <mach/iommu.h>
+
+static void msm_iommu_reset(void __iomem *base)
+{
+	int i;
+
+	SET_ACR(base, 0);
+	SET_NSACR(base, 0);
+	SET_CR2(base, 0);
+	SET_NSCR2(base, 0);
+	SET_GFAR(base, 0);
+	SET_GFSRRESTORE(base, 0);
+	SET_TLBIALLNSNH(base, 0);
+	SET_PMCR(base, 0);
+	SET_SCR1(base, 0);
+	SET_SSDR_N(base, 0, 0);
+
+	for (i = 0; i < MAX_NUM_SMR; i++)
+		SET_SMR_VALID(base, i, 0);
+
+	mb();
+}
+
+static int msm_iommu_parse_dt(struct platform_device *pdev,
+				struct msm_iommu_drvdata *drvdata)
+{
+	struct device_node *child;
+	int ret;
+
+	ret = device_move(&pdev->dev, &msm_iommu_root_dev->dev, DPM_ORDER_NONE);
+	if (ret)
+		return ret;
+
+	for_each_child_of_node(pdev->dev.of_node, child) {
+		drvdata->ncb++;
+		if (!of_platform_device_create(child, NULL, &pdev->dev))
+			pr_err("Failed to create %s device\n", child->name);
+	}
+
+	drvdata->name = dev_name(&pdev->dev);
+	return 0;
+}
+
+static atomic_t msm_iommu_next_id = ATOMIC_INIT(-1);
+
+static int __devinit msm_iommu_probe(struct platform_device *pdev)
+{
+	struct msm_iommu_drvdata *drvdata;
+	struct resource *r;
+	int ret;
+
+	if (msm_iommu_root_dev == pdev)
+		return 0;
+
+	if (pdev->id == -1)
+		pdev->id = atomic_inc_return(&msm_iommu_next_id) - 1;
+
+	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r)
+		return -EINVAL;
+
+	drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+	if (!drvdata->base)
+		return -ENOMEM;
+
+	drvdata->pclk = clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(drvdata->pclk))
+		return PTR_ERR(drvdata->pclk);
+
+	ret = clk_prepare_enable(drvdata->pclk);
+	if (ret)
+		goto fail_enable;
+
+	drvdata->clk = clk_get(&pdev->dev, "core_clk");
+	if (!IS_ERR(drvdata->clk)) {
+		if (clk_get_rate(drvdata->clk) == 0) {
+			ret = clk_round_rate(drvdata->clk, 1);
+			clk_set_rate(drvdata->clk, ret);
+		}
+
+		ret = clk_prepare_enable(drvdata->clk);
+		if (ret) {
+			clk_put(drvdata->clk);
+			goto fail_pclk;
+		}
+	} else
+		drvdata->clk = NULL;
+
+	msm_iommu_reset(drvdata->base);
+
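+	/*
+	 * Global configuration: raise faults on stream-match conflicts
+	 * (SMCFCFG) and unmatched streams (USFCFG), disable stalling on
+	 * faults (STALLD), enable global configuration and global fault
+	 * interrupts/reporting, and clear CLIENTPD so client transactions
+	 * are translated instead of bypassed.
+	 */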
+	SET_CR0_SMCFCFG(drvdata->base, 1);
+	SET_CR0_USFCFG(drvdata->base, 1);
+	SET_CR0_STALLD(drvdata->base, 1);
+	SET_CR0_GCFGFIE(drvdata->base, 1);
+	SET_CR0_GCFGFRE(drvdata->base, 1);
+	SET_CR0_GFIE(drvdata->base, 1);
+	SET_CR0_GFRE(drvdata->base, 1);
+	SET_CR0_CLIENTPD(drvdata->base, 0);
+
+	ret = msm_iommu_parse_dt(pdev, drvdata);
+	if (ret)
+		goto fail_clk;
+
+	pr_info("device %s mapped at %p, with %d ctx banks\n",
+		drvdata->name, drvdata->base, drvdata->ncb);
+
+	platform_set_drvdata(pdev, drvdata);
+
+	if (drvdata->clk)
+		clk_disable_unprepare(drvdata->clk);
+
+	clk_disable_unprepare(drvdata->pclk);
+
+	return 0;
+
+fail_clk:
+	if (drvdata->clk) {
+		clk_disable_unprepare(drvdata->clk);
+		clk_put(drvdata->clk);
+	}
+fail_pclk:
+	clk_disable_unprepare(drvdata->pclk);
+fail_enable:
+	clk_put(drvdata->pclk);
+	return ret;
+}
+
+static int __devexit msm_iommu_remove(struct platform_device *pdev)
+{
+	struct msm_iommu_drvdata *drv = NULL;
+
+	drv = platform_get_drvdata(pdev);
+	if (drv) {
+		if (drv->clk)
+			clk_put(drv->clk);
+		clk_put(drv->pclk);
+		platform_set_drvdata(pdev, NULL);
+	}
+	return 0;
+}
+
+static int msm_iommu_ctx_parse_dt(struct platform_device *pdev,
+				struct msm_iommu_drvdata *drvdata,
+				struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+	struct resource *r, rp;
+	u32 sids[MAX_NUM_SMR];
+	int num = 0;
+	int irq, i, ret, len = 0;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq > 0) {
+		ret = request_threaded_irq(irq, NULL,
+				msm_iommu_fault_handler_v2,
+				IRQF_ONESHOT | IRQF_SHARED,
+				"msm_iommu_nonsecure_irq", pdev);
+		if (ret) {
+			pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
+			return ret;
+		}
+	}
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r)
+		return -EINVAL;
+
+	ret = of_address_to_resource(pdev->dev.parent->of_node, 0, &rp);
+	if (ret)
+		return -EINVAL;
+
+	/* Calculate the context bank number using the base addresses. The
+	 * first 8 pages belong to the global address space which is followed
+	 * by the context banks, hence subtract by 8 to get the context bank
+	 * number.
+	 */
+	ctx_drvdata->num = ((r->start - rp.start) >> CTX_SHIFT) - 8;
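+	/*
+	 * Example (assuming CTX_SHIFT matches the 4 KB register page size):
+	 * a context bank whose registers start 9 pages above the global
+	 * space is bank (9 - 8) = 1.
+	 */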
+
+	if (of_property_read_string(pdev->dev.of_node, "qcom,iommu-ctx-name",
+					&ctx_drvdata->name))
+		ctx_drvdata->name = dev_name(&pdev->dev);
+
+	of_get_property(pdev->dev.of_node, "qcom,iommu-ctx-sids", &len);
+	BUG_ON(len >= sizeof(sids));
+	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-ctx-sids",
+					sids, len / sizeof(*sids)))
+		return -EINVAL;
+
+	/* Program the M2V tables for this context */
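+	/* For each SID: claim the first free Stream Match Register, match the
+	 * SID exactly (mask 0) and steer it to this context bank via S2CR.
+	 */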
+	for (i = 0; i < len / sizeof(*sids); i++) {
+		for (; num < MAX_NUM_SMR; num++)
+			if (GET_SMR_VALID(drvdata->base, num) == 0)
+				break;
+		BUG_ON(num >= MAX_NUM_SMR);
+
+		SET_SMR_VALID(drvdata->base, num, 1);
+		SET_SMR_MASK(drvdata->base, num, 0);
+		SET_SMR_ID(drvdata->base, num, sids[i]);
+
+		/* Set VMID = 0 */
+		SET_S2CR_N(drvdata->base, num, 0);
+		SET_S2CR_CBNDX(drvdata->base, num, ctx_drvdata->num);
+		/* Set security bit override to be Non-secure */
+		SET_S2CR_NSCFG(drvdata->base, sids[i], 3);
+
+		SET_CBAR_N(drvdata->base, ctx_drvdata->num, 0);
+		/* Stage 1 Context with Stage 2 bypass */
+		SET_CBAR_TYPE(drvdata->base, ctx_drvdata->num, 1);
+		/* Route page faults to the non-secure interrupt */
+		SET_CBAR_IRPTNDX(drvdata->base, ctx_drvdata->num, 1);
+	}
+	mb();
+
+	return 0;
+}
+
+static int __devinit msm_iommu_ctx_probe(struct platform_device *pdev)
+{
+	struct msm_iommu_drvdata *drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata = NULL;
+	int ret;
+
+	if (!pdev->dev.parent)
+		return -EINVAL;
+
+	drvdata = dev_get_drvdata(pdev->dev.parent);
+	if (!drvdata)
+		return -ENODEV;
+
+	ctx_drvdata = devm_kzalloc(&pdev->dev, sizeof(*ctx_drvdata),
+					GFP_KERNEL);
+	if (!ctx_drvdata)
+		return -ENOMEM;
+
+	ctx_drvdata->pdev = pdev;
+	INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
+	platform_set_drvdata(pdev, ctx_drvdata);
+
+	ret = clk_prepare_enable(drvdata->pclk);
+	if (ret)
+		return ret;
+
+	if (drvdata->clk) {
+		ret = clk_prepare_enable(drvdata->clk);
+		if (ret) {
+			clk_disable_unprepare(drvdata->pclk);
+			return ret;
+		}
+	}
+
+	ret = msm_iommu_ctx_parse_dt(pdev, drvdata, ctx_drvdata);
+	if (!ret)
+		dev_info(&pdev->dev, "context %s using bank %d\n",
+				dev_name(&pdev->dev), ctx_drvdata->num);
+
+	if (drvdata->clk)
+		clk_disable_unprepare(drvdata->clk);
+	clk_disable_unprepare(drvdata->pclk);
+
+	return ret;
+}
+
+static int __devexit msm_iommu_ctx_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static struct of_device_id msm_iommu_match_table[] = {
+	{ .compatible = "qcom,msm-smmu-v2", },
+	{}
+};
+
+static struct platform_driver msm_iommu_driver = {
+	.driver = {
+		.name	= "msm_iommu_v2",
+		.of_match_table = msm_iommu_match_table,
+	},
+	.probe		= msm_iommu_probe,
+	.remove		= __devexit_p(msm_iommu_remove),
+};
+
+static struct of_device_id msm_iommu_ctx_match_table[] = {
+	{ .name = "qcom,iommu-ctx", },
+	{}
+};
+
+static struct platform_driver msm_iommu_ctx_driver = {
+	.driver = {
+		.name	= "msm_iommu_ctx_v2",
+		.of_match_table = msm_iommu_ctx_match_table,
+	},
+	.probe		= msm_iommu_ctx_probe,
+	.remove		= __devexit_p(msm_iommu_ctx_remove),
+};
+
+static int __init msm_iommu_driver_init(void)
+{
+	struct device_node *node;
+	int ret;
+
+	node = of_find_compatible_node(NULL, NULL, "qcom,msm-smmu-v2");
+	if (!node)
+		return -ENODEV;
+
+	of_node_put(node);
+
+	msm_iommu_root_dev = platform_device_register_simple(
+						"msm_iommu", -1, 0, 0);
+	if (IS_ERR(msm_iommu_root_dev)) {
+		pr_err("Failed to create root IOMMU device\n");
+		ret = PTR_ERR(msm_iommu_root_dev);
+		goto error;
+	}
+
+	atomic_inc(&msm_iommu_next_id);
+
+	ret = platform_driver_register(&msm_iommu_driver);
+	if (ret != 0) {
+		pr_err("Failed to register IOMMU driver\n");
+		goto error;
+	}
+
+	ret = platform_driver_register(&msm_iommu_ctx_driver);
+	if (ret != 0) {
+		pr_err("Failed to register IOMMU context driver\n");
+		goto error;
+	}
+
+error:
+	return ret;
+}
+
+static void __exit msm_iommu_driver_exit(void)
+{
+	platform_driver_unregister(&msm_iommu_ctx_driver);
+	platform_driver_unregister(&msm_iommu_driver);
+	platform_device_unregister(msm_iommu_root_dev);
+}
+
+subsys_initcall(msm_iommu_driver_init);
+module_exit(msm_iommu_driver_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-msm/iommu_dev.c b/arch/arm/mach-msm/iommu_dev.c
index b8b5aa3..c164825 100644
--- a/arch/arm/mach-msm/iommu_dev.c
+++ b/arch/arm/mach-msm/iommu_dev.c
@@ -33,13 +33,14 @@
 	struct device *dev;
 };
 
-static struct platform_device *msm_iommu_root_dev;
+struct platform_device *msm_iommu_root_dev;
 
 static int each_iommu_ctx(struct device *dev, void *data)
 {
 	struct iommu_ctx_iter_data *res = data;
-	struct msm_iommu_ctx_dev *c = dev->platform_data;
+	struct msm_iommu_ctx_drvdata *c;
 
+	c = dev_get_drvdata(dev);
 	if (!res || !c || !c->name || !res->name)
 		return -EINVAL;
 
diff --git a/arch/arm/mach-msm/iommu_pagetable.c b/arch/arm/mach-msm/iommu_pagetable.c
new file mode 100644
index 0000000..b485605
--- /dev/null
+++ b/arch/arm/mach-msm/iommu_pagetable.c
@@ -0,0 +1,520 @@
+/* Copyright (c) 2012 Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/scatterlist.h>
+
+#include <asm/cacheflush.h>
+
+#include <mach/iommu.h>
+#include "iommu_pagetable.h"
+
+/* Sharability attributes of MSM IOMMU mappings */
+#define MSM_IOMMU_ATTR_NON_SH		0x0
+#define MSM_IOMMU_ATTR_SH		0x4
+
+/* Cacheability attributes of MSM IOMMU mappings */
+#define MSM_IOMMU_ATTR_NONCACHED	0x0
+#define MSM_IOMMU_ATTR_CACHED_WB_WA	0x1
+#define MSM_IOMMU_ATTR_CACHED_WB_NWA	0x2
+#define MSM_IOMMU_ATTR_CACHED_WT	0x3
+
+static int msm_iommu_tex_class[4];
+
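+/* When the page tables are not kept in IOMMU-coherent (redirected) memory,
+ * PTE updates must be cleaned out of the CPU caches so the IOMMU table
+ * walker sees them.
+ */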
+static inline void clean_pte(unsigned long *start, unsigned long *end,
+				int redirect)
+{
+	if (!redirect)
+		dmac_flush_range(start, end);
+}
+
+int msm_iommu_pagetable_alloc(struct iommu_pt *pt)
+{
+	pt->fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
+							  get_order(SZ_16K));
+	if (!pt->fl_table)
+		return -ENOMEM;
+
+	memset(pt->fl_table, 0, SZ_16K);
+	clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);
+
+	return 0;
+}
+
+void msm_iommu_pagetable_free(struct iommu_pt *pt)
+{
+	unsigned long *fl_table;
+	int i;
+
+	fl_table = pt->fl_table;
+	for (i = 0; i < NUM_FL_PTE; i++)
+		if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
+			free_page((unsigned long) __va(((fl_table[i]) &
+							FL_BASE_MASK)));
+	free_pages((unsigned long)fl_table, get_order(SZ_16K));
+	pt->fl_table = 0;
+}
+
+static int __get_pgprot(int prot, int len)
+{
+	unsigned int pgprot;
+	int tex;
+
+	if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
+		prot |= IOMMU_READ | IOMMU_WRITE;
+		WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
+	}
+
+	if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
+		prot |= IOMMU_READ;
+		WARN_ONCE(1, "Write-only unsupported; falling back to RW\n");
+	}
+
+	if (prot & IOMMU_CACHE)
+		tex = (pgprot_kernel >> 2) & 0x07;
+	else
+		tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];
+
+	if (tex < 0 || tex > NUM_TEX_CLASS - 1)
+		return 0;
+
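+	/* Sections and supersections (1MB/16MB) use first-level attribute
+	 * bits; small and large pages (4KB/64KB) use second-level bits. AP2
+	 * is set to make the mapping read-only when IOMMU_WRITE was not
+	 * requested.
+	 */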
+	if (len == SZ_16M || len == SZ_1M) {
+		pgprot = FL_SHARED;
+		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
+		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
+		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
+		pgprot |= FL_AP0 | FL_AP1;
+		pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
+	} else	{
+		pgprot = SL_SHARED;
+		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
+		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
+		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
+		pgprot |= SL_AP0 | SL_AP1;
+		pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
+	}
+
+	return pgprot;
+}
+
+int msm_iommu_pagetable_map(struct iommu_pt *pt, unsigned long va,
+			phys_addr_t pa, int order, int prot)
+{
+	unsigned long *fl_pte;
+	unsigned long fl_offset;
+	unsigned long *sl_table;
+	unsigned long *sl_pte;
+	unsigned long sl_offset;
+	unsigned int pgprot;
+	size_t len = 0x1000UL << order;
+	int ret = 0;
+
+	if (len != SZ_16M && len != SZ_1M &&
+	    len != SZ_64K && len != SZ_4K) {
+		pr_debug("Bad size: %d\n", len);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (!pt->fl_table) {
+		pr_debug("Null page table\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	pgprot = __get_pgprot(prot, len);
+	if (!pgprot) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	fl_offset = FL_OFFSET(va);		/* Upper 12 bits */
+	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */
+
+	if (len == SZ_16M) {
+		int i = 0;
+
+		for (i = 0; i < 16; i++)
+			if (*(fl_pte+i)) {
+				ret = -EBUSY;
+				goto fail;
+			}
+
+		for (i = 0; i < 16; i++)
+			*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
+				  FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
+		clean_pte(fl_pte, fl_pte + 16, pt->redirect);
+	}
+
+	if (len == SZ_1M) {
+		if (*fl_pte) {
+			ret = -EBUSY;
+			goto fail;
+		}
+
+		*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT
+					| FL_SHARED | pgprot;
+		clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+	}
+
+	/* Need a 2nd level table */
+	if (len == SZ_4K || len == SZ_64K) {
+
+		if (*fl_pte == 0) {
+			unsigned long *sl;
+			sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
+							get_order(SZ_4K));
+
+			if (!sl) {
+				pr_debug("Could not allocate second level table\n");
+				ret = -ENOMEM;
+				goto fail;
+			}
+			memset(sl, 0, SZ_4K);
+			clean_pte(sl, sl + NUM_SL_PTE, pt->redirect);
+
+			*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
+						      FL_TYPE_TABLE);
+			clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+		}
+
+		if (!(*fl_pte & FL_TYPE_TABLE)) {
+			ret = -EBUSY;
+			goto fail;
+		}
+	}
+
+	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
+	sl_offset = SL_OFFSET(va);
+	sl_pte = sl_table + sl_offset;
+
+	if (len == SZ_4K) {
+		if (*sl_pte) {
+			ret = -EBUSY;
+			goto fail;
+		}
+
+		*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
+						| SL_TYPE_SMALL | pgprot;
+		clean_pte(sl_pte, sl_pte + 1, pt->redirect);
+	}
+
+	if (len == SZ_64K) {
+		int i;
+
+		for (i = 0; i < 16; i++)
+			if (*(sl_pte+i)) {
+				ret = -EBUSY;
+				goto fail;
+			}
+
+		for (i = 0; i < 16; i++)
+			*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
+					| SL_SHARED | SL_TYPE_LARGE | pgprot;
+
+		clean_pte(sl_pte, sl_pte + 16, pt->redirect);
+	}
+
+fail:
+	return ret;
+}
+
+int msm_iommu_pagetable_unmap(struct iommu_pt *pt, unsigned long va, int order)
+{
+	unsigned long *fl_pte;
+	unsigned long fl_offset;
+	unsigned long *sl_table;
+	unsigned long *sl_pte;
+	unsigned long sl_offset;
+	size_t len = 0x1000UL << order;
+	int i, ret = 0;
+
+	if (len != SZ_16M && len != SZ_1M &&
+	    len != SZ_64K && len != SZ_4K) {
+		pr_debug("Bad length: %d\n", len);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (!pt->fl_table) {
+		pr_debug("Null page table\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	fl_offset = FL_OFFSET(va);		/* Upper 12 bits */
+	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */
+
+	if (*fl_pte == 0) {
+		pr_debug("First level PTE is 0\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	/* Unmap supersection */
+	if (len == SZ_16M) {
+		for (i = 0; i < 16; i++)
+			*(fl_pte+i) = 0;
+
+		clean_pte(fl_pte, fl_pte + 16, pt->redirect);
+	}
+
+	if (len == SZ_1M) {
+		*fl_pte = 0;
+		clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+	}
+
+	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
+	sl_offset = SL_OFFSET(va);
+	sl_pte = sl_table + sl_offset;
+
+	if (len == SZ_64K) {
+		for (i = 0; i < 16; i++)
+			*(sl_pte+i) = 0;
+
+		clean_pte(sl_pte, sl_pte + 16, pt->redirect);
+	}
+
+	if (len == SZ_4K) {
+		*sl_pte = 0;
+		clean_pte(sl_pte, sl_pte + 1, pt->redirect);
+	}
+
+	if (len == SZ_4K || len == SZ_64K) {
+		int used = 0;
+
+		for (i = 0; i < NUM_SL_PTE; i++)
+			if (sl_table[i])
+				used = 1;
+		if (!used) {
+			free_page((unsigned long)sl_table);
+			*fl_pte = 0;
+			clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+		}
+	}
+
+fail:
+	return ret;
+}
+
+static unsigned int get_phys_addr(struct scatterlist *sg)
+{
+	/*
+	 * Try sg_dma_address first so that we can
+	 * map carveout regions that do not have a
+	 * struct page associated with them.
+	 */
+	unsigned int pa = sg_dma_address(sg);
+	if (pa == 0)
+		pa = sg_phys(sg);
+	return pa;
+}
+
+int msm_iommu_pagetable_map_range(struct iommu_pt *pt, unsigned int va,
+		       struct scatterlist *sg, unsigned int len, int prot)
+{
+	unsigned int pa;
+	unsigned int offset = 0;
+	unsigned int pgprot;
+	unsigned long *fl_pte;
+	unsigned long fl_offset;
+	unsigned long *sl_table;
+	unsigned long sl_offset, sl_start;
+	unsigned int chunk_offset = 0;
+	unsigned int chunk_pa;
+	int ret = 0;
+
+	BUG_ON(len & (SZ_4K - 1));
+
+	pgprot = __get_pgprot(prot, SZ_4K);
+	if (!pgprot) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	fl_offset = FL_OFFSET(va);		/* Upper 12 bits */
+	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */
+
+	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
+	sl_offset = SL_OFFSET(va);
+
+	chunk_pa = get_phys_addr(sg);
+	if (chunk_pa == 0) {
+		pr_debug("No dma address for sg %p\n", sg);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	while (offset < len) {
+		/* Set up a 2nd level page table if one doesn't exist */
+		if (*fl_pte == 0) {
+			sl_table = (unsigned long *)
+				 __get_free_pages(GFP_KERNEL, get_order(SZ_4K));
+
+			if (!sl_table) {
+				pr_debug("Could not allocate second level table\n");
+				ret = -ENOMEM;
+				goto fail;
+			}
+
+			memset(sl_table, 0, SZ_4K);
+			clean_pte(sl_table, sl_table + NUM_SL_PTE,
+					pt->redirect);
+
+			*fl_pte = ((((int)__pa(sl_table)) & FL_BASE_MASK) |
+							    FL_TYPE_TABLE);
+			clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+		} else
+			sl_table = (unsigned long *)
+					       __va(((*fl_pte) & FL_BASE_MASK));
+
+		/* Keep track of initial position so we
+		 * don't clean more than we have to
+		 */
+		sl_start = sl_offset;
+
+		/* Build the 2nd level page table */
+		while (offset < len && sl_offset < NUM_SL_PTE) {
+			pa = chunk_pa + chunk_offset;
+			sl_table[sl_offset] = (pa & SL_BASE_MASK_SMALL) |
+			      pgprot | SL_NG | SL_SHARED | SL_TYPE_SMALL;
+			sl_offset++;
+			offset += SZ_4K;
+
+			chunk_offset += SZ_4K;
+
+			if (chunk_offset >= sg->length && offset < len) {
+				chunk_offset = 0;
+				sg = sg_next(sg);
+				chunk_pa = get_phys_addr(sg);
+				if (chunk_pa == 0) {
+					pr_debug("No dma address for sg %p\n",
+						sg);
+					ret = -EINVAL;
+					goto fail;
+				}
+			}
+		}
+
+		clean_pte(sl_table + sl_start, sl_table + sl_offset,
+				pt->redirect);
+		fl_pte++;
+		sl_offset = 0;
+	}
+
+fail:
+	return ret;
+}
+
+void msm_iommu_pagetable_unmap_range(struct iommu_pt *pt, unsigned int va,
+				 unsigned int len)
+{
+	unsigned int offset = 0;
+	unsigned long *fl_pte;
+	unsigned long fl_offset;
+	unsigned long *sl_table;
+	unsigned long sl_start, sl_end;
+	int used, i;
+
+	BUG_ON(len & (SZ_4K - 1));
+
+	fl_offset = FL_OFFSET(va);		/* Upper 12 bits */
+	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */
+
+	sl_start = SL_OFFSET(va);
+
+	while (offset < len) {
+		sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
+		sl_end = ((len - offset) / SZ_4K) + sl_start;
+
+		if (sl_end > NUM_SL_PTE)
+			sl_end = NUM_SL_PTE;
+
+		memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
+		clean_pte(sl_table + sl_start, sl_table + sl_end,
+				pt->redirect);
+
+		offset += (sl_end - sl_start) * SZ_4K;
+
+		/* Unmap and free the 2nd level table if all mappings in it
+		 * were removed. This saves memory, but the table will need
+		 * to be re-allocated the next time someone tries to map these
+		 * VAs.
+		 */
+		used = 0;
+
+		/* If we just unmapped the whole table, don't bother
+		 * seeing if there are still used entries left.
+		 */
+		if (sl_end - sl_start != NUM_SL_PTE)
+			for (i = 0; i < NUM_SL_PTE; i++)
+				if (sl_table[i]) {
+					used = 1;
+					break;
+				}
+		if (!used) {
+			free_page((unsigned long)sl_table);
+			*fl_pte = 0;
+			clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+		}
+
+		sl_start = 0;
+		fl_pte++;
+	}
+}
+
+static int __init get_tex_class(int icp, int ocp, int mt, int nos)
+{
+	int i = 0;
+	unsigned int prrr = 0;
+	unsigned int nmrr = 0;
+	int c_icp, c_ocp, c_mt, c_nos;
+
+	RCP15_PRRR(prrr);
+	RCP15_NMRR(nmrr);
+
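+	/* Scan the TEX remap classes described by PRRR/NMRR for one whose
+	 * inner/outer cache policy, memory type and outer-shareability
+	 * attribute match what was requested.
+	 */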
+	for (i = 0; i < NUM_TEX_CLASS; i++) {
+		c_nos = PRRR_NOS(prrr, i);
+		c_mt = PRRR_MT(prrr, i);
+		c_icp = NMRR_ICP(nmrr, i);
+		c_ocp = NMRR_OCP(nmrr, i);
+
+		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
+			return i;
+	}
+
+	return -ENODEV;
+}
+
+static void __init setup_iommu_tex_classes(void)
+{
+	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
+			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);
+
+	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
+			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);
+
+	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
+			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);
+
+	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
+			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
+}
+
+void __init msm_iommu_pagetable_init(void)
+{
+	setup_iommu_tex_classes();
+}
diff --git a/arch/arm/mach-msm/iommu_pagetable.h b/arch/arm/mach-msm/iommu_pagetable.h
new file mode 100644
index 0000000..39f1d3d
--- /dev/null
+++ b/arch/arm/mach-msm/iommu_pagetable.h
@@ -0,0 +1,90 @@
+/* Copyright (c) 2012 Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_IOMMU_PAGETABLE_H
+#define __ARCH_ARM_MACH_MSM_IOMMU_PAGETABLE_H
+
+#define NUM_FL_PTE      4096
+#define NUM_SL_PTE      256
+#define NUM_TEX_CLASS   8
+
+/* First-level page table bits */
+#define FL_BASE_MASK            0xFFFFFC00
+#define FL_TYPE_TABLE           (1 << 0)
+#define FL_TYPE_SECT            (2 << 0)
+#define FL_SUPERSECTION         (1 << 18)
+#define FL_AP0                  (1 << 10)
+#define FL_AP1                  (1 << 11)
+#define FL_AP2                  (1 << 15)
+#define FL_SHARED               (1 << 16)
+#define FL_BUFFERABLE           (1 << 2)
+#define FL_CACHEABLE            (1 << 3)
+#define FL_TEX0                 (1 << 12)
+#define FL_OFFSET(va)           (((va) & 0xFFF00000) >> 20)
+#define FL_NG                   (1 << 17)
+
+/* Second-level page table bits */
+#define SL_BASE_MASK_LARGE      0xFFFF0000
+#define SL_BASE_MASK_SMALL      0xFFFFF000
+#define SL_TYPE_LARGE           (1 << 0)
+#define SL_TYPE_SMALL           (2 << 0)
+#define SL_AP0                  (1 << 4)
+#define SL_AP1                  (2 << 4)
+#define SL_AP2                  (1 << 9)
+#define SL_SHARED               (1 << 10)
+#define SL_BUFFERABLE           (1 << 2)
+#define SL_CACHEABLE            (1 << 3)
+#define SL_TEX0                 (1 << 6)
+#define SL_OFFSET(va)           (((va) & 0xFF000) >> 12)
+#define SL_NG                   (1 << 11)
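+
+/* Example (illustrative): for va = 0x12345000, FL_OFFSET(va) = 0x123 indexes
+ * the first-level table and SL_OFFSET(va) = 0x45 indexes the second-level
+ * table for that 1MB region.
+ */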
+
+/* Memory type and cache policy attributes */
+#define MT_SO                   0
+#define MT_DEV                  1
+#define MT_NORMAL               2
+#define CP_NONCACHED            0
+#define CP_WB_WA                1
+#define CP_WT                   2
+#define CP_WB_NWA               3
+
+/* TEX Remap Registers */
+#define NMRR_ICP(nmrr, n) (((nmrr) & (3 << ((n) * 2))) >> ((n) * 2))
+#define NMRR_OCP(nmrr, n) (((nmrr) & (3 << ((n) * 2 + 16))) >> ((n) * 2 + 16))
+
+#define PRRR_NOS(prrr, n) ((prrr) & (1 << ((n) + 24)) ? 1 : 0)
+#define PRRR_MT(prrr, n)  ((((prrr) & (3 << ((n) * 2))) >> ((n) * 2)))
+
+#define MRC(reg, processor, op1, crn, crm, op2)                         \
+__asm__ __volatile__ (                                                  \
+"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n"  \
+: "=r" (reg))
+
+#define RCP15_PRRR(reg)   MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_NMRR(reg)   MRC(reg, p15, 0, c10, c2, 1)
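+
+/* PRRR (c10, c2, 0) and NMRR (c10, c2, 1) hold the CPU's TEX remap settings;
+ * get_tex_class() scans them to find a TEX class with the requested memory
+ * type and cache policies.
+ */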
+
+struct iommu_pt {
+	unsigned long *fl_table;
+	int redirect;
+};
+
+void msm_iommu_pagetable_init(void);
+int msm_iommu_pagetable_alloc(struct iommu_pt *pt);
+void msm_iommu_pagetable_free(struct iommu_pt *pt);
+int msm_iommu_pagetable_map(struct iommu_pt *pt, unsigned long va,
+			phys_addr_t pa, int order, int prot);
+int msm_iommu_pagetable_unmap(struct iommu_pt *pt, unsigned long va,
+				int order);
+int msm_iommu_pagetable_map_range(struct iommu_pt *pt, unsigned int va,
+			struct scatterlist *sg, unsigned int len, int prot);
+void msm_iommu_pagetable_unmap_range(struct iommu_pt *pt, unsigned int va,
+				unsigned int len);
+#endif
diff --git a/arch/arm/mach-msm/lpm_levels.c b/arch/arm/mach-msm/lpm_levels.c
index 48cf3f7..80b82cb 100644
--- a/arch/arm/mach-msm/lpm_levels.c
+++ b/arch/arm/mach-msm/lpm_levels.c
@@ -19,18 +19,19 @@
 #include <linux/of.h>
 #include <mach/mpm.h>
 #include "rpm_resources.h"
+#include "pm.h"
 
 static struct msm_rpmrs_level *msm_lpm_levels;
 static int msm_lpm_level_count;
 
-int msm_rpmrs_enter_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
+static int msm_lpm_enter_sleep(uint32_t sclk_count, void *limits,
 		bool from_idle, bool notify_rpm)
 {
 	/* TODO */
 	return 0;
 }
 
-void msm_rpmrs_exit_sleep(struct msm_rpmrs_limits *limits, bool from_idle,
+static void msm_lpm_exit_sleep(void *limits, bool from_idle,
 		bool notify_rpm, bool collapsed)
 {
 	/* TODO */
@@ -50,14 +51,15 @@
 	return;
 }
 
-struct msm_rpmrs_limits *msm_rpmrs_lowest_limits(
-	bool from_idle, enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
-	uint32_t sleep_us)
+static void *msm_lpm_lowest_limits(bool from_idle,
+		enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
+		uint32_t sleep_us, uint32_t *power)
 {
 	unsigned int cpu = smp_processor_id();
 	struct msm_rpmrs_level *best_level = NULL;
 	bool irqs_detectable = false;
 	bool gpio_detectable = false;
+	uint32_t pwr;
 	int i;
 
 	if (!msm_lpm_levels)
@@ -70,7 +72,6 @@
 
 	for (i = 0; i < msm_lpm_level_count; i++) {
 		struct msm_rpmrs_level *level = &msm_lpm_levels[i];
-		uint32_t power;
 
 		if (!level->available)
 			continue;
@@ -86,28 +87,36 @@
 			continue;
 
 		if (sleep_us <= 1) {
-			power = level->energy_overhead;
+			pwr = level->energy_overhead;
 		} else if (sleep_us <= level->time_overhead_us) {
-			power = level->energy_overhead / sleep_us;
+			pwr = level->energy_overhead / sleep_us;
 		} else if ((sleep_us >> 10) > level->time_overhead_us) {
-			power = level->steady_state_power;
+			pwr = level->steady_state_power;
 		} else {
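+			/*
+			 * Average power over the sleep period: steady-state
+			 * power scaled down for the entry/exit time overhead,
+			 * plus the entry/exit energy overhead amortized over
+			 * sleep_us.
+			 */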
-			power = level->steady_state_power;
-			power -= (level->time_overhead_us *
+			pwr = level->steady_state_power;
+			pwr -= (level->time_overhead_us *
 					level->steady_state_power)/sleep_us;
-			power += level->energy_overhead / sleep_us;
+			pwr += level->energy_overhead / sleep_us;
 		}
 
-		if (!best_level ||
-				best_level->rs_limits.power[cpu] >= power) {
+		if (!best_level || best_level->rs_limits.power[cpu] >= pwr) {
+
 			level->rs_limits.latency_us[cpu] = level->latency_us;
-			level->rs_limits.power[cpu] = power;
+			level->rs_limits.power[cpu] = pwr;
 			best_level = level;
+
+			if (power)
+				*power = pwr;
 		}
 	}
 
 	return best_level ? &best_level->rs_limits : NULL;
 }
+static struct msm_pm_sleep_ops msm_lpm_ops = {
+	.lowest_limits = msm_lpm_lowest_limits,
+	.enter_sleep = msm_lpm_enter_sleep,
+	.exit_sleep = msm_lpm_exit_sleep,
+};
 
 static int __devinit msm_lpm_levels_probe(struct platform_device *pdev)
 {
@@ -204,6 +213,8 @@
 	msm_lpm_levels = levels;
 	msm_lpm_level_count = idx;
 
+	msm_pm_set_sleep_ops(&msm_lpm_ops);
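+	/* Global config: fault (rather than bypass or stall) on unmatched
+	 * streams and stream-match conflicts, enable global fault reporting
+	 * and interrupts, and clear CLIENTPD so client transactions are
+	 * translated instead of bypassing the SMMU.
+	 */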
+
 	return 0;
 fail:
 	pr_err("%s: Error in name %s key %s\n", __func__, node->full_name, key);
diff --git a/arch/arm/mach-msm/mdm2.c b/arch/arm/mach-msm/mdm2.c
index b4b7ea3..bd7bd9e 100644
--- a/arch/arm/mach-msm/mdm2.c
+++ b/arch/arm/mach-msm/mdm2.c
@@ -53,11 +53,13 @@
 
 static void mdm_peripheral_connect(struct mdm_modem_drv *mdm_drv)
 {
+	if (!mdm_drv->pdata->peripheral_platform_device)
+		return;
+
 	mutex_lock(&hsic_status_lock);
 	if (hsic_peripheral_status)
 		goto out;
-	if (mdm_drv->pdata->peripheral_platform_device)
-		platform_device_add(mdm_drv->pdata->peripheral_platform_device);
+	platform_device_add(mdm_drv->pdata->peripheral_platform_device);
 	hsic_peripheral_status = 1;
 out:
 	mutex_unlock(&hsic_status_lock);
@@ -65,84 +67,106 @@
 
 static void mdm_peripheral_disconnect(struct mdm_modem_drv *mdm_drv)
 {
+	if (!mdm_drv->pdata->peripheral_platform_device)
+		return;
+
 	mutex_lock(&hsic_status_lock);
 	if (!hsic_peripheral_status)
 		goto out;
-	if (mdm_drv->pdata->peripheral_platform_device)
-		platform_device_del(mdm_drv->pdata->peripheral_platform_device);
+	platform_device_del(mdm_drv->pdata->peripheral_platform_device);
 	hsic_peripheral_status = 0;
 out:
 	mutex_unlock(&hsic_status_lock);
 }
 
-static void power_on_mdm(struct mdm_modem_drv *mdm_drv)
+static void mdm_power_down_common(struct mdm_modem_drv *mdm_drv)
+{
+	int soft_reset_direction =
+		mdm_drv->pdata->soft_reset_inverted ? 1 : 0;
+
+	gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
+				soft_reset_direction);
+	mdm_peripheral_disconnect(mdm_drv);
+}
+
+static void mdm_do_first_power_on(struct mdm_modem_drv *mdm_drv)
+{
+	int soft_reset_direction =
+		mdm_drv->pdata->soft_reset_inverted ? 0 : 1;
+
+	if (power_on_count != 1) {
+		pr_err("%s: Calling fn when power_on_count != 1\n",
+			   __func__);
+		return;
+	}
+
+	pr_err("%s: Powering on modem for the first time\n", __func__);
+	mdm_peripheral_disconnect(mdm_drv);
+
+	/* If the device has a kpd pwr gpio then toggle it. */
+	if (mdm_drv->ap2mdm_kpdpwr_n_gpio > 0) {
+		/* Pull AP2MDM_KPDPWR gpio high and wait for PS_HOLD to settle,
+		 * then	pull it back low.
+		 */
+		pr_debug("%s: Pulling AP2MDM_KPDPWR gpio high\n", __func__);
+		gpio_direction_output(mdm_drv->ap2mdm_kpdpwr_n_gpio, 1);
+		msleep(1000);
+		gpio_direction_output(mdm_drv->ap2mdm_kpdpwr_n_gpio, 0);
+	}
+
+	/* De-assert the soft reset line. */
+	pr_debug("%s: De-asserting soft reset gpio\n", __func__);
+	gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
+						  soft_reset_direction);
+
+	mdm_peripheral_connect(mdm_drv);
+	msleep(200);
+}
+
+static void mdm_do_soft_power_on(struct mdm_modem_drv *mdm_drv)
+{
+	int soft_reset_direction =
+		mdm_drv->pdata->soft_reset_inverted ? 0 : 1;
+
+	/* De-assert the soft reset line. */
+	pr_err("%s: soft resetting mdm modem\n", __func__);
+
+	mdm_peripheral_disconnect(mdm_drv);
+
+	gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
+		soft_reset_direction == 1 ? 0 : 1);
+	usleep_range(5000, 10000);
+	gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
+		soft_reset_direction == 1 ? 1 : 0);
+
+	mdm_peripheral_connect(mdm_drv);
+	msleep(200);
+}
+
+static void mdm_power_on_common(struct mdm_modem_drv *mdm_drv)
 {
 	power_on_count++;
 
 	/* this gpio will be used to indicate apq readiness,
-	 * de-assert it now so that it can asserted later
+	 * de-assert it now so that it can be asserted later.
+	 * Not all targets use this gpio.
 	 */
-	gpio_direction_output(mdm_drv->ap2mdm_wakeup_gpio, 0);
+	if (mdm_drv->ap2mdm_wakeup_gpio > 0)
+		gpio_direction_output(mdm_drv->ap2mdm_wakeup_gpio, 0);
 
-	/* The second attempt to power-on the mdm is the first attempt
-	 * from user space, but we're already powered on. Ignore this.
-	 * Subsequent attempts are from SSR or if something failed, in
-	 * which case we must always reset the modem.
+	/*
+	 * If we did an "early power on" then ignore the very next
+	 * power-on request, because it would be the first request from
+	 * user space and we are already powered on.
 	 */
-	if (power_on_count == 2)
+	if (mdm_drv->pdata->early_power_on &&
+			(power_on_count == 2))
 		return;
 
-	mdm_peripheral_disconnect(mdm_drv);
-
-	/* Pull RESET gpio low and wait for it to settle. */
-	pr_debug("Pulling RESET gpio low\n");
-	gpio_direction_output(mdm_drv->ap2mdm_pmic_reset_n_gpio, 0);
-	usleep_range(5000, 10000);
-
-	/* Deassert RESET first and wait for it to settle. */
-	pr_debug("%s: Pulling RESET gpio high\n", __func__);
-	gpio_direction_output(mdm_drv->ap2mdm_pmic_reset_n_gpio, 1);
-	usleep(20000);
-
-	/* Pull PWR gpio high and wait for it to settle, but only
-	 * the first time the mdm is powered up.
-	 * Some targets do not use ap2mdm_kpdpwr_n_gpio.
-	 */
-	if (power_on_count == 1) {
-		if (mdm_drv->ap2mdm_kpdpwr_n_gpio > 0) {
-			pr_debug("%s: Powering on mdm modem\n", __func__);
-			gpio_direction_output(mdm_drv->ap2mdm_kpdpwr_n_gpio, 1);
-			usleep(1000);
-		}
-	}
-	mdm_peripheral_connect(mdm_drv);
-
-	msleep(200);
-}
-
-static void power_down_mdm(struct mdm_modem_drv *mdm_drv)
-{
-	int i;
-
-	for (i = MDM_MODEM_TIMEOUT; i > 0; i -= MDM_MODEM_DELTA) {
-		pet_watchdog();
-		msleep(MDM_MODEM_DELTA);
-		if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
-			break;
-	}
-	if (i <= 0) {
-		pr_err("%s: MDM2AP_STATUS never went low.\n",
-			 __func__);
-		gpio_direction_output(mdm_drv->ap2mdm_pmic_reset_n_gpio, 0);
-
-		for (i = MDM_HOLD_TIME; i > 0; i -= MDM_MODEM_DELTA) {
-			pet_watchdog();
-			msleep(MDM_MODEM_DELTA);
-		}
-	}
-	if (mdm_drv->ap2mdm_kpdpwr_n_gpio > 0)
-		gpio_direction_output(mdm_drv->ap2mdm_kpdpwr_n_gpio, 0);
-	mdm_peripheral_disconnect(mdm_drv);
+	if (power_on_count == 1)
+		mdm_do_first_power_on(mdm_drv);
+	else
+		mdm_do_soft_power_on(mdm_drv);
 }
 
 static void debug_state_changed(int value)
@@ -157,13 +181,15 @@
 	if (value) {
 		mdm_peripheral_disconnect(mdm_drv);
 		mdm_peripheral_connect(mdm_drv);
-		gpio_direction_output(mdm_drv->ap2mdm_wakeup_gpio, 1);
+		if (mdm_drv->ap2mdm_wakeup_gpio > 0)
+			gpio_direction_output(mdm_drv->ap2mdm_wakeup_gpio, 1);
 	}
 }
 
 static struct mdm_ops mdm_cb = {
-	.power_on_mdm_cb = power_on_mdm,
-	.power_down_mdm_cb = power_down_mdm,
+	.power_on_mdm_cb = mdm_power_on_common,
+	.reset_mdm_cb = mdm_power_on_common,
+	.power_down_mdm_cb = mdm_power_down_common,
 	.debug_state_changed_cb = debug_state_changed,
 	.status_cb = mdm_status_changed,
 };
diff --git a/arch/arm/mach-msm/mdm_common.c b/arch/arm/mach-msm/mdm_common.c
index f8e187d..ffff782 100644
--- a/arch/arm/mach-msm/mdm_common.c
+++ b/arch/arm/mach-msm/mdm_common.c
@@ -152,6 +152,14 @@
 					 (unsigned long __user *) arg);
 		INIT_COMPLETION(mdm_needs_reload);
 		break;
+	case GET_DLOAD_STATUS:
+		pr_debug("getting status of mdm2ap_errfatal_gpio\n");
+		if (gpio_get_value(mdm_drv->mdm2ap_errfatal_gpio) == 1 &&
+			!mdm_drv->mdm_ready)
+			put_user(1, (unsigned long __user *) arg);
+		else
+			put_user(0, (unsigned long __user *) arg);
+		break;
 	default:
 		pr_err("%s: invalid ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
 		ret = -EINVAL;
@@ -161,31 +169,13 @@
 	return ret;
 }
 
-static void mdm_fatal_fn(struct work_struct *work)
-{
-	pr_info("%s: Reseting the mdm due to an errfatal\n", __func__);
-	subsystem_restart(EXTERNAL_MODEM);
-}
-
-static DECLARE_WORK(mdm_fatal_work, mdm_fatal_fn);
-
 static void mdm_status_fn(struct work_struct *work)
 {
 	int value = gpio_get_value(mdm_drv->mdm2ap_status_gpio);
 
-	if (!mdm_drv->mdm_ready)
-		return;
-
-	mdm_drv->ops->status_cb(mdm_drv, value);
-
 	pr_debug("%s: status:%d\n", __func__, value);
-
-	if ((value == 0)) {
-		pr_info("%s: unexpected reset external modem\n", __func__);
-		subsystem_restart(EXTERNAL_MODEM);
-	} else if (value == 1) {
-		pr_info("%s: status = 1: mdm is now ready\n", __func__);
-	}
+	if (mdm_drv->mdm_ready && mdm_drv->ops->status_cb)
+		mdm_drv->ops->status_cb(mdm_drv, value);
 }
 
 static DECLARE_WORK(mdm_status_work, mdm_status_fn);
@@ -194,7 +184,6 @@
 {
 	disable_irq_nosync(mdm_drv->mdm_errfatal_irq);
 	disable_irq_nosync(mdm_drv->mdm_status_irq);
-
 }
 
 static irqreturn_t mdm_errfatal(int irq, void *dev_id)
@@ -202,8 +191,9 @@
 	pr_debug("%s: mdm got errfatal interrupt\n", __func__);
 	if (mdm_drv->mdm_ready &&
 		(gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 1)) {
-		pr_debug("%s: scheduling work now\n", __func__);
-		queue_work(mdm_queue, &mdm_fatal_work);
+		pr_info("%s: Reseting the mdm due to an errfatal\n", __func__);
+		mdm_drv->mdm_ready = 0;
+		subsystem_restart(EXTERNAL_MODEM);
 	}
 	return IRQ_HANDLED;
 }
@@ -242,8 +232,12 @@
 		if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
 			break;
 	}
-	if (i <= 0)
+	if (i <= 0) {
 		pr_err("%s: MDM2AP_STATUS never went low\n", __func__);
+		/* Reset the modem so that it will go into download mode. */
+		if (mdm_drv && mdm_drv->ops->reset_mdm_cb)
+			mdm_drv->ops->reset_mdm_cb(mdm_drv);
+	}
 	return NOTIFY_DONE;
 }
 
@@ -253,16 +247,23 @@
 
 static irqreturn_t mdm_status_change(int irq, void *dev_id)
 {
+	int value = gpio_get_value(mdm_drv->mdm2ap_status_gpio);
+
 	pr_debug("%s: mdm sent status change interrupt\n", __func__);
-
-	queue_work(mdm_queue, &mdm_status_work);
-
+	if (value == 0 && mdm_drv->mdm_ready == 1) {
+		pr_info("%s: unexpected reset external modem\n", __func__);
+		mdm_drv->mdm_unexpected_reset_occurred = 1;
+		mdm_drv->mdm_ready = 0;
+		subsystem_restart(EXTERNAL_MODEM);
+	} else if (value == 1) {
+		pr_info("%s: status = 1: mdm is now ready\n", __func__);
+		queue_work(mdm_queue, &mdm_status_work);
+	}
 	return IRQ_HANDLED;
 }
 
 static int mdm_subsys_shutdown(const struct subsys_data *crashed_subsys)
 {
-	mdm_drv->mdm_ready = 0;
 	gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
 	if (mdm_drv->pdata->ramdump_delay_ms > 0) {
 		/* Wait for the external modem to complete
@@ -270,7 +271,11 @@
 		 */
 		msleep(mdm_drv->pdata->ramdump_delay_ms);
 	}
-	mdm_drv->ops->power_down_mdm_cb(mdm_drv);
+	if (!mdm_drv->mdm_unexpected_reset_occurred)
+		mdm_drv->ops->reset_mdm_cb(mdm_drv);
+	else
+		mdm_drv->mdm_unexpected_reset_occurred = 0;
+
 	return 0;
 }
 
@@ -287,8 +292,10 @@
 		pr_info("%s: mdm modem restart timed out.\n", __func__);
 	} else {
 		pr_info("%s: mdm modem has been restarted\n", __func__);
+
 		/* Log the reason for the restart */
-		queue_work(mdm_sfr_queue, &sfr_reason_work);
+		if (mdm_drv->pdata->sfr_query)
+			queue_work(mdm_sfr_queue, &sfr_reason_work);
 	}
 	INIT_COMPLETION(mdm_boot);
 	return mdm_drv->mdm_boot_status;
@@ -310,7 +317,6 @@
 			pr_info("%s: mdm modem ramdumps completed.\n",
 					__func__);
 		INIT_COMPLETION(mdm_ram_dumps);
-		gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
 		mdm_drv->ops->power_down_mdm_cb(mdm_drv);
 	}
 	return mdm_drv->mdm_ram_dump_status;
@@ -395,11 +401,11 @@
 	if (pres)
 		mdm_drv->ap2mdm_wakeup_gpio = pres->start;
 
-	/* AP2MDM_PMIC_RESET_N */
+	/* AP2MDM_SOFT_RESET */
 	pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
-							"AP2MDM_PMIC_RESET_N");
+							"AP2MDM_SOFT_RESET");
 	if (pres)
-		mdm_drv->ap2mdm_pmic_reset_n_gpio = pres->start;
+		mdm_drv->ap2mdm_soft_reset_gpio = pres->start;
 
 	/* AP2MDM_KPDPWR_N */
 	pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
@@ -407,6 +413,12 @@
 	if (pres)
 		mdm_drv->ap2mdm_kpdpwr_n_gpio = pres->start;
 
+	/* AP2MDM_PMIC_PWR_EN */
+	pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
+							"AP2MDM_PMIC_PWR_EN");
+	if (pres)
+		mdm_drv->ap2mdm_pmic_pwr_en_gpio = pres->start;
+
 	mdm_drv->boot_type                  = CHARM_NORMAL_BOOT;
 
 	mdm_drv->ops      = mdm_ops;
@@ -430,11 +442,18 @@
 
 	gpio_request(mdm_drv->ap2mdm_status_gpio, "AP2MDM_STATUS");
 	gpio_request(mdm_drv->ap2mdm_errfatal_gpio, "AP2MDM_ERRFATAL");
-	gpio_request(mdm_drv->ap2mdm_kpdpwr_n_gpio, "AP2MDM_KPDPWR_N");
-	gpio_request(mdm_drv->ap2mdm_pmic_reset_n_gpio, "AP2MDM_PMIC_RESET_N");
+	if (mdm_drv->ap2mdm_kpdpwr_n_gpio > 0)
+		gpio_request(mdm_drv->ap2mdm_kpdpwr_n_gpio, "AP2MDM_KPDPWR_N");
 	gpio_request(mdm_drv->mdm2ap_status_gpio, "MDM2AP_STATUS");
 	gpio_request(mdm_drv->mdm2ap_errfatal_gpio, "MDM2AP_ERRFATAL");
 
+	if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
+		gpio_request(mdm_drv->ap2mdm_pmic_pwr_en_gpio,
+					 "AP2MDM_PMIC_PWR_EN");
+	if (mdm_drv->ap2mdm_soft_reset_gpio > 0)
+		gpio_request(mdm_drv->ap2mdm_soft_reset_gpio,
+					 "AP2MDM_SOFT_RESET");
+
 	if (mdm_drv->ap2mdm_wakeup_gpio > 0)
 		gpio_request(mdm_drv->ap2mdm_wakeup_gpio, "AP2MDM_WAKEUP");
 
@@ -515,10 +534,18 @@
 	mdm_drv->mdm_status_irq = irq;
 
 status_err:
+	/*
+	 * If AP2MDM_PMIC_PWR_EN gpio is used, pull it high. It remains
+	 * high until the whole phone is shut down.
+	 */
+	if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
+		gpio_direction_output(mdm_drv->ap2mdm_pmic_pwr_en_gpio, 1);
+
 	/* Perform early powerup of the external modem in order to
 	 * allow tabla devices to be found.
 	 */
-	mdm_drv->ops->power_on_mdm_cb(mdm_drv);
+	if (mdm_drv->pdata->early_power_on)
+		mdm_drv->ops->power_on_mdm_cb(mdm_drv);
 
 	pr_info("%s: Registering mdm modem\n", __func__);
 	return misc_register(&mdm_modem_misc);
@@ -526,10 +553,14 @@
 fatal_err:
 	gpio_free(mdm_drv->ap2mdm_status_gpio);
 	gpio_free(mdm_drv->ap2mdm_errfatal_gpio);
-	gpio_free(mdm_drv->ap2mdm_kpdpwr_n_gpio);
-	gpio_free(mdm_drv->ap2mdm_pmic_reset_n_gpio);
+	if (mdm_drv->ap2mdm_kpdpwr_n_gpio > 0)
+		gpio_free(mdm_drv->ap2mdm_kpdpwr_n_gpio);
+	if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
+		gpio_free(mdm_drv->ap2mdm_pmic_pwr_en_gpio);
 	gpio_free(mdm_drv->mdm2ap_status_gpio);
 	gpio_free(mdm_drv->mdm2ap_errfatal_gpio);
+	if (mdm_drv->ap2mdm_soft_reset_gpio > 0)
+		gpio_free(mdm_drv->ap2mdm_soft_reset_gpio);
 
 	if (mdm_drv->ap2mdm_wakeup_gpio > 0)
 		gpio_free(mdm_drv->ap2mdm_wakeup_gpio);
@@ -547,10 +578,14 @@
 
 	gpio_free(mdm_drv->ap2mdm_status_gpio);
 	gpio_free(mdm_drv->ap2mdm_errfatal_gpio);
-	gpio_free(mdm_drv->ap2mdm_kpdpwr_n_gpio);
-	gpio_free(mdm_drv->ap2mdm_pmic_reset_n_gpio);
+	if (mdm_drv->ap2mdm_kpdpwr_n_gpio > 0)
+		gpio_free(mdm_drv->ap2mdm_kpdpwr_n_gpio);
+	if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
+		gpio_free(mdm_drv->ap2mdm_pmic_pwr_en_gpio);
 	gpio_free(mdm_drv->mdm2ap_status_gpio);
 	gpio_free(mdm_drv->mdm2ap_errfatal_gpio);
+	if (mdm_drv->ap2mdm_soft_reset_gpio > 0)
+		gpio_free(mdm_drv->ap2mdm_soft_reset_gpio);
 
 	if (mdm_drv->ap2mdm_wakeup_gpio > 0)
 		gpio_free(mdm_drv->ap2mdm_wakeup_gpio);
@@ -566,5 +601,7 @@
 	mdm_disable_irqs();
 
 	mdm_drv->ops->power_down_mdm_cb(mdm_drv);
+	if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
+		gpio_direction_output(mdm_drv->ap2mdm_pmic_pwr_en_gpio, 0);
 }
 
diff --git a/arch/arm/mach-msm/mdm_private.h b/arch/arm/mach-msm/mdm_private.h
index 206bd8b..f157d88 100644
--- a/arch/arm/mach-msm/mdm_private.h
+++ b/arch/arm/mach-msm/mdm_private.h
@@ -17,6 +17,7 @@
 
 struct mdm_ops {
 	void (*power_on_mdm_cb)(struct mdm_modem_drv *mdm_drv);
+	void (*reset_mdm_cb)(struct mdm_modem_drv *mdm_drv);
 	void (*normal_boot_done_cb)(struct mdm_modem_drv *mdm_drv);
 	void (*power_down_mdm_cb)(struct mdm_modem_drv *mdm_drv);
 	void (*debug_state_changed_cb)(int value);
@@ -31,8 +32,9 @@
 	unsigned ap2mdm_status_gpio;
 	unsigned mdm2ap_wakeup_gpio;
 	unsigned ap2mdm_wakeup_gpio;
-	unsigned ap2mdm_pmic_reset_n_gpio;
 	unsigned ap2mdm_kpdpwr_n_gpio;
+	unsigned ap2mdm_soft_reset_gpio;
+	unsigned ap2mdm_pmic_pwr_en_gpio;
 
 	int mdm_errfatal_irq;
 	int mdm_status_irq;
@@ -41,6 +43,7 @@
 	int mdm_ram_dump_status;
 	enum charm_boot_type boot_type;
 	int mdm_debug_on;
+	int mdm_unexpected_reset_occurred;
 
 	struct mdm_ops *ops;
 	struct mdm_platform_data *pdata;
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index ccb18b3..8898585 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -83,67 +83,29 @@
 }
 EXPORT_SYMBOL(write_to_strongly_ordered_memory);
 
-void flush_axi_bus_buffer(void)
-{
-#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
-	__asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
-				    : : "r" (0) : "memory");
-	write_to_strongly_ordered_memory();
-#endif
-}
-
-#define CACHE_LINE_SIZE 32
-
-/* These cache related routines make the assumption that the associated
- * physical memory is contiguous. They will operate on all (L1
- * and L2 if present) caches.
+/* These cache related routines make the assumption (if outer cache is
+ * available) that the associated physical memory is contiguous.
+ * They will operate on all (L1 and L2 if present) caches.
  */
 void clean_and_invalidate_caches(unsigned long vstart,
 	unsigned long length, unsigned long pstart)
 {
-	unsigned long vaddr;
-
-	for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
-		asm ("mcr p15, 0, %0, c7, c14, 1" : : "r" (vaddr));
-#ifdef CONFIG_OUTER_CACHE
+	dmac_flush_range((void *)vstart, (void *) (vstart + length));
 	outer_flush_range(pstart, pstart + length);
-#endif
-	asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
-	asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
-
-	flush_axi_bus_buffer();
 }
 
 void clean_caches(unsigned long vstart,
 	unsigned long length, unsigned long pstart)
 {
-	unsigned long vaddr;
-
-	for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
-		asm ("mcr p15, 0, %0, c7, c10, 1" : : "r" (vaddr));
-#ifdef CONFIG_OUTER_CACHE
+	dmac_clean_range((void *)vstart, (void *) (vstart + length));
 	outer_clean_range(pstart, pstart + length);
-#endif
-	asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
-	asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
-
-	flush_axi_bus_buffer();
 }
 
 void invalidate_caches(unsigned long vstart,
 	unsigned long length, unsigned long pstart)
 {
-	unsigned long vaddr;
-
-	for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
-		asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (vaddr));
-#ifdef CONFIG_OUTER_CACHE
+	dmac_inv_range((void *)vstart, (void *) (vstart + length));
 	outer_inv_range(pstart, pstart + length);
-#endif
-	asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
-	asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
-
-	flush_axi_bus_buffer();
 }
 
 void * __init alloc_bootmem_aligned(unsigned long size, unsigned long alignment)
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_core.h b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
index e35130e..341fda8 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_core.h
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
@@ -29,6 +29,17 @@
 #define MSM_FAB_ERR(msg, ...) \
 	dev_err(&fabric->fabdev.dev, msg, ## __VA_ARGS__)
 
+#define IS_MASTER_VALID(mas) \
+	(((mas >= MSM_BUS_MASTER_FIRST) && (mas <= MSM_BUS_MASTER_LAST)) \
+	 ? 1 : 0)
+#define IS_SLAVE_VALID(slv) \
+	(((slv >= MSM_BUS_SLAVE_FIRST) && (slv <= MSM_BUS_SLAVE_LAST)) ? 1 : 0)
+
+#define INTERLEAVED_BW(fab_pdata, bw, ports) \
+	((fab_pdata->il_flag) ? DIV_ROUND_UP((bw), (ports)) : (bw))
+#define INTERLEAVED_VAL(fab_pdata, n) \
+	((fab_pdata->il_flag) ? (n) : 1)
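+
+/* Example (illustrative): with memory interleaving enabled and 2 ports,
+ * INTERLEAVED_BW(fab_pdata, 300, 2) = DIV_ROUND_UP(300, 2) = 150, i.e. the
+ * requested bandwidth is split evenly (rounded up) across the ports.
+ */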
+
 enum msm_bus_dbg_op_type {
 	MSM_BUS_DBG_UNREGISTER = -2,
 	MSM_BUS_DBG_REGISTER,
@@ -48,9 +59,12 @@
 	int *tier;
 	int num_tiers;
 	int ahb;
+	int hw_sel;
 	const char *slaveclk[NUM_CTX];
 	const char *memclk;
 	unsigned int buswidth;
+	unsigned int ws;
+	unsigned int mode;
 };
 
 struct path_node {
@@ -88,6 +102,27 @@
 	int commit_index;
 	struct nodeclk nodeclk[NUM_CTX];
 	struct nodeclk memclk;
+	void *hw_data;
+};
+
+struct msm_bus_hw_algorithm {
+	int (*allocate_commit_data)(struct msm_bus_fabric_registration
+		*fab_pdata, void **cdata, int ctx);
+	void *(*allocate_hw_data)(struct platform_device *pdev,
+		struct msm_bus_fabric_registration *fab_pdata);
+	void (*node_init)(void *hw_data, struct msm_bus_inode_info *info);
+	void (*free_commit_data)(void *cdata);
+	void (*update_bw)(struct msm_bus_inode_info *hop,
+		struct msm_bus_inode_info *info,
+		struct msm_bus_fabric_registration *fab_pdata,
+		void *sel_cdata, int *master_tiers,
+		long int add_bw);
+	void (*fill_cdata_buffer)(int *curr, char *buf, const int max_size,
+		void *cdata, int nmasters, int nslaves, int ntslaves);
+	int (*commit)(struct msm_bus_fabric_registration
+		*fab_pdata, void *hw_data, void **cdata);
+	int (*port_unhalt)(uint32_t haltid, uint8_t mport);
+	int (*port_halt)(uint32_t haltid, uint8_t mport);
 };
 
 struct msm_bus_fabric_device {
@@ -96,6 +131,7 @@
 	struct device dev;
 	const struct msm_bus_fab_algorithm *algo;
 	const struct msm_bus_board_algorithm *board_algo;
+	struct msm_bus_hw_algorithm hw_algo;
 	int visited;
 };
 #define to_msm_bus_fabric_device(d) container_of(d, \
@@ -150,22 +186,17 @@
 struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid);
 int msm_bus_get_num_fab(void);
 
-int allocate_commit_data(struct msm_bus_fabric_registration *fab_pdata,
-	void **cdata);
-struct msm_rpm_iv_pair *allocate_rpm_data(struct msm_bus_fabric_registration
-	*fab_pdata);
-int msm_bus_rpm_commit(struct msm_bus_fabric_registration
-	*fab_pdata, struct msm_rpm_iv_pair *rpm_data, void **cdata);
-void free_commit_data(void *cdata);
-void msm_bus_rpm_update_bw(struct msm_bus_inode_info *hop,
-	struct msm_bus_inode_info *info,
-	struct msm_bus_fabric_registration *fab_pdata,
-	void *sel_cdata, int *master_tiers,
-	long int add_bw);
 void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size,
 	void *cdata, int nmasters, int nslaves, int ntslaves);
-bool msm_bus_rpm_is_mem_interleaved(void);
 
+int msm_bus_hw_fab_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+int msm_bus_noc_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+int msm_bus_bimc_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MSM_BUS_SCALING)
 void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index,
 	uint32_t cl);
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
index 64fd03e..8c015d1 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
@@ -20,7 +20,7 @@
 #include <linux/clk.h>
 #include <linux/radix-tree.h>
 #include <mach/board.h>
-#include <mach/rpm.h>
+#include <mach/socinfo.h>
 #include "msm_bus_core.h"
 
 enum {
@@ -43,9 +43,8 @@
 	int num_nodes;
 	struct list_head gateways;
 	struct msm_bus_inode_info info;
-	const struct msm_bus_fab_algorithm *algo;
 	struct msm_bus_fabric_registration *pdata;
-	struct msm_rpm_iv_pair *rpm_data;
+	void *hw_data;
 };
 #define to_msm_bus_fabric(d) container_of(d, \
 	struct msm_bus_fabric, d)
@@ -117,18 +116,26 @@
 /**
  * register_fabric_info() - Create the internal fabric structure and
  * build the topology tree from platform specific data
- *
+ * @pdev: Platform device for getting base addresses
  * @fabric: Fabric to which the gateways, nodes should be added
  *
  * This function is called from probe. Iterates over the platform data,
  * and builds the topology
  */
-static int register_fabric_info(struct msm_bus_fabric *fabric)
+static int register_fabric_info(struct platform_device *pdev,
+	struct msm_bus_fabric *fabric)
 {
-	int i, ret = 0, err = 0;
+	int i = 0, ret = 0, err = 0;
 
 	MSM_BUS_DBG("id:%d pdata-id: %d len: %d\n", fabric->fabdev.id,
 		fabric->pdata->id, fabric->pdata->len);
+	fabric->hw_data = fabric->fabdev.hw_algo.allocate_hw_data(pdev,
+		fabric->pdata);
+	if (ZERO_OR_NULL_PTR(fabric->hw_data)) {
+		MSM_BUS_ERR("Couldn't allocate hw_data for fab: %d\n",
+			fabric->fabdev.id);
+		ret = -ENOMEM;
+		goto error;
+	}
 
 	for (i = 0; i < fabric->pdata->len; i++) {
 		struct msm_bus_inode_info *info;
@@ -177,9 +184,16 @@
 			kfree(info);
 			goto error;
 		}
-	}
 
-	fabric->rpm_data = allocate_rpm_data(fabric->pdata);
+		if (fabric->fabdev.hw_algo.node_init == NULL)
+			continue;
+
+		fabric->fabdev.hw_algo.node_init(fabric->hw_data, info);
+		if (ret) {
+			MSM_BUS_ERR("Unable to init node info, ret: %d\n", ret);
+			kfree(info);
+		}
+	}
 
 	MSM_BUS_DBG("Fabric: %d nmasters: %d nslaves: %d\n"
 		" ntieredslaves: %d, rpm_enabled: %d\n",
@@ -323,6 +337,10 @@
 	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
 	void *sel_cdata;
 
+	/* Temporarily stub out arbitration settings for copper */
+	if (machine_is_copper())
+		return;
+
 	sel_cdata = fabric->cdata[ctx];
 
 	/* If it's an ahb fabric, don't calculate arb values */
@@ -335,7 +353,7 @@
 		return;
 	}
 
-	msm_bus_rpm_update_bw(hop, info, fabric->pdata, sel_cdata,
+	fabdev->hw_algo.update_bw(hop, info, fabric->pdata, sel_cdata,
 		master_tiers, add_bw);
 	fabric->arb_dirty = true;
 }
@@ -416,10 +434,10 @@
 }
 
 /**
- * msm_bus_fabric_rpm_commit() - Commit the arbitration data to RPM
+ * msm_bus_fabric_hw_commit() - Commit the arbitration data to Hardware.
  * @fabric: Fabric for which the data should be committed
  * */
-static int msm_bus_fabric_rpm_commit(struct msm_bus_fabric_device *fabdev)
+static int msm_bus_fabric_hw_commit(struct msm_bus_fabric_device *fabdev)
 {
 	int status = 0;
 	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
@@ -439,7 +457,7 @@
 		goto skip_arb;
 	}
 
-	status = msm_bus_rpm_commit(fabric->pdata, fabric->rpm_data,
+	status = fabdev->hw_algo.commit(fabric->pdata, fabric->hw_data,
 		(void **)fabric->cdata);
 	if (status)
 		MSM_BUS_DBG("Error committing arb data for fabric: %d\n",
@@ -466,12 +484,9 @@
  */
 int msm_bus_fabric_port_halt(struct msm_bus_fabric_device *fabdev, int iid)
 {
-	struct msm_bus_halt_vector hvector = {0, 0};
-	struct msm_rpm_iv_pair rpm_data[2];
 	struct msm_bus_inode_info *info = NULL;
 	uint8_t mport;
 	uint32_t haltid = 0;
-	int status = 0;
 	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
 
 	info = fabdev->algo->find_node(fabdev, iid);
@@ -482,23 +497,8 @@
 
 	haltid = fabric->pdata->haltid;
 	mport = info->node_info->masterp[0];
-	MSM_BUS_MASTER_HALT(hvector.haltmask, hvector.haltval, mport);
-	rpm_data[0].id = haltid;
-	rpm_data[0].value = hvector.haltval;
-	rpm_data[1].id = haltid + 1;
-	rpm_data[1].value = hvector.haltmask;
 
-	MSM_BUS_DBG("ctx: %d, id: %d, value: %d\n",
-		MSM_RPM_CTX_SET_0, rpm_data[0].id, rpm_data[0].value);
-	MSM_BUS_DBG("ctx: %d, id: %d, value: %d\n",
-		MSM_RPM_CTX_SET_0, rpm_data[1].id, rpm_data[1].value);
-
-	if (fabric->pdata->rpm_enabled)
-		status = msm_rpm_set(MSM_RPM_CTX_SET_0, rpm_data, 2);
-	if (status)
-		MSM_BUS_DBG("msm_rpm_set returned: %d\n", status);
-
-	return status;
+	return fabdev->hw_algo.port_halt(haltid, mport);
 }
 
 /**
@@ -508,12 +508,9 @@
  */
 int msm_bus_fabric_port_unhalt(struct msm_bus_fabric_device *fabdev, int iid)
 {
-	struct msm_bus_halt_vector hvector = {0, 0};
-	struct msm_rpm_iv_pair rpm_data[2];
 	struct msm_bus_inode_info *info = NULL;
 	uint8_t mport;
 	uint32_t haltid = 0;
-	int status = 0;
 	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
 
 	info = fabdev->algo->find_node(fabdev, iid);
@@ -524,24 +521,7 @@
 
 	haltid = fabric->pdata->haltid;
 	mport = info->node_info->masterp[0];
-	MSM_BUS_MASTER_UNHALT(hvector.haltmask, hvector.haltval,
-		mport);
-	rpm_data[0].id = haltid;
-	rpm_data[0].value = hvector.haltval;
-	rpm_data[1].id = haltid + 1;
-	rpm_data[1].value = hvector.haltmask;
-
-	MSM_BUS_DBG("unalt: ctx: %d, id: %d, value: %d\n",
-		MSM_RPM_CTX_SET_SLEEP, rpm_data[0].id, rpm_data[0].value);
-	MSM_BUS_DBG("unhalt: ctx: %d, id: %d, value: %d\n",
-		MSM_RPM_CTX_SET_SLEEP, rpm_data[1].id, rpm_data[1].value);
-
-	if (fabric->pdata->rpm_enabled)
-		status = msm_rpm_set(MSM_RPM_CTX_SET_0, rpm_data, 2);
-	if (status)
-		MSM_BUS_DBG("msm_rpm_set returned: %d\n", status);
-
-	return status;
+	return fabdev->hw_algo.port_unhalt(haltid, mport);
 }
 
 /**
@@ -604,12 +584,25 @@
 	.update_bw = msm_bus_fabric_update_bw,
 	.port_halt = msm_bus_fabric_port_halt,
 	.port_unhalt = msm_bus_fabric_port_unhalt,
-	.commit = msm_bus_fabric_rpm_commit,
+	.commit = msm_bus_fabric_hw_commit,
 	.find_node = msm_bus_fabric_find_node,
 	.find_gw_node = msm_bus_fabric_find_gw_node,
 	.get_gw_list = msm_bus_fabric_get_gw_list,
 };
 
+static int msm_bus_fabric_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo)
+{
+	int ret = 0;
+	ret = msm_bus_rpm_hw_init(pdata, hw_algo);
+	if (ret) {
+		MSM_BUS_ERR("RPM initialization failed\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
 static int msm_bus_fabric_probe(struct platform_device *pdev)
 {
 	int ctx, ret = 0;
@@ -647,7 +640,13 @@
 	pdata = (struct msm_bus_fabric_registration *)pdev->dev.platform_data;
 	fabric->fabdev.name = pdata->name;
 	fabric->fabdev.algo = &msm_bus_algo;
-	pdata->il_flag = msm_bus_rpm_is_mem_interleaved();
+	ret = msm_bus_fabric_hw_init(pdata, &fabric->fabdev.hw_algo);
+	if (ret) {
+		MSM_BUS_ERR("Error initializing hardware for fabric: %d\n",
+			fabric->fabdev.id);
+		goto err;
+	}
+
 	fabric->ahb = pdata->ahb;
 	fabric->pdata = pdata;
 	fabric->pdata->board_algo->assign_iids(fabric->pdata,
@@ -681,7 +680,7 @@
 	}
 
 	/* Find num. of slaves, masters, populate gateways, radix tree */
-	ret = register_fabric_info(fabric);
+	ret = register_fabric_info(pdev, fabric);
 	if (ret) {
 		MSM_BUS_ERR("Could not register fabric %d info, ret: %d\n",
 			fabric->fabdev.id, ret);
@@ -690,8 +689,8 @@
 	if (!fabric->ahb) {
 		/* Allocate memory for commit data */
 		for (ctx = 0; ctx < NUM_CTX; ctx++) {
-			ret = allocate_commit_data(fabric->pdata, &fabric->
-				cdata[ctx]);
+			ret = fabric->fabdev.hw_algo.allocate_commit_data(
+				fabric->pdata, &fabric->cdata[ctx], ctx);
 			if (ret) {
 				MSM_BUS_ERR("Failed to alloc commit data for "
 					"fab: %d, ret = %d\n",
@@ -726,12 +725,12 @@
 		fabric->pdata->nslaves; i++)
 		radix_tree_delete(&fabric->fab_tree, i);
 	if (!fabric->ahb) {
-		free_commit_data(fabric->cdata[DUAL_CTX]);
-		free_commit_data(fabric->cdata[ACTIVE_CTX]);
+		fabdev->hw_algo.free_commit_data(fabric->cdata[DUAL_CTX]);
+		fabdev->hw_algo.free_commit_data(fabric->cdata[ACTIVE_CTX]);
 	}
 
 	kfree(fabric->info.node_info);
-	kfree(fabric->rpm_data);
+	kfree(fabric->hw_data);
 	kfree(fabric);
 	return ret;
 }
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c b/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
index b13037e..4653431 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
@@ -24,11 +24,6 @@
 #include "msm_bus_core.h"
 #include "../rpm_resources.h"
 
-#define INTERLEAVED_BW(fab_pdata, bw, ports) \
-	((fab_pdata->il_flag) ? ((bw) / (ports)) : (bw))
-#define INTERLEAVED_VAL(fab_pdata, n) \
-	((fab_pdata->il_flag) ? (n) : 1)
-
 void msm_bus_rpm_set_mt_mask()
 {
 #ifdef CONFIG_MSM_BUS_RPM_MULTI_TIER_ENABLED
@@ -115,7 +110,7 @@
 #define BW_VAL_FROM_BYTES(bw) \
 	((((bw) >> 17) & 0x8000) ? 0x7FFF : ((bw) >> 17))
 
-uint32_t msm_bus_set_bw_bytes(unsigned long bw)
+static uint32_t msm_bus_set_bw_bytes(unsigned long bw)
 {
 	return ((((bw) & 0x1FFFF) && (((bw) >> 17) == 0)) ?
 		ROUNDED_BW_VAL_FROM_BYTES(bw) : BW_VAL_FROM_BYTES(bw));
@@ -132,7 +127,8 @@
 	return (val)&0x7FFF;
 }
 
-uint16_t msm_bus_create_bw_tier_pair_bytes(uint8_t type, unsigned long bw)
+static uint16_t msm_bus_create_bw_tier_pair_bytes(uint8_t type,
+	unsigned long bw)
 {
 	return ((((type) == MSM_BUS_BW_TIER1 ? 1 : 0) << 15) |
 	 (msm_bus_set_bw_bytes(bw)));
@@ -168,8 +164,8 @@
  * format specified by RPM
  * @fabric: Fabric device for which commit data is allocated
  */
-int allocate_commit_data(struct msm_bus_fabric_registration *fab_pdata,
-	void **cdata)
+static int msm_bus_rpm_allocate_commit_data(struct msm_bus_fabric_registration
+	*fab_pdata, void **cdata, int ctx)
 {
 	struct commit_data **cd = (struct commit_data **)cdata;
 	*cd = kzalloc(sizeof(struct commit_data), GFP_KERNEL);
@@ -209,7 +205,7 @@
 	return 0;
 }
 
-void free_commit_data(void *cdata)
+static void free_commit_data(void *cdata)
 {
 	struct commit_data *cd = (struct commit_data *)cdata;
 
@@ -223,8 +219,8 @@
  * allocate_rpm_data() - Allocate the id-value pairs to be
  * sent to RPM
  */
-struct msm_rpm_iv_pair *allocate_rpm_data(struct msm_bus_fabric_registration
-	*fab_pdata)
+static void *msm_bus_rpm_allocate_rpm_data(struct platform_device *pdev,
+	struct msm_bus_fabric_registration *fab_pdata)
 {
 	struct msm_rpm_iv_pair *rpm_data;
 	uint16_t count = ((fab_pdata->nmasters * fab_pdata->ntieredslaves) +
@@ -232,14 +228,14 @@
 
 	rpm_data = kmalloc((sizeof(struct msm_rpm_iv_pair) * count),
 		GFP_KERNEL);
-	return rpm_data;
+	return (void *)rpm_data;
 }
 
 #define BWMASK 0x7FFF
 #define TIERMASK 0x8000
 #define GET_TIER(n) (((n) & TIERMASK) >> 15)
 
-void msm_bus_rpm_update_bw(struct msm_bus_inode_info *hop,
+static void msm_bus_rpm_update_bw(struct msm_bus_inode_info *hop,
 	struct msm_bus_inode_info *info,
 	struct msm_bus_fabric_registration *fab_pdata,
 	void *sel_cdata, int *master_tiers,
@@ -469,7 +465,7 @@
 #define MODE1_IMM(val)	((val) & 0x7F)
 #define __CLZ(x) ((8 * sizeof(uint32_t)) - 1 - __fls(x))
 
-uint8_t msm_bus_set_bw_bytes(unsigned long val)
+static uint8_t msm_bus_set_bw_bytes(unsigned long val)
 {
 	unsigned int shift;
 	unsigned int intVal;
@@ -524,7 +520,8 @@
 	return msm_bus_get_bw(val) << 20;
 }
 
-uint8_t msm_bus_create_bw_tier_pair_bytes(uint8_t type, unsigned long bw)
+static uint8_t msm_bus_create_bw_tier_pair_bytes(uint8_t type,
+	unsigned long bw)
 {
 	return msm_bus_set_bw_bytes(bw);
 };
@@ -534,14 +531,14 @@
 	return msm_bus_create_bw_tier_pair_bytes(type, bw);
 };
 
-int allocate_commit_data(struct msm_bus_fabric_registration *fab_pdata,
-	void **cdata)
+static int msm_bus_rpm_allocate_commit_data(struct msm_bus_fabric_registration
+	*fab_pdata, void **cdata, int ctx)
 {
 	struct commit_data **cd = (struct commit_data **)cdata;
 	int i;
 
 	*cd = kzalloc(sizeof(struct commit_data), GFP_KERNEL);
-	if (!*cdata) {
+	if (!*cd) {
 		MSM_BUS_DBG("Couldn't alloc mem for cdata\n");
 		goto cdata_err;
 	}
@@ -589,7 +586,7 @@
 	return -ENOMEM;
 }
 
-void free_commit_data(void *cdata)
+static void free_commit_data(void *cdata)
 {
 	int i;
 	struct commit_data *cd = (struct commit_data *)cdata;
@@ -601,8 +598,8 @@
 	kfree(cd);
 }
 
-struct msm_rpm_iv_pair *allocate_rpm_data(struct msm_bus_fabric_registration
-	*fab_pdata)
+static void *msm_bus_rpm_allocate_rpm_data(struct platform_device *pdev,
+	struct msm_bus_fabric_registration *fab_pdata)
 {
 	struct msm_rpm_iv_pair *rpm_data;
 	uint16_t count = (((fab_pdata->nmasters * fab_pdata->ntieredslaves *
@@ -610,7 +607,7 @@
 
 	rpm_data = kmalloc((sizeof(struct msm_rpm_iv_pair) * count),
 		GFP_KERNEL);
-	return rpm_data;
+	return (void *)rpm_data;
 }
 
 static int msm_bus_rpm_compare_cdata(
@@ -750,12 +747,12 @@
 	-(msm_bus_get_bw_bytes(msm_bus_create_bw_tier_pair_bytes(0, -(x)))) : \
 	(msm_bus_get_bw_bytes(msm_bus_create_bw_tier_pair_bytes(0, x))))
 
-uint16_t msm_bus_pack_bwsum_bytes(unsigned long bw)
+static uint16_t msm_bus_pack_bwsum_bytes(unsigned long bw)
 {
 	return (bw + ((1 << 20) - 1)) >> 20;
 };
 
-void msm_bus_rpm_update_bw(struct msm_bus_inode_info *hop,
+static void msm_bus_rpm_update_bw(struct msm_bus_inode_info *hop,
 	struct msm_bus_inode_info *info,
 	struct msm_bus_fabric_registration *fab_pdata,
 	void *sel_cdata, int *master_tiers,
@@ -862,14 +859,14 @@
 * msm_bus_rpm_commit() - Commit the arbitration data to RPM
 * @fabric: Fabric for which the data should be committed
 **/
-int msm_bus_rpm_commit(struct msm_bus_fabric_registration
-	*fab_pdata, struct msm_rpm_iv_pair *rpm_data,
-	void **cdata)
+static int msm_bus_rpm_commit(struct msm_bus_fabric_registration
+	*fab_pdata, void *hw_data, void **cdata)
 {
 
 	int ret;
 	bool valid;
 	struct commit_data *dual_cd, *act_cd;
+	struct msm_rpm_iv_pair *rpm_data = (struct msm_rpm_iv_pair *)hw_data;
 	dual_cd = (struct commit_data *)cdata[DUAL_CTX];
 	act_cd = (struct commit_data *)cdata[ACTIVE_CTX];
 
@@ -901,3 +898,67 @@
 
 	return ret;
 }
+
+static int msm_bus_rpm_port_halt(uint32_t haltid, uint8_t mport)
+{
+	int status = 0;
+	struct msm_bus_halt_vector hvector = {0, 0};
+	struct msm_rpm_iv_pair rpm_data[2];
+
+	MSM_BUS_MASTER_HALT(hvector.haltmask, hvector.haltval, mport);
+	rpm_data[0].id = haltid;
+	rpm_data[0].value = hvector.haltval;
+	rpm_data[1].id = haltid + 1;
+	rpm_data[1].value = hvector.haltmask;
+
+	MSM_BUS_DBG("ctx: %d, id: %d, value: %d\n",
+		MSM_RPM_CTX_SET_0, rpm_data[0].id, rpm_data[0].value);
+	MSM_BUS_DBG("ctx: %d, id: %d, value: %d\n",
+		MSM_RPM_CTX_SET_0, rpm_data[1].id, rpm_data[1].value);
+
+	status = msm_rpm_set(MSM_RPM_CTX_SET_0, rpm_data, 2);
+	if (status)
+		MSM_BUS_DBG("msm_rpm_set returned: %d\n", status);
+	return status;
+}
+
+static int msm_bus_rpm_port_unhalt(uint32_t haltid, uint8_t mport)
+{
+	int status = 0;
+	struct msm_bus_halt_vector hvector = {0, 0};
+	struct msm_rpm_iv_pair rpm_data[2];
+
+	MSM_BUS_MASTER_UNHALT(hvector.haltmask, hvector.haltval,
+		mport);
+	rpm_data[0].id = haltid;
+	rpm_data[0].value = hvector.haltval;
+	rpm_data[1].id = haltid + 1;
+	rpm_data[1].value = hvector.haltmask;
+
+	MSM_BUS_DBG("unalt: ctx: %d, id: %d, value: %d\n",
+		MSM_RPM_CTX_SET_SLEEP, rpm_data[0].id, rpm_data[0].value);
+	MSM_BUS_DBG("unhalt: ctx: %d, id: %d, value: %d\n",
+		MSM_RPM_CTX_SET_SLEEP, rpm_data[1].id, rpm_data[1].value);
+
+	status = msm_rpm_set(MSM_RPM_CTX_SET_0, rpm_data, 2);
+	if (status)
+		MSM_BUS_DBG("msm_rpm_set returned: %d\n", status);
+	return status;
+}
+
+int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo)
+{
+	pdata->il_flag = msm_bus_rpm_is_mem_interleaved();
+	hw_algo->allocate_commit_data = msm_bus_rpm_allocate_commit_data;
+	hw_algo->allocate_hw_data = msm_bus_rpm_allocate_rpm_data;
+	hw_algo->node_init = NULL;
+	hw_algo->free_commit_data = free_commit_data;
+	hw_algo->update_bw = msm_bus_rpm_update_bw;
+	hw_algo->commit = msm_bus_rpm_commit;
+	hw_algo->port_halt = msm_bus_rpm_port_halt;
+	hw_algo->port_unhalt = msm_bus_rpm_port_unhalt;
+	if (!pdata->ahb)
+		pdata->rpm_enabled = 1;
+	return 0;
+}
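
The ops table filled in by msm_bus_rpm_hw_init() is the interface the fabric layer now uses (the earlier hunks already route free_commit_data and the commit path through hw_algo). For illustration only, a caller might drive it as sketched below; the wrapper name and error handling are hypothetical, while the pointer signatures and the DUAL_CTX/ACTIVE_CTX contexts are taken from this patch.

static int fabric_alloc_and_commit(struct msm_bus_hw_algorithm *hw_algo,
	struct platform_device *pdev,
	struct msm_bus_fabric_registration *pdata,
	void **cdata)
{
	void *hw_data;
	int ret;

	/* One commit-data buffer per context, allocated via the ops table */
	ret = hw_algo->allocate_commit_data(pdata, &cdata[DUAL_CTX], DUAL_CTX);
	if (ret)
		return ret;
	ret = hw_algo->allocate_commit_data(pdata, &cdata[ACTIVE_CTX],
		ACTIVE_CTX);
	if (ret)
		goto free_dual;

	/* Backend-specific request buffer (RPM id/value pairs for this backend) */
	hw_data = hw_algo->allocate_hw_data(pdev, pdata);
	if (!hw_data) {
		ret = -ENOMEM;
		goto free_active;
	}

	/* Push both contexts' arbitration data to the backend */
	return hw_algo->commit(pdata, hw_data, cdata);

free_active:
	hw_algo->free_commit_data(cdata[ACTIVE_CTX]);
free_dual:
	hw_algo->free_commit_data(cdata[DUAL_CTX]);
	return ret;
}
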
diff --git a/arch/arm/mach-msm/msm_cache_dump.c b/arch/arm/mach-msm/msm_cache_dump.c
index b21412f..9759d5a 100644
--- a/arch/arm/mach-msm/msm_cache_dump.c
+++ b/arch/arm/mach-msm/msm_cache_dump.c
@@ -31,13 +31,12 @@
 static unsigned long msm_cache_dump_addr;
 
 /*
- * These are dummy pointers so the defintion of l1_cache_dump
- * and l2_cache_dump don't get optimized away. If they aren't
- * referenced, the structure definitions don't show up in the
- * debugging information which is needed for post processing.
+ * These should not actually be dereferenced. There's no
+ * need for a virtual mapping, but the physical address is
+ * necessary.
  */
-static struct l1_cache_dump __used *l1_dump;
-static struct l2_cache_dump __used *l2_dump;
+static struct l1_cache_dump *l1_dump;
+static struct l2_cache_dump *l2_dump;
 
 static int msm_cache_dump_panic(struct notifier_block *this,
 				unsigned long event, void *ptr)
@@ -96,6 +95,8 @@
 		pr_err("%s: could not register L1 buffer ret = %d.\n",
 			__func__, ret);
 
+	l1_dump = (struct l1_cache_dump *)msm_cache_dump_addr;
+
 #if defined(CONFIG_MSM_CACHE_DUMP_ON_PANIC)
 	l1_cache_data.buf = msm_cache_dump_addr + d->l1_size;
 	l1_cache_data.size = d->l2_size;
@@ -110,6 +111,9 @@
 	__raw_writel(msm_cache_dump_addr + d->l1_size,
 			MSM_IMEM_BASE + L2_DUMP_OFFSET);
 
+
+	l2_dump = (struct l2_cache_dump *)(msm_cache_dump_addr + d->l1_size);
+
 	atomic_notifier_chain_register(&panic_notifier_list,
 						&msm_cache_dump_blk);
 	return 0;
diff --git a/arch/arm/mach-msm/ocmem.c b/arch/arm/mach-msm/ocmem.c
index ed0b2f0..69e39df 100644
--- a/arch/arm/mach-msm/ocmem.c
+++ b/arch/arm/mach-msm/ocmem.c
@@ -37,7 +37,7 @@
 	unsigned long size;
 	unsigned long base;
 	struct ocmem_partition *parts;
-	int nr_parts;
+	unsigned nr_parts;
 };
 
 struct ocmem_zone zones[OCMEM_CLIENT_MAX];
@@ -102,7 +102,7 @@
 	struct ocmem_plat_data *pdata = NULL;
 	struct ocmem_partition *parts = NULL;
 	struct device   *dev = &pdev->dev;
-	int nr_parts;
+	unsigned nr_parts = 0;
 	int i;
 	int j;
 
diff --git a/arch/arm/mach-msm/pcie.c b/arch/arm/mach-msm/pcie.c
index 4e2b1083..dd91e66 100644
--- a/arch/arm/mach-msm/pcie.c
+++ b/arch/arm/mach-msm/pcie.c
@@ -201,7 +201,7 @@
 	.write = msm_pcie_wr_conf,
 };
 
-static int __devinit msm_pcie_gpio_init(void)
+static int __init msm_pcie_gpio_init(void)
 {
 	int rc, i;
 	struct msm_pcie_gpio_info_t *info;
@@ -239,7 +239,7 @@
 		gpio_free(msm_pcie_dev.gpio[i].num);
 }
 
-static int __devinit msm_pcie_vreg_init(struct device *dev)
+static int __init msm_pcie_vreg_init(struct device *dev)
 {
 	int i, rc = 0;
 	struct regulator *vreg;
@@ -306,7 +306,7 @@
 	}
 }
 
-static int __devinit msm_pcie_clk_init(struct device *dev)
+static int __init msm_pcie_clk_init(struct device *dev)
 {
 	int i, rc = 0;
 	struct clk *clk_hdl;
@@ -346,7 +346,7 @@
 	}
 }
 
-static void __devinit msm_pcie_config_controller(void)
+static void __init msm_pcie_config_controller(void)
 {
 	struct msm_pcie_dev_t *dev = &msm_pcie_dev;
 	struct msm_pcie_res_info_t *axi_bar = &dev->res[MSM_PCIE_RES_AXI_BAR];
@@ -393,7 +393,7 @@
 	wmb();
 }
 
-static int __devinit msm_pcie_get_resources(struct platform_device *pdev)
+static int __init msm_pcie_get_resources(struct platform_device *pdev)
 {
 	int i, rc = 0;
 	struct resource *res;
@@ -437,7 +437,7 @@
 	return rc;
 }
 
-static void __devexit msm_pcie_release_resources(void)
+static void msm_pcie_release_resources(void)
 {
 	int i;
 
@@ -452,7 +452,7 @@
 	msm_pcie_dev.axi_conf = NULL;
 }
 
-static int __devinit msm_pcie_setup(int nr, struct pci_sys_data *sys)
+static int __init msm_pcie_setup(int nr, struct pci_sys_data *sys)
 {
 	int rc;
 	struct msm_pcie_dev_t *dev = &msm_pcie_dev;
@@ -548,8 +548,8 @@
 	return (rc) ? 0 : 1;
 }
 
-static struct pci_bus __devinit *msm_pcie_scan_bus(int nr,
-						   struct pci_sys_data *sys)
+static struct pci_bus __init *msm_pcie_scan_bus(int nr,
+						struct pci_sys_data *sys)
 {
 	struct pci_bus *bus = NULL;
 
@@ -560,13 +560,13 @@
 	return bus;
 }
 
-static int __devinit msm_pcie_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+static int __init msm_pcie_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 {
 	PCIE_DBG("slot %d pin %d\n", slot, pin);
 	return (pin <= 4) ? (PCIE20_INTA + pin - 1) : 0;
 }
 
-static struct hw_pci msm_pci __devinitdata = {
+static struct hw_pci msm_pci __initdata = {
 	.nr_controllers = 1,
 	.swizzle = pci_std_swizzle,
 	.setup = msm_pcie_setup,
@@ -574,7 +574,7 @@
 	.map_irq = msm_pcie_map_irq,
 };
 
-static int __devinit msm_pcie_probe(struct platform_device *pdev)
+static int __init msm_pcie_probe(struct platform_device *pdev)
 {
 	const struct msm_pcie_platform *pdata;
 	int rc;
@@ -603,7 +603,7 @@
 	return 0;
 }
 
-static int __devexit msm_pcie_remove(struct platform_device *pdev)
+static int __exit msm_pcie_remove(struct platform_device *pdev)
 {
 	PCIE_DBG("\n");
 
@@ -621,8 +621,7 @@
 }
 
 static struct platform_driver msm_pcie_driver = {
-	.probe = msm_pcie_probe,
-	.remove = __devexit_p(msm_pcie_remove),
+	.remove = __exit_p(msm_pcie_remove),
 	.driver = {
 		.name = "msm_pcie",
 		.owner = THIS_MODULE,
@@ -632,7 +631,7 @@
 static int __init msm_pcie_init(void)
 {
 	PCIE_DBG("\n");
-	return platform_driver_register(&msm_pcie_driver);
+	return platform_driver_probe(&msm_pcie_driver, msm_pcie_probe);
 }
 subsys_initcall(msm_pcie_init);
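
A note on the registration change above: platform_driver_probe() binds the driver to its device exactly once at registration time and refuses later re-binding, which is what makes it safe to move the probe path from __devinit to __init and discard it after boot. Reduced to a minimal sketch with hypothetical names (not part of the patch):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init example_probe(struct platform_device *pdev)
{
	/* claim resources here; this code is freed once boot completes */
	return 0;
}

/* .probe is deliberately left unset; it is passed to platform_driver_probe() */
static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		.owner = THIS_MODULE,
	},
};

static int __init example_init(void)
{
	return platform_driver_probe(&example_driver, example_probe);
}
subsys_initcall(example_init);
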
 
diff --git a/arch/arm/mach-msm/pcie_irq.c b/arch/arm/mach-msm/pcie_irq.c
index df100db..d915561 100644
--- a/arch/arm/mach-msm/pcie_irq.c
+++ b/arch/arm/mach-msm/pcie_irq.c
@@ -67,7 +67,7 @@
 	return IRQ_HANDLED;
 }
 
-uint32_t __devinit msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
+uint32_t __init msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
 {
 	int i, rc;
 
@@ -93,7 +93,7 @@
 	return rc;
 }
 
-void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
+void __exit msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
 {
 	free_irq(PCIE20_INT_MSI, dev);
 }
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index 9d0ce0d..bfbf4bc 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -138,8 +138,8 @@
 	const u8 *data;
 
 	if (memblock_overlaps_memory(phdr->p_paddr, phdr->p_memsz)) {
-		dev_err(&pil->dev,
-			"kernel memory would be overwritten [%#08lx, %#08lx)\n",
+		dev_err(&pil->dev, "%s: kernel memory would be overwritten "
+			"[%#08lx, %#08lx)\n", pil->desc->name,
 			(unsigned long)phdr->p_paddr,
 			(unsigned long)(phdr->p_paddr + phdr->p_memsz));
 		return -EPERM;
@@ -150,14 +150,15 @@
 				pil->desc->name, num);
 		ret = request_firmware(&fw, fw_name, &pil->dev);
 		if (ret) {
-			dev_err(&pil->dev, "Failed to locate blob %s\n",
-					fw_name);
+			dev_err(&pil->dev, "%s: Failed to locate blob %s\n",
+					pil->desc->name, fw_name);
 			return ret;
 		}
 
 		if (fw->size != phdr->p_filesz) {
-			dev_err(&pil->dev, "Blob size %u doesn't match %u\n",
-					fw->size, phdr->p_filesz);
+			dev_err(&pil->dev, "%s: Blob size %u doesn't match "
+					"%u\n", pil->desc->name, fw->size,
+					phdr->p_filesz);
 			ret = -EPERM;
 			goto release_fw;
 		}
@@ -174,7 +175,8 @@
 		size = min_t(size_t, IOMAP_SIZE, count);
 		buf = ioremap(paddr, size);
 		if (!buf) {
-			dev_err(&pil->dev, "Failed to map memory\n");
+			dev_err(&pil->dev, "%s: Failed to map memory\n",
+					pil->desc->name);
 			ret = -ENOMEM;
 			goto release_fw;
 		}
@@ -195,7 +197,8 @@
 		size = min_t(size_t, IOMAP_SIZE, count);
 		buf = ioremap(paddr, size);
 		if (!buf) {
-			dev_err(&pil->dev, "Failed to map memory\n");
+			dev_err(&pil->dev, "%s: Failed to map memory\n",
+					pil->desc->name);
 			ret = -ENOMEM;
 			goto release_fw;
 		}
@@ -210,7 +213,8 @@
 		ret = pil->desc->ops->verify_blob(pil->desc, phdr->p_paddr,
 					  phdr->p_memsz);
 		if (ret)
-			dev_err(&pil->dev, "Blob%u failed verification\n", num);
+			dev_err(&pil->dev, "%s: Blob%u failed verification\n",
+				pil->desc->name, num);
 	}
 
 release_fw:
@@ -241,38 +245,43 @@
 	snprintf(fw_name, sizeof(fw_name), "%s.mdt", pil->desc->name);
 	ret = request_firmware(&fw, fw_name, &pil->dev);
 	if (ret) {
-		dev_err(&pil->dev, "Failed to locate %s\n", fw_name);
+		dev_err(&pil->dev, "%s: Failed to locate %s\n",
+				pil->desc->name, fw_name);
 		goto out;
 	}
 
 	if (fw->size < sizeof(*ehdr)) {
-		dev_err(&pil->dev, "Not big enough to be an elf header\n");
+		dev_err(&pil->dev, "%s: Not big enough to be an elf header\n",
+				pil->desc->name);
 		ret = -EIO;
 		goto release_fw;
 	}
 
 	ehdr = (struct elf32_hdr *)fw->data;
 	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
-		dev_err(&pil->dev, "Not an elf header\n");
+		dev_err(&pil->dev, "%s: Not an elf header\n", pil->desc->name);
 		ret = -EIO;
 		goto release_fw;
 	}
 
 	if (ehdr->e_phnum == 0) {
-		dev_err(&pil->dev, "No loadable segments\n");
+		dev_err(&pil->dev, "%s: No loadable segments\n",
+				pil->desc->name);
 		ret = -EIO;
 		goto release_fw;
 	}
 	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
 	    sizeof(struct elf32_hdr) > fw->size) {
-		dev_err(&pil->dev, "Program headers not within mdt\n");
+		dev_err(&pil->dev, "%s: Program headers not within mdt\n",
+				pil->desc->name);
 		ret = -EIO;
 		goto release_fw;
 	}
 
 	ret = pil->desc->ops->init_image(pil->desc, fw->data, fw->size);
 	if (ret) {
-		dev_err(&pil->dev, "Invalid firmware metadata\n");
+		dev_err(&pil->dev, "%s: Invalid firmware metadata\n",
+				pil->desc->name);
 		goto release_fw;
 	}
 
@@ -283,25 +292,27 @@
 
 		ret = load_segment(phdr, i, pil);
 		if (ret) {
-			dev_err(&pil->dev, "Failed to load segment %d\n",
-					i);
+			dev_err(&pil->dev, "%s: Failed to load segment %d\n",
+					pil->desc->name, i);
 			goto release_fw;
 		}
 	}
 
 	ret = pil_proxy_vote(pil);
 	if (ret) {
-		dev_err(&pil->dev, "Failed to proxy vote\n");
+		dev_err(&pil->dev, "%s: Failed to proxy vote\n",
+					pil->desc->name);
 		goto release_fw;
 	}
 
 	ret = pil->desc->ops->auth_and_reset(pil->desc);
 	if (ret) {
-		dev_err(&pil->dev, "Failed to bring out of reset\n");
+		dev_err(&pil->dev, "%s: Failed to bring out of reset\n",
+				pil->desc->name);
 		proxy_timeout = 0; /* Remove proxy vote immediately on error */
 		goto err_boot;
 	}
-	dev_info(&pil->dev, "brought %s out of reset\n", pil->desc->name);
+	dev_info(&pil->dev, "%s: Brought out of reset\n", pil->desc->name);
 err_boot:
 	pil_proxy_unvote(pil, proxy_timeout);
 release_fw:
@@ -397,7 +408,8 @@
 		return;
 
 	mutex_lock(&pil->lock);
-	if (WARN(!pil->count, "%s: Reference count mismatch\n", __func__))
+	if (WARN(!pil->count, "%s: %s: Reference count mismatch\n",
+			pil->desc->name, __func__))
 		goto err_out;
 	if (!--pil->count)
 		pil_shutdown(pil);
@@ -428,7 +440,8 @@
 	}
 
 	mutex_lock(&pil->lock);
-	if (!WARN(!pil->count, "%s: Reference count mismatch\n", __func__))
+	if (!WARN(!pil->count, "%s: %s: Reference count mismatch\n",
+			pil->desc->name, __func__))
 		pil_shutdown(pil);
 	mutex_unlock(&pil->lock);
 
@@ -448,7 +461,8 @@
 	}
 
 	mutex_lock(&pil->lock);
-	if (!WARN(!pil->count, "%s: Reference count mismatch\n", __func__))
+	if (!WARN(!pil->count, "%s: %s: Reference count mismatch\n",
+			pil->desc->name, __func__))
 		ret = load_image(pil);
 	if (!ret)
 		pil_set_state(pil, PIL_ONLINE);
diff --git a/arch/arm/mach-msm/pm-8x60.c b/arch/arm/mach-msm/pm-8x60.c
index 070e2c5..6f68c8b 100644
--- a/arch/arm/mach-msm/pm-8x60.c
+++ b/arch/arm/mach-msm/pm-8x60.c
@@ -44,7 +44,6 @@
 #include <mach/cpuidle.h>
 #include "idle.h"
 #include "pm.h"
-#include "rpm_resources.h"
 #include "scm-boot.h"
 #include "spm.h"
 #include "timer.h"
@@ -109,6 +108,7 @@
 		"standalone_power_collapse",
 };
 
+static struct msm_pm_sleep_ops pm_sleep_ops;
 /*
  * Write out the attribute.
  */
@@ -396,7 +396,7 @@
  *
  *****************************************************************************/
 
-static struct msm_rpmrs_limits *msm_pm_idle_rs_limits;
+static void *msm_pm_idle_rs_limits;
 static bool msm_pm_use_qtimer;
 
 static void msm_pm_swfi(void)
@@ -644,7 +644,8 @@
 		struct cpuidle_state *state = &dev->states[i];
 		enum msm_pm_sleep_mode mode;
 		bool allow;
-		struct msm_rpmrs_limits *rs_limits = NULL;
+		void *rs_limits = NULL;
+		uint32_t power;
 		int idx;
 
 		mode = (enum msm_pm_sleep_mode) state->driver_data;
@@ -673,12 +674,6 @@
 		case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
 			if (!allow)
 				break;
-
-			if (!dev->cpu &&
-				msm_rpm_local_request_is_outstanding()) {
-				allow = false;
-				break;
-			}
 			/* fall through */
 
 		case MSM_PM_SLEEP_MODE_RETENTION:
@@ -690,8 +685,10 @@
 			if (!allow)
 				break;
 
-			rs_limits = msm_rpmrs_lowest_limits(true,
-						mode, latency_us, sleep_us);
+			if (pm_sleep_ops.lowest_limits)
+				rs_limits = pm_sleep_ops.lowest_limits(true,
+						mode, latency_us, sleep_us,
+						&power);
 
 			if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
 				pr_info("CPU%u: %s: %s, latency %uus, "
@@ -699,17 +696,6 @@
 					dev->cpu, __func__, state->desc,
 					latency_us, sleep_us, rs_limits);
 
-			if ((MSM_PM_DEBUG_IDLE_LIMITS & msm_pm_debug_mask) &&
-					rs_limits)
-				pr_info("CPU%u: %s: limit %p: "
-					"pxo %d, l2_cache %d, "
-					"vdd_mem %d, vdd_dig %d\n",
-					dev->cpu, __func__, rs_limits,
-					rs_limits->pxo,
-					rs_limits->l2_cache,
-					rs_limits->vdd_mem,
-					rs_limits->vdd_dig);
-
 			if (!rs_limits)
 				allow = false;
 			break;
@@ -727,7 +713,7 @@
 			state->flags &= ~CPUIDLE_FLAG_IGNORE;
 			state->target_residency = 0;
 			state->exit_latency = 0;
-			state->power_usage = rs_limits->power[dev->cpu];
+			state->power_usage = power;
 
 			if (MSM_PM_SLEEP_MODE_POWER_COLLAPSE == mode)
 				msm_pm_idle_rs_limits = rs_limits;
@@ -770,7 +756,7 @@
 		int64_t timer_expiration = 0;
 		bool timer_halted = false;
 		uint32_t sleep_delay;
-		int ret;
+		int ret = -ENODEV;
 		int notify_rpm =
 			(sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE);
 		int collapsed;
@@ -785,14 +771,17 @@
 		if (MSM_PM_DEBUG_IDLE_CLK & msm_pm_debug_mask)
 			clock_debug_print_enabled();
 
-		ret = msm_rpmrs_enter_sleep(
-			sleep_delay, msm_pm_idle_rs_limits, true, notify_rpm);
+		if (pm_sleep_ops.enter_sleep)
+			ret = pm_sleep_ops.enter_sleep(sleep_delay,
+					msm_pm_idle_rs_limits,
+					true, notify_rpm);
 		if (!ret) {
 			collapsed = msm_pm_power_collapse(true);
 			timer_halted = true;
 
-			msm_rpmrs_exit_sleep(msm_pm_idle_rs_limits, true,
-					notify_rpm, collapsed);
+			if (pm_sleep_ops.exit_sleep)
+				pm_sleep_ops.exit_sleep(msm_pm_idle_rs_limits,
+						true, notify_rpm, collapsed);
 		}
 		msm_pm_timer_exit_idle(timer_halted);
 		exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
@@ -920,8 +909,9 @@
 	}
 
 	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE]) {
-		struct msm_rpmrs_limits *rs_limits;
-		int ret;
+		void *rs_limits = NULL;
+		int ret = -ENODEV;
+		uint32_t power;
 
 		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
 			pr_info("%s: power collapse\n", __func__);
@@ -936,28 +926,22 @@
 			msm_pm_sleep_time_override = 0;
 		}
 #endif /* CONFIG_MSM_SLEEP_TIME_OVERRIDE */
-
-		if (MSM_PM_DEBUG_SUSPEND_LIMITS & msm_pm_debug_mask)
-			msm_rpmrs_show_resources();
-
-		rs_limits = msm_rpmrs_lowest_limits(false,
-				MSM_PM_SLEEP_MODE_POWER_COLLAPSE, -1, -1);
-
-		if ((MSM_PM_DEBUG_SUSPEND_LIMITS & msm_pm_debug_mask) &&
-				rs_limits)
-			pr_info("%s: limit %p: pxo %d, l2_cache %d, "
-				"vdd_mem %d, vdd_dig %d\n",
-				__func__, rs_limits,
-				rs_limits->pxo, rs_limits->l2_cache,
-				rs_limits->vdd_mem, rs_limits->vdd_dig);
+		if (pm_sleep_ops.lowest_limits)
+			rs_limits = pm_sleep_ops.lowest_limits(false,
+					MSM_PM_SLEEP_MODE_POWER_COLLAPSE, -1,
+					-1, &power);
 
 		if (rs_limits) {
-			ret = msm_rpmrs_enter_sleep(
-				msm_pm_max_sleep_time, rs_limits, false, true);
+			if (pm_sleep_ops.enter_sleep)
+				ret = pm_sleep_ops.enter_sleep(
+						msm_pm_max_sleep_time,
+						rs_limits, false, true);
 			if (!ret) {
 				int collapsed = msm_pm_power_collapse(false);
-				msm_rpmrs_exit_sleep(rs_limits, false, true,
-						collapsed);
+				if (pm_sleep_ops.exit_sleep) {
+					pm_sleep_ops.exit_sleep(rs_limits,
+						false, true, collapsed);
+				}
 			}
 		} else {
 			pr_err("%s: cannot find the lowest power limit\n",
@@ -1001,6 +985,12 @@
 	msm_pm_slp_sts = data;
 }
 
+void msm_pm_set_sleep_ops(struct msm_pm_sleep_ops *ops)
+{
+	if (ops)
+		pm_sleep_ops = *ops;
+}
+
 static int __init msm_pm_init(void)
 {
 	pgd_t *pc_pgd;
diff --git a/arch/arm/mach-msm/pm.h b/arch/arm/mach-msm/pm.h
index ce0d747..0d5101d 100644
--- a/arch/arm/mach-msm/pm.h
+++ b/arch/arm/mach-msm/pm.h
@@ -76,6 +76,16 @@
 	uint32_t mask;
 };
 
+struct msm_pm_sleep_ops {
+	void *(*lowest_limits)(bool from_idle,
+			enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
+			uint32_t sleep_us, uint32_t *power);
+	int (*enter_sleep)(uint32_t sclk_count, void *limits,
+			bool from_idle, bool notify_rpm);
+	void (*exit_sleep)(void *limits, bool from_idle,
+			bool notify_rpm, bool collapsed);
+};
+
 void msm_pm_set_platform_data(struct msm_pm_platform_data *data, int count);
 void msm_pm_set_irq_extns(struct msm_pm_irq_calls *irq_calls);
 int msm_pm_idle_prepare(struct cpuidle_device *dev);
@@ -90,12 +100,13 @@
 void msm_pm_set_rpm_wakeup_irq(unsigned int irq);
 int msm_pm_wait_cpu_shutdown(unsigned int cpu);
 bool msm_pm_verify_cpu_pc(unsigned int cpu);
+void msm_pm_set_sleep_ops(struct msm_pm_sleep_ops *ops);
 #else
 static inline void msm_pm_set_rpm_wakeup_irq(unsigned int irq) {}
 static inline int msm_pm_wait_cpu_shutdown(unsigned int cpu) { return 0; }
 static inline bool msm_pm_verify_cpu_pc(unsigned int cpu) { return true; }
+static inline void msm_pm_set_sleep_ops(struct msm_pm_sleep_ops *ops) {}
 #endif
-
 #ifdef CONFIG_HOTPLUG_CPU
 int msm_platform_secondary_init(unsigned int cpu);
 #else
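
The struct above turns what used to be direct calls into the RPM-resources code into a registration interface: a resource backend hands its handlers to pm-8x60.c through msm_pm_set_sleep_ops(), and the idle/suspend paths invoke them through pm_sleep_ops. A minimal sketch of such a backend, assuming only the signatures in this hunk (all function names below are hypothetical):

static void *example_lowest_limits(bool from_idle,
	enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
	uint32_t sleep_us, uint32_t *power)
{
	*power = 0;	/* would report the chosen mode's power cost */
	return NULL;	/* opaque limits handle; NULL if nothing fits */
}

static int example_enter_sleep(uint32_t sclk_count, void *limits,
	bool from_idle, bool notify_rpm)
{
	return 0;	/* would program the low-power resource set */
}

static void example_exit_sleep(void *limits, bool from_idle,
	bool notify_rpm, bool collapsed)
{
	/* would restore active-set resources */
}

static int __init example_sleep_ops_init(void)
{
	static struct msm_pm_sleep_ops ops = {
		.lowest_limits	= example_lowest_limits,
		.enter_sleep	= example_enter_sleep,
		.exit_sleep	= example_exit_sleep,
	};

	msm_pm_set_sleep_ops(&ops);
	return 0;
}
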
diff --git a/arch/arm/mach-msm/qdsp6v2/Makefile b/arch/arm/mach-msm/qdsp6v2/Makefile
index 083a9f1..cee8f04 100644
--- a/arch/arm/mach-msm/qdsp6v2/Makefile
+++ b/arch/arm/mach-msm/qdsp6v2/Makefile
@@ -1,5 +1,4 @@
 obj-y += rtac.o
-
 ifdef CONFIG_ARCH_MSM8X60
 obj-y += audio_dev_ctl.o
 obj-y += board-msm8x60-audio.o
@@ -17,6 +16,7 @@
 ifndef CONFIG_ARCH_MSM9615
 obj-y += aac_in.o qcelp_in.o evrc_in.o amrnb_in.o audio_utils.o
 obj-y += audio_wma.o audio_wmapro.o audio_aac.o audio_multi_aac.o audio_utils_aio.o
+obj-$(CONFIG_MSM_QDSP6_CODECS) += q6audio_v1.o q6audio_v1_aio.o
 obj-$(CONFIG_MSM_ULTRASOUND) += ultrasound/
 obj-y += audio_mp3.o audio_amrnb.o audio_amrwb.o audio_evrc.o audio_qcelp.o amrwb_in.o
 endif
diff --git a/arch/arm/mach-msm/qdsp6v2/aac_in.c b/arch/arm/mach-msm/qdsp6v2/aac_in.c
index 41d3ff3..6e79a75 100644
--- a/arch/arm/mach-msm/qdsp6v2/aac_in.c
+++ b/arch/arm/mach-msm/qdsp6v2/aac_in.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,8 +22,6 @@
 #include <linux/msm_audio_aac.h>
 #include <asm/atomic.h>
 #include <asm/ioctls.h>
-#include <sound/q6asm.h>
-#include <sound/apr_audio.h>
 #include "audio_utils.h"
 
 
@@ -35,43 +33,6 @@
 
 #define AAC_FORMAT_ADTS 65535
 
-void q6asm_aac_in_cb(uint32_t opcode, uint32_t token,
-		uint32_t *payload, void *priv)
-{
-	struct q6audio_in * audio = (struct q6audio_in *)priv;
-	unsigned long flags;
-
-	pr_debug("%s:session id %d: opcode[0x%x]\n", __func__,
-			audio->ac->session, opcode);
-
-	spin_lock_irqsave(&audio->dsp_lock, flags);
-	switch (opcode) {
-	case ASM_DATA_EVENT_READ_DONE:
-		audio_in_get_dsp_frames(audio, token, payload);
-		break;
-	case ASM_DATA_EVENT_WRITE_DONE:
-		atomic_inc(&audio->in_count);
-		wake_up(&audio->write_wait);
-		break;
-	case ASM_DATA_CMDRSP_EOS:
-		audio->eos_rsp = 1;
-		wake_up(&audio->read_wait);
-		break;
-	case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM:
-		break;
-	case ASM_STREAM_CMDRSP_GET_PP_PARAMS:
-		break;
-	case ASM_SESSION_EVENT_TX_OVERFLOW:
-		pr_err("%s:session id %d: ASM_SESSION_EVENT_TX_OVERFLOW\n",
-			__func__, audio->ac->session);
-		break;
-	default:
-		pr_debug("%s:session id %d: Ignore opcode[0x%x]\n", __func__,
-			audio->ac->session, opcode);
-		break;
-	}
-	spin_unlock_irqrestore(&audio->dsp_lock, flags);
-}
 /* ------------------- device --------------------- */
 static long aac_in_ioctl(struct file *file,
 				unsigned int cmd, unsigned long arg)
@@ -121,8 +82,8 @@
 					aac_mode,
 					enc_cfg->stream_format);
 		if (rc < 0) {
-			pr_err("%s:session id %d: cmd media format block\
-				failed\n", __func__, audio->ac->session);
+			pr_err("%s:session id %d: cmd media format block "
+				"failed\n", __func__, audio->ac->session);
 			break;
 		}
 		if (audio->feedback == NON_TUNNEL_MODE) {
@@ -130,8 +91,8 @@
 						audio->pcm_cfg.sample_rate,
 						audio->pcm_cfg.channel_count);
 			if (rc < 0) {
-				pr_err("%s:session id %d: media format block\
-				failed\n", __func__, audio->ac->session);
+				pr_err("%s:session id %d: media format block "
+				"failed\n", __func__, audio->ac->session);
 				break;
 			}
 		}
@@ -140,8 +101,8 @@
 			audio->enabled = 1;
 		} else {
 			audio->enabled = 0;
-			pr_err("%s:session id %d: Audio Start procedure\
-			failed rc=%d\n", __func__, audio->ac->session, rc);
+			pr_err("%s:session id %d: Audio Start procedure "
+			"failed rc=%d\n", __func__, audio->ac->session, rc);
 			break;
 		}
 		while (cnt++ < audio->str_cfg.buffer_count)
@@ -155,8 +116,8 @@
 				audio->ac->session);
 		rc = audio_in_disable(audio);
 		if (rc  < 0) {
-			pr_err("%s:session id %d: Audio Stop procedure failed\
-				rc=%d\n", __func__, audio->ac->session, rc);
+			pr_err("%s:session id %d: Audio Stop procedure failed "
+				"rc=%d\n", __func__, audio->ac->session, rc);
 			break;
 		}
 		break;
@@ -174,8 +135,8 @@
 		/* ADTS(-1) to ADTS(0x00), RAW(0x00) to RAW(0x03) */
 		cfg.stream_format = ((enc_cfg->stream_format == \
 			0x00) ? AUDIO_AAC_FORMAT_ADTS : AUDIO_AAC_FORMAT_RAW);
-		pr_debug("%s:session id %d: Get-aac-cfg: format=%d sr=%d\
-			bitrate=%d\n", __func__, audio->ac->session,
+		pr_debug("%s:session id %d: Get-aac-cfg: format=%d sr=%d "
+			"bitrate=%d\n", __func__, audio->ac->session,
 			cfg.stream_format, cfg.sample_rate, cfg.bit_rate);
 		if (copy_to_user((void *)arg, &cfg, sizeof(cfg)))
 			rc = -EFAULT;
@@ -229,8 +190,8 @@
 		enc_cfg->stream_format =
 			((cfg.stream_format == AUDIO_AAC_FORMAT_RAW) ? \
 								0x03 : 0x00);
-		pr_debug("%s:session id %d: Set-aac-cfg:SR= 0x%x ch=0x%x\
-			bitrate=0x%x, format(adts/raw) = %d\n",
+		pr_debug("%s:session id %d: Set-aac-cfg:SR= 0x%x ch=0x%x "
+			"bitrate=0x%x, format(adts/raw) = %d\n",
 			__func__, audio->ac->session, enc_cfg->sample_rate,
 			enc_cfg->channels, enc_cfg->bit_rate,
 			enc_cfg->stream_format);
@@ -289,16 +250,16 @@
 	audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL);
 
 	if (audio == NULL) {
-		pr_err("%s: Could not allocate memory for aac\
-				driver\n", __func__);
+		pr_err("%s: Could not allocate memory for aac "
+				"driver\n", __func__);
 		return -ENOMEM;
 	}
 	/* Allocate memory for encoder config param */
 	audio->enc_cfg = kzalloc(sizeof(struct msm_audio_aac_enc_config),
 				GFP_KERNEL);
 	if (audio->enc_cfg == NULL) {
-		pr_err("%s:session id %d: Could not allocate memory for aac\
-				config param\n", __func__, audio->ac->session);
+		pr_err("%s:session id %d: Could not allocate memory for aac "
+				"config param\n", __func__, audio->ac->session);
 		kfree(audio);
 		return -ENOMEM;
 	}
@@ -307,8 +268,8 @@
 	audio->codec_cfg = kzalloc(sizeof(struct msm_audio_aac_config),
 				GFP_KERNEL);
 	if (audio->codec_cfg == NULL) {
-		pr_err("%s:session id %d: Could not allocate memory for aac\
-				config\n", __func__, audio->ac->session);
+		pr_err("%s:session id %d: Could not allocate memory for aac "
+				"config\n", __func__, audio->ac->session);
 		kfree(audio->enc_cfg);
 		kfree(audio);
 		return -ENOMEM;
@@ -343,12 +304,12 @@
 	aac_config->sbr_ps_on_flag = 0;
 	aac_config->channel_configuration = 1;
 
-	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_aac_in_cb,
+	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb,
 							(void *)audio);
 
 	if (!audio->ac) {
-		pr_err("%s: Could not allocate memory for\
-				audio client\n", __func__);
+		pr_err("%s: Could not allocate memory for "
+				"audio client\n", __func__);
 		kfree(audio->enc_cfg);
 		kfree(audio->codec_cfg);
 		kfree(audio);
@@ -386,8 +347,8 @@
 		/* register for tx overflow (valid for tunnel mode only) */
 		rc = q6asm_reg_tx_overflow(audio->ac, 0x01);
 		if (rc < 0) {
-			pr_err("%s:session id %d: TX Overflow registration\
-				failed rc=%d\n", __func__,
+			pr_err("%s:session id %d: TX Overflow registration "
+				"failed rc=%d\n", __func__,
 				audio->ac->session, rc);
 			rc = -ENODEV;
 			goto fail;
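
The codec-specific callback deleted above is replaced by a shared q6asm_in_cb (built from the new q6audio_v1.o object added in the Makefile change). Its source is not part of this hunk; judging from the identical callbacks removed across these encoder drivers, it plausibly reduces to the reconstruction below (treat this as an illustration, not the actual file contents):

void q6asm_in_cb(uint32_t opcode, uint32_t token,
		uint32_t *payload, void *priv)
{
	struct q6audio_in *audio = (struct q6audio_in *)priv;
	unsigned long flags;

	spin_lock_irqsave(&audio->dsp_lock, flags);
	switch (opcode) {
	case ASM_DATA_EVENT_READ_DONE:
		audio_in_get_dsp_frames(audio, token, payload);
		break;
	case ASM_DATA_EVENT_WRITE_DONE:
		atomic_inc(&audio->in_count);
		wake_up(&audio->write_wait);
		break;
	case ASM_DATA_CMDRSP_EOS:
		audio->eos_rsp = 1;
		wake_up(&audio->read_wait);
		break;
	case ASM_SESSION_EVENT_TX_OVERFLOW:
		pr_err("%s: ASM_SESSION_EVENT_TX_OVERFLOW\n", __func__);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&audio->dsp_lock, flags);
}
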
diff --git a/arch/arm/mach-msm/qdsp6v2/amrnb_in.c b/arch/arm/mach-msm/qdsp6v2/amrnb_in.c
index 6a70428..63a0774 100644
--- a/arch/arm/mach-msm/qdsp6v2/amrnb_in.c
+++ b/arch/arm/mach-msm/qdsp6v2/amrnb_in.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,8 +22,6 @@
 #include <linux/msm_audio_amrnb.h>
 #include <asm/atomic.h>
 #include <asm/ioctls.h>
-#include <sound/q6asm.h>
-#include <sound/apr_audio.h>
 #include "audio_utils.h"
 
 /* Buffer with meta*/
@@ -32,44 +30,6 @@
 /* Maximum 10 frames in buffer with meta */
 #define FRAME_SIZE		(1 + ((32+sizeof(struct meta_out_dsp)) * 10))
 
-void q6asm_amrnb_in_cb(uint32_t opcode, uint32_t token,
-		uint32_t *payload, void *priv)
-{
-	struct q6audio_in * audio = (struct q6audio_in *)priv;
-	unsigned long flags;
-
-	pr_debug("%s:session id %d: opcode - %d\n", __func__,
-			audio->ac->session, opcode);
-
-	spin_lock_irqsave(&audio->dsp_lock, flags);
-	switch (opcode) {
-	case ASM_DATA_EVENT_READ_DONE:
-		audio_in_get_dsp_frames(audio, token, payload);
-		break;
-	case ASM_DATA_EVENT_WRITE_DONE:
-		atomic_inc(&audio->in_count);
-		wake_up(&audio->write_wait);
-		break;
-	case ASM_DATA_CMDRSP_EOS:
-		audio->eos_rsp = 1;
-		wake_up(&audio->read_wait);
-		break;
-	case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM:
-		break;
-	case ASM_STREAM_CMDRSP_GET_PP_PARAMS:
-		break;
-	case ASM_SESSION_EVENT_TX_OVERFLOW:
-		pr_err("%s:session id %d: ASM_SESSION_EVENT_TX_OVERFLOW\n",
-			__func__, audio->ac->session);
-		break;
-	default:
-		pr_err("%s:session id %d: Ignore opcode[0x%x]\n", __func__,
-				audio->ac->session, opcode);
-		break;
-	}
-	spin_unlock_irqrestore(&audio->dsp_lock, flags);
-}
-
 /* ------------------- device --------------------- */
 static long amrnb_in_ioctl(struct file *file,
 				unsigned int cmd, unsigned long arg)
@@ -102,8 +62,8 @@
 			enc_cfg->dtx_enable);
 
 		if (rc < 0) {
-			pr_err("%s:session id %d: cmd amrnb media format block\
-				failed\n", __func__, audio->ac->session);
+			pr_err("%s:session id %d: cmd amrnb media format block "
+				"failed\n", __func__, audio->ac->session);
 			break;
 		}
 		if (audio->feedback == NON_TUNNEL_MODE) {
@@ -112,8 +72,8 @@
 				audio->pcm_cfg.channel_count);
 
 			if (rc < 0) {
-				pr_err("%s:session id %d: media format block\
-				failed\n", __func__, audio->ac->session);
+				pr_err("%s:session id %d: media format block "
+				"failed\n", __func__, audio->ac->session);
 				break;
 			}
 		}
@@ -125,8 +85,8 @@
 			audio->enabled = 1;
 		} else {
 			audio->enabled = 0;
-			pr_err("%s:session id %d: Audio Start procedure failed\
-					rc=%d\n", __func__,
+			pr_err("%s:session id %d: Audio Start procedure failed "
+					"rc=%d\n", __func__,
 					audio->ac->session, rc);
 			break;
 		}
@@ -141,8 +101,8 @@
 		pr_debug("%s:AUDIO_STOP\n", __func__);
 		rc = audio_in_disable(audio);
 		if (rc  < 0) {
-			pr_err("%s:session id %d: Audio Stop procedure failed\
-				rc=%d\n", __func__,
+			pr_err("%s:session id %d: Audio Stop procedure failed "
+				"rc=%d\n", __func__,
 				audio->ac->session, rc);
 			break;
 		}
@@ -196,16 +156,16 @@
 	audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL);
 
 	if (audio == NULL) {
-		pr_err("%s Could not allocate memory for amrnb\
-			driver\n", __func__);
+		pr_err("%s Could not allocate memory for amrnb "
+			"driver\n", __func__);
 		return -ENOMEM;
 	}
 	/* Allocate memory for encoder config param */
 	audio->enc_cfg = kzalloc(sizeof(struct msm_audio_amrnb_enc_config_v2),
 				GFP_KERNEL);
 	if (audio->enc_cfg == NULL) {
-		pr_err("%s:session id %d: Could not allocate memory for aac\
-				config param\n", __func__, audio->ac->session);
+		pr_err("%s:session id %d: Could not allocate memory for aac "
+				"config param\n", __func__, audio->ac->session);
 		kfree(audio);
 		return -ENOMEM;
 	}
@@ -234,12 +194,12 @@
 	audio->buf_cfg.meta_info_enable = 0x01;
 	audio->buf_cfg.frames_per_buf = 0x01;
 
-	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_amrnb_in_cb,
+	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb,
 				(void *)audio);
 
 	if (!audio->ac) {
-		pr_err("%s: Could not allocate memory for audio\
-				client\n", __func__);
+		pr_err("%s: Could not allocate memory for audio "
+				"client\n", __func__);
 		kfree(audio->enc_cfg);
 		kfree(audio);
 		return -ENOMEM;
@@ -272,8 +232,8 @@
 		/* register for tx overflow (valid for tunnel mode only) */
 		rc = q6asm_reg_tx_overflow(audio->ac, 0x01);
 		if (rc < 0) {
-			pr_err("%s:session id %d: TX Overflow registration\
-				failed rc=%d\n", __func__, audio->ac->session,
+			pr_err("%s:session id %d: TX Overflow registration "
+				"failed rc=%d\n", __func__, audio->ac->session,
 				rc);
 			rc = -ENODEV;
 			goto fail;
diff --git a/arch/arm/mach-msm/qdsp6v2/amrwb_in.c b/arch/arm/mach-msm/qdsp6v2/amrwb_in.c
index 5df976d..d0462e0 100644
--- a/arch/arm/mach-msm/qdsp6v2/amrwb_in.c
+++ b/arch/arm/mach-msm/qdsp6v2/amrwb_in.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,8 +19,6 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/wait.h>
-#include <sound/apr_audio.h>
-#include <sound/q6asm.h>
 #include <asm/atomic.h>
 #include <asm/ioctls.h>
 #include "audio_utils.h"
@@ -31,44 +29,6 @@
 /* Maximum 10 frames in buffer with meta */
 #define FRAME_SIZE		(1 + ((61+sizeof(struct meta_out_dsp)) * 10))
 
-void q6asm_amrwb_in_cb(uint32_t opcode, uint32_t token,
-		uint32_t *payload, void *priv)
-{
-	struct q6audio_in * audio = (struct q6audio_in *)priv;
-	unsigned long flags;
-
-	pr_debug("%s:session id %d: opcode - %d\n", __func__,
-			audio->ac->session, opcode);
-
-	spin_lock_irqsave(&audio->dsp_lock, flags);
-	switch (opcode) {
-	case ASM_DATA_EVENT_READ_DONE:
-		audio_in_get_dsp_frames(audio, token, payload);
-		break;
-	case ASM_DATA_EVENT_WRITE_DONE:
-		atomic_inc(&audio->in_count);
-		wake_up(&audio->write_wait);
-		break;
-	case ASM_DATA_CMDRSP_EOS:
-		audio->eos_rsp = 1;
-		wake_up(&audio->read_wait);
-		break;
-	case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM:
-		break;
-	case ASM_STREAM_CMDRSP_GET_PP_PARAMS:
-		break;
-	case ASM_SESSION_EVENT_TX_OVERFLOW:
-		pr_err("%s:session id %d: ASM_SESSION_EVENT_TX_OVERFLOW\n",
-			__func__, audio->ac->session);
-		break;
-	default:
-		pr_err("%s:session id %d: Ignore opcode[0x%x]\n", __func__,
-				audio->ac->session, opcode);
-		break;
-	}
-	spin_unlock_irqrestore(&audio->dsp_lock, flags);
-}
-
 /* ------------------- device --------------------- */
 static long amrwb_in_ioctl(struct file *file,
 				unsigned int cmd, unsigned long arg)
@@ -231,7 +191,7 @@
 	audio->buf_cfg.meta_info_enable = 0x01;
 	audio->buf_cfg.frames_per_buf = 0x01;
 
-	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_amrwb_in_cb,
+	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb,
 				(void *)audio);
 
 	if (!audio->ac) {
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_aac.c b/arch/arm/mach-msm/qdsp6v2/audio_aac.c
index 88189f6..485234f 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_aac.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_aac.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -21,28 +21,6 @@
 #define AUDIO_AAC_DUAL_MONO_INVALID -1
 #define PCM_BUFSZ_MIN_AAC	((8*1024) + sizeof(struct dec_meta_out))
 
-static void q6_audio_aac_cb(uint32_t opcode, uint32_t token,
-		uint32_t *payload, void *priv)
-{
-	struct q6audio_aio *audio = (struct q6audio_aio *)priv;
-
-	pr_debug("%s:opcode = %x token = 0x%x\n", __func__, opcode, token);
-	switch (opcode) {
-	case ASM_DATA_EVENT_WRITE_DONE:
-	case ASM_DATA_EVENT_READ_DONE:
-	case ASM_DATA_CMDRSP_EOS:
-	case ASM_DATA_CMD_MEDIA_FORMAT_UPDATE:
-	case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
-	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
-	case ASM_DATA_EVENT_ENC_SR_CM_NOTIFY:
-		audio_aio_cb(opcode, token, payload, audio);
-		break;
-	default:
-		pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode);
-		break;
-	}
-}
-
 #ifdef CONFIG_DEBUG_FS
 static const struct file_operations audio_aac_debug_fops = {
 	.read = audio_aio_debug_read,
@@ -222,8 +200,8 @@
 	audio->codec_cfg = kzalloc(sizeof(struct msm_audio_aac_config),
 					GFP_KERNEL);
 	if (audio->codec_cfg == NULL) {
-		pr_err("%s:Could not allocate memory for aac\
-			config\n", __func__);
+		pr_err("%s:Could not allocate memory for aac "
+			"config\n", __func__);
 		kfree(audio);
 		return -ENOMEM;
 	}
@@ -235,7 +213,7 @@
 	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN_AAC;
 	aac_config->dual_mono_mode = AUDIO_AAC_DUAL_MONO_INVALID;
 
-	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_aac_cb,
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
 					     (void *)audio);
 
 	if (!audio->ac) {
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_amrnb.c b/arch/arm/mach-msm/qdsp6v2/audio_amrnb.c
index 6768f7a..f4316d0 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_amrnb.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_amrnb.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -16,24 +16,6 @@
  */
 #include "audio_utils_aio.h"
 
-static void q6_audio_amrnb_cb(uint32_t opcode, uint32_t token,
-		uint32_t *payload, void *priv)
-{
-	struct q6audio_aio *audio = (struct q6audio_aio *)priv;
-
-	pr_debug("%s:opcde = %d token = 0x%x\n", __func__, opcode, token);
-	switch (opcode) {
-	case ASM_DATA_EVENT_WRITE_DONE:
-	case ASM_DATA_EVENT_READ_DONE:
-	case ASM_DATA_CMDRSP_EOS:
-		audio_aio_cb(opcode, token, payload, audio);
-		break;
-	default:
-		pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode);
-		break;
-	}
-}
-
 #ifdef CONFIG_DEBUG_FS
 static const struct file_operations audio_amrnb_debug_fops = {
 	.read = audio_aio_debug_read,
@@ -101,7 +83,7 @@
 
 	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
 
-	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_amrnb_cb,
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
 					     (void *)audio);
 
 	if (!audio->ac) {
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_amrwb.c b/arch/arm/mach-msm/qdsp6v2/audio_amrwb.c
index f95e191..28c1732 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_amrwb.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_amrwb.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -17,24 +17,6 @@
 
 #include "audio_utils_aio.h"
 
-static void q6_audio_amrwb_cb(uint32_t opcode, uint32_t token,
-		uint32_t *payload, void *priv)
-{
-	struct q6audio_aio *audio = (struct q6audio_aio *)priv;
-
-	pr_debug("%s:opcde = %d token = 0x%x\n", __func__, opcode, token);
-	switch (opcode) {
-	case ASM_DATA_EVENT_WRITE_DONE:
-	case ASM_DATA_EVENT_READ_DONE:
-	case ASM_DATA_CMDRSP_EOS:
-		audio_aio_cb(opcode, token, payload, audio);
-		break;
-	default:
-		pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode);
-		break;
-	}
-}
-
 #ifdef CONFIG_DEBUG_FS
 static const struct file_operations audio_amrwb_debug_fops = {
 	.read = audio_aio_debug_read,
@@ -103,7 +85,7 @@
 	}
 	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
 
-	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_amrwb_cb,
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
 					     (void *)audio);
 
 	if (!audio->ac) {
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_evrc.c b/arch/arm/mach-msm/qdsp6v2/audio_evrc.c
index 12c815d..ec5162d 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_evrc.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_evrc.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -17,23 +17,7 @@
 
 #include "audio_utils_aio.h"
 
-static void q6_audio_evrc_cb(uint32_t opcode, uint32_t token,
-		uint32_t *payload, void *priv)
-{
-	struct q6audio_aio *audio = (struct q6audio_aio *)priv;
 
-	pr_debug("%s:opcde = %d token = 0x%x\n", __func__, opcode, token);
-	switch (opcode) {
-	case ASM_DATA_EVENT_WRITE_DONE:
-	case ASM_DATA_EVENT_READ_DONE:
-	case ASM_DATA_CMDRSP_EOS:
-		audio_aio_cb(opcode, token, payload, audio);
-		break;
-	default:
-		pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode);
-		break;
-	}
-}
 
 #ifdef CONFIG_DEBUG_FS
 static const struct file_operations audio_evrc_debug_fops = {
@@ -107,7 +91,7 @@
 	 */
 	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
 
-	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_evrc_cb,
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
 					     (void *)audio);
 
 	if (!audio->ac) {
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_mp3.c b/arch/arm/mach-msm/qdsp6v2/audio_mp3.c
index 22552c6..93a8739 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_mp3.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_mp3.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -17,24 +17,6 @@
 
 #include "audio_utils_aio.h"
 
-static void q6_audio_mp3_cb(uint32_t opcode, uint32_t token,
-		uint32_t *payload, void *priv)
-{
-	struct q6audio_aio *audio = (struct q6audio_aio *)priv;
-
-	pr_debug("%s:opcde = %d token = 0x%x\n", __func__, opcode, token);
-	switch (opcode) {
-	case ASM_DATA_EVENT_WRITE_DONE:
-	case ASM_DATA_EVENT_READ_DONE:
-	case ASM_DATA_CMDRSP_EOS:
-		audio_aio_cb(opcode, token, payload, audio);
-		break;
-	default:
-		pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode);
-		break;
-	}
-}
-
 #ifdef CONFIG_DEBUG_FS
 static const struct file_operations audio_mp3_debug_fops = {
 	.read = audio_aio_debug_read,
@@ -103,7 +85,7 @@
 
 	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
 
-	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_mp3_cb,
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
 					     (void *)audio);
 
 	if (!audio->ac) {
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c b/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
index 8a0ba3e..9253056 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
@@ -25,28 +25,6 @@
 /* Default number of pre-allocated event packets */
 #define PCM_BUFSZ_MIN_AACM	((8*1024) + sizeof(struct dec_meta_out))
 
-static void q6_audio_aac_cb(uint32_t opcode, uint32_t token,
-		uint32_t *payload, void *priv)
-{
-	struct q6audio_aio *audio = (struct q6audio_aio *)priv;
-
-	pr_debug("%s:opcode = %x token = 0x%x\n", __func__, opcode, token);
-	switch (opcode) {
-	case ASM_DATA_EVENT_WRITE_DONE:
-	case ASM_DATA_EVENT_READ_DONE:
-	case ASM_DATA_CMDRSP_EOS:
-	case ASM_DATA_CMD_MEDIA_FORMAT_UPDATE:
-	case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
-	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
-	case ASM_DATA_EVENT_ENC_SR_CM_NOTIFY:
-		audio_aio_cb(opcode, token, payload, audio);
-		break;
-	default:
-		pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode);
-		break;
-	}
-}
-
 #ifdef CONFIG_DEBUG_FS
 static const struct file_operations audio_aac_debug_fops = {
 	.read = audio_aio_debug_read,
@@ -234,8 +212,8 @@
 	audio->codec_cfg = kzalloc(sizeof(struct msm_audio_aac_config),
 					GFP_KERNEL);
 	if (audio->codec_cfg == NULL) {
-		pr_err("%s: Could not allocate memory for aac\
-			config\n", __func__);
+		pr_err("%s: Could not allocate memory for aac "
+			"config\n", __func__);
 		kfree(audio);
 		return -ENOMEM;
 	}
@@ -245,7 +223,7 @@
 	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN_AACM;
 	aac_config->dual_mono_mode = AUDIO_AAC_DUAL_MONO_INVALID;
 
-	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_aac_cb,
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
 					     (void *)audio);
 
 	if (!audio->ac) {
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_qcelp.c b/arch/arm/mach-msm/qdsp6v2/audio_qcelp.c
index 7b72c97..37f6e6b 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_qcelp.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_qcelp.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -19,23 +19,6 @@
 
 #define FRAME_SIZE_DEC_QCELP  ((32) + sizeof(struct dec_meta_in))
 
-static void q6_audio_qcelp_cb(uint32_t opcode, uint32_t token,
-		uint32_t *payload, void *priv)
-{
-	struct q6audio_aio *audio = (struct q6audio_aio *)priv;
-
-	switch (opcode) {
-	case ASM_DATA_EVENT_WRITE_DONE:
-	case ASM_DATA_EVENT_READ_DONE:
-	case ASM_DATA_CMDRSP_EOS:
-		audio_aio_cb(opcode, token, payload, audio);
-		break;
-	default:
-		pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode);
-		break;
-	}
-}
-
 #ifdef CONFIG_DEBUG_FS
 static const struct file_operations audio_qcelp_debug_fops = {
 	.read = audio_aio_debug_read,
@@ -113,7 +96,7 @@
 	audio->pcm_cfg.sample_rate = 8000;
 	audio->pcm_cfg.channel_count = 1;
 
-	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_qcelp_cb,
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
 					     (void *)audio);
 
 	if (!audio->ac) {
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils.c b/arch/arm/mach-msm/qdsp6v2/audio_utils.c
index f9445d8..6a23e37 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -21,8 +21,6 @@
 #include <linux/slab.h>
 #include <asm/atomic.h>
 #include <asm/ioctls.h>
-#include <sound/q6asm.h>
-#include <sound/apr_audio.h>
 #include "audio_utils.h"
 
 static int audio_in_pause(struct q6audio_in  *audio)
@@ -86,35 +84,6 @@
 	return 0;
 }
 
-void  audio_in_get_dsp_frames(struct q6audio_in *audio,
-	uint32_t token,	uint32_t *payload)
-{
-	uint32_t index;
-
-	index = token;
-	pr_debug("%s:session id %d: index=%d nr frames=%d offset[%d]\n",
-			__func__, audio->ac->session, token, payload[7],
-			payload[3]);
-	pr_debug("%s:session id %d: timemsw=%d lsw=%d\n", __func__,
-			audio->ac->session, payload[4], payload[5]);
-	pr_debug("%s:session id %d: uflags=0x%8x uid=0x%8x\n", __func__,
-			audio->ac->session, payload[6], payload[8]);
-	pr_debug("%s:session id %d: enc frame size=0x%8x\n", __func__,
-			audio->ac->session, payload[2]);
-
-	audio->out_frame_info[index][0] = payload[7];
-	audio->out_frame_info[index][1] = payload[3];
-
-	/* statistics of read */
-	atomic_add(payload[2], &audio->in_bytes);
-	atomic_add(payload[7], &audio->in_samples);
-
-	if (atomic_read(&audio->out_count) <= audio->str_cfg.buffer_count) {
-		atomic_inc(&audio->out_count);
-		wake_up(&audio->read_wait);
-	}
-}
-
 /* must be called with audio->lock held */
 int audio_in_enable(struct q6audio_in  *audio)
 {
@@ -140,8 +109,8 @@
 
 		rc = q6asm_cmd(audio->ac, CMD_CLOSE);
 		if (rc < 0)
-			pr_err("%s:session id %d: Failed to close the\
-				session rc=%d\n", __func__, audio->ac->session,
+			pr_err("%s:session id %d: Failed to close the "
+				"session rc=%d\n", __func__, audio->ac->session,
 				rc);
 		audio->stopped = 1;
 		memset(audio->out_frame_info, 0,
@@ -166,8 +135,8 @@
 				ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size),
 				audio->pcm_cfg.buffer_count);
 			if (rc < 0) {
-				pr_err("%s:session id %d: Buffer Alloc\
-						failed\n", __func__,
+				pr_err("%s:session id %d: Buffer Alloc "
+						"failed\n", __func__,
 						audio->ac->session);
 				rc = -ENOMEM;
 				break;
@@ -203,8 +172,8 @@
 				ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size),
 				audio->pcm_cfg.buffer_count);
 			if (rc < 0) {
-				pr_err("%s:session id %d: Buffer Alloc\
-					failed\n", __func__,
+				pr_err("%s:session id %d: Buffer Alloc "
+					"failed\n", __func__,
 					audio->ac->session);
 				rc = -ENOMEM;
 				break;
@@ -334,15 +303,15 @@
 		}
 		audio->buf_cfg.meta_info_enable = cfg.meta_info_enable;
 		audio->buf_cfg.frames_per_buf = cfg.frames_per_buf;
-		pr_debug("%s:session id %d: Set-buf-cfg: meta[%d]\
-				framesperbuf[%d]\n", __func__,
+		pr_debug("%s:session id %d: Set-buf-cfg: meta[%d] "
+				"framesperbuf[%d]\n", __func__,
 				audio->ac->session, cfg.meta_info_enable,
 				cfg.frames_per_buf);
 		break;
 	}
 	case AUDIO_GET_BUF_CFG: {
-		pr_debug("%s:session id %d: Get-buf-cfg: meta[%d]\
-			framesperbuf[%d]\n", __func__,
+		pr_debug("%s:session id %d: Get-buf-cfg: meta[%d] "
+			"framesperbuf[%d]\n", __func__,
 			audio->ac->session, audio->buf_cfg.meta_info_enable,
 			audio->buf_cfg.frames_per_buf);
 
@@ -437,8 +406,8 @@
 
 		if ((audio->stopped && !(atomic_read(&audio->out_count))) ||
 			audio->rflush) {
-			pr_debug("%s:session id %d: driver in stop state or\
-				flush,No more buf to read", __func__,
+			pr_debug("%s:session id %d: driver in stop state or "
+				"flush,No more buf to read", __func__,
 				audio->ac->session);
 			rc = 0;/* End of File */
 			break;
@@ -504,8 +473,8 @@
 			count -= bytes_to_copy;
 			buf += bytes_to_copy;
 		} else {
-			pr_err("%s:session id %d: short read data[%p]\
-				bytesavail[%d]bytesrequest[%d]\n", __func__,
+			pr_err("%s:session id %d: short read data[%p] "
+				"bytesavail[%d]bytesrequest[%d]\n", __func__,
 				audio->ac->session,
 				data, size, count);
 		}
@@ -585,8 +554,8 @@
 						&nflags);
 			buf += mfield_size;
 			/* send the EOS and return */
-			pr_debug("%s:session id %d: send EOS\
-				0x%8x\n", __func__,
+			pr_debug("%s:session id %d: send EOS "
+				"0x%8x\n", __func__,
 				audio->ac->session, nflags);
 			break;
 		}
@@ -613,8 +582,8 @@
 				buf += mfield_size;
 				count -= mfield_size;
 			} else {
-				pr_debug("%s:session id %d: continuous\
-				buffer\n", __func__, audio->ac->session);
+				pr_debug("%s:session id %d: continuous "
+				"buffer\n", __func__, audio->ac->session);
 			}
 		}
 		xfer = (count > (audio->pcm_cfg.buffer_size)) ?
@@ -634,8 +603,8 @@
 		buf += xfer;
 	}
 	mutex_unlock(&audio->write_lock);
-	pr_debug("%s:session id %d: eos_condition 0x%8x buf[0x%x]\
-			start[0x%x]\n", __func__, audio->ac->session,
+	pr_debug("%s:session id %d: eos_condition 0x%8x buf[0x%x] "
+			"start[0x%x]\n", __func__, audio->ac->session,
 				nflags,	(int) buf, (int) start);
 	if (nflags & AUD_EOS_SET) {
 		rc = q6asm_cmd(audio->ac, CMD_EOS);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils.h b/arch/arm/mach-msm/qdsp6v2/audio_utils.h
index 7a696ca..df963f9 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils.h
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,7 @@
  *
 */
 #include <linux/msm_audio.h>
+#include "q6audio_common.h"
 
 #define FRAME_NUM	(8)
 
@@ -26,18 +27,18 @@
 #define BUF_ALLOC_INOUT 0x03
 #define ALIGN_BUF_SIZE(size) ((size + 4095) & (~4095))
 
-struct timestamp{
+struct timestamp {
 	unsigned long lowpart;
 	unsigned long highpart;
 } __attribute__ ((packed));
 
-struct meta_in{
+struct meta_in {
 	unsigned short offset;
 	struct timestamp ntimestamp;
 	unsigned int nflags;
 } __attribute__ ((packed));
 
-struct meta_out_dsp{
+struct meta_out_dsp {
 	u32 offset_to_frame;
 	u32 frame_size;
 	u32 encoded_pcm_samples;
@@ -46,12 +47,12 @@
 	u32 nflags;
 } __attribute__ ((packed));
 
-struct meta_out{
+struct meta_out {
 	unsigned char num_of_frames;
 	struct meta_out_dsp meta_out_dsp[];
 } __attribute__ ((packed));
 
-struct q6audio_in{
+struct q6audio_in {
 	spinlock_t			dsp_lock;
 	atomic_t			in_bytes;
 	atomic_t			in_samples;
@@ -89,8 +90,6 @@
 	long (*enc_ioctl)(struct file *, unsigned int, unsigned long);
 };
 
-void  audio_in_get_dsp_frames(struct q6audio_in *audio,
-		uint32_t token,	uint32_t *payload);
 int audio_in_enable(struct q6audio_in  *audio);
 int audio_in_disable(struct q6audio_in  *audio);
 int audio_in_buf_alloc(struct q6audio_in *audio);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
index 644df2d..cef2fae 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
@@ -23,8 +23,6 @@
 #include <linux/slab.h>
 #include <asm/atomic.h>
 #include <asm/ioctls.h>
-#include <sound/q6asm.h>
-#include <sound/apr_audio.h>
 #include <linux/debugfs.h>
 #include "audio_utils_aio.h"
 
@@ -295,8 +293,8 @@
 		kfree(used_buf);
 		if (list_empty(&audio->out_queue) &&
 			(audio->drv_status & ADRV_STATUS_FSYNC)) {
-			pr_debug("%s[%p]: list is empty, reached EOS in\
-				Tunnel\n", __func__, audio);
+			pr_debug("%s[%p]: list is empty, reached EOS in "
+				"Tunnel\n", __func__, audio);
 			wake_up(&audio->write_wait);
 		}
 	} else {
@@ -424,72 +422,6 @@
 	}
 }
 
-void audio_aio_cb(uint32_t opcode, uint32_t token,
-		uint32_t *payload,  struct q6audio_aio *audio)
-{
-	union msm_audio_event_payload e_payload;
-
-	switch (opcode) {
-	case ASM_DATA_EVENT_WRITE_DONE:
-		pr_debug("%s[%p]:ASM_DATA_EVENT_WRITE_DONE token = 0x%x\n",
-			__func__, audio, token);
-		audio_aio_async_write_ack(audio, token, payload);
-		break;
-	case ASM_DATA_EVENT_READ_DONE:
-		pr_debug("%s[%p]:ASM_DATA_EVENT_READ_DONE token = 0x%x\n",
-			__func__, audio, token);
-		audio_aio_async_read_ack(audio, token, payload);
-		break;
-	case ASM_DATA_CMDRSP_EOS:
-		/* EOS Handle */
-		pr_debug("%s[%p]:ASM_DATA_CMDRSP_EOS\n", __func__, audio);
-		if (audio->feedback) { /* Non-Tunnel mode */
-			audio->eos_rsp = 1;
-			/* propagate input EOS i/p buffer,
-			after receiving DSP acknowledgement */
-			if (audio->eos_flag &&
-				(audio->eos_write_payload.aio_buf.buf_addr)) {
-				audio_aio_post_event(audio,
-						AUDIO_EVENT_WRITE_DONE,
-						audio->eos_write_payload);
-				memset(&audio->eos_write_payload , 0,
-					sizeof(union msm_audio_event_payload));
-				audio->eos_flag = 0;
-			}
-		} else { /* Tunnel mode */
-			audio->eos_rsp = 1;
-			wake_up(&audio->write_wait);
-			wake_up(&audio->cmd_wait);
-		}
-		break;
-	case ASM_DATA_CMD_MEDIA_FORMAT_UPDATE:
-	case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
-		pr_debug("%s[%p]:payload0[%x] payloa1d[%x]opcode= 0x%x\n",
-			__func__, audio, payload[0], payload[1], opcode);
-		break;
-	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
-	case ASM_DATA_EVENT_ENC_SR_CM_NOTIFY:
-		pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, "
-
-				"payload[0]-sr = %d, payload[1]-chl = %d, "
-				"payload[2] = %d, payload[3] = %d\n", __func__,
-				audio, payload[0], payload[1], payload[2],
-				payload[3]);
-		pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, "
-				"sr(prev) = %d, chl(prev) = %d,",
-				__func__, audio, audio->pcm_cfg.sample_rate,
-		audio->pcm_cfg.channel_count);
-		audio->pcm_cfg.sample_rate = payload[0];
-		audio->pcm_cfg.channel_count = payload[1] & 0xFFFF;
-		e_payload.stream_info.chan_info = audio->pcm_cfg.channel_count;
-		e_payload.stream_info.sample_rate = audio->pcm_cfg.sample_rate;
-		audio_aio_post_event(audio, AUDIO_EVENT_STREAM_INFO, e_payload);
-		break;
-	default:
-		break;
-	}
-}
-
 int audio_aio_enable(struct q6audio_aio  *audio)
 {
 	/* 2nd arg: 0 -> run immediately
@@ -1041,8 +973,8 @@
 		return -EFAULT;
 	}
 
-	pr_debug("%s[%p]:node %p dir %x buf_addr %p buf_len %d data_len \
-		%d\n", __func__, audio, buf_node, dir, buf_node->buf.buf_addr,
+	pr_debug("%s[%p]:node %p dir %x buf_addr %p buf_len %d data_len"
+		" %d\n", __func__, audio, buf_node, dir, buf_node->buf.buf_addr,
 		buf_node->buf.buf_len, buf_node->buf.data_len);
 	buf_node->paddr = audio_aio_ion_fixup(audio, buf_node->buf.buf_addr,
 						buf_node->buf.buf_len, 1,
@@ -1447,8 +1379,8 @@
 		break;
 	}
 	case AUDIO_GET_BUF_CFG: {
-		pr_debug("%s[%p]:session id %d: Get-buf-cfg: meta[%d]\
-			framesperbuf[%d]\n", __func__, audio,
+		pr_debug("%s[%p]:session id %d: Get-buf-cfg: meta[%d]"
+			" framesperbuf[%d]\n", __func__, audio,
 			audio->ac->session, audio->buf_cfg.meta_info_enable,
 			audio->buf_cfg.frames_per_buf);
 
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
index a25ca4d..16acb06 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
@@ -27,8 +27,7 @@
 #include <linux/ion.h>
 #include <asm/ioctls.h>
 #include <asm/atomic.h>
-#include <sound/q6asm.h>
-#include <sound/apr_audio.h>
+#include "q6audio_common.h"
 
 #define TUNNEL_MODE     0x0000
 #define NON_TUNNEL_MODE 0x0001
@@ -190,6 +189,12 @@
 	long (*codec_ioctl)(struct file *, unsigned int, unsigned long);
 };
 
+void audio_aio_async_write_ack(struct q6audio_aio *audio, uint32_t token,
+				uint32_t *payload);
+
+void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token,
+			uint32_t *payload);
+
 int audio_aio_open(struct q6audio_aio *audio, struct file *file);
 int audio_aio_enable(struct q6audio_aio  *audio);
 void audio_aio_post_event(struct q6audio_aio *audio, int type,
@@ -197,8 +202,6 @@
 int audio_aio_release(struct inode *inode, struct file *file);
 long audio_aio_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 int audio_aio_fsync(struct file *file, int datasync);
-void audio_aio_cb(uint32_t opcode, uint32_t token,
-			uint32_t *payload,  struct q6audio_aio *audio);
 void audio_aio_async_out_flush(struct q6audio_aio *audio);
 void audio_aio_async_in_flush(struct q6audio_aio *audio);
 #ifdef CONFIG_DEBUG_FS
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_wma.c b/arch/arm/mach-msm/qdsp6v2/audio_wma.c
index bea0485..021d58b 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_wma.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_wma.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -19,24 +19,6 @@
 #include <linux/msm_audio_wma.h>
 #include "audio_utils_aio.h"
 
-static void q6_audio_wma_cb(uint32_t opcode, uint32_t token,
-		uint32_t *payload, void *priv)
-{
-	struct q6audio_aio *audio = (struct q6audio_aio *)priv;
-
-	pr_debug("%s:opcode = %x token = 0x%x\n", __func__, opcode, token);
-	switch (opcode) {
-	case ASM_DATA_EVENT_WRITE_DONE:
-	case ASM_DATA_EVENT_READ_DONE:
-	case ASM_DATA_CMDRSP_EOS:
-		audio_aio_cb(opcode, token, payload, audio);
-		break;
-	default:
-		pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode);
-		break;
-	}
-}
-
 #ifdef CONFIG_DEBUG_FS
 static const struct file_operations audio_wma_debug_fops = {
 	.read = audio_aio_debug_read,
@@ -139,15 +121,15 @@
 	audio->codec_cfg = kzalloc(sizeof(struct msm_audio_wma_config_v2),
 					GFP_KERNEL);
 	if (audio->codec_cfg == NULL) {
-		pr_err("%s:Could not allocate memory for wma\
-			config\n", __func__);
+		pr_err("%s:Could not allocate memory for wma"
+			" config\n", __func__);
 		kfree(audio);
 		return -ENOMEM;
 	}
 
 	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
 
-	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_wma_cb,
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
 					     (void *)audio);
 
 	if (!audio->ac) {
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_wmapro.c b/arch/arm/mach-msm/qdsp6v2/audio_wmapro.c
index 98d1b30..4fcdcc1 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_wmapro.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_wmapro.c
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -19,24 +19,6 @@
 #include <linux/msm_audio_wmapro.h>
 #include "audio_utils_aio.h"
 
-static void q6_audio_wmapro_cb(uint32_t opcode, uint32_t token,
-		uint32_t *payload, void *priv)
-{
-	struct q6audio_aio *audio = (struct q6audio_aio *)priv;
-
-	pr_debug("%s:opcode = %x token = 0x%x\n", __func__, opcode, token);
-	switch (opcode) {
-	case ASM_DATA_EVENT_WRITE_DONE:
-	case ASM_DATA_EVENT_READ_DONE:
-	case ASM_DATA_CMDRSP_EOS:
-		audio_aio_cb(opcode, token, payload, audio);
-		break;
-	default:
-		pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode);
-		break;
-	}
-}
-
 #ifdef CONFIG_DEBUG_FS
 static const struct file_operations audio_wmapro_debug_fops = {
 	.read = audio_aio_debug_read,
@@ -197,8 +179,8 @@
 	audio->codec_cfg = kzalloc(sizeof(struct msm_audio_wmapro_config),
 					GFP_KERNEL);
 	if (audio->codec_cfg == NULL) {
-		pr_err("%s: Could not allocate memory for wmapro\
-			config\n", __func__);
+		pr_err("%s: Could not allocate memory for wmapro"
+			" config\n", __func__);
 		kfree(audio);
 		return -ENOMEM;
 	}
@@ -206,7 +188,7 @@
 
 	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
 
-	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_wmapro_cb,
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
 					     (void *)audio);
 
 	if (!audio->ac) {
diff --git a/arch/arm/mach-msm/qdsp6v2/evrc_in.c b/arch/arm/mach-msm/qdsp6v2/evrc_in.c
index ffe10bc..b95d659 100644
--- a/arch/arm/mach-msm/qdsp6v2/evrc_in.c
+++ b/arch/arm/mach-msm/qdsp6v2/evrc_in.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,8 +22,6 @@
 #include <linux/msm_audio_qcp.h>
 #include <asm/atomic.h>
 #include <asm/ioctls.h>
-#include <sound/q6asm.h>
-#include <sound/apr_audio.h>
 #include "audio_utils.h"
 
 /* Buffer with meta*/
@@ -32,44 +30,6 @@
 /* Maximum 10 frames in buffer with meta */
 #define FRAME_SIZE		(1 + ((23+sizeof(struct meta_out_dsp)) * 10))
 
-void q6asm_evrc_in_cb(uint32_t opcode, uint32_t token,
-		uint32_t *payload, void *priv)
-{
-	struct q6audio_in * audio = (struct q6audio_in *)priv;
-	unsigned long flags;
-
-	pr_debug("%s:session id %d: opcode - %d\n", __func__,
-			audio->ac->session, opcode);
-
-	spin_lock_irqsave(&audio->dsp_lock, flags);
-	switch (opcode) {
-	case ASM_DATA_EVENT_READ_DONE:
-		audio_in_get_dsp_frames(audio, token, payload);
-		break;
-	case ASM_DATA_EVENT_WRITE_DONE:
-		atomic_inc(&audio->in_count);
-		wake_up(&audio->write_wait);
-		break;
-	case ASM_DATA_CMDRSP_EOS:
-		audio->eos_rsp = 1;
-		wake_up(&audio->read_wait);
-		break;
-	case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM:
-		break;
-	case ASM_STREAM_CMDRSP_GET_PP_PARAMS:
-		break;
-	case ASM_SESSION_EVENT_TX_OVERFLOW:
-		pr_err("%s:session id %d: ASM_SESSION_EVENT_TX_OVERFLOW\n",
-				__func__, audio->ac->session);
-		break;
-	default:
-		pr_err("%s:session id %d: Ignore opcode[0x%x]\n", __func__,
-				audio->ac->session, opcode);
-		break;
-	}
-	spin_unlock_irqrestore(&audio->dsp_lock, flags);
-}
-
 /* ------------------- device --------------------- */
 static long evrc_in_ioctl(struct file *file,
 				unsigned int cmd, unsigned long arg)
@@ -104,8 +64,8 @@
 			enc_cfg->max_bit_rate, 0);
 
 		if (rc < 0) {
-			pr_err("%s:session id %d: cmd evrc media format block\
-				failed\n", __func__, audio->ac->session);
+			pr_err("%s:session id %d: cmd evrc media format block"
+				" failed\n", __func__, audio->ac->session);
 			break;
 		}
 		if (audio->feedback == NON_TUNNEL_MODE) {
@@ -114,8 +74,8 @@
 				audio->pcm_cfg.channel_count);
 
 			if (rc < 0) {
-				pr_err("%s:session id %d: media format block\
-				failed\n", __func__, audio->ac->session);
+				pr_err("%s:session id %d: media format block"
+				" failed\n", __func__, audio->ac->session);
 				break;
 			}
 		}
@@ -126,8 +86,8 @@
 			audio->enabled = 1;
 		} else {
 			audio->enabled = 0;
-			pr_err("%s:session id %d: Audio Start procedure failed\
-				rc=%d\n", __func__, audio->ac->session, rc);
+			pr_err("%s:session id %d: Audio Start procedure failed"
+				" rc=%d\n", __func__, audio->ac->session, rc);
 			break;
 		}
 		while (cnt++ < audio->str_cfg.buffer_count)
@@ -142,8 +102,8 @@
 				audio->ac->session);
 		rc = audio_in_disable(audio);
 		if (rc  < 0) {
-			pr_err("%s:session id %d: Audio Stop procedure failed\
-				rc=%d\n", __func__, audio->ac->session, rc);
+			pr_err("%s:session id %d: Audio Stop procedure failed"
+				" rc=%d\n", __func__, audio->ac->session, rc);
 			break;
 		}
 		break;
@@ -183,8 +143,8 @@
 		}
 		enc_cfg->min_bit_rate = cfg.min_bit_rate;
 		enc_cfg->max_bit_rate = cfg.max_bit_rate;
-		pr_debug("%s:session id %d: min_bit_rate= 0x%x\
-			max_bit_rate=0x%x\n", __func__,
+		pr_debug("%s:session id %d: min_bit_rate= 0x%x"
+			" max_bit_rate=0x%x\n", __func__,
 			audio->ac->session, enc_cfg->min_bit_rate,
 			enc_cfg->max_bit_rate);
 		break;
@@ -204,16 +164,16 @@
 	audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL);
 
 	if (audio == NULL) {
-		pr_err("%s: Could not allocate memory for evrc\
-				driver\n", __func__);
+		pr_err("%s: Could not allocate memory for evrc"
+				" driver\n", __func__);
 		return -ENOMEM;
 	}
 	/* Allocate memory for encoder config param */
 	audio->enc_cfg = kzalloc(sizeof(struct msm_audio_evrc_enc_config),
 				GFP_KERNEL);
 	if (audio->enc_cfg == NULL) {
-		pr_err("%s:session id %d: Could not allocate memory for aac\
-				config param\n", __func__, audio->ac->session);
+		pr_err("%s: Could not allocate memory for evrc config param\n",
+				__func__);
 		kfree(audio);
 		return -ENOMEM;
 	}
@@ -241,12 +201,12 @@
 	audio->buf_cfg.meta_info_enable = 0x01;
 	audio->buf_cfg.frames_per_buf = 0x01;
 
-	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_evrc_in_cb,
+	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb,
 				(void *)audio);
 
 	if (!audio->ac) {
-		pr_err("%s: Could not allocate memory for audio\
-				client\n", __func__);
+		pr_err("%s: Could not allocate memory for audio"
+				" client\n", __func__);
 		kfree(audio->enc_cfg);
 		kfree(audio);
 		return -ENOMEM;
@@ -279,8 +239,8 @@
 		/* register for tx overflow (valid for tunnel mode only) */
 		rc = q6asm_reg_tx_overflow(audio->ac, 0x01);
 		if (rc < 0) {
-			pr_err("%s:session id %d: TX Overflow registration\
-				failed rc=%d\n", __func__,
+			pr_err("%s:session id %d: TX Overflow registration"
+				" failed rc=%d\n", __func__,
 				audio->ac->session, rc);
 			rc = -ENODEV;
 			goto fail;
diff --git a/arch/arm/mach-msm/qdsp6v2/q6audio_common.h b/arch/arm/mach-msm/qdsp6v2/q6audio_common.h
new file mode 100644
index 0000000..e108de5
--- /dev/null
+++ b/arch/arm/mach-msm/qdsp6v2/q6audio_common.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+/* For Decoders */
+#ifndef __Q6_AUDIO_COMMON_H__
+#define __Q6_AUDIO_COMMON_H__
+
+#include <sound/apr_audio.h>
+#include <sound/q6asm.h>
+
+void q6_audio_cb(uint32_t opcode, uint32_t token,
+		uint32_t *payload, void *priv);
+
+void audio_aio_cb(uint32_t opcode, uint32_t token,
+			uint32_t *payload,  void *audio);
+
+
+/* For Encoders */
+void q6asm_in_cb(uint32_t opcode, uint32_t token,
+		uint32_t *payload, void *priv);
+
+void audio_in_get_dsp_frames(void *audio,
+		uint32_t token,	uint32_t *payload);
+
+#endif /*__Q6_AUDIO_COMMON_H__*/
diff --git a/arch/arm/mach-msm/qdsp6v2/q6audio_v1.c b/arch/arm/mach-msm/qdsp6v2/q6audio_v1.c
new file mode 100644
index 0000000..f49d6e0
--- /dev/null
+++ b/arch/arm/mach-msm/qdsp6v2/q6audio_v1.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <asm/ioctls.h>
+#include "audio_utils.h"
+
+void q6asm_in_cb(uint32_t opcode, uint32_t token,
+		uint32_t *payload, void *priv)
+{
+	struct q6audio_in *audio = (struct q6audio_in *)priv;
+	unsigned long flags;
+
+	pr_debug("%s:session id %d: opcode[0x%x]\n", __func__,
+			audio->ac->session, opcode);
+
+	spin_lock_irqsave(&audio->dsp_lock, flags);
+	switch (opcode) {
+	case ASM_DATA_EVENT_READ_DONE:
+		audio_in_get_dsp_frames(audio, token, payload);
+		break;
+	case ASM_DATA_EVENT_WRITE_DONE:
+		atomic_inc(&audio->in_count);
+		wake_up(&audio->write_wait);
+		break;
+	case ASM_DATA_CMDRSP_EOS:
+		audio->eos_rsp = 1;
+		wake_up(&audio->read_wait);
+		break;
+	case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM:
+		break;
+	case ASM_STREAM_CMDRSP_GET_PP_PARAMS:
+		break;
+	case ASM_SESSION_EVENT_TX_OVERFLOW:
+		pr_err("%s:session id %d: ASM_SESSION_EVENT_TX_OVERFLOW\n",
+			__func__, audio->ac->session);
+		break;
+	default:
+		pr_debug("%s:session id %d: Ignore opcode[0x%x]\n", __func__,
+			audio->ac->session, opcode);
+		break;
+	}
+	spin_unlock_irqrestore(&audio->dsp_lock, flags);
+}
+
+void audio_in_get_dsp_frames(void *aud,
+	uint32_t token,	uint32_t *payload)
+{
+	struct q6audio_in *audio = (struct q6audio_in *)aud;
+	uint32_t index;
+
+	index = token;
+	pr_debug("%s:session id %d: index=%d nr frames=%d offset[%d]\n",
+			__func__, audio->ac->session, token, payload[7],
+			payload[3]);
+	pr_debug("%s:session id %d: timemsw=%d lsw=%d\n", __func__,
+			audio->ac->session, payload[4], payload[5]);
+	pr_debug("%s:session id %d: uflags=0x%8x uid=0x%8x\n", __func__,
+			audio->ac->session, payload[6], payload[8]);
+	pr_debug("%s:session id %d: enc frame size=0x%8x\n", __func__,
+			audio->ac->session, payload[2]);
+
+	audio->out_frame_info[index][0] = payload[7];
+	audio->out_frame_info[index][1] = payload[3];
+
+	/* statistics of read */
+	atomic_add(payload[2], &audio->in_bytes);
+	atomic_add(payload[7], &audio->in_samples);
+
+	if (atomic_read(&audio->out_count) <= audio->str_cfg.buffer_count) {
+		atomic_inc(&audio->out_count);
+		wake_up(&audio->read_wait);
+	}
+}
diff --git a/arch/arm/mach-msm/qdsp6v2/q6audio_v1_aio.c b/arch/arm/mach-msm/qdsp6v2/q6audio_v1_aio.c
new file mode 100644
index 0000000..112de62
--- /dev/null
+++ b/arch/arm/mach-msm/qdsp6v2/q6audio_v1_aio.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2012 Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <asm/ioctls.h>
+#include "audio_utils_aio.h"
+
+void q6_audio_cb(uint32_t opcode, uint32_t token,
+		uint32_t *payload, void *priv)
+{
+	struct q6audio_aio *audio = (struct q6audio_aio *)priv;
+
+	pr_debug("%s:opcode = %x token = 0x%x\n", __func__, opcode, token);
+	switch (opcode) {
+	case ASM_DATA_EVENT_WRITE_DONE:
+	case ASM_DATA_EVENT_READ_DONE:
+	case ASM_DATA_CMDRSP_EOS:
+	case ASM_DATA_CMD_MEDIA_FORMAT_UPDATE:
+	case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
+	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
+	case ASM_DATA_EVENT_ENC_SR_CM_NOTIFY:
+		audio_aio_cb(opcode, token, payload, audio);
+		break;
+	default:
+		pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode);
+		break;
+	}
+}
+
+void audio_aio_cb(uint32_t opcode, uint32_t token,
+		uint32_t *payload,  void *priv)
+{
+	union msm_audio_event_payload e_payload;
+	struct q6audio_aio *audio = (struct q6audio_aio *)priv;
+
+	switch (opcode) {
+	case ASM_DATA_EVENT_WRITE_DONE:
+		pr_debug("%s[%p]:ASM_DATA_EVENT_WRITE_DONE token = 0x%x\n",
+			__func__, audio, token);
+		audio_aio_async_write_ack(audio, token, payload);
+		break;
+	case ASM_DATA_EVENT_READ_DONE:
+		pr_debug("%s[%p]:ASM_DATA_EVENT_READ_DONE token = 0x%x\n",
+			__func__, audio, token);
+		audio_aio_async_read_ack(audio, token, payload);
+		break;
+	case ASM_DATA_CMDRSP_EOS:
+		/* EOS Handle */
+		pr_debug("%s[%p]:ASM_DATA_CMDRSP_EOS\n", __func__, audio);
+		if (audio->feedback) { /* Non-Tunnel mode */
+			audio->eos_rsp = 1;
+			/* propagate input EOS i/p buffer,
+			after receiving DSP acknowledgement */
+			if (audio->eos_flag &&
+				(audio->eos_write_payload.aio_buf.buf_addr)) {
+				audio_aio_post_event(audio,
+						AUDIO_EVENT_WRITE_DONE,
+						audio->eos_write_payload);
+				memset(&audio->eos_write_payload , 0,
+					sizeof(union msm_audio_event_payload));
+				audio->eos_flag = 0;
+			}
+		} else { /* Tunnel mode */
+			audio->eos_rsp = 1;
+			wake_up(&audio->write_wait);
+			wake_up(&audio->cmd_wait);
+		}
+		break;
+	case ASM_DATA_CMD_MEDIA_FORMAT_UPDATE:
+	case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
+		pr_debug("%s[%p]:payload0[%x] payload1[%x] opcode = 0x%x\n",
+			__func__, audio, payload[0], payload[1], opcode);
+		break;
+	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
+	case ASM_DATA_EVENT_ENC_SR_CM_NOTIFY:
+		pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, "
+
+				"payload[0]-sr = %d, payload[1]-chl = %d, "
+				"payload[2] = %d, payload[3] = %d\n", __func__,
+				audio, payload[0], payload[1], payload[2],
+				payload[3]);
+		pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, "
+				"sr(prev) = %d, chl(prev) = %d,",
+				__func__, audio, audio->pcm_cfg.sample_rate,
+		audio->pcm_cfg.channel_count);
+		audio->pcm_cfg.sample_rate = payload[0];
+		audio->pcm_cfg.channel_count = payload[1] & 0xFFFF;
+		e_payload.stream_info.chan_info = audio->pcm_cfg.channel_count;
+		e_payload.stream_info.sample_rate = audio->pcm_cfg.sample_rate;
+		audio_aio_post_event(audio, AUDIO_EVENT_STREAM_INFO, e_payload);
+		break;
+	default:
+		break;
+	}
+}
diff --git a/arch/arm/mach-msm/qdsp6v2/qcelp_in.c b/arch/arm/mach-msm/qdsp6v2/qcelp_in.c
index 3cf4e25..a48df39 100644
--- a/arch/arm/mach-msm/qdsp6v2/qcelp_in.c
+++ b/arch/arm/mach-msm/qdsp6v2/qcelp_in.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,8 +22,6 @@
 #include <linux/msm_audio_qcp.h>
 #include <asm/atomic.h>
 #include <asm/ioctls.h>
-#include <sound/q6asm.h>
-#include <sound/apr_audio.h>
 #include "audio_utils.h"
 
 /* Buffer with meta*/
@@ -32,43 +30,6 @@
 /* Maximum 10 frames in buffer with meta */
 #define FRAME_SIZE		(1 + ((35+sizeof(struct meta_out_dsp)) * 10))
 
-void q6asm_qcelp_in_cb(uint32_t opcode, uint32_t token,
-		uint32_t *payload, void *priv)
-{
-	struct q6audio_in * audio = (struct q6audio_in *)priv;
-	unsigned long flags;
-
-	pr_debug("%s:session id %d: opcode - %d\n", __func__,
-			audio->ac->session, opcode);
-
-	spin_lock_irqsave(&audio->dsp_lock, flags);
-	switch (opcode) {
-	case ASM_DATA_EVENT_READ_DONE:
-		audio_in_get_dsp_frames(audio, token, payload);
-		break;
-	case ASM_DATA_EVENT_WRITE_DONE:
-		atomic_inc(&audio->in_count);
-		wake_up(&audio->write_wait);
-		break;
-	case ASM_DATA_CMDRSP_EOS:
-		audio->eos_rsp = 1;
-		wake_up(&audio->read_wait);
-		break;
-	case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM:
-		break;
-	case ASM_STREAM_CMDRSP_GET_PP_PARAMS:
-		break;
-	case ASM_SESSION_EVENT_TX_OVERFLOW:
-		pr_err("%s:session id %d:ASM_SESSION_EVENT_TX_OVERFLOW\n",
-			__func__, audio->ac->session);
-		break;
-	default:
-		pr_err("%s:session id %d: Ignore opcode[0x%x]\n", __func__,
-			audio->ac->session, opcode);
-		break;
-	}
-	spin_unlock_irqrestore(&audio->dsp_lock, flags);
-}
 /* ------------------- device --------------------- */
 static long qcelp_in_ioctl(struct file *file,
 				unsigned int cmd, unsigned long arg)
@@ -103,8 +64,8 @@
 			enc_cfg->max_bit_rate, 0, 0);
 
 		if (rc < 0) {
-			pr_err("%s:session id %d: cmd qcelp media format block\
-				failed\n", __func__, audio->ac->session);
+			pr_err("%s:session id %d: cmd qcelp media format block"
+				" failed\n", __func__, audio->ac->session);
 			break;
 		}
 		if (audio->feedback == NON_TUNNEL_MODE) {
@@ -113,8 +74,8 @@
 				audio->pcm_cfg.channel_count);
 
 			if (rc < 0) {
-				pr_err("%s:session id %d: media format block\
-				failed\n", __func__, audio->ac->session);
+				pr_err("%s:session id %d: media format block"
+				" failed\n", __func__, audio->ac->session);
 				break;
 			}
 		}
@@ -125,8 +86,8 @@
 			audio->enabled = 1;
 		} else {
 			audio->enabled = 0;
-			pr_err("%s:session id %d: Audio Start procedure failed\
-				rc=%d\n", __func__, audio->ac->session, rc);
+			pr_err("%s:session id %d: Audio Start procedure failed"
+				" rc=%d\n", __func__, audio->ac->session, rc);
 			break;
 		}
 		while (cnt++ < audio->str_cfg.buffer_count)
@@ -141,8 +102,8 @@
 				audio->ac->session);
 		rc = audio_in_disable(audio);
 		if (rc  < 0) {
-			pr_err("%s:session id %d: Audio Stop procedure failed\
-					rc=%d\n", __func__, audio->ac->session,
+			pr_err("%s:session id %d: Audio Stop procedure failed"
+					" rc=%d\n", __func__, audio->ac->session,
 					rc);
 			break;
 		}
@@ -180,8 +141,8 @@
 		}
 		enc_cfg->min_bit_rate = cfg.min_bit_rate;
 		enc_cfg->max_bit_rate = cfg.max_bit_rate;
-		pr_debug("%s:session id %d: min_bit_rate= 0x%x\
-			max_bit_rate=0x%x\n", __func__,
+		pr_debug("%s:session id %d: min_bit_rate= 0x%x"
+			" max_bit_rate=0x%x\n", __func__,
 			audio->ac->session, enc_cfg->min_bit_rate,
 			enc_cfg->max_bit_rate);
 		break;
@@ -201,16 +162,16 @@
 	audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL);
 
 	if (audio == NULL) {
-		pr_err("%s: Could not allocate memory for qcelp\
-				driver\n", __func__);
+		pr_err("%s: Could not allocate memory for qcelp"
+				" driver\n", __func__);
 		return -ENOMEM;
 	}
 	/* Allocate memory for encoder config param */
 	audio->enc_cfg = kzalloc(sizeof(struct msm_audio_qcelp_enc_config),
 				GFP_KERNEL);
 	if (audio->enc_cfg == NULL) {
-		pr_err("%s:session id %d: Could not allocate memory for aac\
-				config param\n", __func__, audio->ac->session);
+		pr_err("%s: Could not allocate memory for qcelp config param\n",
+				__func__);
 		kfree(audio);
 		return -ENOMEM;
 	}
@@ -239,12 +200,12 @@
 	audio->buf_cfg.meta_info_enable = 0x01;
 	audio->buf_cfg.frames_per_buf = 0x01;
 
-	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_qcelp_in_cb,
+	audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb,
 				(void *)audio);
 
 	if (!audio->ac) {
-		pr_err("%s: Could not allocate memory for audio\
-				client\n", __func__);
+		pr_err("%s: Could not allocate memory for audio"
+				" client\n", __func__);
 		kfree(audio->enc_cfg);
 		kfree(audio);
 		return -ENOMEM;
@@ -277,8 +238,8 @@
 		/* register for tx overflow (valid for tunnel mode only) */
 		rc = q6asm_reg_tx_overflow(audio->ac, 0x01);
 		if (rc < 0) {
-			pr_err("%s:session id %d: TX Overflow registration\
-			failed rc=%d\n", __func__, audio->ac->session, rc);
+			pr_err("%s:session id %d: TX Overflow registration"
+			" failed rc=%d\n", __func__, audio->ac->session, rc);
 			rc = -ENODEV;
 			goto fail;
 		}
diff --git a/arch/arm/mach-msm/rpm-regulator-smd.c b/arch/arm/mach-msm/rpm-regulator-smd.c
new file mode 100644
index 0000000..b892d05
--- /dev/null
+++ b/arch/arm/mach-msm/rpm-regulator-smd.c
@@ -0,0 +1,1430 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <mach/rpm-smd.h>
+#include <mach/rpm-regulator-smd.h>
+#include <mach/socinfo.h>
+
+/* Debug Definitions */
+
+enum {
+	RPM_VREG_DEBUG_REQUEST		= BIT(0),
+	RPM_VREG_DEBUG_FULL_REQUEST	= BIT(1),
+	RPM_VREG_DEBUG_DUPLICATE	= BIT(2),
+};
+
+static int rpm_vreg_debug_mask;
+module_param_named(
+	debug_mask, rpm_vreg_debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+#define vreg_err(req, fmt, ...) \
+	pr_err("%s: " fmt, req->rdesc.name, ##__VA_ARGS__)
+
+/* RPM regulator request types */
+enum rpm_regulator_smd_type {
+	RPM_REGULATOR_SMD_TYPE_LDO,
+	RPM_REGULATOR_SMD_TYPE_SMPS,
+	RPM_REGULATOR_SMD_TYPE_VS,
+	RPM_REGULATOR_SMD_TYPE_NCP,
+	RPM_REGULATOR_SMD_TYPE_MAX,
+};
+
+/* RPM resource parameters */
+enum rpm_regulator_param_index {
+	RPM_REGULATOR_PARAM_ENABLE,
+	RPM_REGULATOR_PARAM_VOLTAGE,
+	RPM_REGULATOR_PARAM_CURRENT,
+	RPM_REGULATOR_PARAM_MODE_LDO,
+	RPM_REGULATOR_PARAM_MODE_SMPS,
+	RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE,
+	RPM_REGULATOR_PARAM_PIN_CTRL_MODE,
+	RPM_REGULATOR_PARAM_FREQUENCY,
+	RPM_REGULATOR_PARAM_HEAD_ROOM,
+	RPM_REGULATOR_PARAM_QUIET_MODE,
+	RPM_REGULATOR_PARAM_FREQ_REASON,
+	RPM_REGULATOR_PARAM_MAX,
+};
+
+#define RPM_SET_CONFIG_ACTIVE			BIT(0)
+#define RPM_SET_CONFIG_SLEEP			BIT(1)
+#define RPM_SET_CONFIG_BOTH			(RPM_SET_CONFIG_ACTIVE \
+						 | RPM_SET_CONFIG_SLEEP)
+struct rpm_regulator_param {
+	char	*name;
+	char	*property_name;
+	u32	key;
+	u32	min;
+	u32	max;
+	u32	supported_regulator_types;
+};
+
+#define PARAM(_idx, _support_ldo, _support_smps, _support_vs, _support_ncp, \
+		_name, _min, _max, _property_name)	\
+	[RPM_REGULATOR_PARAM_##_idx] = { \
+		.name = _name, \
+		.property_name = _property_name, \
+		.min = _min, \
+		.max = _max, \
+		.supported_regulator_types = \
+			_support_ldo << RPM_REGULATOR_SMD_TYPE_LDO | \
+			_support_smps << RPM_REGULATOR_SMD_TYPE_SMPS | \
+			_support_vs << RPM_REGULATOR_SMD_TYPE_VS | \
+			_support_ncp << RPM_REGULATOR_SMD_TYPE_NCP, \
+	}
+
+static struct rpm_regulator_param params[RPM_REGULATOR_PARAM_MAX] = {
+	/*    ID             LDO SMPS VS  NCP  name  min max          property-name */
+	PARAM(ENABLE,          1,  1,  1,  1, "swen", 0, 1,          "qcom,init-enable"),
+	PARAM(VOLTAGE,         1,  1,  0,  1, "uv",   0, 0x7FFFFFF,  "qcom,init-voltage"),
+	PARAM(CURRENT,         1,  1,  0,  0, "ma",   0, 0x1FFF,     "qcom,init-current"),
+	PARAM(MODE_LDO,        1,  0,  0,  0, "lsmd", 0, 1,          "qcom,init-ldo-mode"),
+	PARAM(MODE_SMPS,       0,  1,  0,  0, "ssmd", 0, 2,          "qcom,init-smps-mode"),
+	PARAM(PIN_CTRL_ENABLE, 1,  1,  1,  0, "pcen", 0, 0xF,        "qcom,init-pin-ctrl-enable"),
+	PARAM(PIN_CTRL_MODE,   1,  1,  1,  0, "pcmd", 0, 0x1F,       "qcom,init-pin-ctrl-mode"),
+	PARAM(FREQUENCY,       0,  1,  0,  1, "freq", 0, 16,         "qcom,init-frequency"),
+	PARAM(HEAD_ROOM,       1,  0,  0,  1, "hr",   0, 0x7FFFFFFF, "qcom,init-head-room"),
+	PARAM(QUIET_MODE,      0,  1,  0,  0, "qm",   0, 2,          "qcom,init-quiet-mode"),
+	PARAM(FREQ_REASON,     0,  1,  0,  1, "resn", 0, 8,          "qcom,init-freq-reason"),
+};
+
+struct rpm_vreg_request {
+	u32			param[RPM_REGULATOR_PARAM_MAX];
+	u32			valid;
+	u32			modified;
+};
+
+struct rpm_vreg {
+	struct rpm_vreg_request	aggr_req_active;
+	struct rpm_vreg_request	aggr_req_sleep;
+	struct list_head	reg_list;
+	const char		*resource_name;
+	u32			resource_id;
+	bool			allow_atomic;
+	int			regulator_type;
+	int			hpm_min_load;
+	int			enable_time;
+	struct spinlock		slock;
+	struct mutex		mlock;
+	unsigned long		flags;
+	bool			sleep_request_sent;
+	struct msm_rpm_request	*handle_active;
+	struct msm_rpm_request	*handle_sleep;
+};
+
+struct rpm_regulator {
+	struct regulator_desc	rdesc;
+	struct regulator_dev	*rdev;
+	struct rpm_vreg		*rpm_vreg;
+	struct list_head	list;
+	bool			set_active;
+	bool			set_sleep;
+	struct rpm_vreg_request	req;
+	int			system_load;
+	int			min_uV;
+	int			max_uV;
+};
+
+/*
+ * This voltage in uV is returned by get_voltage functions when there is no way
+ * to determine the current voltage level.  It is needed because the regulator
+ * framework treats a 0 uV voltage as an error.
+ */
+#define VOLTAGE_UNKNOWN 1
+
+/*
+ * Regulator requests sent in the active set take effect immediately.  Requests
+ * sent in the sleep set take effect when the Apps processor transitions into
+ * RPM assisted power collapse.  For any given regulator, if an active set
+ * request is present, but not a sleep set request, then the active set request
+ * is used at all times, even when the Apps processor is power collapsed.
+ *
+ * The rpm-regulator-smd takes advantage of this default usage of the active set
+ * request by only sending a sleep set request if it differs from the
+ * corresponding active set request.
+ */
+#define RPM_SET_ACTIVE	MSM_RPM_CTX_ACTIVE_SET
+#define RPM_SET_SLEEP	MSM_RPM_CTX_SLEEP_SET
+
+static u32 rpm_vreg_string_to_int(const u8 *str)
+{
+	int i, len;
+	u32 output = 0;
+
+	len = strnlen(str, sizeof(u32));
+	for (i = 0; i < len; i++)
+		output |= str[i] << (i * 8);
+
+	return output;
+}
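
[Illustrative note, not part of the merged patch] rpm_vreg_string_to_int() packs at most four characters of a parameter name into a little-endian u32, presumably so it can be used as the key passed to msm_rpm_add_kvp_data(). A minimal standalone sketch of the same packing, using the "swen" entry from the params[] table as the example input:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Same rule as rpm_vreg_string_to_int(): byte i of the name becomes
	 * bits [8*i+7 : 8*i] of the key. */
	static uint32_t pack_key(const char *str)
	{
		uint32_t out = 0;
		size_t i, len = strnlen(str, sizeof(uint32_t));

		for (i = 0; i < len; i++)
			out |= (uint32_t)(unsigned char)str[i] << (i * 8);
		return out;
	}

	int main(void)
	{
		/* 's'=0x73, 'w'=0x77, 'e'=0x65, 'n'=0x6E -> 0x6E657773 */
		printf("0x%08X\n", (unsigned int)pack_key("swen"));
		return 0;
	}
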
+
+static inline void rpm_vreg_lock(struct rpm_vreg *rpm_vreg)
+{
+	if (rpm_vreg->allow_atomic)
+		spin_lock_irqsave(&rpm_vreg->slock, rpm_vreg->flags);
+	else
+		mutex_lock(&rpm_vreg->mlock);
+}
+
+static inline void rpm_vreg_unlock(struct rpm_vreg *rpm_vreg)
+{
+	if (rpm_vreg->allow_atomic)
+		spin_unlock_irqrestore(&rpm_vreg->slock, rpm_vreg->flags);
+	else
+		mutex_unlock(&rpm_vreg->mlock);
+}
+
+static inline bool rpm_vreg_active_or_sleep_enabled(struct rpm_vreg *rpm_vreg)
+{
+	return (rpm_vreg->aggr_req_active.param[RPM_REGULATOR_PARAM_ENABLE]
+			&& (rpm_vreg->aggr_req_active.valid
+				& BIT(RPM_REGULATOR_PARAM_ENABLE)))
+	    || ((rpm_vreg->aggr_req_sleep.param[RPM_REGULATOR_PARAM_ENABLE])
+				&& (rpm_vreg->aggr_req_sleep.valid
+					& BIT(RPM_REGULATOR_PARAM_ENABLE)));
+}
+
+/*
+ * This is used when voting for LPM or HPM by subtracting or adding to the
+ * hpm_min_load of a regulator.  It has units of uA.
+ */
+#define LOAD_THRESHOLD_STEP	1000
+
+static inline int rpm_vreg_hpm_min_uA(struct rpm_vreg *rpm_vreg)
+{
+	return rpm_vreg->hpm_min_load;
+}
+
+static inline int rpm_vreg_lpm_max_uA(struct rpm_vreg *rpm_vreg)
+{
+	return rpm_vreg->hpm_min_load - LOAD_THRESHOLD_STEP;
+}
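
[Illustrative note, not part of the merged patch] These two helpers define the HPM/LPM boundary used by the set_mode and get_optimum_mode callbacks later in the file: the high-power threshold is the qcom,hpm-min-load value itself, and the low-power ceiling sits LOAD_THRESHOLD_STEP (1000 uA) below it. A small standalone sketch with hypothetical numbers (the 10000 uA load threshold and 9500 uA request are made up for illustration):

	#include <stdio.h>

	#define LOAD_THRESHOLD_STEP	1000	/* uA, as in the driver */

	int main(void)
	{
		int hpm_min_load = 10000;	/* hypothetical qcom,hpm-min-load */
		int lpm_max_uA = hpm_min_load - LOAD_THRESHOLD_STEP;
		int load_uA = 9500;		/* hypothetical aggregated load */

		printf("HPM if load >= %d uA, LPM allowed up to %d uA\n",
			hpm_min_load, lpm_max_uA);
		printf("%d uA -> %s\n", load_uA,
			load_uA >= hpm_min_load ? "HPM" : "LPM");
		return 0;
	}
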
+
+#define MICRO_TO_MILLI(uV)	((uV) / 1000)
+#define MILLI_TO_MICRO(uV)	((uV) * 1000)
+
+#define DEBUG_PRINT_BUFFER_SIZE 512
+#define REQ_SENT	0
+#define REQ_PREV	1
+#define REQ_CACHED	2
+#define REQ_TYPES	3
+
+static void rpm_regulator_req(struct rpm_regulator *regulator, int set,
+				bool sent)
+{
+	char buf[DEBUG_PRINT_BUFFER_SIZE];
+	size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
+	struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+	struct rpm_vreg_request *aggr;
+	bool first;
+	u32 mask[REQ_TYPES] = {0, 0, 0};
+	const char *req_names[REQ_TYPES] = {"sent", "prev", "cached"};
+	int pos = 0;
+	int i, j;
+
+	aggr = (set == RPM_SET_ACTIVE)
+		? &rpm_vreg->aggr_req_active : &rpm_vreg->aggr_req_sleep;
+
+	if (rpm_vreg_debug_mask & RPM_VREG_DEBUG_DUPLICATE) {
+		mask[REQ_SENT] = aggr->modified;
+		mask[REQ_PREV] = aggr->valid & ~aggr->modified;
+	} else if (sent
+		   && (rpm_vreg_debug_mask & RPM_VREG_DEBUG_FULL_REQUEST)) {
+		mask[REQ_SENT] = aggr->modified;
+		mask[REQ_PREV] = aggr->valid & ~aggr->modified;
+	} else if (sent && (rpm_vreg_debug_mask & RPM_VREG_DEBUG_REQUEST)) {
+		mask[REQ_SENT] = aggr->modified;
+	}
+
+	if (!(mask[REQ_SENT] | mask[REQ_PREV]))
+		return;
+
+	if (set == RPM_SET_SLEEP && !rpm_vreg->sleep_request_sent) {
+		mask[REQ_CACHED] = mask[REQ_SENT] | mask[REQ_PREV];
+		mask[REQ_SENT] = 0;
+		mask[REQ_PREV] = 0;
+	}
+
+	pos += scnprintf(buf + pos, buflen - pos, "%s%s: ",
+			KERN_INFO, __func__);
+
+	pos += scnprintf(buf + pos, buflen - pos, "%s %u (%s): s=%s",
+			rpm_vreg->resource_name, rpm_vreg->resource_id,
+			regulator->rdesc.name,
+			(set == RPM_SET_ACTIVE ? "act" : "slp"));
+
+	for (i = 0; i < REQ_TYPES; i++) {
+		if (mask[i])
+			pos += scnprintf(buf + pos, buflen - pos, "; %s: ",
+					req_names[i]);
+
+		first = true;
+		for (j = 0; j < RPM_REGULATOR_PARAM_MAX; j++) {
+			if (mask[i] & BIT(j)) {
+				pos += scnprintf(buf + pos, buflen - pos,
+					"%s%s=%u", (first ? "" : ", "),
+					params[j].name, aggr->param[j]);
+				first = false;
+			}
+		}
+	}
+
+	pos += scnprintf(buf + pos, buflen - pos, "\n");
+	printk("%s", buf);
+}
+
+#define RPM_VREG_SET_PARAM(_regulator, _param, _val) \
+{ \
+	(_regulator)->req.param[RPM_REGULATOR_PARAM_##_param] = _val; \
+	(_regulator)->req.modified |= BIT(RPM_REGULATOR_PARAM_##_param); \
+} \
+
+static int rpm_vreg_add_kvp_to_request(struct rpm_vreg *rpm_vreg,
+				       const u32 *param, int idx, u32 set)
+{
+	struct msm_rpm_request *handle;
+
+	handle = (set == RPM_SET_ACTIVE	? rpm_vreg->handle_active
+					: rpm_vreg->handle_sleep);
+
+	if (rpm_vreg->allow_atomic)
+		return msm_rpm_add_kvp_data_noirq(handle, params[idx].key,
+						  (u8 *)&param[idx], 4);
+	else
+		return msm_rpm_add_kvp_data(handle, params[idx].key,
+					    (u8 *)&param[idx], 4);
+}
+
+static void rpm_vreg_check_modified_requests(const u32 *prev_param,
+		const u32 *param, u32 prev_valid, u32 *modified)
+{
+	u32 value_changed = 0;
+	int i;
+
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+		if (param[i] != prev_param[i])
+			value_changed |= BIT(i);
+	}
+
+	/*
+	 * Only keep bits that are for changed parameters or previously
+	 * invalid parameters.
+	 */
+	*modified &= value_changed | ~prev_valid;
+}
+
+static int rpm_vreg_add_modified_requests(struct rpm_regulator *regulator,
+		u32 set, const u32 *param, u32 modified)
+{
+	struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+	int rc = 0;
+	int i;
+
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+		/* Only send requests for modified parameters. */
+		if (modified & BIT(i)) {
+			rc = rpm_vreg_add_kvp_to_request(rpm_vreg, param, i,
+							set);
+			if (rc) {
+				vreg_err(regulator,
+					"add KVP failed: %s %u; %s, rc=%d\n",
+					rpm_vreg->resource_name,
+					rpm_vreg->resource_id, params[i].name,
+					rc);
+				return rc;
+			}
+		}
+	}
+
+	return rc;
+}
+
+static int rpm_vreg_send_request(struct rpm_regulator *regulator, u32 set)
+{
+	struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+	struct msm_rpm_request *handle
+		= (set == RPM_SET_ACTIVE ? rpm_vreg->handle_active
+					: rpm_vreg->handle_sleep);
+	int rc;
+
+	if (rpm_vreg->allow_atomic)
+		rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(
+						  handle));
+	else
+		rc = msm_rpm_wait_for_ack(msm_rpm_send_request(handle));
+
+	if (rc)
+		vreg_err(regulator, "msm rpm send failed: %s %u; set=%s, "
+			"rc=%d\n", rpm_vreg->resource_name,
+			rpm_vreg->resource_id,
+			(set == RPM_SET_ACTIVE ? "act" : "slp"), rc);
+
+	return rc;
+}
+
+#define RPM_VREG_AGGR_MAX(_idx, _param_aggr, _param_reg) \
+{ \
+	_param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+	 = max(_param_aggr[RPM_REGULATOR_PARAM_##_idx], \
+		_param_reg[RPM_REGULATOR_PARAM_##_idx]); \
+}
+
+#define RPM_VREG_AGGR_SUM(_idx, _param_aggr, _param_reg) \
+{ \
+	_param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+		 += _param_reg[RPM_REGULATOR_PARAM_##_idx]; \
+}
+
+#define RPM_VREG_AGGR_OR(_idx, _param_aggr, _param_reg) \
+{ \
+	_param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+		|= _param_reg[RPM_REGULATOR_PARAM_##_idx]; \
+}
+
+/*
+ * The RPM treats freq=0 as a special value meaning that this consumer does not
+ * care what the SMPS switching frequency is.
+ */
+#define RPM_REGULATOR_FREQ_DONT_CARE 0
+
+static inline void rpm_vreg_freqency_aggr(u32 *freq, u32 consumer_freq)
+{
+	if (consumer_freq != RPM_REGULATOR_FREQ_DONT_CARE
+		&& (consumer_freq < *freq
+			|| *freq == RPM_REGULATOR_FREQ_DONT_CARE))
+		*freq = consumer_freq;
+}
+
+/*
+ * Aggregation is performed on each parameter based on the way that the RPM
+ * aggregates that type internally between RPM masters.
+ */
+static void rpm_vreg_aggregate_params(u32 *param_aggr, const u32 *param_reg)
+{
+	RPM_VREG_AGGR_MAX(ENABLE, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(VOLTAGE, param_aggr, param_reg);
+	RPM_VREG_AGGR_SUM(CURRENT, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(MODE_LDO, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(MODE_SMPS, param_aggr, param_reg);
+	RPM_VREG_AGGR_OR(PIN_CTRL_ENABLE, param_aggr, param_reg);
+	RPM_VREG_AGGR_OR(PIN_CTRL_MODE, param_aggr, param_reg);
+	rpm_vreg_freqency_aggr(&param_aggr[RPM_REGULATOR_PARAM_FREQUENCY],
+		param_reg[RPM_REGULATOR_PARAM_FREQUENCY]);
+	RPM_VREG_AGGR_MAX(HEAD_ROOM, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(QUIET_MODE, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(FREQ_REASON, param_aggr, param_reg);
+}
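
[Illustrative note, not part of the merged patch] rpm_vreg_aggregate_params() combines per-consumer requests using the same rules the RPM applies across masters: maximum for enable, voltage, the mode fields and the remaining scalar parameters; sum for current; bitwise OR for the pin-control masks; and the lowest non-zero value for frequency, where 0 means "don't care". A standalone sketch of how two hypothetical consumer requests would combine under a few of these rules:

	#include <stdio.h>

	#define FREQ_DONT_CARE 0

	struct req { unsigned en, uv, ma, freq; };

	static void aggregate(struct req *a, const struct req *r)
	{
		a->en = a->en > r->en ? a->en : r->en;		/* max */
		a->uv = a->uv > r->uv ? a->uv : r->uv;		/* max */
		a->ma += r->ma;					/* sum */
		if (r->freq != FREQ_DONT_CARE &&
		    (r->freq < a->freq || a->freq == FREQ_DONT_CARE))
			a->freq = r->freq;			/* lowest real value */
	}

	int main(void)
	{
		struct req aggr = {0, 0, 0, FREQ_DONT_CARE};
		struct req c1 = {1, 1200000, 50, FREQ_DONT_CARE};
		struct req c2 = {0, 1150000, 100, 3};

		aggregate(&aggr, &c1);
		aggregate(&aggr, &c2);
		/* prints: en=1 uv=1200000 ma=150 freq=3 */
		printf("en=%u uv=%u ma=%u freq=%u\n",
			aggr.en, aggr.uv, aggr.ma, aggr.freq);
		return 0;
	}
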
+
+static int rpm_vreg_aggregate_requests(struct rpm_regulator *regulator)
+{
+	struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+	u32 param_active[RPM_REGULATOR_PARAM_MAX];
+	u32 param_sleep[RPM_REGULATOR_PARAM_MAX];
+	u32 modified_active, modified_sleep;
+	struct rpm_regulator *reg;
+	bool sleep_set_differs = false;
+	bool send_active = false;
+	bool send_sleep = false;
+	int rc = 0;
+	int i;
+
+	memset(param_active, 0, sizeof(param_active));
+	memset(param_sleep, 0, sizeof(param_sleep));
+	modified_active = rpm_vreg->aggr_req_active.modified;
+	modified_sleep = rpm_vreg->aggr_req_sleep.modified;
+
+	/*
+	 * Aggregate all of the requests for this regulator in both active
+	 * and sleep sets.
+	 */
+	list_for_each_entry(reg, &rpm_vreg->reg_list, list) {
+		if (reg->set_active) {
+			rpm_vreg_aggregate_params(param_active, reg->req.param);
+			modified_active |= reg->req.modified;
+		}
+		if (reg->set_sleep) {
+			rpm_vreg_aggregate_params(param_sleep, reg->req.param);
+			modified_sleep |= reg->req.modified;
+		}
+	}
+
+	/*
+	 * Check if the aggregated sleep set parameter values differ from the
+	 * aggregated active set parameter values.
+	 */
+	if (!rpm_vreg->sleep_request_sent) {
+		for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+			if ((param_active[i] != param_sleep[i])
+			    && (modified_sleep & BIT(i))) {
+				sleep_set_differs = true;
+				break;
+			}
+		}
+	}
+
+	/* Add KVPs to the active set RPM request if they have new values. */
+	rpm_vreg_check_modified_requests(rpm_vreg->aggr_req_active.param,
+		param_active, rpm_vreg->aggr_req_active.valid,
+		&modified_active);
+	rc = rpm_vreg_add_modified_requests(regulator, RPM_SET_ACTIVE,
+		param_active, modified_active);
+	if (rc)
+		return rc;
+	send_active = modified_active;
+
+	/*
+	 * Sleep set configurations are only sent if they differ from the
+	 * active set values.  This is because the active set values will take
+	 * effect during rpm assisted power collapse in the absence of sleep set
+	 * values.
+	 *
+	 * However, once a sleep set request is sent for a given regulator,
+	 * additional sleep set requests must be sent in the future even if they
+	 * match the corresponding active set requests.
+	 */
+	if (rpm_vreg->sleep_request_sent || sleep_set_differs) {
+		/* Add KVPs to the sleep set RPM request if they are new. */
+		rpm_vreg_check_modified_requests(rpm_vreg->aggr_req_sleep.param,
+			param_sleep, rpm_vreg->aggr_req_sleep.valid,
+			&modified_sleep);
+		rc = rpm_vreg_add_modified_requests(regulator, RPM_SET_SLEEP,
+			param_sleep, modified_sleep);
+		if (rc)
+			return rc;
+		send_sleep = modified_sleep;
+	}
+
+	/* Send active set request to the RPM if it contains new KVPs. */
+	if (send_active) {
+		rc = rpm_vreg_send_request(regulator, RPM_SET_ACTIVE);
+		if (rc)
+			return rc;
+		rpm_vreg->aggr_req_active.valid |= modified_active;
+	}
+	/* Store the results of the aggregation. */
+	rpm_vreg->aggr_req_active.modified = modified_active;
+	memcpy(rpm_vreg->aggr_req_active.param, param_active,
+		sizeof(param_active));
+
+	/* Handle debug printing of the active set request. */
+	rpm_regulator_req(regulator, RPM_SET_ACTIVE, send_active);
+	if (send_active)
+		rpm_vreg->aggr_req_active.modified = 0;
+
+	/* Send sleep set request to the RPM if it contains new KVPs. */
+	if (send_sleep) {
+		rc = rpm_vreg_send_request(regulator, RPM_SET_SLEEP);
+		if (rc)
+			return rc;
+		else
+			rpm_vreg->sleep_request_sent = true;
+		rpm_vreg->aggr_req_sleep.valid |= modified_sleep;
+	}
+	/* Store the results of the aggregation. */
+	rpm_vreg->aggr_req_sleep.modified = modified_sleep;
+	memcpy(rpm_vreg->aggr_req_sleep.param, param_sleep,
+		sizeof(param_sleep));
+
+	/* Handle debug printing of the sleep set request. */
+	rpm_regulator_req(regulator, RPM_SET_SLEEP, send_sleep);
+	if (send_sleep)
+		rpm_vreg->aggr_req_sleep.modified = 0;
+
+	/*
+	 * Loop over all requests for this regulator to update the valid and
+	 * modified values for use in future aggregation.
+	 */
+	list_for_each_entry(reg, &rpm_vreg->reg_list, list) {
+		reg->req.valid |= reg->req.modified;
+		reg->req.modified = 0;
+	}
+
+	return rc;
+}
+
+static int rpm_vreg_is_enabled(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+	return reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+}
+
+static int rpm_vreg_enable(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc;
+	u32 prev_enable;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	prev_enable = reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+	RPM_VREG_SET_PARAM(reg, ENABLE, 1);
+	rc = rpm_vreg_aggregate_requests(reg);
+	if (rc) {
+		vreg_err(reg, "enable failed, rc=%d", rc);
+		RPM_VREG_SET_PARAM(reg, ENABLE, prev_enable);
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static int rpm_vreg_disable(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc;
+	u32 prev_enable;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	prev_enable = reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+	RPM_VREG_SET_PARAM(reg, ENABLE, 0);
+	rc = rpm_vreg_aggregate_requests(reg);
+	if (rc) {
+		vreg_err(reg, "disable failed, rc=%d", rc);
+		RPM_VREG_SET_PARAM(reg, ENABLE, prev_enable);
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static int rpm_vreg_set_voltage(struct regulator_dev *rdev, int min_uV,
+				int max_uV, unsigned *selector)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc = 0;
+	u32 prev_voltage;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	prev_voltage = reg->req.param[RPM_REGULATOR_PARAM_VOLTAGE];
+	RPM_VREG_SET_PARAM(reg, VOLTAGE, min_uV);
+
+	/* Only send a new voltage if the regulator is currently enabled. */
+	if (rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg))
+		rc = rpm_vreg_aggregate_requests(reg);
+
+	if (rc) {
+		vreg_err(reg, "set voltage failed, rc=%d", rc);
+		RPM_VREG_SET_PARAM(reg, VOLTAGE, prev_voltage);
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static int rpm_vreg_get_voltage(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int uV;
+
+	uV = reg->req.param[RPM_REGULATOR_PARAM_VOLTAGE];
+	if (uV == 0)
+		uV = VOLTAGE_UNKNOWN;
+
+	return uV;
+}
+
+static int rpm_vreg_list_voltage(struct regulator_dev *rdev, unsigned selector)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int uV = 0;
+
+	if (selector == 0)
+		uV = reg->min_uV;
+	else if (selector == 1)
+		uV = reg->max_uV;
+
+	return uV;
+}
+
+static int rpm_vreg_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc = 0;
+	u32 prev_current;
+	int prev_uA;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	prev_current = reg->req.param[RPM_REGULATOR_PARAM_CURRENT];
+	prev_uA = MILLI_TO_MICRO(prev_current);
+
+	if (mode == REGULATOR_MODE_NORMAL) {
+		/* Make sure that request current is in HPM range. */
+		if (prev_uA < rpm_vreg_hpm_min_uA(reg->rpm_vreg))
+			RPM_VREG_SET_PARAM(reg, CURRENT,
+			    MICRO_TO_MILLI(rpm_vreg_hpm_min_uA(reg->rpm_vreg)));
+	} else if (mode == REGULATOR_MODE_IDLE) {
+		/* Make sure that request current is in LPM range. */
+		if (prev_uA > rpm_vreg_lpm_max_uA(reg->rpm_vreg))
+			RPM_VREG_SET_PARAM(reg, CURRENT,
+			    MICRO_TO_MILLI(rpm_vreg_lpm_max_uA(reg->rpm_vreg)));
+	} else {
+		vreg_err(reg, "invalid mode: %u\n", mode);
+		rpm_vreg_unlock(reg->rpm_vreg);
+		return -EINVAL;
+	}
+
+	/* Only send a new mode value if the regulator is currently enabled. */
+	if (rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg))
+		rc = rpm_vreg_aggregate_requests(reg);
+
+	if (rc) {
+		vreg_err(reg, "set mode failed, rc=%d", rc);
+		RPM_VREG_SET_PARAM(reg, CURRENT, prev_current);
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static unsigned int rpm_vreg_get_mode(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+	return (reg->req.param[RPM_REGULATOR_PARAM_CURRENT]
+			>= MICRO_TO_MILLI(reg->rpm_vreg->hpm_min_load))
+		? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+static unsigned int rpm_vreg_get_optimum_mode(struct regulator_dev *rdev,
+			int input_uV, int output_uV, int load_uA)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	u32 load_mA;
+
+	load_uA += reg->system_load;
+
+	load_mA = MICRO_TO_MILLI(load_uA);
+	if (load_mA > params[RPM_REGULATOR_PARAM_CURRENT].max)
+		load_mA = params[RPM_REGULATOR_PARAM_CURRENT].max;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+	RPM_VREG_SET_PARAM(reg, CURRENT, load_mA);
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return (load_uA >= reg->rpm_vreg->hpm_min_load)
+		? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+static int rpm_vreg_enable_time(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+	return reg->rpm_vreg->enable_time;
+}
+
+/**
+ * rpm_regulator_get() - lookup and obtain a handle to an RPM regulator
+ * @dev: device for regulator consumer
+ * @supply: supply name
+ *
+ * Returns a struct rpm_regulator corresponding to the regulator producer,
+ * or ERR_PTR() containing errno.
+ *
+ * This function may only be called from nonatomic context.
+ */
+struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply)
+{
+	struct rpm_regulator *framework_reg;
+	struct rpm_regulator *priv_reg = NULL;
+	struct regulator *regulator;
+	struct rpm_vreg *rpm_vreg;
+
+	regulator = regulator_get(dev, supply);
+	if (regulator == NULL) {
+		pr_err("could not find regulator for: dev=%s, id=%s\n",
+			(dev ? dev_name(dev) : ""), (supply ? supply : ""));
+		return ERR_PTR(-ENODEV);
+	}
+
+	framework_reg = regulator_get_drvdata(regulator);
+	if (framework_reg == NULL) {
+		pr_err("regulator structure not found.\n");
+		regulator_put(regulator);
+		return ERR_PTR(-ENODEV);
+	}
+	regulator_put(regulator);
+
+	rpm_vreg = framework_reg->rpm_vreg;
+
+	priv_reg = kzalloc(sizeof(struct rpm_regulator), GFP_KERNEL);
+	if (priv_reg == NULL) {
+		vreg_err(framework_reg, "could not allocate memory for "
+			"regulator\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/*
+	 * Allocate a regulator_dev struct so that framework callback functions
+	 * can be called from the private API functions.
+	 */
+	priv_reg->rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
+	if (priv_reg->rdev == NULL) {
+		vreg_err(framework_reg, "could not allocate memory for "
+			"regulator_dev\n");
+		kfree(priv_reg);
+		return ERR_PTR(-ENOMEM);
+	}
+	priv_reg->rdev->reg_data	= priv_reg;
+	priv_reg->rpm_vreg		= rpm_vreg;
+	priv_reg->rdesc.name		= framework_reg->rdesc.name;
+	priv_reg->set_active		= framework_reg->set_active;
+	priv_reg->set_sleep		= framework_reg->set_sleep;
+	priv_reg->min_uV		= framework_reg->min_uV;
+	priv_reg->max_uV		= framework_reg->max_uV;
+	priv_reg->system_load		= framework_reg->system_load;
+
+	might_sleep_if(!rpm_vreg->allow_atomic);
+	rpm_vreg_lock(rpm_vreg);
+	list_add(&priv_reg->list, &rpm_vreg->reg_list);
+	rpm_vreg_unlock(rpm_vreg);
+
+	return priv_reg;
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_get);
+
+static int rpm_regulator_check_input(struct rpm_regulator *regulator)
+{
+	if (regulator == NULL || regulator->rpm_vreg == NULL) {
+		pr_err("invalid rpm_regulator pointer\n");
+		return -EINVAL;
+	}
+
+	might_sleep_if(!regulator->rpm_vreg->allow_atomic);
+
+	return 0;
+}
+
+/**
+ * rpm_regulator_put() - free the RPM regulator handle
+ * @regulator: RPM regulator handle
+ *
+ * Parameter reaggregation does not take place when rpm_regulator_put is called.
+ * Therefore, regulator enable state and voltage must be configured
+ * appropriately before calling rpm_regulator_put.
+ *
+ * This function may be called from either atomic or nonatomic context.  If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+void rpm_regulator_put(struct rpm_regulator *regulator)
+{
+	struct rpm_vreg *rpm_vreg;
+	int rc = rpm_regulator_check_input(regulator);
+
+	if (rc)
+		return;
+
+	rpm_vreg = regulator->rpm_vreg;
+
+	might_sleep_if(!rpm_vreg->allow_atomic);
+	rpm_vreg_lock(rpm_vreg);
+	list_del(&regulator->list);
+	rpm_vreg_unlock(rpm_vreg);
+
+	kfree(regulator->rdev);
+	kfree(regulator);
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_put);
+
+/**
+ * rpm_regulator_enable() - enable regulator output
+ * @regulator: RPM regulator handle
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * This function may be called from either atomic or nonatomic context.  If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_enable(struct rpm_regulator *regulator)
+{
+	int rc = rpm_regulator_check_input(regulator);
+
+	if (rc)
+		return rc;
+
+	return rpm_vreg_enable(regulator->rdev);
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_enable);
+
+/**
+ * rpm_regulator_disable() - disable regulator output
+ * @regulator: RPM regulator handle
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * The enable state of the regulator is determined by aggregating the requests
+ * of all consumers.  Therefore, it is possible that the regulator will remain
+ * enabled even after rpm_regulator_disable is called.
+ *
+ * This function may be called from either atomic or nonatomic context.  If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_disable(struct rpm_regulator *regulator)
+{
+	int rc = rpm_regulator_check_input(regulator);
+
+	if (rc)
+		return rc;
+
+	return rpm_vreg_disable(regulator->rdev);
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_disable);
+
+/**
+ * rpm_regulator_set_voltage() - set regulator output voltage
+ * @regulator: RPM regulator handle
+ * @min_uV: minimum required voltage in uV
+ * @max_uV: maximum acceptable voltage in uV
+ *
+ * Sets a voltage regulator to the desired output voltage. This can be set
+ * while the regulator is disabled or enabled.  If the regulator is enabled then
+ * the voltage will change to the new value immediately; otherwise, if the
+ * regulator is disabled, then the regulator will output at the new voltage when
+ * enabled.
+ *
+ * The min_uV to max_uV voltage range requested must intersect with the
+ * voltage constraint range configured for the regulator.
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * The final voltage value that is sent to the RPM is aggregated based upon the
+ * values requested by all consumers of the regulator.  This corresponds to the
+ * maximum min_uV value.
+ *
+ * This function may be called from either atomic or nonatomic context.  If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_set_voltage(struct rpm_regulator *regulator, int min_uV,
+			      int max_uV)
+{
+	int rc = rpm_regulator_check_input(regulator);
+	int uV = min_uV;
+
+	if (rc)
+		return rc;
+
+	if (regulator->rpm_vreg->regulator_type == RPM_REGULATOR_SMD_TYPE_VS) {
+		vreg_err(regulator, "unsupported regulator type: %d\n",
+			regulator->rpm_vreg->regulator_type);
+		return -EINVAL;
+	}
+
+	if (min_uV > max_uV) {
+		vreg_err(regulator, "min_uV=%d must not be greater than max_uV=%d\n",
+			min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	if (uV < regulator->min_uV && max_uV >= regulator->min_uV)
+		uV = regulator->min_uV;
+
+	if (uV < regulator->min_uV || uV > regulator->max_uV) {
+		vreg_err(regulator, "request v=[%d, %d] is outside allowed "
+			"v=[%d, %d]\n", min_uV, max_uV, regulator->min_uV,
+			regulator->max_uV);
+		return -EINVAL;
+	}
+
+	return rpm_vreg_set_voltage(regulator->rdev, uV, uV, NULL);
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_set_voltage);
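
[Illustrative note, not part of the merged patch] The exported rpm_regulator_* calls above form the private consumer API (get, set_voltage, enable, disable, put). A hypothetical consumer sequence might look like the sketch below; the "vdd_example" supply name and the 1.15 V level are made up for illustration, and the header is assumed to be the same <mach/rpm-regulator-smd.h> that this driver itself includes:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <mach/rpm-regulator-smd.h>	/* assumed to declare the private API */

	static int example_consumer_probe(struct device *dev)
	{
		struct rpm_regulator *vdd;
		int rc;

		vdd = rpm_regulator_get(dev, "vdd_example");	/* hypothetical supply */
		if (IS_ERR(vdd))
			return PTR_ERR(vdd);

		rc = rpm_regulator_set_voltage(vdd, 1150000, 1150000);
		if (!rc)
			rc = rpm_regulator_enable(vdd);
		if (rc) {
			rpm_regulator_put(vdd);
			return rc;
		}

		/* ... later, when the rail is no longer needed ... */
		rpm_regulator_disable(vdd);
		rpm_regulator_put(vdd);
		return 0;
	}
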
+
+static struct regulator_ops ldo_ops = {
+	.enable			= rpm_vreg_enable,
+	.disable		= rpm_vreg_disable,
+	.is_enabled		= rpm_vreg_is_enabled,
+	.set_voltage		= rpm_vreg_set_voltage,
+	.get_voltage		= rpm_vreg_get_voltage,
+	.list_voltage		= rpm_vreg_list_voltage,
+	.set_mode		= rpm_vreg_set_mode,
+	.get_mode		= rpm_vreg_get_mode,
+	.get_optimum_mode	= rpm_vreg_get_optimum_mode,
+	.enable_time		= rpm_vreg_enable_time,
+};
+
+static struct regulator_ops smps_ops = {
+	.enable			= rpm_vreg_enable,
+	.disable		= rpm_vreg_disable,
+	.is_enabled		= rpm_vreg_is_enabled,
+	.set_voltage		= rpm_vreg_set_voltage,
+	.get_voltage		= rpm_vreg_get_voltage,
+	.list_voltage		= rpm_vreg_list_voltage,
+	.set_mode		= rpm_vreg_set_mode,
+	.get_mode		= rpm_vreg_get_mode,
+	.get_optimum_mode	= rpm_vreg_get_optimum_mode,
+	.enable_time		= rpm_vreg_enable_time,
+};
+
+static struct regulator_ops switch_ops = {
+	.enable			= rpm_vreg_enable,
+	.disable		= rpm_vreg_disable,
+	.is_enabled		= rpm_vreg_is_enabled,
+	.enable_time		= rpm_vreg_enable_time,
+};
+
+static struct regulator_ops ncp_ops = {
+	.enable			= rpm_vreg_enable,
+	.disable		= rpm_vreg_disable,
+	.is_enabled		= rpm_vreg_is_enabled,
+	.set_voltage		= rpm_vreg_set_voltage,
+	.get_voltage		= rpm_vreg_get_voltage,
+	.list_voltage		= rpm_vreg_list_voltage,
+	.enable_time		= rpm_vreg_enable_time,
+};
+
+static struct regulator_ops *vreg_ops[] = {
+	[RPM_REGULATOR_SMD_TYPE_LDO]	= &ldo_ops,
+	[RPM_REGULATOR_SMD_TYPE_SMPS]	= &smps_ops,
+	[RPM_REGULATOR_SMD_TYPE_VS]	= &switch_ops,
+	[RPM_REGULATOR_SMD_TYPE_NCP]	= &ncp_ops,
+};
+
+static int __devexit rpm_vreg_device_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rpm_regulator *reg;
+
+	reg = platform_get_drvdata(pdev);
+	if (reg) {
+		rpm_vreg_lock(reg->rpm_vreg);
+		regulator_unregister(reg->rdev);
+		list_del(&reg->list);
+		rpm_vreg_unlock(reg->rpm_vreg);
+		kfree(reg);
+	} else {
+		dev_err(dev, "%s: drvdata missing\n", __func__);
+		return -EINVAL;
+	}
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static int __devexit rpm_vreg_resource_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rpm_regulator *reg, *reg_temp;
+	struct rpm_vreg *rpm_vreg;
+
+	rpm_vreg = platform_get_drvdata(pdev);
+	if (rpm_vreg) {
+		rpm_vreg_lock(rpm_vreg);
+		list_for_each_entry_safe(reg, reg_temp, &rpm_vreg->reg_list,
+				list) {
+			/* Only touch data for private consumers. */
+			if (reg->rdev->desc == NULL) {
+				list_del(&reg->list);
+				kfree(reg->rdev);
+				kfree(reg);
+			} else {
+				dev_err(dev, "%s: not all child devices have "
+					"been removed\n", __func__);
+			}
+		}
+		rpm_vreg_unlock(rpm_vreg);
+
+		msm_rpm_free_request(rpm_vreg->handle_active);
+		msm_rpm_free_request(rpm_vreg->handle_sleep);
+
+		kfree(rpm_vreg);
+	} else {
+		dev_err(dev, "%s: drvdata missing\n", __func__);
+		return -EINVAL;
+	}
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+/*
+ * This probe is called for child rpm-regulator devices whose properties
+ * are used to configure individual regulator framework regulators for a
+ * given RPM regulator resource.
+ */
+static int __devinit rpm_vreg_device_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct regulator_init_data *init_data;
+	struct rpm_vreg *rpm_vreg;
+	struct rpm_regulator *reg;
+	int rc = 0;
+	int i, regulator_type;
+	u32 val;
+
+	if (!dev->of_node) {
+		dev_err(dev, "%s: device tree information missing\n", __func__);
+		return -ENODEV;
+	}
+
+	if (pdev->dev.parent == NULL) {
+		dev_err(dev, "%s: parent device missing\n", __func__);
+		return -ENODEV;
+	}
+
+	rpm_vreg = dev_get_drvdata(pdev->dev.parent);
+	if (rpm_vreg == NULL) {
+		dev_err(dev, "%s: rpm_vreg not found in parent device\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	reg = kzalloc(sizeof(struct rpm_regulator), GFP_KERNEL);
+	if (reg == NULL) {
+		dev_err(dev, "%s: could not allocate memory for reg\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	regulator_type		= rpm_vreg->regulator_type;
+	reg->rpm_vreg		= rpm_vreg;
+	reg->rdesc.ops		= vreg_ops[regulator_type];
+	reg->rdesc.owner	= THIS_MODULE;
+	reg->rdesc.type		= REGULATOR_VOLTAGE;
+
+	if (regulator_type == RPM_REGULATOR_SMD_TYPE_VS)
+		reg->rdesc.n_voltages = 0;
+	else
+		reg->rdesc.n_voltages = 2;
+
+	rc = of_property_read_u32(node, "qcom,set", &val);
+	if (rc) {
+		dev_err(dev, "%s: sleep set and/or active set must be "
+			"configured via qcom,set property, rc=%d\n", __func__,
+			rc);
+		goto fail_free_reg;
+	} else if (!(val & RPM_SET_CONFIG_BOTH)) {
+		dev_err(dev, "%s: qcom,set=%u property is invalid\n", __func__,
+			val);
+		rc = -EINVAL;
+		goto fail_free_reg;
+	}
+
+	reg->set_active = !!(val & RPM_SET_CONFIG_ACTIVE);
+	reg->set_sleep = !!(val & RPM_SET_CONFIG_SLEEP);
+
+	init_data = of_get_regulator_init_data(dev);
+	if (init_data == NULL) {
+		dev_err(dev, "%s: unable to allocate memory\n", __func__);
+		rc = -ENOMEM;
+		goto fail_free_reg;
+	}
+	if (init_data->constraints.name == NULL) {
+		dev_err(dev, "%s: regulator name not specified\n", __func__);
+		rc = -EINVAL;
+		goto fail_free_reg;
+	}
+
+	init_data->constraints.input_uV	= init_data->constraints.max_uV;
+
+	if (of_get_property(node, "parent-supply", NULL))
+		init_data->supply_regulator = "parent";
+
+	/*
+	 * Fill in ops and mode masks based on callbacks specified for
+	 * this type of regulator.
+	 */
+	if (reg->rdesc.ops->enable)
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_STATUS;
+	if (reg->rdesc.ops->get_voltage)
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_VOLTAGE;
+	if (reg->rdesc.ops->get_mode) {
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_DRMS;
+		init_data->constraints.valid_modes_mask
+			|= REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+	}
+
+	reg->rdesc.name		= init_data->constraints.name;
+	reg->min_uV		= init_data->constraints.min_uV;
+	reg->max_uV		= init_data->constraints.max_uV;
+
+	/* Initialize the param array based on optional properties. */
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+		rc = of_property_read_u32(node, params[i].property_name, &val);
+		if (rc == 0) {
+			if (params[i].supported_regulator_types
+					& BIT(regulator_type)) {
+				if (val < params[i].min
+						|| val > params[i].max) {
+					pr_warn("%s: device tree property: "
+						"%s=%u is outside the allowed "
+						"range [%u, %u]\n",
+						reg->rdesc.name,
+						params[i].property_name, val,
+						params[i].min, params[i].max);
+					continue;
+				}
+				reg->req.param[i] = val;
+				reg->req.modified |= BIT(i);
+			} else {
+				pr_warn("%s: regulator type=%d does not support"
+					" device tree property: %s\n",
+					reg->rdesc.name, regulator_type,
+					params[i].property_name);
+			}
+		}
+	}
+
+	of_property_read_u32(node, "qcom,system-load", &reg->system_load);
+
+	rpm_vreg_lock(rpm_vreg);
+	list_add(&reg->list, &rpm_vreg->reg_list);
+	rpm_vreg_unlock(rpm_vreg);
+
+	reg->rdev = regulator_register(&reg->rdesc, dev, init_data, reg, node);
+	if (IS_ERR(reg->rdev)) {
+		rc = PTR_ERR(reg->rdev);
+		reg->rdev = NULL;
+		pr_err("regulator_register failed: %s, rc=%d\n",
+			reg->rdesc.name, rc);
+		goto fail_remove_from_list;
+	}
+
+	platform_set_drvdata(pdev, reg);
+
+	pr_debug("successfully probed: %s\n", reg->rdesc.name);
+
+	return 0;
+
+fail_remove_from_list:
+	rpm_vreg_lock(rpm_vreg);
+	list_del(&reg->list);
+	rpm_vreg_unlock(rpm_vreg);
+
+fail_free_reg:
+	kfree(reg);
+	return rc;
+}
+
+/*
+ * This probe is called for parent rpm-regulator devices whose properties
+ * identify a given RPM resource.
+ */
+static int __devinit rpm_vreg_resource_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct rpm_vreg *rpm_vreg;
+	int val = 0;
+	u32 resource_type;
+	int rc;
+
+	if (!dev->of_node) {
+		dev_err(dev, "%s: device tree information missing\n", __func__);
+		return -ENODEV;
+	}
+
+	/* Create new rpm_vreg entry. */
+	rpm_vreg = kzalloc(sizeof(struct rpm_vreg), GFP_KERNEL);
+	if (rpm_vreg == NULL) {
+		dev_err(dev, "%s: could not allocate memory for vreg\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	/* Required device tree properties: */
+	rc = of_property_read_string(node, "qcom,resource-name",
+			&rpm_vreg->resource_name);
+	if (rc) {
+		dev_err(dev, "%s: qcom,resource-name missing in DT node\n",
+			__func__);
+		goto fail_free_vreg;
+	}
+	resource_type = rpm_vreg_string_to_int(rpm_vreg->resource_name);
+
+	rc = of_property_read_u32(node, "qcom,resource-id",
+			&rpm_vreg->resource_id);
+	if (rc) {
+		dev_err(dev, "%s: qcom,resource-id missing in DT node\n",
+			__func__);
+		goto fail_free_vreg;
+	}
+
+	rc = of_property_read_u32(node, "qcom,regulator-type",
+			&rpm_vreg->regulator_type);
+	if (rc) {
+		dev_err(dev, "%s: qcom,regulator-type missing in DT node\n",
+			__func__);
+		goto fail_free_vreg;
+	}
+
+	if ((rpm_vreg->regulator_type < 0)
+	    || (rpm_vreg->regulator_type >= RPM_REGULATOR_SMD_TYPE_MAX)) {
+		dev_err(dev, "%s: invalid regulator type: %d\n", __func__,
+			rpm_vreg->regulator_type);
+		rc = -EINVAL;
+		goto fail_free_vreg;
+	}
+
+	/* Optional device tree properties: */
+	of_property_read_u32(node, "qcom,allow-atomic", &val);
+	rpm_vreg->allow_atomic = !!val;
+	of_property_read_u32(node, "qcom,enable-time", &rpm_vreg->enable_time);
+	of_property_read_u32(node, "qcom,hpm-min-load",
+		&rpm_vreg->hpm_min_load);
+
+	rpm_vreg->handle_active = msm_rpm_create_request(RPM_SET_ACTIVE,
+		resource_type, rpm_vreg->resource_id, RPM_REGULATOR_PARAM_MAX);
+	if (rpm_vreg->handle_active == NULL
+	    || IS_ERR(rpm_vreg->handle_active)) {
+		rc = PTR_ERR(rpm_vreg->handle_active);
+		dev_err(dev, "%s: failed to create active RPM handle, rc=%d\n",
+			__func__, rc);
+		goto fail_free_vreg;
+	}
+
+	rpm_vreg->handle_sleep = msm_rpm_create_request(RPM_SET_SLEEP,
+		resource_type, rpm_vreg->resource_id, RPM_REGULATOR_PARAM_MAX);
+	if (rpm_vreg->handle_sleep == NULL || IS_ERR(rpm_vreg->handle_sleep)) {
+		rc = PTR_ERR(rpm_vreg->handle_sleep);
+		dev_err(dev, "%s: failed to create sleep RPM handle, rc=%d\n",
+			__func__, rc);
+		goto fail_free_handle_active;
+	}
+
+	INIT_LIST_HEAD(&rpm_vreg->reg_list);
+
+	if (rpm_vreg->allow_atomic)
+		spin_lock_init(&rpm_vreg->slock);
+	else
+		mutex_init(&rpm_vreg->mlock);
+
+	platform_set_drvdata(pdev, rpm_vreg);
+
+	rc = of_platform_populate(node, NULL, NULL, dev);
+	if (rc) {
+		dev_err(dev, "%s: failed to add child nodes, rc=%d\n", __func__,
+			rc);
+		goto fail_unset_drvdata;
+	}
+
+	pr_debug("successfully probed: %s (%08X) %u\n", rpm_vreg->resource_name,
+		resource_type, rpm_vreg->resource_id);
+
+	return rc;
+
+fail_unset_drvdata:
+	platform_set_drvdata(pdev, NULL);
+	msm_rpm_free_request(rpm_vreg->handle_sleep);
+
+fail_free_handle_active:
+	msm_rpm_free_request(rpm_vreg->handle_active);
+
+fail_free_vreg:
+	kfree(rpm_vreg);
+
+	return rc;
+}
+
+static struct of_device_id rpm_vreg_match_table_device[] = {
+	{ .compatible = "qcom,rpm-regulator-smd", },
+	{}
+};
+
+static struct of_device_id rpm_vreg_match_table_resource[] = {
+	{ .compatible = "qcom,rpm-regulator-smd-resource", },
+	{}
+};
+
+static struct platform_driver rpm_vreg_device_driver = {
+	.probe = rpm_vreg_device_probe,
+	.remove = __devexit_p(rpm_vreg_device_remove),
+	.driver = {
+		.name = "qcom,rpm-regulator-smd",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_vreg_match_table_device,
+	},
+};
+
+static struct platform_driver rpm_vreg_resource_driver = {
+	.probe = rpm_vreg_resource_probe,
+	.remove = __devexit_p(rpm_vreg_resource_remove),
+	.driver = {
+		.name = "qcom,rpm-regulator-smd-resource",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_vreg_match_table_resource,
+	},
+};
+
+/**
+ * rpm_regulator_smd_driver_init() - initialize the SMD RPM regulator driver
+ *
+ * This function registers the SMD RPM regulator platform drivers.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int __init rpm_regulator_smd_driver_init(void)
+{
+	static bool initialized;
+	int i, rc;
+
+	if (initialized)
+		return 0;
+	else
+		initialized = true;
+
+	/* Store parameter string names as integers */
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++)
+		params[i].key = rpm_vreg_string_to_int(params[i].name);
+
+	rc = platform_driver_register(&rpm_vreg_device_driver);
+	if (rc)
+		return rc;
+
+	return platform_driver_register(&rpm_vreg_resource_driver);
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_smd_driver_init);
+
+static void __exit rpm_vreg_exit(void)
+{
+	platform_driver_unregister(&rpm_vreg_device_driver);
+	platform_driver_unregister(&rpm_vreg_resource_driver);
+}
+
+module_init(rpm_regulator_smd_driver_init);
+module_exit(rpm_vreg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMD RPM regulator driver");
diff --git a/arch/arm/mach-msm/rpm-regulator.c b/arch/arm/mach-msm/rpm-regulator.c
index c708df5..f663695 100644
--- a/arch/arm/mach-msm/rpm-regulator.c
+++ b/arch/arm/mach-msm/rpm-regulator.c
@@ -19,10 +19,12 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/string.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <mach/rpm.h>
 #include <mach/rpm-regulator.h>
+#include <mach/rpm-regulator-smd.h>
 #include <mach/socinfo.h>
 
 #include "rpm_resources.h"
@@ -42,6 +44,15 @@
 	debug_mask, msm_rpm_vreg_debug_mask, int, S_IRUSR | S_IWUSR
 );
 
+/* Used for access via the rpm_regulator_* API. */
+struct rpm_regulator {
+	int			vreg_id;
+	enum rpm_vreg_voter	voter;
+	int			sleep_also;
+	int			min_uV;
+	int			max_uV;
+};
+
 struct vreg_config *(*get_config[])(void) = {
 	[RPM_VREG_VERSION_8660] = get_config_8660,
 	[RPM_VREG_VERSION_8960] = get_config_8960,
@@ -49,6 +60,9 @@
 	[RPM_VREG_VERSION_8930] = get_config_8930,
 };
 
+static struct rpm_regulator_consumer_mapping *consumer_map;
+static int consumer_map_len;
+
 #define SET_PART(_vreg, _part, _val) \
 	_vreg->req[_vreg->part->_part.word].value \
 		= (_vreg->req[_vreg->part->_part.word].value \
@@ -615,6 +629,243 @@
 }
 EXPORT_SYMBOL_GPL(rpm_vreg_set_frequency);
 
+#define MAX_NAME_LEN 64
+/**
+ * rpm_regulator_get() - lookup and obtain a handle to an RPM regulator
+ * @dev: device for regulator consumer
+ * @supply: supply name
+ *
+ * Returns a struct rpm_regulator corresponding to the regulator producer,
+ * or ERR_PTR() containing errno.
+ *
+ * This function may only be called from nonatomic context.  The mapping between
+ * <dev, supply> tuples and rpm_regulator struct pointers is specified via
+ * rpm-regulator platform data.
+ */
+struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply)
+{
+	struct rpm_regulator_consumer_mapping *mapping = NULL;
+	const char *devname = NULL;
+	struct rpm_regulator *regulator;
+	int i;
+
+	if (!config) {
+		pr_err("rpm-regulator driver has not probed yet.\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	if (consumer_map == NULL || consumer_map_len == 0) {
+		pr_err("No private consumer mapping has been specified.\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	if (supply == NULL) {
+		pr_err("supply name must be specified\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (dev)
+		devname = dev_name(dev);
+
+	for (i = 0; i < consumer_map_len; i++) {
+		/* If the mapping specifies a device name, it must match. */
+		if (consumer_map[i].dev_name &&
+			(!devname || strncmp(consumer_map[i].dev_name, devname,
+					     MAX_NAME_LEN)))
+			continue;
+
+		if (strncmp(consumer_map[i].supply, supply, MAX_NAME_LEN)
+		    == 0) {
+			mapping = &consumer_map[i];
+			break;
+		}
+	}
+
+	if (mapping == NULL) {
+		pr_err("could not find mapping for dev=%s, supply=%s\n",
+			(devname ? devname : "(null)"), supply);
+		return ERR_PTR(-ENODEV);
+	}
+
+	regulator = kzalloc(sizeof(struct rpm_regulator), GFP_KERNEL);
+	if (regulator == NULL) {
+		pr_err("could not allocate memory for regulator\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	regulator->vreg_id	= mapping->vreg_id;
+	regulator->voter	= mapping->voter;
+	regulator->sleep_also	= mapping->sleep_also;
+
+	return regulator;
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_get);
+
+static int rpm_regulator_check_input(struct rpm_regulator *regulator)
+{
+	int rc = 0;
+
+	if (regulator == NULL) {
+		rc = -EINVAL;
+		pr_err("invalid (null) rpm_regulator pointer\n");
+	} else if (IS_ERR(regulator)) {
+		rc = PTR_ERR(regulator);
+		pr_err("invalid rpm_regulator pointer, rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+/**
+ * rpm_regulator_put() - free the RPM regulator handle
+ * @regulator: RPM regulator handle
+ *
+ * Parameter reaggregation does not take place when rpm_regulator_put is called.
+ * Therefore, regulator enable state and voltage must be configured
+ * appropriately before calling rpm_regulator_put.
+ *
+ * This function may be called from either atomic or nonatomic context.
+ */
+void rpm_regulator_put(struct rpm_regulator *regulator)
+{
+	kfree(regulator);
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_put);
+
+/**
+ * rpm_regulator_enable() - enable regulator output
+ * @regulator: RPM regulator handle
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * This function may be called from either atomic or nonatomic context.  This
+ * function may only be called for regulators which have the sleep_selectable
+ * flag set in their configuration data.
+ *
+ * rpm_regulator_set_voltage must be called before rpm_regulator_enable because
+ * enabling is defined by the RPM interface to be requesting the desired
+ * non-zero regulator output voltage.
+ */
+int rpm_regulator_enable(struct rpm_regulator *regulator)
+{
+	int rc = rpm_regulator_check_input(regulator);
+	struct vreg *vreg;
+
+	if (rc)
+		return rc;
+
+	if (regulator->vreg_id < config->vreg_id_min
+			|| regulator->vreg_id > config->vreg_id_max) {
+		pr_err("invalid regulator id=%d\n", regulator->vreg_id);
+		return -EINVAL;
+	}
+
+	vreg = &config->vregs[regulator->vreg_id];
+
+	/*
+	 * Handle voltage switches which can be enabled without
+	 * rpm_regulator_set_voltage ever being called.
+	 */
+	if (regulator->min_uV == 0 && regulator->max_uV == 0
+	    && vreg->part->uV.mask == 0 && vreg->part->mV.mask == 0) {
+		regulator->min_uV = 1;
+		regulator->max_uV = 1;
+	}
+
+	if (regulator->min_uV == 0 && regulator->max_uV == 0) {
+		pr_err("Voltage must be set with rpm_regulator_set_voltage "
+			"before calling rpm_regulator_enable; vreg_id=%d, "
+			"voter=%d\n", regulator->vreg_id, regulator->voter);
+		return -EINVAL;
+	}
+
+	rc = rpm_vreg_set_voltage(regulator->vreg_id, regulator->voter,
+		regulator->min_uV, regulator->max_uV, regulator->sleep_also);
+
+	if (rc)
+		pr_err("rpm_vreg_set_voltage failed, rc=%d\n", rc);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_enable);
+
+/**
+ * rpm_regulator_disable() - disable regulator output
+ * @regulator: RPM regulator handle
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * The enable state of the regulator is determined by aggregating the requests
+ * of all consumers.  Therefore, it is possible that the regulator will remain
+ * enabled even after rpm_regulator_disable is called.
+ *
+ * This function may be called from either atomic or nonatomic context.  This
+ * function may only be called for regulators which have the sleep_selectable
+ * flag set in their configuration data.
+ */
+int rpm_regulator_disable(struct rpm_regulator *regulator)
+{
+	int rc = rpm_regulator_check_input(regulator);
+
+	if (rc)
+		return rc;
+
+	rc = rpm_vreg_set_voltage(regulator->vreg_id, regulator->voter, 0, 0,
+				  regulator->sleep_also);
+
+	if (rc)
+		pr_err("rpm_vreg_set_voltage failed, rc=%d\n", rc);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_disable);
+
+/**
+ * rpm_regulator_set_voltage() - set regulator output voltage
+ * @regulator: RPM regulator handle
+ * @min_uV: minimum required voltage in uV
+ * @max_uV: maximum acceptable voltage in uV
+ *
+ * Sets a voltage regulator to the desired output voltage. This can be set
+ * while the regulator is disabled or enabled.  If the regulator is disabled,
+ * then rpm_regulator_set_voltage will both enable the regulator and set it to
+ * output at the requested voltage.
+ *
+ * The min_uV to max_uV voltage range requested must intersect with the
+ * voltage constraint range configured for the regulator.
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * The final voltage value that is sent to the RPM is aggregated based upon the
+ * values requested by all consumers of the regulator.  This corresponds to the
+ * maximum min_uV value.
+ *
+ * This function may be called from either atomic or nonatomic context.  This
+ * function may only be called for regulators which have the sleep_selectable
+ * flag set in their configuration data.
+ */
+int rpm_regulator_set_voltage(struct rpm_regulator *regulator, int min_uV,
+			      int max_uV)
+{
+	int rc = rpm_regulator_check_input(regulator);
+
+	if (rc)
+		return rc;
+
+	rc = rpm_vreg_set_voltage(regulator->vreg_id, regulator->voter, min_uV,
+				 max_uV, regulator->sleep_also);
+
+	if (rc) {
+		pr_err("rpm_vreg_set_voltage failed, rc=%d\n", rc);
+	} else {
+		regulator->min_uV = min_uV;
+		regulator->max_uV = max_uV;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_set_voltage);
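+
+/*
+ * Typical consumer usage of this private API (a sketch; the supply name
+ * "vdd_example" is only an illustration -- real names come from the
+ * rpm-regulator consumer mapping platform data).  Note that a voltage must
+ * be requested before the regulator is enabled:
+ *
+ *	struct rpm_regulator *vreg;
+ *	int rc;
+ *
+ *	vreg = rpm_regulator_get(dev, "vdd_example");
+ *	if (IS_ERR(vreg))
+ *		return PTR_ERR(vreg);
+ *
+ *	rc = rpm_regulator_set_voltage(vreg, 1100000, 1100000);
+ *	if (!rc)
+ *		rc = rpm_regulator_enable(vreg);
+ *	...
+ *	rpm_regulator_disable(vreg);
+ *	rpm_regulator_put(vreg);
+ */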
+
 static inline int vreg_hpm_min_uA(struct vreg *vreg)
 {
 	return vreg->hpm_min_load;
@@ -1374,6 +1625,8 @@
 static int __devinit rpm_vreg_probe(struct platform_device *pdev)
 {
 	struct rpm_regulator_platform_data *platform_data;
+	static struct rpm_regulator_consumer_mapping *prev_consumer_map;
+	static int prev_consumer_map_len;
 	int rc = 0;
 	int i, id;
 
@@ -1416,6 +1669,42 @@
 			mutex_init(&config->vregs[i].pc_lock);
 	}
 
+	/* Copy the list of private API consumers. */
+	if (platform_data->consumer_map_len > 0) {
+		if (consumer_map_len == 0) {
+			consumer_map_len = platform_data->consumer_map_len;
+			consumer_map = kmemdup(platform_data->consumer_map,
+				sizeof(struct rpm_regulator_consumer_mapping)
+				* consumer_map_len, GFP_KERNEL);
+			if (consumer_map == NULL) {
+				pr_err("memory allocation failed\n");
+				consumer_map_len = 0;
+				return -ENOMEM;
+			}
+		} else {
+			/* Concatenate new map with the existing one. */
+			prev_consumer_map = consumer_map;
+			prev_consumer_map_len = consumer_map_len;
+			consumer_map_len += platform_data->consumer_map_len;
+			consumer_map = kmalloc(
+				sizeof(struct rpm_regulator_consumer_mapping)
+				* consumer_map_len, GFP_KERNEL);
+			if (consumer_map == NULL) {
+				pr_err("memory allocation failed\n");
+				consumer_map_len = 0;
+				return -ENOMEM;
+			}
+			memcpy(consumer_map, prev_consumer_map,
+				sizeof(struct rpm_regulator_consumer_mapping)
+				* prev_consumer_map_len);
+			memcpy(&consumer_map[prev_consumer_map_len],
+				platform_data->consumer_map,
+				sizeof(struct rpm_regulator_consumer_mapping)
+				* platform_data->consumer_map_len);
+		}
+
+	}
+
 	/* Initialize all of the regulators listed in the platform data. */
 	for (i = 0; i < platform_data->num_regulators; i++) {
 		rc = rpm_vreg_init_regulator(&platform_data->init_data[i],
@@ -1492,6 +1781,8 @@
 
 	platform_driver_unregister(&rpm_vreg_driver);
 
+	kfree(consumer_map);
+
 	for (i = 0; i < config->vregs_len; i++)
 		mutex_destroy(&config->vregs[i].pc_lock);
 }
diff --git a/arch/arm/mach-msm/rpm_resources.c b/arch/arm/mach-msm/rpm_resources.c
index 7daea5c..5314cee 100644
--- a/arch/arm/mach-msm/rpm_resources.c
+++ b/arch/arm/mach-msm/rpm_resources.c
@@ -867,15 +867,16 @@
 	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);
 }
 
-struct msm_rpmrs_limits *msm_rpmrs_lowest_limits(
-	bool from_idle, enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
-	uint32_t sleep_us)
+static void *msm_rpmrs_lowest_limits(bool from_idle,
+		enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
+		uint32_t sleep_us, uint32_t *power)
 {
 	unsigned int cpu = smp_processor_id();
 	struct msm_rpmrs_level *best_level = NULL;
 	bool irqs_detectable = false;
 	bool gpio_detectable = false;
 	int i;
+	uint32_t pwr;
 
 	if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
 		irqs_detectable = msm_mpm_irqs_detectable(from_idle);
@@ -884,7 +885,6 @@
 
 	for (i = 0; i < msm_rpmrs_level_count; i++) {
 		struct msm_rpmrs_level *level = &msm_rpmrs_levels[i];
-		uint32_t power;
 
 		if (!level->available)
 			continue;
@@ -902,31 +902,38 @@
 					irqs_detectable, gpio_detectable))
 			continue;
 
+		if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE &&
+		    !cpu && msm_rpm_local_request_is_outstanding())
+			break;
+
 		if (sleep_us <= 1) {
-			power = level->energy_overhead;
+			pwr = level->energy_overhead;
 		} else if (sleep_us <= level->time_overhead_us) {
-			power = level->energy_overhead / sleep_us;
+			pwr = level->energy_overhead / sleep_us;
 		} else if ((sleep_us >> 10) > level->time_overhead_us) {
-			power = level->steady_state_power;
+			pwr = level->steady_state_power;
 		} else {
-			power = level->steady_state_power;
-			power -= (level->time_overhead_us *
+			pwr = level->steady_state_power;
+			pwr -= (level->time_overhead_us *
 					level->steady_state_power)/sleep_us;
-			power += level->energy_overhead / sleep_us;
+			pwr += level->energy_overhead / sleep_us;
 		}
 
 		if (!best_level ||
-				best_level->rs_limits.power[cpu] >= power) {
+				best_level->rs_limits.power[cpu] >= pwr) {
 			level->rs_limits.latency_us[cpu] = level->latency_us;
-			level->rs_limits.power[cpu] = power;
+			level->rs_limits.power[cpu] = pwr;
 			best_level = level;
+			if (power)
+				*power = pwr;
 		}
 	}
 
 	return best_level ? &best_level->rs_limits : NULL;
 }
 
-int msm_rpmrs_enter_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
+static int msm_rpmrs_enter_sleep(uint32_t sclk_count, void *limits,
 		bool from_idle, bool notify_rpm)
 {
 	int rc = 0;
@@ -944,7 +951,7 @@
 	return rc;
 }
 
-void msm_rpmrs_exit_sleep(struct msm_rpmrs_limits *limits, bool from_idle,
+static void msm_rpmrs_exit_sleep(void *limits, bool from_idle,
 		bool notify_rpm, bool collapsed)
 {
 
@@ -1067,6 +1074,12 @@
 }
 device_initcall(msm_rpmrs_init);
 
+static struct msm_pm_sleep_ops msm_rpmrs_ops = {
+	.lowest_limits = msm_rpmrs_lowest_limits,
+	.enter_sleep = msm_rpmrs_enter_sleep,
+	.exit_sleep = msm_rpmrs_exit_sleep,
+};
+
 static int __init msm_rpmrs_l2_init(void)
 {
 	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_apq8064()) {
@@ -1085,6 +1098,9 @@
 		msm_rpmrs_l2_cache.aggregate = NULL;
 		msm_rpmrs_l2_cache.restore = NULL;
 	}
+
+	msm_pm_set_sleep_ops(&msm_rpmrs_ops);
+
 	return 0;
 }
 early_initcall(msm_rpmrs_l2_init);
diff --git a/arch/arm/mach-msm/rpm_resources.h b/arch/arm/mach-msm/rpm_resources.h
index a5c61b2..d594405 100644
--- a/arch/arm/mach-msm/rpm_resources.h
+++ b/arch/arm/mach-msm/rpm_resources.h
@@ -137,16 +137,6 @@
 }
 
 void msm_rpmrs_show_resources(void);
-
-struct msm_rpmrs_limits *msm_rpmrs_lowest_limits(
-	bool from_idle, enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
-	uint32_t sleep_us);
-
-int msm_rpmrs_enter_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
-		bool from_idle, bool notify_rpm);
-void msm_rpmrs_exit_sleep(struct msm_rpmrs_limits *limits, bool from_idle,
-		bool notify_rpm, bool collapsed);
-
 int msm_rpmrs_levels_init(struct msm_rpmrs_platform_data *data);
 
 #endif /* __ARCH_ARM_MACH_MSM_RPM_RESOURCES_H */
diff --git a/arch/arm/mach-msm/sdio_al_dloader.c b/arch/arm/mach-msm/sdio_al_dloader.c
index 74b8478..f48c32b 100644
--- a/arch/arm/mach-msm/sdio_al_dloader.c
+++ b/arch/arm/mach-msm/sdio_al_dloader.c
@@ -1140,14 +1140,6 @@
 		return status;
 	}
 
-	status = sdio_dld_create_thread();
-	if (status) {
-		sdio_dld_dealloc_local_buffers();
-		pr_err(MODULE_NAME ": %s, failed in sdio_dld_create_thread()."
-				   "status=%d\n", __func__, status);
-		return status;
-	}
-
 	/* init waiting event of the write callback */
 	init_waitqueue_head(&sdio_dld->write_callback_event.wait_event);
 
@@ -1168,6 +1160,15 @@
 	sdio_dld->push_timer.data = (unsigned long) sdio_dld;
 	sdio_dld->push_timer.function = sdio_dld_push_timer_handler;
 
+	status = sdio_dld_create_thread();
+	if (status) {
+		del_timer_sync(&sdio_dld->timer);
+		del_timer_sync(&sdio_dld->push_timer);
+		sdio_dld_dealloc_local_buffers();
+		pr_err(MODULE_NAME ": %s, failed in sdio_dld_create_thread()."
+				   "status=%d\n", __func__, status);
+		return status;
+	}
 	return 0;
 }
 
diff --git a/arch/arm/mach-msm/timer.h b/arch/arm/mach-msm/timer.h
index 368dd7b..5d18bb4 100644
--- a/arch/arm/mach-msm/timer.h
+++ b/arch/arm/mach-msm/timer.h
@@ -17,12 +17,12 @@
 extern struct sys_timer msm_timer;
 
 void __iomem *msm_timer_get_timer0_base(void);
-int64_t msm_timer_get_sclk_time(int64_t *period);
 uint32_t msm_timer_get_sclk_ticks(void);
 int msm_timer_init_time_sync(void (*timeout)(void));
 #ifndef CONFIG_ARM_ARCH_TIMER
 int64_t msm_timer_enter_idle(void);
 void msm_timer_exit_idle(int low_power);
+int64_t msm_timer_get_sclk_time(int64_t *period);
 #else
 static inline int64_t msm_timer_enter_idle(void) { return 0; }
 static inline void msm_timer_exit_idle(int low_power) { return; }
diff --git a/drivers/char/hw_random/msm_rng.c b/drivers/char/hw_random/msm_rng.c
index 974b77e..7e6670d 100644
--- a/drivers/char/hw_random/msm_rng.c
+++ b/drivers/char/hw_random/msm_rng.c
@@ -231,12 +231,19 @@
 	return 0;
 }
 
+static struct of_device_id qrng_match[] = {
+	{	.compatible = "qcom,msm-rng",
+	},
+	{}
+};
+
 static struct platform_driver rng_driver = {
 	.probe      = msm_rng_probe,
 	.remove     = __devexit_p(msm_rng_remove),
 	.driver     = {
 		.name   = DRIVER_NAME,
 		.owner  = THIS_MODULE,
+		.of_match_table = qrng_match,
 	}
 };
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 36375c05..473b0ef 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -281,8 +281,23 @@
 	}
 }
 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
+
+/**
+ * cpufreq_notify_utilization - notify userspace of a CPU utilization change
+ * @policy: cpufreq policy of the CPU whose utilization is being reported
+ * @util: current load, scaled to the maximum CPU frequency
+ *
+ * This function is called every time the CPU load is evaluated by the
+ * ondemand governor. It notifies userspace of cpu load changes via sysfs.
+ */
+void cpufreq_notify_utilization(struct cpufreq_policy *policy,
+		unsigned int util)
+{
+	if (!policy)
+		return;
+
+	policy->util = util;
 
+	if (policy->util >= MIN_CPU_UTIL_NOTIFY)
+		sysfs_notify(&policy->kobj, NULL, "cpu_utilization");
 
+}
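+
+/*
+ * Userspace consumers can read the per-policy "cpu_utilization" attribute
+ * added below (typically /sys/devices/system/cpu/cpuX/cpufreq/cpu_utilization
+ * with the standard cpufreq sysfs layout) and use poll()/select() on it to be
+ * woken by the sysfs_notify() call above.
+ */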
 
 /*********************************************************************
  *                          SYSFS INTERFACE                          *
@@ -370,6 +385,7 @@
 show_one(scaling_min_freq, min);
 show_one(scaling_max_freq, max);
 show_one(scaling_cur_freq, cur);
+show_one(cpu_utilization, util);
 
 static int __cpufreq_set_policy(struct cpufreq_policy *data,
 				struct cpufreq_policy *policy);
@@ -459,6 +475,8 @@
 	policy->user_policy.policy = policy->policy;
 	policy->user_policy.governor = policy->governor;
 
+	sysfs_notify(&policy->kobj, NULL, "scaling_governor");
+
 	if (ret)
 		return ret;
 	else
@@ -584,6 +602,7 @@
 cpufreq_freq_attr_ro(bios_limit);
 cpufreq_freq_attr_ro(related_cpus);
 cpufreq_freq_attr_ro(affected_cpus);
+cpufreq_freq_attr_ro(cpu_utilization);
 cpufreq_freq_attr_rw(scaling_min_freq);
 cpufreq_freq_attr_rw(scaling_max_freq);
 cpufreq_freq_attr_rw(scaling_governor);
@@ -596,6 +615,7 @@
 	&scaling_min_freq.attr,
 	&scaling_max_freq.attr,
 	&affected_cpus.attr,
+	&cpu_utilization.attr,
 	&related_cpus.attr,
 	&scaling_governor.attr,
 	&scaling_driver.attr,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 2d33096..cfc2534 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -552,7 +552,11 @@
 
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
+	/* Extrapolated load of this CPU */
+	unsigned int load_at_max_freq = 0;
 	unsigned int max_load_freq;
+	/* Current load across this CPU */
+	unsigned int cur_load = 0;
 
 	struct cpufreq_policy *policy;
 	unsigned int j;
@@ -579,7 +583,7 @@
 		struct cpu_dbs_info_s *j_dbs_info;
 		cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
 		unsigned int idle_time, wall_time, iowait_time;
-		unsigned int load, load_freq;
+		unsigned int load_freq;
 		int freq_avg;
 
 		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
@@ -629,16 +633,20 @@
 		if (unlikely(!wall_time || wall_time < idle_time))
 			continue;
 
-		load = 100 * (wall_time - idle_time) / wall_time;
+		cur_load = 100 * (wall_time - idle_time) / wall_time;
 
 		freq_avg = __cpufreq_driver_getavg(policy, j);
 		if (freq_avg <= 0)
 			freq_avg = policy->cur;
 
-		load_freq = load * freq_avg;
+		load_freq = cur_load * freq_avg;
 		if (load_freq > max_load_freq)
 			max_load_freq = load_freq;
 	}
+	/* calculate the scaled load across CPU */
+	load_at_max_freq = (cur_load * policy->cur)/policy->cpuinfo.max_freq;
+
+	cpufreq_notify_utilization(policy, load_at_max_freq);
 
 	/* Check for frequency increase */
 	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 710583b..3561a8a 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -30,6 +30,7 @@
 
 #include <mach/iommu_domains.h>
 #include <asm/mach/map.h>
+#include <asm/cacheflush.h>
 
 struct ion_carveout_heap {
 	struct ion_heap heap;
@@ -41,6 +42,7 @@
 	int (*release_region)(void *);
 	atomic_t map_count;
 	void *bus_id;
+	unsigned int has_outer_cache;
 };
 
 ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
@@ -229,25 +231,31 @@
 			void *vaddr, unsigned int offset, unsigned int length,
 			unsigned int cmd)
 {
-	unsigned long vstart, pstart;
-
-	pstart = buffer->priv_phys + offset;
-	vstart = (unsigned long)vaddr;
+	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+	struct ion_carveout_heap *carveout_heap =
+	     container_of(heap, struct  ion_carveout_heap, heap);
 
 	switch (cmd) {
 	case ION_IOC_CLEAN_CACHES:
-		clean_caches(vstart, length, pstart);
+		dmac_clean_range(vaddr, vaddr + length);
+		outer_cache_op = outer_clean_range;
 		break;
 	case ION_IOC_INV_CACHES:
-		invalidate_caches(vstart, length, pstart);
+		dmac_inv_range(vaddr, vaddr + length);
+		outer_cache_op = outer_inv_range;
 		break;
 	case ION_IOC_CLEAN_INV_CACHES:
-		clean_and_invalidate_caches(vstart, length, pstart);
+		dmac_flush_range(vaddr, vaddr + length);
+		outer_cache_op = outer_flush_range;
 		break;
 	default:
 		return -EINVAL;
 	}
 
+	if (carveout_heap->has_outer_cache) {
+		unsigned long pstart = buffer->priv_phys + offset;
+		outer_cache_op(pstart, pstart + length);
+	}
 	return 0;
 }
 
@@ -447,6 +455,7 @@
 	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
 	carveout_heap->allocated_bytes = 0;
 	carveout_heap->total_size = heap_data->size;
+	carveout_heap->has_outer_cache = heap_data->has_outer_cache;
 
 	if (heap_data->extra_data) {
 		struct ion_co_heap_pdata *extra_data =
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index fcbf1d4..1c50c04 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -34,6 +34,7 @@
  #include "ion_priv.h"
 
 #include <asm/mach/map.h>
+#include <asm/cacheflush.h>
 
 /**
  * struct ion_cp_heap - container for the heap and shared heap data
@@ -66,7 +67,8 @@
  * @reserved_vrange: reserved virtual address range for use with fmem
  * @iommu_map_all:	Indicates whether we should map whole heap into IOMMU.
  * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
- */
+ * @has_outer_cache:    set to 1 if outer cache is used, 0 otherwise.
+ */
 struct ion_cp_heap {
 	struct ion_heap heap;
 	struct gen_pool *pool;
@@ -90,7 +92,7 @@
 	void *reserved_vrange;
 	int iommu_map_all;
 	int iommu_2x_map_domain;
-
+	unsigned int has_outer_cache;
 };
 
 enum {
@@ -541,25 +543,31 @@
 			void *vaddr, unsigned int offset, unsigned int length,
 			unsigned int cmd)
 {
-	unsigned long vstart, pstart;
-
-	pstart = buffer->priv_phys + offset;
-	vstart = (unsigned long)vaddr;
+	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+	struct ion_cp_heap *cp_heap =
+	     container_of(heap, struct  ion_cp_heap, heap);
 
 	switch (cmd) {
 	case ION_IOC_CLEAN_CACHES:
-		clean_caches(vstart, length, pstart);
+		dmac_clean_range(vaddr, vaddr + length);
+		outer_cache_op = outer_clean_range;
 		break;
 	case ION_IOC_INV_CACHES:
-		invalidate_caches(vstart, length, pstart);
+		dmac_inv_range(vaddr, vaddr + length);
+		outer_cache_op = outer_inv_range;
 		break;
 	case ION_IOC_CLEAN_INV_CACHES:
-		clean_and_invalidate_caches(vstart, length, pstart);
+		dmac_flush_range(vaddr, vaddr + length);
+		outer_cache_op = outer_flush_range;
 		break;
 	default:
 		return -EINVAL;
 	}
 
+	if (cp_heap->has_outer_cache) {
+		unsigned long pstart = buffer->priv_phys + offset;
+		outer_cache_op(pstart, pstart + length);
+	}
 	return 0;
 }
 
@@ -915,6 +923,7 @@
 	cp_heap->heap_protected = HEAP_NOT_PROTECTED;
 	cp_heap->secure_base = cp_heap->base;
 	cp_heap->secure_size = heap_data->size;
+	cp_heap->has_outer_cache = heap_data->has_outer_cache;
 	if (heap_data->extra_data) {
 		struct ion_cp_heap_pdata *extra_data =
 				heap_data->extra_data;
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 70bdc7f..621144b 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -23,10 +23,12 @@
 
 #include <asm/mach/map.h>
 #include <asm/page.h>
+#include <asm/cacheflush.h>
 #include <mach/iommu_domains.h>
 
 struct ion_iommu_heap {
 	struct ion_heap heap;
+	unsigned int has_outer_cache;
 };
 
 struct ion_iommu_priv_data {
@@ -261,34 +263,39 @@
 			void *vaddr, unsigned int offset, unsigned int length,
 			unsigned int cmd)
 {
-	unsigned long vstart, pstart;
-	void (*op)(unsigned long, unsigned long, unsigned long);
-	unsigned int i;
-	struct ion_iommu_priv_data *data = buffer->priv_virt;
-
-	if (!data)
-		return -ENOMEM;
+	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+	struct ion_iommu_heap *iommu_heap =
+	     container_of(heap, struct  ion_iommu_heap, heap);
 
 	switch (cmd) {
 	case ION_IOC_CLEAN_CACHES:
-		op = clean_caches;
+		dmac_clean_range(vaddr, vaddr + length);
+		outer_cache_op = outer_clean_range;
 		break;
 	case ION_IOC_INV_CACHES:
-		op = invalidate_caches;
+		dmac_inv_range(vaddr, vaddr + length);
+		outer_cache_op = outer_inv_range;
 		break;
 	case ION_IOC_CLEAN_INV_CACHES:
-		op = clean_and_invalidate_caches;
+		dmac_flush_range(vaddr, vaddr + length);
+		outer_cache_op = outer_flush_range;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	vstart = (unsigned long) vaddr;
-	for (i = 0; i < data->nrpages; ++i, vstart += PAGE_SIZE) {
-		pstart = page_to_phys(data->pages[i]);
-		op(vstart, PAGE_SIZE, pstart);
-	}
+	if (iommu_heap->has_outer_cache) {
+		unsigned long pstart;
+		unsigned int i;
+		struct ion_iommu_priv_data *data = buffer->priv_virt;
+		if (!data)
+			return -ENOMEM;
 
+		for (i = 0; i < data->nrpages; ++i) {
+			pstart = page_to_phys(data->pages[i]);
+			outer_cache_op(pstart, pstart + PAGE_SIZE);
+		}
+	}
 	return 0;
 }
 
@@ -327,6 +334,7 @@
 
 	iommu_heap->heap.ops = &iommu_heap_ops;
 	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
+	iommu_heap->has_outer_cache = heap_data->has_outer_cache;
 
 	return &iommu_heap->heap;
 }
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index 26c6632..08b271b 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -26,9 +26,12 @@
 #include <mach/iommu_domains.h>
 #include "ion_priv.h"
 #include <mach/memory.h>
+#include <asm/cacheflush.h>
 
 static atomic_t system_heap_allocated;
 static atomic_t system_contig_heap_allocated;
+static unsigned int system_heap_has_outer_cache;
+static unsigned int system_heap_contig_has_outer_cache;
 
 static int ion_system_heap_allocate(struct ion_heap *heap,
 				     struct ion_buffer *buffer,
@@ -144,63 +147,66 @@
 			void *vaddr, unsigned int offset, unsigned int length,
 			unsigned int cmd)
 {
-	unsigned long vstart, pstart;
-	void *vend;
-	void *vtemp;
-	unsigned long ln = 0;
-	void (*op)(unsigned long, unsigned long, unsigned long);
+	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
 
 	switch (cmd) {
 	case ION_IOC_CLEAN_CACHES:
-		op = clean_caches;
+		dmac_clean_range(vaddr, vaddr + length);
+		outer_cache_op = outer_clean_range;
 		break;
 	case ION_IOC_INV_CACHES:
-		op = invalidate_caches;
+		dmac_inv_range(vaddr, vaddr + length);
+		outer_cache_op = outer_inv_range;
 		break;
 	case ION_IOC_CLEAN_INV_CACHES:
-		op = clean_and_invalidate_caches;
+		dmac_flush_range(vaddr, vaddr + length);
+		outer_cache_op = outer_flush_range;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	vend = buffer->priv_virt + buffer->size;
-	vtemp = buffer->priv_virt + offset;
+	if (system_heap_has_outer_cache) {
+		unsigned long pstart;
+		void *vend;
+		void *vtemp;
+		unsigned long ln = 0;
+		vend = buffer->priv_virt + buffer->size;
+		vtemp = buffer->priv_virt + offset;
 
-	if ((vtemp+length) > vend) {
-		pr_err("Trying to flush outside of mapped range.\n");
-		pr_err("End of mapped range: %p, trying to flush to "
-			"address %p\n", vend, vtemp+length);
-		WARN(1, "%s: called with heap name %s, buffer size 0x%x, "
-			"vaddr 0x%p, offset 0x%x, length: 0x%x\n", __func__,
-			heap->name, buffer->size, vaddr, offset, length);
-		return -EINVAL;
-	}
-
-	for (vstart = (unsigned long) vaddr;
-			ln < length && vtemp < vend;
-			vtemp += PAGE_SIZE, ln += PAGE_SIZE,
-			vstart += PAGE_SIZE) {
-		struct page *page = vmalloc_to_page(vtemp);
-		if (!page) {
-			WARN(1, "Could not find page for virt. address %p\n",
-				vtemp);
-			return -EINVAL;
-		}
-		pstart = page_to_phys(page);
-		/*
-		 * If page -> phys is returning NULL, something
-		 * has really gone wrong...
-		 */
-		if (!pstart) {
-			WARN(1, "Could not translate %p to physical address\n",
-				vtemp);
+		if ((vtemp+length) > vend) {
+			pr_err("Trying to flush outside of mapped range.\n");
+			pr_err("End of mapped range: %p, trying to flush to "
+				"address %p\n", vend, vtemp+length);
+			WARN(1, "%s: called with heap name %s, buffer size 0x%x, "
+				"vaddr 0x%p, offset 0x%x, length: 0x%x\n",
+				__func__, heap->name, buffer->size, vaddr,
+				offset, length);
 			return -EINVAL;
 		}
 
-		op(vstart, PAGE_SIZE, pstart);
-	}
+		for (; ln < length && vtemp < vend;
+		      vtemp += PAGE_SIZE, ln += PAGE_SIZE) {
+			struct page *page = vmalloc_to_page(vtemp);
+			if (!page) {
+				WARN(1, "Could not find page for virt. address %p\n",
+					vtemp);
+				return -EINVAL;
+			}
+			pstart = page_to_phys(page);
+			/*
+			 * If page -> phys is returning NULL, something
+			 * has really gone wrong...
+			 */
+			if (!pstart) {
+				WARN(1, "Could not translate %p to physical address\n",
+					vtemp);
+				return -EINVAL;
+			}
 
+			outer_cache_op(pstart, pstart + PAGE_SIZE);
+		}
+	}
 	return 0;
 }
 
@@ -314,7 +320,7 @@
 	.unmap_iommu = ion_system_heap_unmap_iommu,
 };
 
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
 {
 	struct ion_heap *heap;
 
@@ -323,6 +329,7 @@
 		return ERR_PTR(-ENOMEM);
 	heap->ops = &vmalloc_ops;
 	heap->type = ION_HEAP_TYPE_SYSTEM;
+	system_heap_has_outer_cache = pheap->has_outer_cache;
 	return heap;
 }
 
@@ -394,31 +401,38 @@
 			unsigned int offset, unsigned int length,
 			unsigned int cmd)
 {
-	unsigned long vstart, pstart;
-
-	pstart = virt_to_phys(buffer->priv_virt) + offset;
-	if (!pstart) {
-		WARN(1, "Could not do virt to phys translation on %p\n",
-			buffer->priv_virt);
-		return -EINVAL;
-	}
-
-	vstart = (unsigned long) vaddr;
+	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
 
 	switch (cmd) {
 	case ION_IOC_CLEAN_CACHES:
-		clean_caches(vstart, length, pstart);
+		dmac_clean_range(vaddr, vaddr + length);
+		outer_cache_op = outer_clean_range;
 		break;
 	case ION_IOC_INV_CACHES:
-		invalidate_caches(vstart, length, pstart);
+		dmac_inv_range(vaddr, vaddr + length);
+		outer_cache_op = outer_inv_range;
 		break;
 	case ION_IOC_CLEAN_INV_CACHES:
-		clean_and_invalidate_caches(vstart, length, pstart);
+		dmac_flush_range(vaddr, vaddr + length);
+		outer_cache_op = outer_flush_range;
 		break;
 	default:
 		return -EINVAL;
 	}
 
+	if (system_heap_contig_has_outer_cache) {
+		unsigned long pstart;
+
+		pstart = virt_to_phys(buffer->priv_virt) + offset;
+		if (!pstart) {
+			WARN(1, "Could not do virt to phys translation on %p\n",
+				buffer->priv_virt);
+			return -EINVAL;
+		}
+
+		outer_cache_op(pstart, pstart + PAGE_SIZE);
+	}
+
 	return 0;
 }
 
@@ -524,7 +538,7 @@
 	.unmap_iommu = ion_system_heap_unmap_iommu,
 };
 
-struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
 {
 	struct ion_heap *heap;
 
@@ -533,6 +547,7 @@
 		return ERR_PTR(-ENOMEM);
 	heap->ops = &kmalloc_ops;
 	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+	system_heap_contig_has_outer_cache = pheap->has_outer_cache;
 	return heap;
 }
 
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index c8bfce3..83afb25 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -136,9 +136,11 @@
 				}
 
 				cp_data->virt_addr = fmem_info->virt;
-				cp_data->secure_base = heap->base;
-				cp_data->secure_size =
+				if (!cp_data->secure_base) {
+					cp_data->secure_base = heap->base;
+					cp_data->secure_size =
 						heap->size + shared_heap->size;
+				}
 			} else if (!heap->base) {
 				ion_set_base_address(heap, shared_heap,
 					co_heap_data, cp_data);
@@ -280,6 +282,7 @@
 		struct ion_platform_heap *heap_data = &pdata->heaps[i];
 		msm_ion_allocate(heap_data);
 
+		heap_data->has_outer_cache = pdata->has_outer_cache;
 		heaps[i] = ion_heap_create(heap_data);
 		if (IS_ERR_OR_NULL(heaps[i])) {
 			heaps[i] = 0;
diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h
index 0a71982..35af06e 100644
--- a/drivers/gpu/msm/a3xx_reg.h
+++ b/drivers/gpu/msm/a3xx_reg.h
@@ -248,6 +248,10 @@
 #define A3XX_VBIF_ARB_CTL 0x303C
 #define A3XX_VBIF_OUT_AXI_AOOO_EN 0x305E
 #define A3XX_VBIF_OUT_AXI_AOOO 0x305F
+#define A3XX_VBIF_ERR_PENDING  0x3064
+#define A3XX_VBIF_ERR_MASK 0x3066
+#define A3XX_VBIF_ERR_CLEAR 0x3067
+#define A3XX_VBIF_ERR_INFO 0x3068
 
 /* Bit flags for RBBM_CTL */
 #define RBBM_RBBM_CTL_RESET_PWR_CTR1  (1 << 1)
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 5187eb1..8362b65 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2335,6 +2335,39 @@
 	adreno_ringbuffer_submit(rb);
 }
 
+#define VBIF_MAX_CLIENTS 6
+
+static void a3xx_vbif_callback(struct adreno_device *adreno_dev,
+	unsigned int status)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	int i;
+	char str[80], *ptr = str;
+	int slen = sizeof(str) - 1;
+
+	KGSL_DRV_INFO(device, "VBIF error | status=%X\n",
+		status);
+
+	for (i = 0; i < VBIF_MAX_CLIENTS; i++) {
+		if (status & (1 << i)) {
+			unsigned int err;
+			int ret;
+
+			adreno_regwrite(device, A3XX_VBIF_ERR_INFO, i);
+			adreno_regread(device, A3XX_VBIF_ERR_INFO, &err);
+
+			ret = snprintf(ptr, slen, "%d:%8.8X ", i, err);
+			ptr += ret;
+			slen -= ret;
+		}
+	}
+
+	KGSL_DRV_INFO(device, "%s\n", str);
+
+	/* Clear the errors */
+	adreno_regwrite(device, A3XX_VBIF_ERR_CLEAR, status);
+}
+
 static void a3xx_err_callback(struct adreno_device *adreno_dev, int bit)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
@@ -2511,6 +2544,15 @@
 	if (status)
 		adreno_regwrite(&adreno_dev->dev, A3XX_RBBM_INT_CLEAR_CMD,
 			status);
+
+	/* Check for VBIF errors */
+	adreno_regread(&adreno_dev->dev, A3XX_VBIF_ERR_PENDING, &status);
+
+	if (status) {
+		a3xx_vbif_callback(adreno_dev, status);
+		ret = IRQ_HANDLED;
+	}
+
 	return ret;
 }
 
@@ -2518,10 +2560,17 @@
 {
 	struct kgsl_device *device = &adreno_dev->dev;
 
-	if (state)
+	if (state) {
 		adreno_regwrite(device, A3XX_RBBM_INT_0_MASK, A3XX_INT_MASK);
-	else
+
+		/* Enable VBIF interrupts - write 0 to enable them all */
+		adreno_regwrite(device, A3XX_VBIF_ERR_MASK, 0);
+		/* Clear outstanding VBIF errors */
+		adreno_regwrite(device, A3XX_VBIF_ERR_CLEAR, 0x3F);
+	} else {
 		adreno_regwrite(device, A3XX_RBBM_INT_0_MASK, 0);
+		adreno_regwrite(device, A3XX_VBIF_ERR_MASK, 0xFFFFFFFF);
+	}
 }
 
 static unsigned int a3xx_busy_cycles(struct adreno_device *adreno_dev)
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 1a34e80..662a1c4 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2595,11 +2595,14 @@
 
 static void kgsl_core_exit(void)
 {
-	unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
-
-	kgsl_mmu_ptpool_destroy(&kgsl_driver.ptpool);
+	kgsl_mmu_ptpool_destroy(kgsl_driver.ptpool);
 	kgsl_driver.ptpool = NULL;
 
+	kgsl_drm_exit();
+	kgsl_cffdump_destroy();
+	kgsl_core_debugfs_close();
+	kgsl_sharedmem_uninit_sysfs();
+
 	device_unregister(&kgsl_driver.virtdev);
 
 	if (kgsl_driver.class) {
@@ -2607,10 +2610,7 @@
 		kgsl_driver.class = NULL;
 	}
 
-	kgsl_drm_exit();
-	kgsl_cffdump_destroy();
-	kgsl_core_debugfs_close();
-	kgsl_sharedmem_uninit_sysfs();
+	unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
 }
 
 static int __init kgsl_core_init(void)
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 2aaefba..6854d6c 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -443,8 +443,12 @@
 		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
 			&pwr->power_flags)) {
 			trace_kgsl_rail(device, state);
-			if (pwr->gpu_reg)
-				regulator_enable(pwr->gpu_reg);
+			if (pwr->gpu_reg) {
+				int status = regulator_enable(pwr->gpu_reg);
+				if (status)
+					KGSL_DRV_ERR(device, "regulator_enable "
+							"failed: %d\n", status);
+			}
 		}
 	}
 }
diff --git a/drivers/gud/README b/drivers/gud/README
new file mode 100644
index 0000000..c6d62a1
--- /dev/null
+++ b/drivers/gud/README
@@ -0,0 +1,6 @@
+MobiCore is an operating system shipped with TZBSP
+on MSM chipsets. MobiCore consists of several components in
+the secure world (TrustZone) and the non-secure world (Linux
+kernel, Android user space). The MobiCore driver
+communicates with the MobiCore kernel that runs in
+TrustZone.
diff --git a/drivers/gud/mobicore_driver/build_tag.h b/drivers/gud/mobicore_driver/build_tag.h
new file mode 100644
index 0000000..43541bb
--- /dev/null
+++ b/drivers/gud/mobicore_driver/build_tag.h
@@ -0,0 +1,29 @@
+/**
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2012-2012 -->
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *	products derived from this software without specific prior
+ *	written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#define MOBICORE_COMPONENT_BUILD_TAG "*** GC_MSM8960_Release_V010 ###"
diff --git a/drivers/gud/mobicore_driver/logging.c b/drivers/gud/mobicore_driver/logging.c
new file mode 100644
index 0000000..eb44c8a
--- /dev/null
+++ b/drivers/gud/mobicore_driver/logging.c
@@ -0,0 +1,336 @@
+/** MobiCore driver module (interface to the secure world SWD).
+ * @addtogroup MCD_MCDIMPL_KMOD_LOGGING MobiCore Driver Logging Subsystem.
+ * @ingroup  MCD_MCDIMPL_KMOD
+ * @{
+ * @file
+ * MobiCore Driver Logging Subsystem.
+ * The logging subsystem provides the interface between the MobiCore trace
+ * buffer and the Linux log
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "mc_drv_module.h"
+#include "mc_drv_module_linux_api.h"
+#include "mc_drv_module_fastcalls.h"
+
+/* Default length of the log ring buffer: 64 pages (256KB with 4KB pages) */
+#define LOG_BUF_SIZE	(64 * PAGE_SIZE)
+
+/* Max Len of a log line for printing */
+#define LOG_LINE_SIZE	256
+
+static uint32_t log_size = LOG_BUF_SIZE;
+module_param(log_size, uint, 0);
+MODULE_PARM_DESC(log_size, "Size of the MobiCore log ringbuffer "
+						"(default 256KB).");
+
+/*----------------------------------------------------------------------------*/
+/* Definitions for log version 2 */
+#define LOG_TYPE_MASK				(0x0007)
+#define LOG_TYPE_CHAR				0
+#define LOG_TYPE_INTEGER			1
+/* Field length */
+#define LOG_LENGTH_MASK				(0x00F8)
+#define LOG_LENGTH_SHIFT			3
+/* Extra attributes */
+#define LOG_EOL					(0x0100)
+#define LOG_INTEGER_DECIMAL			(0x0200)
+#define LOG_INTEGER_SIGNED			(0x0400)
+
+struct logmsg_struct {
+	/* Type and format of data */
+	uint16_t ctrl;
+	/* Unique value for each event source */
+	uint16_t source;
+	/* Value, if any */
+	uint32_t log_data;
+};
+
+/** MobiCore log previous position */
+static uint32_t log_pos;
+/** MobiCore log buffer structure */
+static struct mc_trace_buf *log_buf;
+/** Log Thread task structure */
+struct task_struct *log_thread;
+/** Log Line buffer */
+static char *log_line;
+
+static void log_msg(struct logmsg_struct *msg);
+
+/*----------------------------------------------------------------------------*/
+static void log_eol(void)
+{
+	if (!strnlen(log_line, LOG_LINE_SIZE))
+		return;
+	printk(KERN_INFO "%s\n", log_line);
+	log_line[0] = 0;
+}
+/*----------------------------------------------------------------------------*/
+/**
+ * Append a char to the log line, flushing the line to the kernel log when it
+ * is full or an end-of-line character is seen. Assumes nobody else is
+ * updating the line! */
+static void log_char(char ch)
+{
+	uint32_t len;
+	if (ch == '\n' || ch == '\r') {
+		log_eol();
+		return;
+	}
+
+	if (strnlen(log_line, LOG_LINE_SIZE) >= LOG_LINE_SIZE - 1) {
+		printk(KERN_INFO "%s\n", log_line);
+		log_line[0] = 0;
+	}
+
+	len = strnlen(log_line, LOG_LINE_SIZE);
+	log_line[len] = ch;
+	log_line[len + 1] = 0;
+}
+
+/*----------------------------------------------------------------------------*/
+/**
+ * Append a string to the log line, flushing as needed. Assumes nobody else
+ * is updating the line! */
+static void log_str(const char *s)
+{
+	int i;
+	for (i = 0; i < strnlen(s, LOG_LINE_SIZE); i++)
+		log_char(s[i]);
+}
+
+/*----------------------------------------------------------------------------*/
+static uint32_t process_v1log(void)
+{
+	char *last_char = log_buf->buff + log_buf->write_pos;
+	char *buff = log_buf->buff + log_pos;
+	while (buff != last_char) {
+		log_char(*(buff++));
+		/* Wrap around */
+		if (buff - (char *)log_buf >= log_size)
+			buff = log_buf->buff;
+	}
+	return buff - log_buf->buff;
+}
+
+/*----------------------------------------------------------------------------*/
+static uint32_t process_v2log(void)
+{
+	char *last_msg = log_buf->buff + log_buf->write_pos;
+	char *buff = log_buf->buff + log_pos;
+	while (buff != last_msg) {
+		log_msg((struct logmsg_struct *)buff);
+		buff += sizeof(struct logmsg_struct);
+		/* Wrap around */
+		if (buff + sizeof(struct logmsg_struct) >
+			(char *)log_buf + log_size)
+			buff = log_buf->buff;
+	}
+	return buff - log_buf->buff;
+}
+
+static const uint8_t HEX2ASCII[16] = { '0', '1', '2', '3', '4', '5', '6', '7',
+				'8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
+
+/*----------------------------------------------------------------------------*/
+static void dbg_raw_nro(uint32_t format, uint32_t value)
+{
+	int digits = 1;
+	uint32_t base = (format & LOG_INTEGER_DECIMAL) ? 10 : 16;
+	int width = (format & LOG_LENGTH_MASK) >> LOG_LENGTH_SHIFT;
+	int negative = FALSE;
+	uint32_t digit_base = 1;
+
+	if ((format & LOG_INTEGER_SIGNED) != 0 && ((signed int)value) < 0) {
+			negative = TRUE;
+			value = (uint32_t)(-(signed int)value);
+			width--;
+	}
+
+	/* Find length and divider to get largest digit */
+	while (value / digit_base >= base) {
+			digit_base *= base;
+			digits++;
+	}
+
+	if (width > digits) {
+		char ch = (base == 10) ? ' ' : '0';
+		while (width > digits) {
+			log_char(ch);
+			width--;
+		}
+	}
+
+	if (negative)
+		log_char('-');
+
+	while (digits-- > 0) {
+		uint32_t d = value / digit_base;
+		log_char(HEX2ASCII[d]);
+		value = value - d * digit_base;
+		digit_base /= base;
+	}
+}
+
+/*----------------------------------------------------------------------------*/
+static void log_msg(struct logmsg_struct *msg)
+{
+	unsigned char msgtxt[5];
+	int mpos = 0;
+	switch (msg->ctrl & LOG_TYPE_MASK) {
+	case LOG_TYPE_CHAR: {
+		uint32_t ch;
+		ch = msg->log_data;
+		while (ch != 0) {
+			msgtxt[mpos++] = ch&0xFF;
+			ch >>= 8;
+		}
+		msgtxt[mpos] = 0;
+		log_str(msgtxt);
+		break;
+	}
+	case LOG_TYPE_INTEGER: {
+		dbg_raw_nro(msg->ctrl, msg->log_data);
+		break;
+	}
+	default:
+		break;
+	}
+	if (msg->ctrl & LOG_EOL)
+		log_eol();
+}
+
+/*----------------------------------------------------------------------------*/
+static int log_worker(void *p)
+{
+	/* The thread should never have been started without a log buffer */
+	if (log_buf == NULL)
+		return -EFAULT;
+
+	while (!kthread_should_stop()) {
+		if (log_buf->write_pos == log_pos)
+			schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
+
+		switch (log_buf->version) {
+		case 1:
+			log_pos = process_v1log();
+			break;
+		case 2:
+			log_pos = process_v2log();
+			break;
+		default:
+			MCDRV_DBG_ERROR("Unknown Mobicore log data "
+				"version %d logging disabled.",
+				log_buf->version);
+			log_pos = log_buf->write_pos;
+			/* Stop the thread as we have no idea what
+			 * happens next */
+			return -EFAULT;
+		}
+	}
+	MCDRV_DBG("Logging thread stopped!");
+	return 0;
+}
+
+
+/*----------------------------------------------------------------------------*/
+/**
+ * Wake up the log reader thread.
+ * This should be called from the places where calls into MobiCore have
+ * generated some logs (e.g. yield, SIQ, ...).
+ */
+void mobicore_log_read(void)
+{
+	if (log_thread == NULL || IS_ERR(log_thread))
+		return;
+
+	wake_up_process(log_thread);
+}
+
+/*----------------------------------------------------------------------------*/
+/**
+ * Set up the MobiCore kernel log. It assumes it is running on CORE 0!
+ * The fastcall will complain if that is not the case!
+ */
+long mobicore_log_setup(void *data)
+{
+	unsigned long phys_log_buf;
+	union fc_generic fc_log;
+
+	log_pos = 0;
+	log_buf = NULL;
+	log_thread = NULL;
+	log_line = NULL;
+
+	/* Sanity check for the log size */
+	if (log_size < PAGE_SIZE)
+		return -EFAULT;
+	else
+		log_size =
+			get_nr_of_pages_for_buffer(NULL, log_size) * PAGE_SIZE;
+
+	log_line = kzalloc(LOG_LINE_SIZE, GFP_KERNEL);
+	if (log_line == NULL) {
+		MCDRV_DBG_ERROR("failed to allocate log line!");
+		return -ENOMEM;
+	}
+
+	log_thread = kthread_create(log_worker, NULL, "mobicore_log");
+	if (IS_ERR(log_thread)) {
+		MCDRV_DBG_ERROR("mobicore log thread creation failed!");
+		return -EFAULT;
+	}
+
+	log_pos = 0;
+	log_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					size_to_order(log_size));
+	if (!log_buf) {
+		MCDRV_DBG_ERROR("Failed to get page for logger!");
+		return -ENOMEM;
+	}
+	phys_log_buf = virt_to_phys(log_buf);
+
+	memset(&fc_log, 0, sizeof(fc_log));
+	fc_log.as_in.cmd      = MC_FC_NWD_TRACE;
+	fc_log.as_in.param[0] = phys_log_buf;
+	fc_log.as_in.param[1] = log_size;
+
+	MCDRV_DBG("fc_log virt=%p phys=%p ", log_buf, (void *)phys_log_buf);
+	mc_fastcall(&fc_log);
+	MCDRV_DBG("fc_log out ret=0x%08x", fc_log.as_out.ret);
+	/* If the setup failed we must free the memory allocated */
+	if (fc_log.as_out.ret) {
+		MCDRV_DBG_ERROR("MobiCore shared traces setup failed!");
+		kthread_stop(log_thread);
+		free_pages((unsigned long)log_buf, size_to_order(log_size));
+
+		log_buf = NULL;
+		log_thread = NULL;
+		return -EIO;
+	}
+
+	MCDRV_DBG("fc_log Logger version %u\n", log_buf->version);
+	return 0;
+}
+
+/*----------------------------------------------------------------------------*/
+/**
+ * Free kernel log components.
+ * ATTN: We can't free the log buffer because it's also in use by MobiCore and
+ * even if the module is unloaded MobiCore is still running.
+ */
+void mobicore_log_free(void)
+{
+	if (log_thread && !IS_ERR(log_thread)) {
+		/* We don't really care what the thread returns for exit */
+		kthread_stop(log_thread);
+	}
+
+	kfree(log_line);
+}
diff --git a/drivers/gud/mobicore_driver/main.c b/drivers/gud/mobicore_driver/main.c
new file mode 100644
index 0000000..d1c67f6
--- /dev/null
+++ b/drivers/gud/mobicore_driver/main.c
@@ -0,0 +1,2868 @@
+/** MobiCore driver module (interface to the secure world, SWD).
+ * @addtogroup MCD_MCDIMPL_KMOD_IMPL
+ * @{
+ * @file
+ * MobiCore Driver Kernel Module.
+ * This module is written as a Linux device driver.
+ * This driver represents the command proxy on the lowest layer, from the
+ * secure world to the non secure world, and vice versa.
+ * This driver is located in the non secure world (Linux).
+ * This driver offers IOCTL commands, for access to the secure world, and has
+ * the interface from the secure world to the normal world.
+ * The access to the driver is possible with a file descriptor,
+ * which has to be created by the fd = open(/dev/mobicore) command.
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "mc_drv_module.h"
+#include "mc_drv_module_linux_api.h"
+#include "mc_drv_module_android.h"
+#include "mc_drv_module_fastcalls.h"
+#include "public/mc_kernel_api.h"
+
+/* Initial value for the daemon semaphore signaling */
+#define DAEMON_SEM_VAL 0
+
+/** MobiCore interrupt context data */
+static struct mc_drv_kmod_ctx	mc_drv_kmod_ctx;
+
+/** MobiCore MCI information */
+static uint32_t mci_base;
+/*
+#############################################################################
+##
+## Convenience functions for Linux API functions
+##
+#############################################################################*/
+static int goto_cpu0(void);
+static int goto_all_cpu(void) __attribute__ ((unused));
+
+
+/*----------------------------------------------------------------------------*/
+static void init_and_add_to_list(
+	struct list_head *item,
+	struct list_head *list_head
+)
+{
+	INIT_LIST_HEAD(item);
+
+	list_add(item, list_head);
+}
+
+/*----------------------------------------------------------------------------*/
+/** check if CPU supports the ARM TrustZone Security Extensions
+ *	@return int TRUE or FALSE */
+static int has_security_extensions(
+	void
+)
+{
+	u32 fea = 0;
+	asm volatile(
+		"mrc p15, 0, %[fea], cr0, cr1, 0" :
+		[fea]"=r" (fea));
+
+	MCDRV_DBG_VERBOSE("CPU Features: 0x%X", fea);
+
+	/* If the CPU features ID has 0 for security features then the CPU
+	 * doesn't support TrustZone at all!
+	 */
+	if ((fea & ARM_SECURITY_EXTENSION_MASK) == 0)
+		return 0;
+
+	return 1;
+}
+
+/*----------------------------------------------------------------------------*/
+/** check if running in secure mode
+ *	@return int TRUE or FALSE */
+static int is_secure_mode(
+	void
+)
+{
+	u32 cpsr = 0, nsacr = 0;
+	asm volatile(
+		"mrc	p15, 0, %[nsacr], cr1, cr1, 2\n"
+		"mrs %[cpsr], cpsr\n" :
+		[nsacr]"=r" (nsacr),
+		[cpsr]"=r"(cpsr));
+
+	MCDRV_DBG_VERBOSE("CPRS.M = set to 0x%X\n", cpsr & ARM_CPSR_MASK);
+	MCDRV_DBG_VERBOSE("SCR.NS = set to 0x%X\n", nsacr);
+
+	/* If the NSACR contains the reset value(=0) then most likely we are
+	 * running in Secure MODE.
+	 * If the cpsr mode is set to monitor mode then we cannot load!
+	 */
+	if (nsacr == 0 || ((cpsr & ARM_CPSR_MASK) == ARM_MONITOR_MODE))
+		return 1;
+
+	return 0;
+}
+
+/*----------------------------------------------------------------------------*/
+/** check if userland caller is privileged (aka has "root" access rights).
+	@return int TRUE or FALSE */
+static int is_userland_caller_privileged(
+	void
+) {
+	/* For some platforms we cannot run the Daemon as root - for Android
+	 * compliance tests it is not allowed, thus we assume the daemon is run
+	 * as the system user.
+	 * In Android the system user for daemons has no particular capabilities
+	 * other than a fixed UID: AID_SYSTEM 1000
+	 * The actual number is guaranteed to be the same in all Android systems
+	 * so we will take it for granted: see android_filesystem_config.h in
+	 * the Android source tree for all UIDs and their meaning:
+	 * http://android-dls.com/wiki/index.php?title=Android_UIDs_and_GIDs
+	 */
+#ifdef MC_ANDROID_UID_CHECK
+	return current_euid() <= AID_SYSTEM;
+#else
+	/* capable should cover all possibilities, root or sudo, uid checking
+	 * was not very reliable */
+	return capable(CAP_SYS_ADMIN);
+#endif
+}
+
+
+
+/*----------------------------------------------------------------------------*/
+static void unlock_page_from_used_l2_table(
+	struct page *page
+){
+	/* REV axh: check if we should do this. */
+	SetPageDirty(page);
+
+	/* release page, old api was page_cache_release() */
+	ClearPageReserved(page);
+	put_page(page);
+}
+
+/*----------------------------------------------------------------------------*/
+/* convert L2 PTE to page pointer */
+static struct page *l2_pte_to_page(
+	pte_t pte
+) {
+	void *phys_page_addr	= (void *)((unsigned int)pte & PAGE_MASK);
+	unsigned int pfn	= addr_to_pfn(phys_page_addr);
+	struct page *page	= pfn_to_page(pfn);
+	return page;
+}
+
+/*----------------------------------------------------------------------------*/
+/* convert page pointer to L2 PTE */
+static pte_t page_to_l2_pte(
+	struct page *page
+)
+{
+	unsigned int pfn	= page_to_pfn(page);
+	void *phys_addr		= pfn_to_addr(pfn);
+	pte_t pte		= (pte_t)((unsigned int)phys_addr & PAGE_MASK);
+	return pte;
+}
+
+
+/*----------------------------------------------------------------------------*/
+static inline int lock_user_pages(
+	struct task_struct	*task,
+	void			*virt_start_page_addr,
+	int			nr_of_pages,
+	struct page		**pages
+)
+{
+	int		ret = 0;
+	int		locked_pages = 0;
+	unsigned int	i;
+
+	do {
+
+		/* lock user pages, must hold the mmap_sem to do this. */
+		down_read(&(task->mm->mmap_sem));
+		locked_pages = get_user_pages(
+					  task,
+					  task->mm,
+					  (unsigned long)virt_start_page_addr,
+					  nr_of_pages,
+					  1, /* write access */
+					  0, /* they say drivers should always
+						pass 0 here..... */
+					  pages,
+					  NULL); /* we don't need the VMAs */
+		up_read(&(task->mm->mmap_sem));
+
+		/* could we lock all pages? */
+		if (locked_pages != nr_of_pages) {
+			MCDRV_DBG_ERROR(
+				"get_user_pages() failed, "
+				"locked_pages=%d\n",
+				locked_pages);
+			ret = -ENOMEM;
+			/* check if an error has been returned. */
+			if (locked_pages < 0) {
+				ret = locked_pages;
+				locked_pages = 0;
+			}
+			break;
+		}
+
+		/* do cache maintenance on locked pages. */
+		for (i = 0; i < nr_of_pages; i++)
+			flush_dcache_page(pages[i]);
+
+	} while (FALSE);
+
+
+	if (ret != 0) {
+		/* release all locked pages. */
+		MCDRV_ASSERT(locked_pages >= 0);
+		for (i = 0; i < locked_pages; i++)
+			put_page(pages[i]);
+	}
+
+	return ret;
+
+}
+
+/*
+#############################################################################
+##
+## Driver implementation functions
+##
+#############################################################################*/
+/*----------------------------------------------------------------------------*/
+/* check if caller is MobiCore Daemon */
+static unsigned int is_caller_mc_daemon(
+	struct mc_instance *instance
+)
+{
+	return ((instance != NULL)
+		&& (mc_drv_kmod_ctx.daemon_inst == instance));
+}
+
+
+/*----------------------------------------------------------------------------*/
+/* Get process context from file pointer */
+static struct mc_instance *get_instance(
+	struct file *file
+) {
+	MCDRV_ASSERT(file != NULL);
+
+	return (struct mc_instance *)(file->private_data);
+}
+
+
+/*----------------------------------------------------------------------------*/
+/* Get a unique ID */
+static unsigned int get_mc_kmod_unique_id(
+	void
+)
+{
+	return (unsigned int)atomic_inc_return(
+			   &(mc_drv_kmod_ctx.unique_counter));
+}
+
+
+/*----------------------------------------------------------------------------*/
+/* Get kernel pointer to shared L2 table given a per-process reference */
+static struct l2table *get_l2_table_kernel_virt(
+	struct mc_used_l2_table	*used_l2table
+)
+{
+	MCDRV_ASSERT(used_l2table != NULL);
+	MCDRV_ASSERT(used_l2table->set != NULL);
+	MCDRV_ASSERT(used_l2table->set->kernel_virt != NULL);
+	return &(used_l2table->set->kernel_virt->table[used_l2table->idx]);
+}
+
+/*----------------------------------------------------------------------------*/
+/* Get physical address of a shared L2 table given a per-process reference */
+static struct l2table *get_l2_table_phys(
+	struct mc_used_l2_table  *used_l2table
+)
+{
+	MCDRV_ASSERT(used_l2table != NULL);
+	MCDRV_ASSERT(used_l2table->set != NULL);
+	MCDRV_ASSERT(used_l2table->set->phys != NULL);
+	return &(used_l2table->set->phys->table[used_l2table->idx]);
+}
+
+/*----------------------------------------------------------------------------*/
+static unsigned int is_in_use_used_l2_table(
+	struct mc_used_l2_table	*used_l2table
+)
+{
+	return ((used_l2table->flags &
+			  (MC_WSM_L2_CONTAINER_WSM_LOCKED_BY_APP
+			   | MC_WSM_L2_CONTAINER_WSM_LOCKED_BY_MC)) != 0);
+}
+
+
+
+/*----------------------------------------------------------------------------*/
+static struct mc_used_l2_table *find_used_l2_table_by_handle(
+	unsigned int	handle
+) {
+	struct mc_used_l2_table  *used_l2table;
+	struct mc_used_l2_table  *used_l2table_with_handle = NULL;
+
+	list_for_each_entry(
+		used_l2table,
+		&(mc_drv_kmod_ctx.mc_used_l2_tables),
+		list
+	) {
+		if (handle == used_l2table->handle) {
+			used_l2table_with_handle = used_l2table;
+			break;
+		}
+	}
+
+	return used_l2table_with_handle;
+}
+
+/*
+#############################################################################
+##
+## L2 Table Pool
+##
+#############################################################################*/
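+
+/*
+ * Pool layout, as an informal sketch: every mc_l2_tables_set wraps one
+ * reserved kernel page that stores several L2 tables side by side (an ARM
+ * second-level table describing 1 MiB is 256 entries of 4 bytes, i.e. 1 KiB,
+ * so MC_DRV_KMOD_L2_TABLE_PER_PAGES of them share a page).  Bit i of
+ * usage_bitmap tracks slot i of that page:
+ *
+ *	free    : (l2table_set->usage_bitmap & (1U << i)) == 0
+ *	claim   :  l2table_set->usage_bitmap |=  (1U << i);
+ *	release :  l2table_set->usage_bitmap &= ~(1U << i);
+ *
+ * A set whose bitmap drops back to zero is unreserved and its page returned
+ * in free_used_l2_table().
+ */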
+
+/*----------------------------------------------------------------------------*/
+static struct mc_used_l2_table *allocate_used_l2_table(
+	struct mc_instance	*instance
+) {
+	int				ret = 0;
+	struct mc_l2_table_store	*l2table_store = NULL;
+	struct mc_l2_tables_set		*l2table_set = NULL;
+	struct mc_used_l2_table		*used_l2table = NULL;
+	struct page			*page;
+	unsigned int			i = 0;
+
+	do {
+		/* allocate a WSM L2 descriptor */
+		used_l2table  = kmalloc(sizeof(*used_l2table), GFP_KERNEL);
+		if (used_l2table == NULL) {
+			ret = -ENOMEM;
+			MCDRV_DBG_ERROR("out of memory\n");
+			break;
+		}
+		/* clean */
+		memset(used_l2table, 0, sizeof(*used_l2table));
+		used_l2table->handle = get_mc_kmod_unique_id();
+		used_l2table->owner = instance;
+
+		/* add to global list. */
+		init_and_add_to_list(
+			&(used_l2table->list),
+			&(mc_drv_kmod_ctx.mc_used_l2_tables));
+
+		/* walk through list to find a free set. */
+		list_for_each_entry(
+			l2table_set,
+			&(mc_drv_kmod_ctx.mc_l2_tables_sets),
+			list
+		) {
+			for (i = 0; i < MC_DRV_KMOD_L2_TABLE_PER_PAGES; i++) {
+				if ((l2table_set->usage_bitmap & (1U << i))
+					== 0) {
+					/* found a set,
+						l2table_set and i are set. */
+					l2table_store =
+						l2table_set->kernel_virt;
+					break;
+				}
+			}
+			if (l2table_store != NULL)
+				break;
+		} /* end while */
+
+		if (l2table_store == NULL) {
+			l2table_store = (struct mc_l2_table_store *)
+						get_zeroed_page(GFP_KERNEL);
+			if (l2table_store == NULL) {
+				ret = -ENOMEM;
+				break;
+			}
+
+			/* Actually, locking is not necessary, because kernel
+				memory is not supposed to get swapped out. But
+				we play safe.... */
+			page = virt_to_page(l2table_store);
+			SetPageReserved(page);
+
+			/* allocate a descriptor */
+			l2table_set = kmalloc(sizeof(*l2table_set), GFP_KERNEL);
+			if (l2table_set == NULL) {
+				kfree(l2table_store);
+				ret = -ENOMEM;
+				break;
+			}
+			/* initialize */
+			memset(l2table_set, 0, sizeof(*l2table_set));
+
+			l2table_set->kernel_virt = l2table_store;
+			l2table_set->page = page;
+			l2table_set->phys = (void *)virt_to_phys(l2table_store);
+
+			/* init add to list. */
+			init_and_add_to_list(
+				&(l2table_set->list),
+				&(mc_drv_kmod_ctx.mc_l2_tables_sets));
+
+			/* use first table */
+			i = 0;
+		}
+
+		/* set set usage */
+		l2table_set->usage_bitmap |= (1U << i);
+
+		/* set set reference */
+		used_l2table->set = l2table_set;
+		used_l2table->idx = i;
+
+		MCDRV_DBG_VERBOSE(
+			"chunkPhys=%p,idx=%d\n",
+			l2table_set->phys, i);
+
+	} while (FALSE);
+
+	if (ret != 0) {
+		if (used_l2table != NULL) {
+			/* remove from list */
+			list_del(&(used_l2table->list));
+			/* free memory */
+			kfree(used_l2table);
+			used_l2table = NULL;
+		}
+	}
+
+	return used_l2table;
+}
+
+/*----------------------------------------------------------------------------*/
+static void free_used_l2_table(
+	struct mc_used_l2_table *used_l2table
+)
+{
+	struct mc_l2_tables_set	*l2table_set;
+	unsigned int		idx;
+
+	MCDRV_ASSERT(used_l2table != NULL);
+
+	l2table_set = used_l2table->set;
+	MCDRV_ASSERT(l2table_set != NULL);
+
+	/* clean usage flag */
+	idx = used_l2table->idx;
+	MCDRV_ASSERT(idx < MC_DRV_KMOD_L2_TABLE_PER_PAGES);
+	l2table_set->usage_bitmap &= ~(1U << idx);
+
+	/* if nobody uses this set, we can release it. */
+	if (l2table_set->usage_bitmap == 0) {
+		MCDRV_ASSERT(l2table_set->page != NULL);
+		ClearPageReserved(l2table_set->page);
+
+		MCDRV_ASSERT(l2table_set->kernel_virt != NULL);
+		free_page((unsigned long)l2table_set->kernel_virt);
+
+		/* remove from list */
+		list_del(&(l2table_set->list));
+
+		/* free memory */
+		kfree(l2table_set);
+	}
+
+	return;
+}
+
+
+
+/*----------------------------------------------------------------------------*/
+/**
+ * Create an L2 table in a WSM container that has been allocated previously.
+ *
+ * @param task		pointer to task owning WSM
+ * @param wsm_buffer	user space WSM start
+ * @param wsm_len	WSM length
+ * @param used_l2table	Pointer to L2 table details
+ */
+static int map_buffer_into_used_l2_table(
+	struct task_struct	*task,
+	void			*wsm_buffer,
+	unsigned int		wsm_len,
+	struct mc_used_l2_table	*used_l2table
+)
+{
+	int		ret = 0;
+	unsigned int	i, nr_of_pages;
+	void		*virt_addr_page;
+	struct page	*page;
+	struct l2table	*l2table;
+	struct page	**l2table_as_array_of_pointers_to_page;
+
+	/* task can be null when called from kernel space */
+	MCDRV_ASSERT(wsm_buffer != NULL);
+	MCDRV_ASSERT(wsm_len != 0);
+	MCDRV_ASSERT(used_l2table != NULL);
+
+	MCDRV_DBG_VERBOSE("WSM addr=0x%p, len=0x%08x\n", wsm_buffer, wsm_len);
+
+	/* If called from kernel space, check that wsm_buffer is actually
+	 * a vmalloc'ed address */
+	if (task == NULL && !is_vmalloc_addr(wsm_buffer)) {
+		MCDRV_DBG_ERROR("WSM addr is not a vmalloc address");
+		return -EINVAL;
+	}
+
+	l2table = get_l2_table_kernel_virt(used_l2table);
+	/* We use the memory for the L2 table to hold the page pointers
+	and convert them later. This works, as everything comes
+	down to a 32 bit value. */
+	l2table_as_array_of_pointers_to_page = (struct page **)l2table;
+
+	do {
+
+		/* no size > 1 MiB supported */
+		if (wsm_len > SZ_1M) {
+			MCDRV_DBG_ERROR("size > 1 MiB\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		/* calculate page usage */
+		virt_addr_page = get_page_start(wsm_buffer);
+		nr_of_pages  = get_nr_of_pages_for_buffer(wsm_buffer, wsm_len);
+
+
+		MCDRV_DBG_VERBOSE("virt addr pageStart=0x%p,pages=%d\n",
+				  virt_addr_page,
+				  nr_of_pages);
+
+		/* L2 table can hold max 1MiB in 256 pages. */
+		if ((nr_of_pages*PAGE_SIZE) > SZ_1M) {
+			MCDRV_DBG_ERROR("WSM paged exceed 1 MiB\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		/* Request comes from user space */
+		if (task != NULL) {
+			/* lock user pages in memory, so they do not get swapped
+			* out.
+			* REV axh:
+			* Kernel 2.6.27 added a new get_user_pages_fast()
+			* function, maybe it is called fast_gup() in some
+			* versions.
+			* handle user process doing a fork().
+			* Child should not get things.
+			* http://osdir.com/ml/linux-media/2009-07/msg00813.html
+			* http://lwn.net/Articles/275808/ */
+
+			ret = lock_user_pages(
+					  task,
+					  virt_addr_page,
+					  nr_of_pages,
+					  l2table_as_array_of_pointers_to_page);
+			if (ret != 0) {
+				MCDRV_DBG_ERROR("lock_user_pages() failed\n");
+				break;
+			}
+		}
+		/* Request comes from kernel space(vmalloc buffer) */
+		else {
+			void *uaddr = wsm_buffer;
+			for (i = 0; i < nr_of_pages; i++) {
+				page = vmalloc_to_page(uaddr);
+				if (!page) {
+					MCDRV_DBG_ERROR(
+						"vmalloc_to_Page()"
+						" failed to map address\n");
+					ret = -EINVAL;
+					break;
+				}
+				get_page(page);
+				/* Lock the page in memory, it can't be swapped
+				 * out */
+				SetPageReserved(page);
+				l2table_as_array_of_pointers_to_page[i] = page;
+				uaddr += PAGE_SIZE;
+			}
+		}
+
+		used_l2table->nr_of_pages = nr_of_pages;
+		used_l2table->flags |= MC_WSM_L2_CONTAINER_WSM_LOCKED_BY_APP;
+
+		/* create L2 Table entries. used_l2table->table contains a list
+		of page pointers here. For a proper cleanup we have to ensure
+		that the following code either works and used_l2table contains
+		a valid L2 table - or fails and used_l2table->table contains the
+		list of page pointers. Any mixed contents will make cleanup
+		difficult.*/
+
+		for (i = 0; i < nr_of_pages; i++) {
+			pte_t pte;
+			page = l2table_as_array_of_pointers_to_page[i];
+
+			/* create L2 table entry, see ARM MMU docu for details
+			about flags stored in the lowest 12 bits. As a side
+			reference, the Article "ARM's multiply-mapped memory
+			mess" found in the collection at at
+			http://lwn.net/Articles/409032/ is also worth reading.*/
+			pte = page_to_l2_pte(page)
+					| L2_FLAG_AP1 | L2_FLAG_AP0
+					| L2_FLAG_C | L2_FLAG_B
+					| L2_FLAG_SMALL | L2_FLAG_SMALL_XN
+			/* Linux uses different mappings for SMP systems (the
+			 * sharing flag is set for the pte). In order not to
+			 * confuse things too much in MobiCore make sure the
+			 * shared buffers have the same flags.
+			 * This should also be done on the SWD side.
+			 */
+#ifdef CONFIG_SMP
+					| L2_FLAG_S | L2_FLAG_SMALL_TEX0
+#endif
+				  ;
+
+			l2table->table_entries[i] = pte;
+			MCDRV_DBG_VERBOSE("L2 entry %d:  0x%08x\n", i,
+					  (unsigned int)(pte));
+		}
+
+		/* ensure rest of table is empty */
+		while (i < 256)
+			l2table->table_entries[i++] = (pte_t)0;
+
+	} while (FALSE);
+
+	return ret;
+}
+
+
+/*----------------------------------------------------------------------------*/
+/**
+ * Remove an L2 table from a WSM container. Afterwards the container may be
+ * released.
+ *
+ * @param used_l2table	Pointer to L2 table details
+ */
+
+static void unmap_buffers_from_used_l2_table(
+	struct mc_used_l2_table	*used_l2table
+)
+{
+	unsigned int	i;
+	struct l2table	*l2table;
+
+	MCDRV_ASSERT(used_l2table != NULL);
+	/* this should not happen, as we have no empty tables. */
+	MCDRV_ASSERT(!is_in_use_used_l2_table(used_l2table));
+
+	/* found the table, now release the resources. */
+	MCDRV_DBG_VERBOSE("clear L2 table, phys_base=%p, nr_of_pages=%d\n",
+			  get_l2_table_phys(used_l2table),
+			  used_l2table->nr_of_pages);
+
+	l2table = get_l2_table_kernel_virt(used_l2table);
+
+	/* release all locked user space pages */
+	for (i = 0; i < used_l2table->nr_of_pages; i++) {
+		/* convert physical entries from L2 table to page pointers */
+		pte_t pte = get_l2_table_kernel_virt(used_l2table)->
+							table_entries[i];
+		struct page *page = l2_pte_to_page(pte);
+		unlock_page_from_used_l2_table(page);
+	}
+
+	/* remember that all pages have been freed */
+	used_l2table->nr_of_pages = 0;
+
+	return;
+}
+
+
+/*
+#############################################################################
+##
+## Helper functions
+##
+#############################################################################*/
+/*----------------------------------------------------------------------------*/
+#define FREE_FROM_SWD	TRUE
+#define FREE_FROM_NWD	FALSE
+/** Delete a used l2 table. */
+static void delete_used_l2_table(
+	struct mc_used_l2_table	*used_l2table,
+	unsigned int		is_swd
+)
+{
+	if (is_swd) {
+		used_l2table->flags &=
+			~MC_WSM_L2_CONTAINER_WSM_LOCKED_BY_MC;
+	} else {
+		used_l2table->flags &=
+			~MC_WSM_L2_CONTAINER_WSM_LOCKED_BY_APP;
+		used_l2table->owner = NULL;
+	}
+
+	/* release if NWd and SWd/MC no longer use it. */
+	if (is_in_use_used_l2_table(used_l2table)) {
+		MCDRV_DBG_WARN(
+			"WSM L2 table still in use: physBase=%p, "
+			"nr_of_pages=%d\n",
+			get_l2_table_phys(used_l2table),
+			used_l2table->nr_of_pages);
+	} else {
+		unmap_buffers_from_used_l2_table(used_l2table);
+		free_used_l2_table(used_l2table);
+
+		list_del(&(used_l2table->list));
+
+		kfree(used_l2table);
+	}
+	return;
+}
+
+/*----------------------------------------------------------------------------*/
+/** Allocate L2 table and map buffer into it. That is, create respective table
+	entries. Must hold Semaphore mc_drv_kmod_ctx.wsm_l2_sem */
+static struct mc_used_l2_table *new_used_l2_table(
+	struct mc_instance	*instance,
+	struct task_struct	*task,
+	void			*wsm_buffer,
+	unsigned int		wsm_len
+) {
+	int			ret = 0;
+	struct mc_used_l2_table	*used_l2table;
+
+	do {
+		used_l2table = allocate_used_l2_table(instance);
+		if (used_l2table == NULL) {
+			MCDRV_DBG_ERROR(
+				"allocate_used_l2_table() failed\n");
+			break;
+		}
+
+		/* create the L2 page for the WSM */
+		ret = map_buffer_into_used_l2_table(
+				  task,
+				  wsm_buffer,
+				  wsm_len,
+				  used_l2table);
+		if (ret != 0) {
+			MCDRV_DBG_ERROR(
+				"map_buffer_into_used_l2_table() failed\n");
+			delete_used_l2_table(used_l2table, FREE_FROM_NWD);
+			used_l2table = NULL;
+			break;
+		}
+
+	} while (FALSE);
+
+
+	return used_l2table;
+}
+
+/*
+#############################################################################
+##
+## IoCtl handler
+##
+#############################################################################*/
+
+/**
+ * Map a virtual memory buffer structure to MobiCore
+ * @param instance
+ * @param addr		address of the buffer (NB: it must be kernel virtual!)
+ * @param len		buffer length
+ * @param handle	pointer to handle
+ * @param phys_wsm_l2_table	pointer to physical L2 table(?)
+ *
+ * @return 0 if no error
+ *
+ */
+/*----------------------------------------------------------------------------*/
+int mobicore_map_vmem(
+	struct mc_instance	*instance,
+	void			*addr,
+	uint32_t		len,
+	uint32_t		*handle,
+	void			**phys_wsm_l2_table
+)
+{
+	int ret = 0;
+	struct mc_used_l2_table *used_l2table = NULL;
+	MCDRV_ASSERT(instance != NULL);
+
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	do {
+		if (len == 0) {
+			MCDRV_DBG_ERROR("len=0 is not supported!\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		/* try to get the semaphore */
+		ret = down_interruptible(&(mc_drv_kmod_ctx.wsm_l2_sem));
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("down_interruptible() failed with %d\n",
+					ret);
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		do {
+			used_l2table = new_used_l2_table(
+						   instance,
+						   NULL,
+						   addr,
+						   len);
+
+			if (used_l2table == NULL) {
+				MCDRV_DBG_ERROR("new_used_l2_table() failed\n");
+				ret = -EINVAL;
+				break;
+			}
+
+			/* set response */
+			*handle = used_l2table->handle;
+			*phys_wsm_l2_table =
+				(void *)get_l2_table_phys(used_l2table);
+			MCDRV_DBG_VERBOSE("handle: %d, phys=%p\n",
+					  *handle,
+					  (void *)(*phys_wsm_l2_table));
+
+		} while (FALSE);
+
+		/* release semaphore */
+		up(&(mc_drv_kmod_ctx.wsm_l2_sem));
+
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(mobicore_map_vmem);
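+
+/*
+ * Minimal kernel-space usage sketch (illustrative only; error handling and
+ * the way the mc_instance is obtained through mc_kernel_api.h are elided):
+ *
+ *	void *buf = vmalloc(SZ_4K);	// must be a vmalloc'ed address
+ *	uint32_t handle;
+ *	void *phys_l2;
+ *
+ *	if (mobicore_map_vmem(instance, buf, SZ_4K, &handle, &phys_l2) == 0) {
+ *		// phys_l2 points to the physical WSM L2 table, handle
+ *		// identifies the mapping for mobicore_unmap_vmem() later
+ *		mobicore_unmap_vmem(instance, handle);
+ *	}
+ *	vfree(buf);
+ */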
+/*----------------------------------------------------------------------------*/
+/**
+ *
+ * @param instance
+ * @param arg
+ *
+ * @return 0 if no error
+ *
+ */
+static int handle_ioctl_app_register_wsm_l2(
+	struct mc_instance			*instance,
+	union mc_ioctl_app_reg_wsm_l2_params	*user_params
+)
+{
+	int					ret = 0;
+	union mc_ioctl_app_reg_wsm_l2_params	params;
+	struct mc_used_l2_table			*used_l2table = NULL;
+	struct pid				*pid_struct = NULL;
+	struct task_struct			*task = current;
+
+	MCDRV_ASSERT(instance != NULL);
+
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	do {
+		/* get user parameters */
+		ret = copy_from_user(
+				  &(params.in),
+				  &(user_params->in),
+				  sizeof(params.in));
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("copy_from_user() failed\n");
+			break;
+		}
+
+		/* daemon can do this for another task. */
+		if (params.in.pid != 0) {
+			MCDRV_DBG_ERROR("pid != 0 unsupported\n");
+			ret = -EINVAL;
+			break;
+		}
+		if (params.in.len == 0) {
+			MCDRV_DBG_ERROR("len=0 is not supported!\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		/* try to get the semaphore */
+		ret = down_interruptible(&(mc_drv_kmod_ctx.wsm_l2_sem));
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("down_interruptible() failed with %d\n",
+					ret);
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		do {
+			used_l2table = new_used_l2_table(
+						   instance,
+						   task,
+						   (void *)(params.in.buffer),
+						   params.in.len);
+
+			if (used_l2table == NULL) {
+				MCDRV_DBG_ERROR("new_used_l2_table() failed\n");
+				ret = -EINVAL;
+				break;
+			}
+
+			/* if the daemon does this, we set the MC lock */
+			if (is_caller_mc_daemon(instance))
+				used_l2table->flags |=
+					MC_WSM_L2_CONTAINER_WSM_LOCKED_BY_MC;
+
+			/* set response */
+			memset(&params.out, 0, sizeof(params.out));
+			params.out.handle = used_l2table->handle;
+			/* TODO: return the physical address for daemon only,
+				otherwise set NULL */
+			params.out.phys_wsm_l2_table =
+				(uint32_t)get_l2_table_phys(used_l2table);
+
+			MCDRV_DBG_VERBOSE("handle: %d, phys=%p\n",
+					params.out.handle,
+					(void *)(params.out.phys_wsm_l2_table));
+
+
+			/* copy L2Table to user space */
+			ret = copy_to_user(
+					  &(user_params->out),
+					  &(params.out),
+					  sizeof(params.out));
+			if (ret != 0) {
+				MCDRV_DBG_ERROR("copy_to_user() failed\n");
+
+				/* free the table again, as app does not know
+					about anything. */
+				if (is_caller_mc_daemon(instance)) {
+					used_l2table->flags &=
+					~MC_WSM_L2_CONTAINER_WSM_LOCKED_BY_MC;
+				}
+				delete_used_l2_table(used_l2table,
+						FREE_FROM_NWD);
+				used_l2table = NULL;
+				break;
+			}
+
+		} while (FALSE);
+
+		/* release semaphore */
+		up(&(mc_drv_kmod_ctx.wsm_l2_sem));
+
+	} while (FALSE);
+
+
+
+	/* release PID struct reference */
+	if (pid_struct != NULL)
+		put_pid(pid_struct);
+
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return ret;
+}
+
+
+/*----------------------------------------------------------------------------*/
+/**
+ * Unmap a virtual memory buffer from MobiCore
+ * @param instance
+ * @param handle
+ *
+ * @return 0 if no error
+ *
+ */
+int mobicore_unmap_vmem(
+	struct mc_instance	*instance,
+	uint32_t		handle
+)
+{
+	int ret = 0;
+	struct mc_used_l2_table *used_l2table = NULL;
+
+	MCDRV_ASSERT(instance != NULL);
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	do {
+		/* try to get the semaphore */
+		ret = down_interruptible(&(mc_drv_kmod_ctx.wsm_l2_sem));
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("processOpenSession() failed with %d\n",
+					ret);
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		do {
+			used_l2table = find_used_l2_table_by_handle(handle);
+			if (used_l2table == NULL) {
+				ret = -EINVAL;
+				MCDRV_DBG_ERROR("entry not found\n");
+				break;
+			}
+
+			if (instance != used_l2table->owner) {
+				ret = -EINVAL;
+				MCDRV_DBG_ERROR("instance does no own it\n");
+				break;
+			}
+
+			/* free table (if no further locks exist) */
+			delete_used_l2_table(used_l2table, FREE_FROM_NWD);
+			used_l2table = NULL;
+			/* there are no out parameters */
+		} while (FALSE);
+		/* release semaphore */
+		up(&(mc_drv_kmod_ctx.wsm_l2_sem));
+
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(mobicore_unmap_vmem);
+/*----------------------------------------------------------------------------*/
+/**
+ *
+ * @param instance
+ * @param arg
+ *
+ * @return 0 if no error
+ *
+ */
+static int handle_ioctl_app_unregister_wsm_l2(
+	struct mc_instance			*instance,
+	struct mc_ioctl_app_unreg_wsm_l2_params	*user_params
+)
+{
+	int					ret = 0;
+	struct mc_ioctl_app_unreg_wsm_l2_params	params;
+	struct mc_used_l2_table			*used_l2table = NULL;
+
+	MCDRV_ASSERT(instance != NULL);
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	do {
+		ret = copy_from_user(
+				  &(params.in),
+				  &(user_params->in),
+				  sizeof(params.in));
+
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("copy_from_user\n");
+			break;
+		}
+
+		/* try to get the semaphore */
+		ret = down_interruptible(&(mc_drv_kmod_ctx.wsm_l2_sem));
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("down_interruptible() failed with %d\n",
+					ret);
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		do {
+			/* daemon can do this for another task. */
+			if (params.in.pid != 0) {
+				MCDRV_DBG_ERROR("pid != 0 unsupported\n");
+				ret = -EINVAL;
+				break;
+			}
+
+			used_l2table =
+				find_used_l2_table_by_handle(params.in.handle);
+			if (used_l2table == NULL) {
+				ret = -EINVAL;
+				MCDRV_DBG_ERROR("entry not found\n");
+				break;
+			}
+
+			if (is_caller_mc_daemon(instance)) {
+				/* if daemon does this, we have to release the
+					MobiCore lock. */
+				used_l2table->flags &=
+					~MC_WSM_L2_CONTAINER_WSM_LOCKED_BY_MC;
+			} else if (instance != used_l2table->owner) {
+				ret = -EINVAL;
+				MCDRV_DBG_ERROR("instance does no own it\n");
+				break;
+			}
+
+			/* free table (if no further locks exist) */
+			delete_used_l2_table(used_l2table, FREE_FROM_NWD);
+			used_l2table = NULL;
+
+			/* there are no out parameters */
+
+		} while (FALSE);
+
+		/* release semaphore */
+		up(&(mc_drv_kmod_ctx.wsm_l2_sem));
+
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return ret;
+}
+
+
+/*----------------------------------------------------------------------------*/
+static int handle_ioctl_daemon_lock_wsm_l2(
+	struct mc_instance				*instance,
+	struct mc_ioctl_daemon_lock_wsm_l2_params	*user_params
+)
+{
+	int						ret = 0;
+	struct mc_ioctl_daemon_lock_wsm_l2_params	params;
+	struct mc_used_l2_table				*used_l2table = NULL;
+
+	MCDRV_ASSERT(instance != NULL);
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	do {
+		if (!is_caller_mc_daemon(instance)) {
+			MCDRV_DBG_ERROR("caller not MobiCore Daemon\n");
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = copy_from_user(
+				  &(params.in),
+				  &(user_params->in),
+				  sizeof(params.in));
+
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("copy_from_user\n");
+			break;
+		}
+		/* try to get the semaphore */
+		ret = down_interruptible(&(mc_drv_kmod_ctx.wsm_l2_sem));
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("down_interruptible() failed with %d\n",
+					ret);
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		do {
+			used_l2table =
+				find_used_l2_table_by_handle(params.in.handle);
+			if (used_l2table == NULL) {
+				ret = -EINVAL;
+				MCDRV_DBG_ERROR("entry not found\n");
+				break;
+			}
+			if (instance != used_l2table->owner) {
+				ret = -EINVAL;
+				MCDRV_DBG_ERROR("instance does no own it\n");
+				break;
+			}
+
+			/* lock entry */
+			if ((used_l2table->flags &
+				  MC_WSM_L2_CONTAINER_WSM_LOCKED_BY_MC) != 0) {
+				MCDRV_DBG_WARN("entry already locked\n");
+			}
+			used_l2table->flags |=
+				MC_WSM_L2_CONTAINER_WSM_LOCKED_BY_MC;
+
+			/* prepare response */
+			memset(&(params.out), 0, sizeof(params.out));
+			params.out.phys_wsm_l2_table =
+				(uint32_t)get_l2_table_phys(used_l2table);
+
+			/* copy to user space */
+			ret = copy_to_user(
+					  &(user_params->out),
+					  &(params.out),
+					  sizeof(params.out));
+			if (ret != 0) {
+				MCDRV_DBG_ERROR("copy_to_user() failed\n");
+
+				/* undo, as userspace did not get it. */
+				used_l2table->flags &=
+					~MC_WSM_L2_CONTAINER_WSM_LOCKED_BY_MC;
+				break;
+			}
+
+		} while (FALSE);
+
+		/* release semaphore */
+		up(&(mc_drv_kmod_ctx.wsm_l2_sem));
+
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return ret;
+}
+
+
+/*----------------------------------------------------------------------------*/
+static int handle_ioctl_daemon_unlock_wsm_l2(
+	struct mc_instance				*instance,
+	struct mc_ioctl_daemon_unlock_wsm_l2_params	*user_params
+)
+{
+	int						ret = 0;
+	struct mc_ioctl_daemon_unlock_wsm_l2_params	params;
+	struct mc_used_l2_table				*used_l2table = NULL;
+
+	MCDRV_ASSERT(instance != NULL);
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	do {
+		if (!is_caller_mc_daemon(instance)) {
+			MCDRV_DBG_ERROR("caller not MobiCore Daemon\n");
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = copy_from_user(
+				  &(params.in),
+				  &(user_params->in),
+				  sizeof(params.in));
+
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("copy_from_user\n");
+			break;
+		}
+		/* try to get the semaphore */
+		ret = down_interruptible(&(mc_drv_kmod_ctx.wsm_l2_sem));
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("down_interruptible() failed with %d\n",
+						ret);
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		do {
+			used_l2table =
+				find_used_l2_table_by_handle(params.in.handle);
+			if (used_l2table == NULL) {
+				ret = -EINVAL;
+				MCDRV_DBG_ERROR("entry not found\n");
+				break;
+			}
+			if (instance != used_l2table->owner) {
+				ret = -EINVAL;
+				MCDRV_DBG_ERROR("instance does no own it\n");
+				break;
+			}
+
+			/* check that the entry is actually locked */
+			if ((used_l2table->flags &
+				  MC_WSM_L2_CONTAINER_WSM_LOCKED_BY_MC) == 0) {
+				MCDRV_DBG_WARN("entry is not locked locked\n");
+			}
+
+			/* free table (if no further locks exist) */
+			delete_used_l2_table(used_l2table, FREE_FROM_SWD);
+			used_l2table = NULL;
+
+			/* there are no out parameters */
+
+		} while (FALSE);
+
+		/* release semaphore */
+		up(&(mc_drv_kmod_ctx.wsm_l2_sem));
+
+	} while (FALSE);
+
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+/** Clears the reserved bit of each page and frees the pages */
+static inline void free_continguous_pages(
+	void		*addr,
+	unsigned int	size
+)
+{
+	struct page *page = virt_to_page(addr);
+	int i;
+	for (i = 0; i < size; i++) {
+		MCDRV_DBG_VERBOSE("free page at 0x%p\n", page);
+		ClearPageReserved(page);
+		page++;
+	}
+	/* REV luh: see man kmalloc */
+	free_pages((unsigned long)addr, size_to_order(size));
+}
+
+/*----------------------------------------------------------------------------*/
+/**
+ * Free a WSM buffer allocated with mobicore_allocate_wsm
+ * @param instance
+ * @param handle		handle of the buffer
+ *
+ * @return 0 if no error
+ *
+ */
+int mobicore_free(
+	struct mc_instance	*instance,
+	uint32_t		handle
+)
+{
+	int ret = 0;
+	unsigned int i;
+	struct mc_contg_buffer	*contg_buffer;
+
+	do {
+		/* search for the given address in the contg_buffers list */
+		for (i = 0; i < MC_DRV_KMOD_CONTG_BUFFER_MAX; i++) {
+			contg_buffer = &(instance->contg_buffers[i]);
+			if (contg_buffer->handle == handle)
+				break;
+		}
+		if (i == MC_DRV_KMOD_CONTG_BUFFER_MAX) {
+			MCDRV_DBG_ERROR("contigous buffer not found\n");
+			ret = -EFAULT;
+			break;
+		}
+
+		MCDRV_DBG_VERBOSE("phys_addr=0x%p, virt_addr=0x%p\n",
+				contg_buffer->phys_addr,
+				contg_buffer->virt_kernel_addr);
+
+		free_continguous_pages(contg_buffer->virt_kernel_addr,
+					contg_buffer->num_pages);
+
+		memset(contg_buffer, 0, sizeof(*contg_buffer));
+
+		/* there are no out parameters */
+
+	} while (FALSE);
+
+
+	return ret;
+}
+EXPORT_SYMBOL(mobicore_free);
+/*----------------------------------------------------------------------------*/
+
+/**
+ *
+ * @param instance
+ * @param arg
+ *
+ * @return 0 if no error
+ *
+ */
+static int handle_ioctl_free(
+	struct mc_instance		*instance,
+	union mc_ioctl_free_params	*user_params
+)
+{
+	int				ret = 0;
+	union mc_ioctl_free_params	params;
+
+
+	MCDRV_ASSERT(instance != NULL);
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	do {
+		ret = copy_from_user(
+				  &(params.in),
+				  &(user_params->in),
+				  sizeof(params.in));
+
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("copy_from_user\n");
+			break;
+		}
+
+		/* daemon can do this for another task. */
+		if (params.in.pid != 0) {
+			MCDRV_DBG_ERROR("pid != 0 unsupported\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		ret = mobicore_free(instance, params.in.handle);
+
+		/* there are no out parameters */
+
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return ret;
+
+}
+
+
+/*----------------------------------------------------------------------------*/
+/**
+ *
+ * @param instance
+ * @param arg
+ *
+ * @return 0 if no error
+ *
+ */
+static int handle_ioctl_info(
+	struct mc_instance	*instance,
+	union mc_ioctl_info_params	*user_params
+)
+{
+	int			ret = 0;
+	union mc_ioctl_info_params	params;
+	union mc_fc_info		fc_info;
+
+
+	MCDRV_ASSERT(instance != NULL);
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	do {
+		/* only the MobiCore Daemon is allowed to call this function */
+		if (!is_caller_mc_daemon(instance)) {
+			MCDRV_DBG_ERROR("caller not MobiCore Daemon\n");
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = copy_from_user(
+				  &(params.in),
+				  &(user_params->in),
+				  sizeof(params.in));
+
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("copy_from_user\n");
+			break;
+		}
+
+
+		memset(&fc_info, 0, sizeof(fc_info));
+		fc_info.as_in.cmd	   = MC_FC_INFO;
+		fc_info.as_in.ext_info_id = params.in.ext_info_id;
+
+		MCDRV_DBG(
+			"fc_info in cmd=0x%08x, ext_info_id=0x%08x "
+			"rfu=(0x%08x, 0x%08x)\n",
+			fc_info.as_in.cmd,
+			fc_info.as_in.ext_info_id,
+			fc_info.as_in.rfu[0],
+			fc_info.as_in.rfu[1]);
+
+		mc_fastcall(&(fc_info.as_generic));
+
+		MCDRV_DBG(
+			"fc_info out resp=0x%08x, ret=0x%08x "
+			"state=0x%08x, ext_info=0x%08x\n",
+			fc_info.as_out.resp,
+			fc_info.as_out.ret,
+			fc_info.as_out.state,
+			fc_info.as_out.ext_info);
+
+		ret = convert_fc_ret(fc_info.as_out.ret);
+		if (ret != 0)
+			break;
+
+		memset(&(params.out), 0, sizeof(params.out));
+		params.out.state  = fc_info.as_out.state;
+		params.out.ext_info = fc_info.as_out.ext_info;
+
+		ret = copy_to_user(
+				  &(user_params->out),
+				  &(params.out),
+				  sizeof(params.out));
+
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("copy_to_user\n");
+			break;
+		}
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+/**
+ *
+ * @param instance
+ * @param arg
+ *
+ * @return 0 if no error
+ *
+ */
+static int handle_ioctl_yield(
+	struct mc_instance	*instance
+)
+{
+	int			ret = 0;
+	union mc_fc_s_yield	fc_s_yield;
+
+	MCDRV_ASSERT(instance != NULL);
+
+	/* avoid putting debug output here, as we do this very often */
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	do {
+		/* only the MobiCore Daemon is allowed to call this function */
+		if (!is_caller_mc_daemon(instance)) {
+			MCDRV_DBG_ERROR("caller not MobiCore Daemon\n");
+			ret = -EFAULT;
+			break;
+		}
+
+		memset(&fc_s_yield, 0, sizeof(fc_s_yield));
+		fc_s_yield.as_in.cmd = MC_SMC_N_YIELD;
+		mc_fastcall(&(fc_s_yield.as_generic));
+		ret = convert_fc_ret(fc_s_yield.as_out.ret);
+		if (ret != 0)
+			break;
+
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+/**
+ * handle ioctl and call common notify
+ *
+ * @param instance
+ * @param arg
+ *
+ * @return 0 if no error
+ *
+ */
+static int handle_ioctl_nsiq(
+	struct mc_instance	*instance,
+	unsigned long		arg
+)
+{
+	int		ret = 0;
+
+	MCDRV_ASSERT(instance != NULL);
+
+	/* avoid putting debug output here, as we do this very often */
+	MCDRV_DBG_VERBOSE("enter\n");
+	/* only the MobiCore Daemon is allowed to call this function */
+	if (!is_caller_mc_daemon(instance)) {
+		MCDRV_DBG_ERROR("caller not MobiCore Daemon\n");
+		return -EFAULT;
+	}
+
+	do {
+		union mc_fc_nsiq fc_nsiq;
+		memset(&fc_nsiq, 0, sizeof(fc_nsiq));
+		fc_nsiq.as_in.cmd = MC_SMC_N_SIQ;
+		mc_fastcall(&(fc_nsiq.as_generic));
+		ret = convert_fc_ret(fc_nsiq.as_out.ret);
+		if (ret != 0)
+			break;
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+/**
+ *
+ * @param instance
+ * @param arg
+ *
+ * @return 0 if no error
+ *
+ */
+static int handle_ioctl_dump_status(
+	struct mc_instance	*instance,
+	unsigned long		arg
+)
+{
+	int		ret = 0;
+	int		i = 0;
+	union mc_fc_info	fc_info;
+
+	MCDRV_ASSERT(instance != NULL);
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	do {
+		/* anybody with root access can do this. */
+		if (!is_userland_caller_privileged()) {
+			MCDRV_DBG_ERROR("caller must have root privileges\n");
+			ret = -EFAULT;
+			break;
+		}
+
+		/* loop ext_info */
+		while (TRUE) {
+			memset(&fc_info, 0, sizeof(fc_info));
+			fc_info.as_in.cmd	   = MC_FC_INFO;
+			fc_info.as_in.ext_info_id = i;
+
+			MCDRV_DBG(
+				"fc_info in cmd=0x%08x, ext_info_id=0x%08x "
+				"rfu=(0x%08x, 0x%08x)\n",
+				fc_info.as_in.cmd,
+				fc_info.as_in.ext_info_id,
+				fc_info.as_in.rfu[0],
+				fc_info.as_in.rfu[1]);
+
+			mc_fastcall(&(fc_info.as_generic));
+
+			MCDRV_DBG(
+				"fc_info out resp=0x%08x, ret=0x%08x "
+				"state=0x%08x, ext_info=0x%08x\n",
+				fc_info.as_out.resp,
+				fc_info.as_out.ret,
+				fc_info.as_out.state,
+				fc_info.as_out.ext_info);
+
+			ret = convert_fc_ret(fc_info.as_out.ret);
+			if (ret != 0)
+				break;
+
+			MCDRV_DBG("state=%08X, idx=%02d: ext_info=%08X\n",
+				fc_info.as_out.state,
+				i,
+				fc_info.as_out.ext_info);
+			i++;
+		};
+
+		if (ret != 0)
+			break;
+
+
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+/**
+ *
+ * @param instance
+ * @param arg
+ *
+ * @return 0 if no error
+ *
+ */
+static int handle_ioctl_init(
+	struct mc_instance	*instance,
+	union mc_ioctl_init_params	*user_params
+)
+{
+	int			ret = 0;
+	union mc_ioctl_init_params	params;
+	union mc_fc_init		fc_init;
+
+	MCDRV_ASSERT(instance != NULL);
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	do {
+		/* only the MobiCore Daemon is allowed to call this function */
+		if (!is_caller_mc_daemon(instance)) {
+			MCDRV_DBG_ERROR("caller not MobiCore Daemon\n");
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = copy_from_user(
+				  &(params.in),
+				  &(user_params->in),
+				  sizeof(params.in));
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("copy_from_user failed\n");
+			break;
+		}
+
+		memset(&fc_init, 0, sizeof(fc_init));
+
+		fc_init.as_in.cmd	= MC_FC_INIT;
+		/* base address of mci buffer 4KB aligned */
+		fc_init.as_in.base   = (uint32_t)params.in.base;
+		/* notification buffer start/length [16:16] [start, length] */
+		fc_init.as_in.nq_info  = (params.in.nq_offset << 16)
+					  | (params.in.nq_length & 0xFFFF);
+		/* mcp buffer start/length [16:16] [start, length] */
+		fc_init.as_in.mcp_info = (params.in.mcp_offset << 16)
+					  | (params.in.mcp_length & 0xFFFF);
+
+		/* Set KMOD notification queue to start of MCI
+			mciInfo was already set up in mmap */
+		if (!mci_base) {
+			MCDRV_DBG_ERROR("No MCI set yet.\n");
+			return -EFAULT;
+		}
+		MCDRV_DBG("in cmd=0x%08x, base=0x%08x, "
+			  "nq_info=0x%08x, mcp_info=0x%08x\n",
+			  fc_init.as_in.cmd,
+			  fc_init.as_in.base,
+			  fc_init.as_in.nq_info,
+			  fc_init.as_in.mcp_info);
+
+		mc_fastcall(&(fc_init.as_generic));
+
+		MCDRV_DBG("out cmd=0x%08x, ret=0x%08x rfu=(0x%08x, 0x%08x)\n",
+			  fc_init.as_out.resp,
+			  fc_init.as_out.ret,
+			  fc_init.as_out.rfu[0],
+			  fc_init.as_out.rfu[1]);
+
+		ret = convert_fc_ret(fc_init.as_out.ret);
+		if (ret != 0)
+			break;
+
+		/* no ioctl response parameters */
+
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+/**
+ *
+ * @param instance
+ * @param arg
+ *
+ * @return 0 if no error
+ *
+ */
+static int handle_ioctl_fc_execute(
+	struct mc_instance		*instance,
+	union mc_ioctl_fc_execute_params	*user_params
+)
+{
+	int				ret = 0;
+	union mc_ioctl_fc_execute_params	params;
+	union fc_generic			fc_params;
+
+	MCDRV_ASSERT(instance != NULL);
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	do {
+		/* only the MobiCore Daemon is allowed to call this function */
+		if (!is_caller_mc_daemon(instance)) {
+			MCDRV_DBG_ERROR("caller not MobiCore Daemon\n");
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = copy_from_user(
+				  &(params.in),
+				  &(user_params->in),
+				  sizeof(params.in));
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("copy_from_user failed\n");
+			break;
+		}
+
+		fc_params.as_in.cmd = -4;/*FC_EXECUTE */
+		fc_params.as_in.param[0] = params.in.phys_start_addr;
+		fc_params.as_in.param[1] = params.in.length;
+		fc_params.as_in.param[2] = 0;
+
+		MCDRV_DBG("in cmd=0x%08x, startAddr=0x%08x, length=0x%08x\n",
+			  fc_params.as_in.cmd,
+			  fc_params.as_in.param[0],
+			  fc_params.as_in.param[1]);
+
+		mc_fastcall(&fc_params);
+
+		MCDRV_DBG("out cmd=0x%08x, ret=0x%08x rfu=(0x%08x, 0x%08x)\n",
+			  fc_params.as_out.resp,
+			  fc_params.as_out.ret,
+			  fc_params.as_out.param[0],
+			  fc_params.as_out.param[1]);
+
+		ret = convert_fc_ret(fc_params.as_out.ret);
+		if (ret != 0)
+			break;
+
+		/* no ioctl response parameters */
+
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+#define MC_MAKE_VERSION(major, minor) \
+		(((major & 0x0000ffff) << 16) | (minor & 0x0000ffff))
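+/* For example, MC_MAKE_VERSION(1, 2) packs to 0x00010002: major version in
+ * the upper 16 bits, minor version in the lower 16 bits. */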
+/**
+ *
+ * @param instance
+ * @param arg
+ *
+ * @return 0 if no error
+ *
+ */
+static int handle_ioctl_get_version(
+	struct mc_instance			*instance,
+	struct mc_ioctl_get_version_params	*user_params
+)
+{
+	int ret = 0;
+	struct mc_ioctl_get_version_params params = {
+		{
+			MC_MAKE_VERSION(MCDRVMODULEAPI_VERSION_MAJOR,
+					MCDRVMODULEAPI_VERSION_MINOR)
+		}
+	};
+
+	MCDRV_ASSERT(instance != NULL);
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	do {
+		MCDRV_DBG("mcDrvModuleApi version is %i.%i\n",
+				MCDRVMODULEAPI_VERSION_MAJOR,
+				MCDRVMODULEAPI_VERSION_MINOR);
+
+		/* copy response to user space */
+		ret = copy_to_user(
+					&(user_params->out),
+					&(params.out),
+					sizeof(params.out));
+		if (ret != 0)
+			MCDRV_DBG_ERROR("copy_to_user() failed\n");
+
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+/**
+ * This function will be called from user space as ioctl(...).
+ * @param file	pointer to file
+ * @param cmd	command
+ * @param arg	arguments
+ *
+ * @return int 0 for OK and an errno in case of error
+ */
+static long mc_kernel_module_ioctl(
+	struct file	*file,
+	unsigned int	cmd,
+	unsigned long	arg
+)
+{
+	int ret;
+	struct mc_instance *instance = get_instance(file);
+
+	MCDRV_ASSERT(instance != NULL);
+
+	switch (cmd) {
+	/*--------------------------------------------------------------------*/
+	case MC_DRV_KMOD_IOCTL_DUMP_STATUS:
+		ret = handle_ioctl_dump_status(
+				instance,
+				arg);
+		break;
+
+	/*--------------------------------------------------------------------*/
+	case MC_DRV_KMOD_IOCTL_FC_INIT:
+		ret = handle_ioctl_init(
+				instance,
+				(union mc_ioctl_init_params *)arg);
+		break;
+	/*--------------------------------------------------------------------*/
+	case MC_DRV_KMOD_IOCTL_FC_INFO:
+		ret = handle_ioctl_info(
+				instance,
+				(union mc_ioctl_info_params *)arg);
+		break;
+
+	/*--------------------------------------------------------------------*/
+	case MC_DRV_KMOD_IOCTL_FC_YIELD:
+		ret = handle_ioctl_yield(
+				instance);
+		break;
+
+	/*--------------------------------------------------------------------*/
+	case MC_DRV_KMOD_IOCTL_FC_NSIQ:
+		ret = handle_ioctl_nsiq(
+				instance,
+				arg);
+		break;
+
+	/*--------------------------------------------------------------------*/
+	case MC_DRV_KMOD_IOCTL_DAEMON_LOCK_WSM_L2:
+		ret = handle_ioctl_daemon_lock_wsm_l2(
+			instance,
+			(struct mc_ioctl_daemon_lock_wsm_l2_params *)arg);
+		break;
+
+	/*--------------------------------------------------------------------*/
+	case MC_DRV_KMOD_IOCTL_DAEMON_UNLOCK_WSM_L2:
+		ret = handle_ioctl_daemon_unlock_wsm_l2(
+			instance,
+			(struct mc_ioctl_daemon_unlock_wsm_l2_params *)arg);
+		break;
+
+	/*--------------------------------------------------------------------*/
+	case MC_DRV_KMOD_IOCTL_FREE:
+		/* called by ClientLib */
+		ret = handle_ioctl_free(
+				instance,
+				(union mc_ioctl_free_params *)arg);
+		break;
+
+	/*--------------------------------------------------------------------*/
+	case MC_DRV_KMOD_IOCTL_APP_REGISTER_WSM_L2:
+		/* called by ClientLib */
+		ret = handle_ioctl_app_register_wsm_l2(
+				instance,
+				(union mc_ioctl_app_reg_wsm_l2_params *)arg);
+		break;
+
+	/*--------------------------------------------------------------------*/
+	case MC_DRV_KMOD_IOCTL_APP_UNREGISTER_WSM_L2:
+		/* called by ClientLib */
+		ret = handle_ioctl_app_unregister_wsm_l2(
+				instance,
+				(struct mc_ioctl_app_unreg_wsm_l2_params *)arg);
+		break;
+
+	/*--------------------------------------------------------------------*/
+	case MC_DRV_KMOD_IOCTL_FC_EXECUTE:
+		ret = handle_ioctl_fc_execute(
+				instance,
+				(union mc_ioctl_fc_execute_params *)arg);
+		break;
+
+	/*--------------------------------------------------------------------*/
+	case MC_DRV_KMOD_IOCTL_GET_VERSION:
+		ret = handle_ioctl_get_version(
+				instance,
+				(struct mc_ioctl_get_version_params *)arg);
+		break;
+
+	/*--------------------------------------------------------------------*/
+	default:
+		MCDRV_DBG_ERROR("unsupported cmd=%d\n", cmd);
+		ret = -EFAULT;
+		break;
+
+	} /* end switch(cmd) */
+
+#ifdef MC_MEM_TRACES
+	mobicore_log_read();
+#endif
+
+	return (int)ret;
+}
+
+
+/*----------------------------------------------------------------------------*/
+/**
+ * This function will be called from user space as read(...).
+ * The read function blocks until an interrupt occurs. In that case the
+ * event counter is copied into user space and the function is finished.
+ * @param *file
+ * @param *buffer  buffer to copy to (userspace)
+ * @param buffer_len	 number of requested bytes
+ * @param *pos	 not used
+ * @return ssize_t  ok case: number of copied bytes
+ *				error case: return errno
+ */
+static ssize_t mc_kernel_module_read(
+	struct file	*file,
+	char		*buffer,
+	size_t		buffer_len,
+	loff_t		*pos
+)
+{
+	int ret = 0, ssiq_counter;
+	size_t retLen = 0;
+	struct mc_instance *instance = get_instance(file);
+
+	MCDRV_ASSERT(instance != NULL);
+
+	/* avoid debug output on non-error, as this is called quite often */
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	do {
+		/* only the MobiCore Daemon is allowed to call this function */
+		if (!is_caller_mc_daemon(instance)) {
+			MCDRV_DBG_ERROR("caller not MobiCore Daemon\n");
+			ret = -EFAULT;
+			break;
+		}
+
+		if (buffer_len < sizeof(unsigned int)) {
+			MCDRV_DBG_ERROR("invalid length\n");
+			ret = (ssize_t)(-EINVAL);
+			break;
+		}
+
+		for (;;) {
+			if (down_interruptible(
+					&mc_drv_kmod_ctx.daemon_ctx.sem)) {
+				MCDRV_DBG_VERBOSE("read interrupted\n");
+				ret = (ssize_t)-ERESTARTSYS;
+				break;
+			}
+
+			ssiq_counter = atomic_read(
+					&(mc_drv_kmod_ctx.ssiq_ctx.counter));
+			MCDRV_DBG_VERBOSE("ssiq_counter=%i, ctx.counter=%i\n",
+				ssiq_counter,
+				mc_drv_kmod_ctx.daemon_ctx.ssiq_counter);
+
+			if (ssiq_counter !=
+				mc_drv_kmod_ctx.daemon_ctx.ssiq_counter) {
+				/* read data and exit loop without
+					error */
+				mc_drv_kmod_ctx.daemon_ctx.ssiq_counter =
+					ssiq_counter;
+				ret = 0;
+				break;
+			}
+
+			/* end loop if non-blocking */
+			if ((file->f_flags & O_NONBLOCK) != 0) {
+				MCDRV_DBG_ERROR("non-blocking read\n");
+				ret = (ssize_t)(-EAGAIN);
+				break;
+			}
+
+			if (signal_pending(current) != 0) {
+				MCDRV_DBG_VERBOSE("received signal.\n");
+				ret = (ssize_t)(-ERESTARTSYS);
+				break;
+			}
+
+		}
+
+		/* we are here if an event occurred or we had an
+			error.*/
+		if (ret != 0)
+			break;
+
+		/* read data and exit loop */
+		ret = copy_to_user(
+				  buffer,
+				  &(mc_drv_kmod_ctx.daemon_ctx.ssiq_counter),
+				  sizeof(unsigned int));
+
+
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("copy_to_user failed\n");
+			ret = (ssize_t)(-EFAULT);
+			break;
+		}
+
+		retLen = sizeof(unsigned int);
+
+	} while (FALSE);
+
+	/* avoid debug on non-error. */
+	if (ret == 0)
+		ret = (size_t)retLen;
+	else
+		MCDRV_DBG("exit with %d/0x%08X\n", ret, ret);
+
+	return (ssize_t)ret;
+}
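+
+/*
+ * Illustrative sketch (not part of the driver): how the MobiCore Daemon is
+ * expected to consume the blocking read() above from user space. The device
+ * path comes from MC_DRV_MOD_DEVNODE_FULLPATH; handle_ssiq_event() is a
+ * hypothetical helper shown for illustration only.
+ *
+ *	int fd = open(MC_DRV_MOD_DEVNODE_FULLPATH, O_RDWR);
+ *	unsigned int ssiq_counter;
+ *
+ *	for (;;) {
+ *		ssize_t n = read(fd, &ssiq_counter, sizeof(ssiq_counter));
+ *		if (n == (ssize_t)sizeof(ssiq_counter))
+ *			handle_ssiq_event(ssiq_counter);
+ *		else if (n < 0 && errno != EINTR)
+ *			break;
+ *	}
+ */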
+
+/*----------------------------------------------------------------------------*/
+/**
+ * Allocate WSM for given instance
+ *
+ * @param instance		instance
+ * @param requested_size		size of the WSM
+ * @param handle		pointer where the handle will be saved
+ * @param virt_kernel_addr	pointer for the kernel virtual address
+ * @param phys_addr		pointer for the physical address
+ *
+ * @return error code or 0 for success
+ */
+int mobicore_allocate_wsm(
+	struct mc_instance	*instance,
+	unsigned long		requested_size,
+	uint32_t		*handle,
+	void			**virt_kernel_addr,
+	void			**phys_addr
+)
+{
+	unsigned int	i;
+	unsigned int	order;
+	unsigned long	allocated_size;
+	int		ret = 0;
+	struct mc_contg_buffer	*contg_buffer = 0;
+	void		*virt_kernel_addr_stack;
+	void		*phys_addr_stack;
+
+	MCDRV_ASSERT(instance != NULL);
+	MCDRV_DBG("%s (size=%ld)\n", __func__, requested_size);
+
+	order = size_to_order(requested_size);
+	if (order == INVALID_ORDER) {
+		MCDRV_DBG_ERROR(
+			"size to order converting failed for size %ld\n",
+			requested_size);
+		return INVALID_ORDER;
+	}
+
+	allocated_size = (1<<order)*PAGE_SIZE;
+
+	MCDRV_DBG("size %ld -> order %d --> %ld (2^n pages)\n",
+		  requested_size, order, allocated_size);
+
+	do {
+		/* Usual WSM request, allocate contiguous buffer. */
+		/* search for a free entry in the wsm buffer list
+		 * REV axh: serialize this over multiple instances. */
+		for (i = 0; i < MC_DRV_KMOD_CONTG_BUFFER_MAX; i++) {
+			contg_buffer = &(instance->contg_buffers[i]);
+			if (contg_buffer->handle == 0) {
+				contg_buffer->handle = get_mc_kmod_unique_id();
+				break;
+			}
+		}
+		if (i == MC_DRV_KMOD_CONTG_BUFFER_MAX) {
+			MCDRV_DBG_ERROR("no free contiguous buffer\n");
+			ret = -EFAULT;
+			break;
+		}
+
+		/* Common code for all allocation paths */
+		virt_kernel_addr_stack = (void *)__get_free_pages(
+							GFP_USER | __GFP_COMP,
+							order);
+		if (virt_kernel_addr_stack == NULL) {
+			MCDRV_DBG_ERROR("get_free_pages failed\n");
+			ret = -ENOMEM;
+			break;
+		}
+
+		/* Get physical address to instance data */
+		phys_addr_stack = (void *)virt_to_phys(virt_kernel_addr_stack);
+		/* TODO: check for INVALID_ADDRESS? */
+
+		MCDRV_DBG(
+			"allocated phys=0x%p - 0x%p, "
+			"size=%ld, kernel_virt=0x%p, handle=%d\n",
+			phys_addr_stack,
+			(void *)((unsigned int)phys_addr_stack+allocated_size),
+			allocated_size,
+			virt_kernel_addr_stack,
+			contg_buffer->handle);
+
+		/* Usual Wsm request, allocate contg_buffer.
+		 *		Also, we never free a persistent Tci */
+		contg_buffer->phys_addr	 = phys_addr_stack;
+		contg_buffer->virt_kernel_addr = virt_kernel_addr_stack;
+		contg_buffer->virt_user_addr   = virt_kernel_addr_stack;
+		contg_buffer->num_pages	 = (1U << order);
+		*handle = contg_buffer->handle;
+		*virt_kernel_addr = virt_kernel_addr_stack;
+		*phys_addr = phys_addr_stack;
+
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("%s: exit with 0x%08X\n", __func__, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(mobicore_allocate_wsm);
+
+
+/*----------------------------------------------------------------------------*/
+/**
+ * This function will be called from user space as address = mmap(...).
+ *
+ * @param file
+ * @param vmarea	vmarea->vm_pgoff != 0 means a mapping of the MCI is
+ *			requested
+ *
+ * @return 0 if OK or -ENOMEM in case of error.
+ */
+static int mc_kernel_module_mmap(
+	struct file		*file,
+	struct vm_area_struct	*vmarea
+)
+{
+	unsigned int		i;
+	unsigned int		order;
+	void			*virt_kernel_addr_stack = 0;
+	void			*phys_addr = 0;
+	unsigned long		requested_size =
+					vmarea->vm_end - vmarea->vm_start;
+	unsigned long		allocated_size;
+	int			ret = 0;
+	struct mc_contg_buffer	*contg_buffer = 0;
+	unsigned int		handle = 0;
+	struct mc_instance	*instance = get_instance(file);
+	unsigned int		request = vmarea->vm_pgoff * 4096;
+#if defined(DEBUG)
+	bool release = false;
+#else
+	bool release = true;
+#endif
+
+	MCDRV_ASSERT(instance != NULL);
+	MCDRV_DBG("enter (vmaStart=0x%p, size=%ld, request=0x%x, mci=0x%x)\n",
+		  (void *)vmarea->vm_start,
+		  requested_size,
+		  request,
+		  mci_base);
+
+	order = size_to_order(requested_size);
+	if (order == INVALID_ORDER) {
+		MCDRV_DBG_ERROR(
+			"size to order converting failed for size %ld\n",
+			requested_size);
+		return -ENOMEM;
+	}
+
+	allocated_size = (1<<order)*PAGE_SIZE;
+
+	MCDRV_DBG("size %ld -> order %d --> %ld (2^n pages)\n",
+		  requested_size, order, allocated_size);
+
+	do {
+		/* Daemon tries to get an existing MCI */
+		if ((request == MC_DRV_KMOD_MMAP_MCI) && (mci_base != 0)) {
+			MCDRV_DBG("Request MCI, it is at (%x)\n", mci_base);
+
+			if (!is_caller_mc_daemon(instance)) {
+				ret = -EPERM;
+				break;
+			}
+			virt_kernel_addr_stack = (void *)mci_base;
+			phys_addr =
+				(void *)virt_to_phys(virt_kernel_addr_stack);
+		} else {
+			/* Usual Wsm request, allocate buffer. */
+			if (request == MC_DRV_KMOD_MMAP_WSM) {
+				/* search for a free entry in the buffer list
+				REV axh: serialize this over multiple instances.
+				*/
+				for (i = 0; i < MC_DRV_KMOD_CONTG_BUFFER_MAX;
+					i++) {
+					contg_buffer =
+						&(instance->contg_buffers[i]);
+					if (contg_buffer->handle == 0) {
+						contg_buffer->handle =
+							get_mc_kmod_unique_id();
+						break;
+					}
+				}
+				if (i == MC_DRV_KMOD_CONTG_BUFFER_MAX) {
+					MCDRV_DBG_ERROR(
+						"no free contiguous buffer\n");
+					ret = -EFAULT;
+					break;
+				}
+			} else {
+				if (request <= MC_DRV_KMOD_MMAP_PERSISTENTWSM
+					|| release) {
+					/* Special Wsm request
+						--> only Daemon is allowed */
+					if (!is_caller_mc_daemon(instance)) {
+						ret = -EPERM;
+						break;
+					}
+				}
+			}
+			if (request <= MC_DRV_KMOD_MMAP_PERSISTENTWSM) {
+				/* Common code for all allocation paths
+					*  get physical address, */
+				virt_kernel_addr_stack =
+					(void *)__get_free_pages(
+							GFP_USER | __GFP_COMP,
+							order);
+				if (virt_kernel_addr_stack == NULL) {
+					MCDRV_DBG_ERROR(
+						"get_free_pages failed\n");
+					ret = -ENOMEM;
+					break;
+				}
+				if (request == MC_DRV_KMOD_MMAP_WSM)
+					handle = contg_buffer->handle;
+				/* Get physical address to instance data */
+				/* TODO: check for INVALID_ADDRESS? */
+				phys_addr = (void *)virt_to_phys(
+							virt_kernel_addr_stack);
+			} else {
+#if defined(DEBUG)
+				phys_addr = (void *)request;
+				virt_kernel_addr_stack = phys_to_virt(request);
+#endif
+			}
+		}
+		/* Common code for all mmap calls:
+		 * map page to user
+		 * store data in page */
+
+		MCDRV_DBG("allocated phys=0x%p - 0x%p, "
+			"size=%ld, kernel_virt=0x%p, handle=%d\n",
+			phys_addr,
+			(void *)((unsigned int)phys_addr+allocated_size),
+			allocated_size, virt_kernel_addr_stack, handle);
+
+		vmarea->vm_flags |= VM_RESERVED;
+		/* convert Kernel address to User Address. Kernel address begins
+			at PAGE_OFFSET, user Address range is below PAGE_OFFSET.
+			Remapping the area is always done, so multiple mappings
+			of one region are possible. Now remap kernel address
+			space into user space */
+		ret = (int)remap_pfn_range(
+				vmarea,
+				(vmarea->vm_start),
+				addr_to_pfn(phys_addr),
+				requested_size,
+				vmarea->vm_page_prot);
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("remap_pfn_range failed\n");
+
+			/* free allocated pages when mmap fails, however, do not
+				do it, when daemon tried to get an MCI that
+				existed */
+			if (!((request == MC_DRV_KMOD_MMAP_MCI) &&
+				  (mci_base != 0)))
+				free_continguous_pages(virt_kernel_addr_stack,
+							(1U << order));
+			break;
+		}
+
+		/* Usual Wsm request, allocate contg_buffer.
+			When requesting Mci, we do not associate the page with
+			the process.
+			Note: we also never free the Mci
+			Also, we never free a persistent Tci */
+		if (request == MC_DRV_KMOD_MMAP_WSM) {
+			contg_buffer->phys_addr = phys_addr;
+			contg_buffer->virt_kernel_addr = virt_kernel_addr_stack;
+			contg_buffer->virt_user_addr =
+						(void *)(vmarea->vm_start);
+			contg_buffer->num_pages = (1U << order);
+		}
+
+		/* set response in allocated buffer */
+		{
+			struct mc_mmap_resp *mmapResp =
+				(struct mc_mmap_resp *)virt_kernel_addr_stack;
+			/* TODO: do this for daemon only, otherwise set NULL */
+			mmapResp->phys_addr = (uint32_t)phys_addr;
+			mmapResp->handle = handle;
+			if ((request == MC_DRV_KMOD_MMAP_MCI) &&
+				(mci_base != 0)) {
+				mmapResp->is_reused = 1;
+			} else
+				mmapResp->is_reused = 0;
+		}
+
+		/* store MCI pointer */
+		if ((request == MC_DRV_KMOD_MMAP_MCI) && (mci_base == 0)) {
+			mci_base = (uint32_t)virt_kernel_addr_stack;
+			MCDRV_DBG("MCI base set to 0x%x\n", mci_base);
+		}
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return (int)ret;
+}
+
+#ifdef CONFIG_SMP
+/*----------------------------------------------------------------------------*/
+/**
+ * Force migration of the current task to CPU0 (where the monitor resides)
+ *
+ * @return Error code or 0 for success
+ */
+static int goto_cpu0(
+	void
+)
+{
+	int		ret = 0;
+	struct cpumask	mask =  CPU_MASK_CPU0;
+
+	MCDRV_DBG_VERBOSE("System has %d CPUs, we are on CPU #%d\n"
+		  "\tBinding this process to CPU #0.\n"
+		  "\tactive mask is %lx, setting it to mask=%lx\n",
+		  nr_cpu_ids,
+		  raw_smp_processor_id(),
+		  cpu_active_mask->bits[0],
+		  mask.bits[0]);
+	ret = set_cpus_allowed_ptr(current, &mask);
+	if (ret != 0)
+		MCDRV_DBG_ERROR("set_cpus_allowed_ptr=%d.\n", ret);
+	MCDRV_DBG_VERBOSE("And now we are on CPU #%d\n",
+				raw_smp_processor_id());
+
+	return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+/**
+ * Restore the CPU mask of current to all CPUs (reverse of goto_cpu0)
+ *
+ * @return Error code or 0 for success
+ */
+static int goto_all_cpu(
+	void
+)
+{
+	int		ret = 0;
+
+	struct cpumask	mask =  CPU_MASK_ALL;
+
+	MCDRV_DBG_VERBOSE("System has %d CPUs, we are on CPU #%d\n"
+		  "\tBinding this process to all CPUs.\n"
+		  "\tactive mask is %lx, setting it to mask=%lx\n",
+		  nr_cpu_ids,
+		  raw_smp_processor_id(),
+		  cpu_active_mask->bits[0],
+		  mask.bits[0]);
+	ret = set_cpus_allowed_ptr(current, &mask);
+	if (ret != 0)
+		MCDRV_DBG_ERROR("set_cpus_allowed_ptr=%d.\n", ret);
+	MCDRV_DBG_VERBOSE("And now we are on CPU #%d\n",
+				raw_smp_processor_id());
+
+	return ret;
+}
+
+#else
+static int goto_cpu0(void)
+{
+	return 0;
+}
+
+static int goto_all_cpu(void)
+{
+	return 0;
+}
+#endif
+
+/*----------------------------------------------------------------------------*/
+/**
+ * Initialize a new mobicore API instance object
+ *
+ * @return Instance or NULL if no allocation was possible.
+ */
+struct mc_instance *mobicore_open(
+	void
+) {
+	struct mc_instance	*instance;
+	pid_t			pid_vnr;
+
+	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+	if (instance == NULL)
+		return NULL;
+
+	/* get a unique ID for this instance (PIDs are not unique) */
+	instance->handle = get_mc_kmod_unique_id();
+
+	/* get the PID of the calling process. We avoid using
+	 *	current->pid directly, as 2.6.24 introduced PID
+	 *	namespaces. See also http://lwn.net/Articles/259217 */
+	pid_vnr = task_pid_vnr(current);
+	instance->pid_vnr = pid_vnr;
+
+	return instance;
+}
+EXPORT_SYMBOL(mobicore_open);
+
+/*----------------------------------------------------------------------------*/
+/**
+ * This function will be called from user space as fd = open(...).
+ * A set of internal instance data is created and initialized.
+ *
+ * @param inode
+ * @param file
+ * @return 0 if OK or -ENOMEM if no allocation was possible.
+ */
+static int mc_kernel_module_open(
+	struct inode	*inode,
+	struct file	*file
+)
+{
+	struct mc_instance	*instance;
+	int			ret = 0;
+
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	do {
+		instance = mobicore_open();
+		if (instance == NULL)
+			return -ENOMEM;
+
+		/* check if Daemon. We simply assume that the first to open us
+			with root privileges must be the daemon. */
+		if ((is_userland_caller_privileged())
+			&& (mc_drv_kmod_ctx.daemon_inst == NULL)) {
+			MCDRV_DBG("accept this as MobiCore Daemon\n");
+
+			/* Set the caller's CPU mask to CPU0*/
+			ret = goto_cpu0();
+			if (ret != 0) {
+				mobicore_release(instance);
+				file->private_data = NULL;
+				MCDRV_DBG("changing core failed!\n");
+				break;
+			}
+
+			mc_drv_kmod_ctx.daemon_inst = instance;
+			sema_init(&mc_drv_kmod_ctx.daemon_ctx.sem,
+					DAEMON_SEM_VAL);
+			/* init ssiq event counter */
+			mc_drv_kmod_ctx.daemon_ctx.ssiq_counter =
+				atomic_read(
+					&(mc_drv_kmod_ctx.ssiq_ctx.counter));
+
+#ifdef MC_MEM_TRACES
+			/* The traces have to be setup on CPU-0 since we must
+			 * do a fastcall to MobiCore. */
+			if (!mci_base)
+				/* Do the work only if MCI base is not
+				 * initialized properly */
+				work_on_cpu(0, mobicore_log_setup, NULL);
+#endif
+		}
+
+		/* store instance data reference */
+		file->private_data = instance;
+
+		/* TODO axh: link all instances to allow clean up? */
+
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return (int)ret;
+
+}
+
+/*----------------------------------------------------------------------------*/
+/**
+ * Release a mobicore instance object and all objects related to it
+ * @param instance instance
+ * @return 0 on success or a negative error code
+ */
+int mobicore_release(
+	struct mc_instance	*instance
+)
+{
+	int ret = 0;
+	int i;
+	struct mc_used_l2_table	*used_l2table, *used_l2table_temp;
+
+	do {
+		/* try to get the semaphore */
+		ret = down_interruptible(&(mc_drv_kmod_ctx.wsm_l2_sem));
+		if (ret != 0) {
+			MCDRV_DBG_ERROR(
+				"down_interruptible() failed with %d\n", ret);
+			/* TODO: can we block here? */
+			ret = -ERESTARTSYS;
+		} else {
+			/* Check if some WSM is still in use. */
+			list_for_each_entry_safe(
+				used_l2table,
+				used_l2table_temp,
+				&(mc_drv_kmod_ctx.mc_used_l2_tables),
+				list
+			) {
+				if (used_l2table->owner == instance) {
+					MCDRV_DBG_WARN(
+						"trying to release WSM L2: "
+						"physBase=%p, nr_of_pages=%d\n",
+						get_l2_table_phys(used_l2table),
+						used_l2table->nr_of_pages);
+
+					/* unlock app usage and free if MobiCore
+					does not use it */
+					delete_used_l2_table(used_l2table,
+							FREE_FROM_NWD);
+				}
+			} /* end while */
+
+			/* release semaphore */
+			up(&(mc_drv_kmod_ctx.wsm_l2_sem));
+		}
+
+
+		/* release all mapped data */
+		for (i = 0; i < MC_DRV_KMOD_CONTG_BUFFER_MAX; i++) {
+			struct mc_contg_buffer *contg_buffer =
+					&(instance->contg_buffers[i]);
+
+			if (contg_buffer->virt_user_addr != 0) {
+				free_continguous_pages(
+					contg_buffer->virt_kernel_addr,
+					contg_buffer->num_pages);
+			}
+		}
+
+		/* release instance context */
+		kfree(instance);
+	} while (FALSE);
+
+	return ret;
+}
+EXPORT_SYMBOL(mobicore_release);
+
+/*----------------------------------------------------------------------------*/
+/**
+ * This function will be called from user space as close(...).
+ * The instance data are freed and the associated memory pages are unreserved.
+ *
+ * @param inode
+ * @param file
+ *
+ * @return 0
+ */
+static int mc_kernel_module_release(
+	struct inode	*inode,
+	struct file	*file
+)
+{
+	int			ret = 0;
+	struct mc_instance	*instance = get_instance(file);
+
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	do {
+		/* check if daemon closes us. */
+		if (is_caller_mc_daemon(instance)) {
+			/* TODO: cleanup?
+				* mc_drv_kmod_ctx.mc_used_l2_tables remains */
+			MCDRV_DBG_WARN("WARNING: MobiCore Daemon died\n");
+			mc_drv_kmod_ctx.daemon_inst = NULL;
+		}
+
+		ret = mobicore_release(instance);
+
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return (int)ret;
+}
+
+
+/*----------------------------------------------------------------------------*/
+/**
+ * This function is the interrupt handler of the mcDrvModule.
+ * It increments the event counter and releases the daemon's read semaphore,
+ * signalling to the blocking read function that an interrupt has occurred.
+ *
+ * @param   intr
+ * @param   *context  pointer to registered device data
+ *
+ * @return  IRQ_HANDLED
+ */
+static irqreturn_t mc_kernel_module_intr_ssiq(
+	int	intr,
+	void	*context
+)
+{
+	irqreturn_t	ret = IRQ_NONE;
+
+	/* we know the context. */
+	MCDRV_ASSERT(&mc_drv_kmod_ctx == context);
+
+	do {
+		if (intr != MC_INTR_SSIQ) {
+			/* this should not happen, as we did not register for any
+				other interrupt. For debugging, we print a
+				message, but continue */
+			MCDRV_DBG_WARN(
+				"unknown interrupt %d, expecting only %d\n",
+				intr, MC_INTR_SSIQ);
+		}
+		MCDRV_DBG_VERBOSE("received interrupt %d\n",
+				  intr);
+
+		/* increment interrupt event counter */
+		atomic_inc(&(mc_drv_kmod_ctx.ssiq_ctx.counter));
+
+		/* signal the daemon */
+		up(&mc_drv_kmod_ctx.daemon_ctx.sem);
+
+
+		ret = IRQ_HANDLED;
+
+	} while (FALSE);
+
+	return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+/** function table structure of this device driver. */
+static const struct file_operations mc_kernel_module_file_operations = {
+	.owner		= THIS_MODULE, /**< driver owner */
+	.open		= mc_kernel_module_open, /**< driver open function */
+	.release	= mc_kernel_module_release, /**< driver release function*/
+	.unlocked_ioctl	= mc_kernel_module_ioctl, /**< driver ioctl function */
+	.mmap		= mc_kernel_module_mmap, /**< driver mmap function */
+	.read		= mc_kernel_module_read, /**< driver read function */
+};
+
+/*----------------------------------------------------------------------------*/
+/** registration structure as miscdevice. */
+static struct miscdevice mc_kernel_module_device = {
+	.name	= MC_DRV_MOD_DEVNODE, /**< device name */
+	.minor	= MISC_DYNAMIC_MINOR, /**< device minor number */
+	/** device interface function structure */
+	.fops	= &mc_kernel_module_file_operations,
+};
+
+
+/*----------------------------------------------------------------------------*/
+/**
+ * This function is called by the kernel during startup or by an insmod command.
+ * The device is installed and registered as a miscdevice, then interrupt and
+ * queue handling are set up.
+ *
+ * @return 0 for no error or -EIO if registration fails
+ */
+static int __init mc_kernel_module_init(
+	void
+)
+{
+	int ret = 0;
+
+	MCDRV_DBG("enter (Build " __TIMESTAMP__ ")\n");
+	MCDRV_DBG("mcDrvModuleApi version is %i.%i\n",
+			MCDRVMODULEAPI_VERSION_MAJOR,
+			MCDRVMODULEAPI_VERSION_MINOR);
+#ifdef MOBICORE_COMPONENT_BUILD_TAG
+	MCDRV_DBG("%s\n", MOBICORE_COMPONENT_BUILD_TAG);
+#endif
+	do {
+		/* Hardware does not support ARM TrustZone
+			-> Cannot continue! */
+		if (!has_security_extensions()) {
+			MCDRV_DBG_ERROR(
+				"Hardware doesn't support ARM TrustZone!\n");
+			ret = -ENODEV;
+			break;
+		}
+
+		/* Running in secure mode -> Cannot load the driver! */
+		if (is_secure_mode()) {
+			MCDRV_DBG_ERROR("Running in secure MODE!\n");
+			ret = -ENODEV;
+			break;
+		}
+
+		sema_init(&mc_drv_kmod_ctx.daemon_ctx.sem, DAEMON_SEM_VAL);
+		/* set up S-SIQ interrupt handler */
+		ret = request_irq(
+				  MC_INTR_SSIQ,
+				  mc_kernel_module_intr_ssiq,
+				  IRQF_TRIGGER_RISING,
+				  MC_DRV_MOD_DEVNODE,
+				  &mc_drv_kmod_ctx);
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("interrupt request failed\n");
+			break;
+		}
+
+		ret = misc_register(&mc_kernel_module_device);
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("device register failed\n");
+			break;
+		}
+
+		/* initialize event counter for signaling of an IRQ to zero */
+		atomic_set(&(mc_drv_kmod_ctx.ssiq_ctx.counter), 0);
+
+		/* init list for WSM L2 chunks. */
+		INIT_LIST_HEAD(&(mc_drv_kmod_ctx.mc_l2_tables_sets));
+
+		/* L2 table descriptor list. */
+		INIT_LIST_HEAD(&(mc_drv_kmod_ctx.mc_used_l2_tables));
+
+		sema_init(&(mc_drv_kmod_ctx.wsm_l2_sem), 1);
+
+		/* initialize unique number counter which we can use for
+			handles. It is limited to 2^32, but this should be
+			enough to be roll-over safe for us. We start with 1
+			instead of 0. */
+		atomic_set(&(mc_drv_kmod_ctx.unique_counter), 1);
+
+		mci_base = 0;
+		MCDRV_DBG("initialized\n");
+
+		ret = 0;
+
+	} while (FALSE);
+
+	MCDRV_DBG_VERBOSE("exit with %d/0x%08X\n", ret, ret);
+
+	return (int)ret;
+}
+
+
+
+/*----------------------------------------------------------------------------*/
+/**
+ * This function removes this device driver from the Linux device manager.
+ */
+static void __exit mc_kernel_module_exit(
+	void
+)
+{
+	struct mc_used_l2_table	*used_l2table;
+
+	MCDRV_DBG_VERBOSE("enter\n");
+
+	mobicore_log_free();
+
+	/* Check if some WSM is still in use. */
+	list_for_each_entry(
+		used_l2table,
+		&(mc_drv_kmod_ctx.mc_used_l2_tables),
+		list
+	) {
+		MCDRV_DBG_WARN(
+			"WSM L2 still in use: physBase=%p, nr_of_pages=%d\n",
+			get_l2_table_phys(used_l2table),
+			used_l2table->nr_of_pages);
+	} /* end while */
+
+	free_irq(MC_INTR_SSIQ, &mc_drv_kmod_ctx);
+
+	misc_deregister(&mc_kernel_module_device);
+	MCDRV_DBG_VERBOSE("exit");
+}
+
+
+/*----------------------------------------------------------------------------*/
+/* Linux Driver Module Macros */
+module_init(mc_kernel_module_init);
+module_exit(mc_kernel_module_exit);
+MODULE_AUTHOR("Giesecke & Devrient GmbH");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MobiCore driver");
+
+/** @} */
+
diff --git a/drivers/gud/mobicore_driver/mc_drv_module.h b/drivers/gud/mobicore_driver/mc_drv_module.h
new file mode 100644
index 0000000..8b402d6
--- /dev/null
+++ b/drivers/gud/mobicore_driver/mc_drv_module.h
@@ -0,0 +1,238 @@
+/**
+ * Header file of MobiCore Driver Kernel Module.
+ *
+ * @addtogroup MCD_MCDIMPL_KMOD_IMPL
+ * @{
+ * Internal structures of the McDrvModule
+ * @file
+ *
+ * Header file of the MobiCore Driver Kernel Module,
+ * its internal structures and defines.
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MC_DRV_KMOD_H_
+#define _MC_DRV_KMOD_H_
+
+#include "mc_drv_module_linux_api.h"
+#include "public/mc_drv_module_api.h"
+/** Platform specific settings */
+#include "platform.h"
+
+/** ARM Specific masks and modes */
+#define ARM_CPSR_MASK 0x1F
+#define ARM_MONITOR_MODE 0b10110
+#define ARM_SECURITY_EXTENSION_MASK 0x30
+
+/**
+ * Number of page table entries in one L2 table. This is ARM specific; an
+ * L2 table covers 1 MiB by using 256 entries referring to 4 KiB pages each.
+ */
+#define MC_ARM_L2_TABLE_ENTRIES		256
+
+/** Maximum number of contiguous buffer allocations for one driver instance. */
+#define MC_DRV_KMOD_CONTG_BUFFER_MAX	16
+
+/** Number of L2 tables per page. There are 4 tables in each page. */
+#define MC_DRV_KMOD_L2_TABLE_PER_PAGES	4
+
+/** ARM level 2 (L2) table with 256 entries. Size: 1k */
+struct l2table {
+	pte_t	table_entries[MC_ARM_L2_TABLE_ENTRIES];
+};
+
+#define INVALID_ADDRESS     ((void *)(-1))
+
+/** ARM L2 PTE bits */
+#define L2_FLAG_SMALL_XN    (1U <<  0)
+#define L2_FLAG_SMALL       (1U <<  1)
+#define L2_FLAG_B           (1U <<  2)
+#define L2_FLAG_C           (1U <<  3)
+#define L2_FLAG_AP0         (1U <<  4)
+#define L2_FLAG_AP1         (1U <<  5)
+#define L2_FLAG_SMALL_TEX0  (1U <<  6)
+#define L2_FLAG_SMALL_TEX1  (1U <<  7)
+#define L2_FLAG_SMALL_TEX2  (1U <<  8)
+#define L2_FLAG_APX         (1U <<  9)
+#define L2_FLAG_S           (1U << 10)
+#define L2_FLAG_NG          (1U << 11)
+
+/**
+ * Contiguous buffer allocated to TLCs.
+ * These buffers are used as world shared memory (WSM) and shared with
+ * the secure world.
+ * The virtual kernel address is added for a simpler search algorithm.
+ */
+struct mc_contg_buffer {
+	unsigned int	handle; /* unique handle */
+	void		*virt_user_addr; /**< virtual User start address */
+	void		*virt_kernel_addr; /**< virtual Kernel start address */
+	void		*phys_addr; /**< physical start address */
+	unsigned int	num_pages; /**< number of pages */
+};
+
+/** Instance data for MobiCore Daemon and TLCs. */
+struct mc_instance {
+	/** unique handle */
+	unsigned int	handle;
+	/** process that opened this instance */
+	pid_t		pid_vnr;
+	/** buffer list for mmap generated address space and
+		its virtual client address */
+	struct mc_contg_buffer	contg_buffers[MC_DRV_KMOD_CONTG_BUFFER_MAX];
+};
+
+/** Store for four L2 tables in one 4 KiB page */
+struct mc_l2_table_store {
+	struct l2table table[MC_DRV_KMOD_L2_TABLE_PER_PAGES];
+};
+
+/** Usage and maintenance information about mc_l2_table_store */
+struct mc_l2_tables_set {
+	struct list_head		list;
+	unsigned int			usage_bitmap;	/**< usage bitmap */
+	struct mc_l2_table_store	*kernel_virt;	/**< kernel virtual address */
+	struct mc_l2_table_store	*phys;		/**< physical address */
+	struct page			*page;		/**< pointer to page struct */
+};
+
+/**
+ * L2 table allocated to the Daemon or a TLC describing a world shared buffer.
+ * When users map a malloc()ed area into the SWd, an L2 table is allocated.
+ * In addition, an area of at most 1 MiB of virtual address space is mapped into
+ * the L2 table and a handle for this table is returned to the user.
+ */
+struct mc_used_l2_table {
+	struct list_head	list;
+
+	/** handle as communicated to user mode */
+	unsigned int		handle;
+	unsigned int		flags;
+
+	/** owner of this L2 table */
+	struct mc_instance	*owner;
+
+	/** set describing where our L2 table is stored */
+	struct mc_l2_tables_set	*set;
+
+	/** index into L2 table set */
+	unsigned int		idx;
+
+	/** size of buffer */
+	unsigned int		nr_of_pages;
+};
+
+#define MC_WSM_L2_CONTAINER_WSM_LOCKED_BY_APP   (1U << 0)
+#define MC_WSM_L2_CONTAINER_WSM_LOCKED_BY_MC    (1U << 1)
+
+
+/** MobiCore S-SIQ interrupt context data. */
+struct mc_ssiq_ctx {
+	/** S-SIQ interrupt counter */
+	atomic_t	counter;
+};
+
+/** MobiCore Daemon context data. */
+struct mc_daemon_ctx {
+	/** event semaphore */
+	struct semaphore	sem;
+	struct fasync_struct	*async_queue;
+	/** event counter */
+	unsigned int		ssiq_counter;
+};
+
+/** MobiCore Driver Kernel Module context data. */
+struct mc_drv_kmod_ctx {
+
+	/** ever incrementing counter */
+	atomic_t		unique_counter;
+
+	/** S-SIQ interrupt context */
+	struct mc_ssiq_ctx	ssiq_ctx;
+
+	/** MobiCore Daemon context */
+	struct mc_daemon_ctx	daemon_ctx;
+
+	/** pointer to instance of daemon */
+	struct mc_instance	*daemon_inst;
+
+	/** Backing store for L2 tables */
+	struct list_head	mc_l2_tables_sets;
+
+	/** Bookkeeping for used L2 tables */
+	struct list_head	mc_used_l2_tables;
+
+	/** semaphore to synchronize access to above lists */
+	struct semaphore	wsm_l2_sem;
+};
+
+/** MobiCore internal trace buffer structure. */
+struct mc_trace_buf {
+	uint32_t version; /**< version of trace buffer */
+	uint32_t length; /**< length of allocated buffer (includes header) */
+	uint32_t write_pos; /**< last write position */
+	char  buff[1]; /**< start of the log buffer */
+};
+
+/** MobiCore internal trace log setup. */
+void mobicore_log_read(void);
+long mobicore_log_setup(void *);
+void mobicore_log_free(void);
+
+#define MCDRV_DBG_ERROR(txt, ...) \
+	printk(KERN_ERR "mcDrvKMod [%d] %s() ### ERROR: " txt, \
+		task_pid_vnr(current), \
+		__func__, \
+		##__VA_ARGS__)
+
+/* dummy function helper macro. */
+#define DUMMY_FUNCTION()    do {} while (0)
+
+#if defined(DEBUG)
+
+/* #define DEBUG_VERBOSE */
+#if defined(DEBUG_VERBOSE)
+#define MCDRV_DBG_VERBOSE          MCDRV_DBG
+#else
+#define MCDRV_DBG_VERBOSE(...)     DUMMY_FUNCTION()
+#endif
+
+#define MCDRV_DBG(txt, ...) \
+	printk(KERN_INFO "mcDrvKMod [%d on CPU%d] %s(): " txt, \
+		task_pid_vnr(current), \
+		raw_smp_processor_id(), \
+		__func__, \
+		##__VA_ARGS__)
+
+#define MCDRV_DBG_WARN(txt, ...) \
+	printk(KERN_WARNING "mcDrvKMod [%d] %s() WARNING: " txt, \
+		task_pid_vnr(current), \
+		__func__, \
+		##__VA_ARGS__)
+
+#define MCDRV_ASSERT(cond) \
+	do { \
+		if (unlikely(!(cond))) { \
+			panic("mcDrvKMod Assertion failed: %s:%d\n", \
+				__FILE__, __LINE__); \
+		} \
+	} while (0)
+
+#else
+
+#define MCDRV_DBG_VERBOSE(...)	DUMMY_FUNCTION()
+#define MCDRV_DBG(...)		DUMMY_FUNCTION()
+#define MCDRV_DBG_WARN(...)	DUMMY_FUNCTION()
+
+#define MCDRV_ASSERT(...)	DUMMY_FUNCTION()
+
+#endif /* [not] defined(DEBUG) */
+
+
+#endif /* _MC_DRV_KMOD_H_ */
+/** @} */
diff --git a/drivers/gud/mobicore_driver/mc_drv_module_android.h b/drivers/gud/mobicore_driver/mc_drv_module_android.h
new file mode 100644
index 0000000..319509f
--- /dev/null
+++ b/drivers/gud/mobicore_driver/mc_drv_module_android.h
@@ -0,0 +1,37 @@
+/**
+ * Header file of MobiCore Driver Kernel Module.
+ *
+ * @addtogroup MobiCore_Driver_Kernel_Module
+ * @{
+ * Android specific defines
+ * @file
+ *
+ * Android specific defines
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MC_DRV_MODULE_ANDROID_H_
+#define _MC_DRV_MODULE_ANDROID_H_
+
+/* Defines needed to identify the Daemon in Android systems
+ * For the full list see:
+ * platform_system_core/include/private/android_filesystem_config.h in the
+ * Android source tree
+ */
+/* traditional unix root user */
+#define AID_ROOT	0
+/* system server */
+#define AID_SYSTEM	1000
+/* access to misc storage */
+#define AID_MISC	9998
+#define AID_NOBODY	9999
+/* first app user */
+#define AID_APP		10000
+
+#endif /* _MC_DRV_MODULE_ANDROID_H_ */
+/** @} */
diff --git a/drivers/gud/mobicore_driver/mc_drv_module_fastcalls.h b/drivers/gud/mobicore_driver/mc_drv_module_fastcalls.h
new file mode 100644
index 0000000..d058043
--- /dev/null
+++ b/drivers/gud/mobicore_driver/mc_drv_module_fastcalls.h
@@ -0,0 +1,227 @@
+/**
+ * Header file of MobiCore Driver Kernel Module.
+ *
+ * @addtogroup MobiCore_Driver_Kernel_Module
+ * @{
+ * Internal structures of the McDrvModule
+ * @file
+ *
+ * MobiCore Fast Call interface
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MC_DRV_MODULE_FC_H_
+#define _MC_DRV_MODULE_FC_H_
+
+#include "mc_drv_module.h"
+
+/**
+ * MobiCore SMCs
+ */
+enum mc_smc_codes {
+	MC_SMC_N_YIELD  = 0x3, /**< Yield to switch from NWd to SWd. */
+	MC_SMC_N_SIQ    = 0x4  /**< SIQ to switch from NWd to SWd. */
+};
+
+/**
+ * MobiCore fast calls. See MCI documentation
+ */
+enum mc_fast_call_codes {
+	MC_FC_INIT      = -1,
+	MC_FC_INFO      = -2,
+	MC_FC_POWER     = -3,
+	MC_FC_DUMP      = -4,
+	MC_FC_NWD_TRACE = -31 /**< Mem trace setup fastcall */
+};
+
+/**
+ * return code for fast calls
+ */
+enum mc_fast_calls_result {
+	MC_FC_RET_OK                       = 0,
+	MC_FC_RET_ERR_INVALID              = 1,
+	MC_FC_RET_ERR_ALREADY_INITIALIZED  = 5
+};
+
+
+
+/*------------------------------------------------------------------------------
+	structure wrappers for specific fastcalls
+------------------------------------------------------------------------------*/
+
+/** generic fast call parameters */
+union fc_generic {
+	struct {
+		uint32_t cmd;
+		uint32_t param[3];
+	} as_in;
+	struct {
+		uint32_t resp;
+		uint32_t ret;
+		uint32_t param[2];
+	} as_out;
+};
+
+
+/** fast call init */
+union mc_fc_init {
+	union fc_generic as_generic;
+	struct {
+		uint32_t cmd;
+		uint32_t base;
+		uint32_t nq_info;
+		uint32_t mcp_info;
+	} as_in;
+	struct {
+		uint32_t resp;
+		uint32_t ret;
+		uint32_t rfu[2];
+	} as_out;
+};
+
+
+/** fast call info parameters */
+union mc_fc_info {
+	union fc_generic as_generic;
+	struct {
+		uint32_t cmd;
+		uint32_t ext_info_id;
+		uint32_t rfu[2];
+	} as_in;
+	struct {
+		uint32_t resp;
+		uint32_t ret;
+		uint32_t state;
+		uint32_t ext_info;
+	} as_out;
+};
+
+
+/** fast call S-Yield parameters */
+union mc_fc_s_yield {
+	union fc_generic as_generic;
+	struct {
+		uint32_t cmd;
+		uint32_t rfu[3];
+	} as_in;
+	struct {
+		uint32_t resp;
+		uint32_t ret;
+		uint32_t rfu[2];
+	} as_out;
+};
+
+
+/** fast call N-SIQ parameters */
+union mc_fc_nsiq {
+	union fc_generic as_generic;
+	struct {
+		uint32_t cmd;
+		uint32_t rfu[3];
+	} as_in;
+	struct {
+		uint32_t resp;
+		uint32_t ret;
+		uint32_t rfu[2];
+	} as_out;
+};
+
+
+/*----------------------------------------------------------------------------*/
+/**
+ * fast call to MobiCore
+ *
+ * @param fc_generic pointer to fast call data
+ */
+static inline void mc_fastcall(
+	union fc_generic *fc_generic
+)
+{
+	MCDRV_ASSERT(fc_generic != NULL);
+	/* We only expect to make smc calls on CPU0 otherwise something wrong
+	 * will happen */
+	MCDRV_ASSERT(raw_smp_processor_id() == 0);
+	mb();
+#ifdef MC_SMC_FASTCALL
+	{
+		int ret = 0;
+		MCDRV_DBG("Going into SCM()");
+		ret = smc_fastcall((void *)fc_generic, sizeof(*fc_generic));
+		MCDRV_DBG("Coming from SCM, scm_call=%i, resp=%d/0x%x\n",
+			ret,
+			fc_generic->as_out.resp, fc_generic->as_out.resp);
+	}
+#else
+	{
+		/* SVC expect values in r0-r3 */
+		register u32 reg0 __asm__("r0") = fc_generic->as_in.cmd;
+		register u32 reg1 __asm__("r1") = fc_generic->as_in.param[0];
+		register u32 reg2 __asm__("r2") = fc_generic->as_in.param[1];
+		register u32 reg3 __asm__("r3") = fc_generic->as_in.param[2];
+
+		/* one of the famous preprocessor hacks to stringize things. */
+#define __STR2(x)   #x
+#define __STR(x)    __STR2(x)
+
+		/* the compiler does not support certain instructions, so we
+		emit them as raw words. "SMC": secure monitor call. */
+#define ASM_ARM_SMC         0xE1600070
+		/*   "BPKT": debugging breakpoint. We keep this, as it comes in
+				quite handy for debugging. */
+#define ASM_ARM_BPKT        0xE1200070
+#define ASM_THUMB_BPKT      0xBE00
+
+
+		__asm__ volatile (
+			".word " __STR(ASM_ARM_SMC) "\n"
+			: "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3)
+		);
+
+		/* set response */
+		fc_generic->as_out.resp     = reg0;
+		fc_generic->as_out.ret      = reg1;
+		fc_generic->as_out.param[0] = reg2;
+		fc_generic->as_out.param[1] = reg3;
+	}
+#endif
+}
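+
+/*
+ * Illustrative sketch (not part of the driver): issuing an N-SIQ fast call
+ * with the helpers above. The caller must already run on CPU0 (see the
+ * MCDRV_ASSERT in mc_fastcall); only definitions from this header are used.
+ *
+ *	union mc_fc_nsiq fc_nsiq;
+ *	int ret;
+ *
+ *	memset(&fc_nsiq, 0, sizeof(fc_nsiq));
+ *	fc_nsiq.as_in.cmd = MC_SMC_N_SIQ;
+ *	mc_fastcall(&(fc_nsiq.as_generic));
+ *	ret = convert_fc_ret(fc_nsiq.as_out.ret);
+ */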
+
+
+/*----------------------------------------------------------------------------*/
+/**
+ * convert fast call return code to linux driver module error code
+ *
+ */
+static inline int convert_fc_ret(
+	uint32_t sret
+)
+{
+	int         ret = -EFAULT;
+
+	switch (sret) {
+
+	case MC_FC_RET_OK:
+		ret = 0;
+		break;
+
+	case MC_FC_RET_ERR_INVALID:
+		ret = -EINVAL;
+		break;
+
+	case MC_FC_RET_ERR_ALREADY_INITIALIZED:
+		ret = -EBUSY;
+		break;
+
+	default:
+		break;
+	} /* end switch( sret ) */
+	return ret;
+}
+
+#endif /* _MC_DRV_MODULE_FC_H_ */
+/** @} */
diff --git a/drivers/gud/mobicore_driver/mc_drv_module_linux_api.h b/drivers/gud/mobicore_driver/mc_drv_module_linux_api.h
new file mode 100644
index 0000000..b2a99f1
--- /dev/null
+++ b/drivers/gud/mobicore_driver/mc_drv_module_linux_api.h
@@ -0,0 +1,187 @@
+/**
+ * Header file of MobiCore Driver Kernel Module.
+ *
+ * @addtogroup MobiCore_Driver_Kernel_Module
+ * @{
+ * Wrapper for Linux API
+ * @file
+ *
+ * Some convenient wrappers for memory functions
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MC_DRV_MODULE_LINUX_API_H_
+#define _MC_DRV_MODULE_LINUX_API_H_
+
+#include <linux/version.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/kthread.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <asm/sizes.h>
+#include <asm/pgtable.h>
+#include <linux/semaphore.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+
+
+/* make some nice types */
+#if !defined(TRUE)
+#define TRUE (1 == 1)
+#endif
+
+#if !defined(FALSE)
+#define FALSE (1 != 1)
+#endif
+
+
+/* Linux GCC modifiers */
+#if !defined(__init)
+#warning "missing definition: __init"
+/* define a dummy */
+#define __init
+#endif
+
+
+#if !defined(__exit)
+#warning "missing definition: __exit"
+/* define a dummy */
+#define __exit
+#endif
+
+
+#if !defined(__must_check)
+#warning "missing definition: __must_check"
+/* define a dummy */
+#define __must_check
+#endif
+
+
+#if !defined(__user)
+#warning "missing definition: __user"
+/* define a dummy */
+#define __user
+#endif
+
+#define INVALID_ORDER       ((unsigned int)(-1))
+
+/*----------------------------------------------------------------------------*/
+/* get the start address of the 4 KiB page containing the given address. */
+static inline void *get_page_start(
+	void *addr
+)
+{
+	return (void *)(((unsigned long)(addr)) & PAGE_MASK);
+}
+
+/*----------------------------------------------------------------------------*/
+/* get the offset into the 4 KiB page containing the given address. */
+static inline unsigned int get_offset_in_page(
+	void *addr
+)
+{
+	return (unsigned int)(((unsigned long)(addr)) & (~PAGE_MASK));
+}
+
+/*----------------------------------------------------------------------------*/
+/* get number of pages for a given buffer. */
+static inline unsigned int get_nr_of_pages_for_buffer(
+	void		*addr_start, /* may be null */
+	unsigned int	len
+)
+{
+	/* calculate used number of pages. Example:
+	offset+size    newSize+PAGE_SIZE-1    nr_of_pages
+	   0              4095                   0
+	   1              4096                   1
+	  4095            8190                   1
+	  4096            8191                   1
+	  4097            8192                   2 */
+
+	return (get_offset_in_page(addr_start) + len + PAGE_SIZE-1) / PAGE_SIZE;
+}
+
+
+/*----------------------------------------------------------------------------*/
+/**
+ * Convert a given size to a page order, which is equivalent to finding
+ * log_2() of the number of pages. The maximum for order was 5 in Linux 2.0,
+ * corresponding to 32 pages. Later versions allow 9, corresponding to 512
+ * pages (which is 2 MB on most platforms). Anyway, the bigger the order is,
+ * the more likely it is that the allocation will fail.
+ * Size       0           1  4097  8193  12289  24577  28673   40961   61441
+ * Pages      -           1     2     3      4      7      8      11      16
+ * Order  INVALID_ORDER   0     1     2      2      3      3       4       4
+ *
+ * @param  size
+ * @return order
+ */
+static inline unsigned int size_to_order(
+	unsigned int size
+)
+{
+	unsigned int order = INVALID_ORDER;
+
+	if (size != 0) {
+		/* ARMv5 has a CLZ instruction which counts the leading zeros of
+		the binary representation of a value. It returns a value
+		between 0 and 32.
+		Value   0   1   2   3   4   5   6   7   8   9  10 ...
+		CLZ    32  31  30  30  29  29  29  29  28  28  28 ...
+
+		We have excluded Size==0 before, so this is safe. */
+		order = __builtin_clz(
+				get_nr_of_pages_for_buffer(NULL, size));
+
+		/* there is a size overflow in get_nr_of_pages_for_buffer when
+		 * the size is too large */
+		if (unlikely(order > 31))
+			return INVALID_ORDER;
+		order = 31 - order;
+
+		/* the above algorithm rounds down: for 5 pages it yields
+		order 2 instead of 3 */
+		/* quick correction to fix it: */
+		if (((1<<order)*PAGE_SIZE) < size)
+			order++;
+	}
+	return order;
+}
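+
+/*
+ * Illustrative sketch (not part of the driver): expected results of
+ * size_to_order() for a 4 KiB PAGE_SIZE, matching the table above.
+ *
+ *	size_to_order(0)             == INVALID_ORDER
+ *	size_to_order(1)             == 0   (one page)
+ *	size_to_order(PAGE_SIZE + 1) == 1   (two pages)
+ *	size_to_order(3 * PAGE_SIZE) == 2   (rounded up to four pages)
+ */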
+
+/* magic linux macro */
+#if !defined(list_for_each_entry)
+/* stop compiler */
+#error "missing macro: list_for_each_entry()"
+/* define a dummy */
+#define list_for_each_entry(a, b, c)    if (0)
+#endif
+
+/*----------------------------------------------------------------------------*/
+/* return the page frame number of an address */
+static inline unsigned int addr_to_pfn(
+	void *addr
+)
+{
+	/* there is no real API for this */
+	return ((unsigned int)(addr)) >> PAGE_SHIFT;
+}
+
+
+/*----------------------------------------------------------------------------*/
+/* return the address of a page frame number */
+static inline void *pfn_to_addr(
+	unsigned int pfn
+)
+{
+	/* there is no real API for this */
+	return (void *)(pfn << PAGE_SHIFT);
+}
+
+#endif /* _MC_DRV_MODULE_LINUX_API_H_ */
+/** @} */
diff --git a/drivers/gud/mobicore_driver/platforms/MSM8960_SURF_STD/platform.h b/drivers/gud/mobicore_driver/platforms/MSM8960_SURF_STD/platform.h
new file mode 100644
index 0000000..7034cb0
--- /dev/null
+++ b/drivers/gud/mobicore_driver/platforms/MSM8960_SURF_STD/platform.h
@@ -0,0 +1,50 @@
+/**
+ * Header file of MobiCore Driver Kernel Module Platform
+ * specific structures
+ *
+ * @addtogroup MobiCore_Driver_Kernel_Module
+ * @{
+ * Internal structures of the McDrvModule
+ * @file
+ *
+ * Header file of the MobiCore Driver Kernel Module,
+ * its internal structures and defines.
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MC_DRV_PLATFORM_H_
+#define _MC_DRV_PLATFORM_H_
+
+/** MobiCore Interrupt for Qualcomm */
+#define MC_INTR_SSIQ						218
+
+/** Use SMC for fastcalls */
+#define MC_SMC_FASTCALL
+
+
+/*--------------- Implementation -------------- */
+#include <mach/scm.h>
+/* from following file */
+#define SCM_SVC_MOBICORE		250
+#define SCM_CMD_MOBICORE		1
+
+extern int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
+			void *resp_buf, size_t resp_len);
+
+static inline int smc_fastcall(void *fc_generic, size_t size)
+{
+	return scm_call(SCM_SVC_MOBICORE, SCM_CMD_MOBICORE,
+			   fc_generic, size,
+			   fc_generic, size);
+}
+
+/** Enable mobicore mem traces */
+#define MC_MEM_TRACES
+
+#endif /* _MC_DRV_PLATFORM_H_ */
+/** @} */
diff --git a/drivers/gud/mobicore_driver/public/mc_drv_module_api.h b/drivers/gud/mobicore_driver/public/mc_drv_module_api.h
new file mode 100644
index 0000000..59366f3
--- /dev/null
+++ b/drivers/gud/mobicore_driver/public/mc_drv_module_api.h
@@ -0,0 +1,311 @@
+/** @addtogroup MCD_MCDIMPL_KMOD_API Mobicore Driver Module API
+ * @ingroup  MCD_MCDIMPL_KMOD
+ * @{
+ * Interface to Mobicore Driver Kernel Module.
+ * @file
+ *
+ * <h2>Introduction</h2>
+ * The MobiCore Driver Kernel Module is a Linux device driver, which represents
+ * the command proxy on the lowest layer to the secure world (SWd). Additional
+ * services like memory allocation via mmap and generation of L2 tables for
+ * given virtual memory are also supported. IRQ functionality receives
+ * information from the SWd in the non-secure world (NWd).
+ * As is customary, the driver is handled as a Linux device driver with "open",
+ * "close" and "ioctl" commands. Access to the driver is possible after the
+ * device "/dev/mobicore" has been opened.
+ * The MobiCore Driver Kernel Module must be installed via
+ * "insmod mcDrvModule.ko".
+ *
+ *
+ * <h2>Version history</h2>
+ * <table class="customtab">
+ * <tr><td width="100px"><b>Date</b></td><td width="80px"><b>Version</b></td>
+ * <td><b>Changes</b></td></tr>
+ * <tr><td>2010-05-25</td><td>0.1</td><td>Initial Release</td></tr>
+ * </table>
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2010-2012 -->
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *	products derived from this software without specific prior
+ *	written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MC_DRV_MODULEAPI_H_
+#define _MC_DRV_MODULEAPI_H_
+
+#include "version.h"
+
+#define MC_DRV_MOD_DEVNODE		   "mobicore"
+#define MC_DRV_MOD_DEVNODE_FULLPATH  "/dev/" MC_DRV_MOD_DEVNODE
+
+/**
+ * Data exchange structure of the MC_DRV_MODULE_INIT ioctl command.
+ * INIT request data to SWD
+ */
+union mc_ioctl_init_params {
+	struct {
+		/** base address of mci buffer 4KB align */
+		uint32_t  base;
+		/** notification buffer start/length [16:16] [start, length] */
+		uint32_t  nq_offset;
+		/** length of notification queue */
+		uint32_t  nq_length;
+		/** mcp buffer start/length [16:16] [start, length] */
+		uint32_t  mcp_offset;
+		/** length of mcp buffer */
+		uint32_t  mcp_length;
+	} in;
+	struct {
+		/* nothing */
+	} out;
+};
+
+
+/**
+ * Data exchange structure of the MC_DRV_MODULE_INFO ioctl command.
+ * INFO request data to the SWD
+ */
+union mc_ioctl_info_params {
+	struct {
+		uint32_t  ext_info_id; /**< extended info ID */
+	} in;
+	struct {
+		uint32_t  state; /**< state */
+		uint32_t  ext_info; /**< extended info */
+	} out;
+};
+
+/**
+ * Mmap allocates and maps contiguous memory into a process.
+ * We use the offset parameter to distinguish between several cases
+ * (see the usage sketch below):
+ * offset = MC_DRV_KMOD_MMAP_WSM	usual operation, pages are registered
+ *					in the device structure and freed later.
+ * offset = MC_DRV_KMOD_MMAP_MCI	get instance of the MCI, allocates or
+ *					mmaps the MCI to the daemon
+ * offset = MC_DRV_KMOD_MMAP_PERSISTENTWSM	special operation, without
+ *						registration of pages
+ *
+ * In mmap(), the offset specifies which of several device I/O pages is
+ * requested. Linux only transfers the page number, i.e. the upper 20 bits, to
+ * the kernel module. Therefore we define our special offsets as multiples of
+ * the page size.
+ */
+enum mc_mmap_memtype {
+	MC_DRV_KMOD_MMAP_WSM		= 0,
+	MC_DRV_KMOD_MMAP_MCI		= 4096,
+	MC_DRV_KMOD_MMAP_PERSISTENTWSM	= 8192
+};
+
+struct mc_mmap_resp {
+	uint32_t  handle; /**< WSM handle */
+	uint32_t  phys_addr; /**< physical address of WSM (or NULL) */
+	bool	  is_reused; /**< if WSM memory was reused, or new allocated */
+};
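+
+/*
+ * Illustrative sketch (not part of the driver): how a client is expected to
+ * request WSM from user space via mmap(), using the offsets above. The device
+ * path comes from MC_DRV_MOD_DEVNODE_FULLPATH; length and error handling are
+ * simplified assumptions for illustration only.
+ *
+ *	int fd = open(MC_DRV_MOD_DEVNODE_FULLPATH, O_RDWR);
+ *	void *wsm = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *			 fd, MC_DRV_KMOD_MMAP_WSM);
+ *
+ *	The driver writes a struct mc_mmap_resp into the start of the mapped
+ *	buffer, so the handle can be read back directly:
+ *
+ *	struct mc_mmap_resp *resp = (struct mc_mmap_resp *)wsm;
+ *	uint32_t handle = resp->handle;
+ */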
+
+/**
+ * Data exchange structure of the MC_DRV_KMOD_IOCTL_FREE ioctl command.
+ */
+union mc_ioctl_free_params {
+	struct {
+		uint32_t  handle; /**< driver handle */
+		uint32_t  pid; /**< process id */
+	} in;
+	struct {
+		/* nothing */
+	} out;
+};
+
+
+/**
+ * Data exchange structure of the MC_DRV_KMOD_IOCTL_APP_REGISTER_WSM_L2 command.
+ *
+ * Allocates a physical L2 table and maps the buffer into this page.
+ * Returns the physical address of the L2 table.
+ * The page alignment is handled internally; the length and L2 offset are
+ * adjusted to the values actually used.
+ */
+union mc_ioctl_app_reg_wsm_l2_params {
+	struct {
+		uint32_t  buffer; /**< base address of the virtual buffer */
+		uint32_t  len; /**< size of the virtual address space */
+		uint32_t  pid; /**< process id */
+	} in;
+	struct {
+		uint32_t  handle; /**< driver handle for locked memory */
+		uint32_t  phys_wsm_l2_table; /* physical address of the L2 table */
+	} out;
+};
+
+
+/**
+ * Data exchange structure of the MC_DRV_KMOD_IOCTL_APP_UNREGISTER_WSM_L2
+ * command.
+ */
+struct mc_ioctl_app_unreg_wsm_l2_params {
+	struct {
+		uint32_t  handle; /**< driver handle for locked memory */
+		uint32_t  pid; /**< process id */
+	} in;
+	struct {
+		/* nothing */
+	} out;
+};
+
+
+/**
+ * Data exchange structure of the MC_DRV_KMOD_IOCTL_DAEMON_LOCK_WSM_L2 command.
+ */
+struct mc_ioctl_daemon_lock_wsm_l2_params {
+	struct {
+		uint32_t  handle; /**< driver handle for locked memory */
+	} in;
+	struct {
+		uint32_t phys_wsm_l2_table;
+	} out;
+};
+
+
+/**
+ * Data exchange structure of the MC_DRV_KMOD_IOCTL_DAEMON_UNLOCK_WSM_L2
+ * command.
+ */
+struct mc_ioctl_daemon_unlock_wsm_l2_params {
+	struct {
+		uint32_t  handle; /**< driver handle for locked memory */
+	} in;
+	struct {
+		/* nothing */
+	} out;
+};
+
+/**
+ * Data exchange structure of the MC_DRV_MODULE_FC_EXECUTE ioctl command.
+ */
+union mc_ioctl_fc_execute_params {
+	struct {
+		/**< base address of mobicore binary */
+		uint32_t  phys_start_addr;
+		/**< length of DDR area */
+		uint32_t  length;
+	} in;
+	struct {
+		/* nothing */
+	} out;
+};
+
+/**
+ * Data exchange structure of the MC_DRV_MODULE_GET_VERSION ioctl command.
+ */
+struct mc_ioctl_get_version_params {
+	struct {
+		uint32_t	kernel_module_version;
+	} out;
+};
+
+/* @defgroup Mobicore_Driver_Kernel_Module_Interface IOCTL */
+
+
+
+
+/* TODO: use IOCTL macros like _IOWR. See Documentation/ioctl/ioctl-number.txt,
+	Documentation/ioctl/ioctl-decoding.txt */
+/**
+ * defines for the ioctl mobicore driver module function call from user space.
+ */
+enum mc_kmod_ioctl {
+
+	/*
+	 * get detailed MobiCore Status
+	 */
+	MC_DRV_KMOD_IOCTL_DUMP_STATUS  = 200,
+
+	/*
+	 * initialize MobiCore
+	 */
+	MC_DRV_KMOD_IOCTL_FC_INIT  = 201,
+
+	/*
+	 * get MobiCore status
+	 */
+	MC_DRV_KMOD_IOCTL_FC_INFO  = 202,
+
+	/**
+	 * ioctl parameter to send the YIELD command to the SWD.
+	 * Only possible in Privileged Mode.
+	 * ioctl(fd, MC_DRV_MODULE_YIELD)
+	 */
+	MC_DRV_KMOD_IOCTL_FC_YIELD =  203,
+	/**
+	 * ioctl parameter to send the NSIQ signal to the SWD.
+	 * Only possible in Privileged Mode
+	 * ioctl(fd, MC_DRV_MODULE_NSIQ)
+	 */
+	MC_DRV_KMOD_IOCTL_FC_NSIQ   =  204,
+	/**
+	 * ioctl parameter to tzbsp to start Mobicore binary from DDR.
+	 * Only possible in Privileged Mode
+	 * ioctl(fd, MC_DRV_KMOD_IOCTL_FC_EXECUTE)
+	 */
+	MC_DRV_KMOD_IOCTL_FC_EXECUTE =  205,
+
+	/**
+	 * Frees memory which was formerly allocated by the driver's mmap
+	 * command. The parameter must be the mmapped address.
+	 * The internal instance data for this address is deleted, and for each
+	 * associated memory page the reserved bit is cleared
+	 * (ClearPageReserved).
+	 * Usage: ioctl(fd, MC_DRV_MODULE_FREE, &address) with address being of
+	 * type long
+	 */
+	MC_DRV_KMOD_IOCTL_FREE = 218,
+
+	/**
+	 * Creates a L2 Table of the given base address and the size of the
+	 * data.
+	 * Parameter: mc_ioctl_app_reg_wsm_l2_params
+	 */
+	MC_DRV_KMOD_IOCTL_APP_REGISTER_WSM_L2 = 220,
+
+	/**
+	 * Frees the L2 table created by a MC_DRV_KMOD_IOCTL_APP_REGISTER_WSM_L2
+	 * ioctl.
+	 * Parameter: mc_ioctl_app_unreg_wsm_l2_params
+	 */
+	MC_DRV_KMOD_IOCTL_APP_UNREGISTER_WSM_L2 = 221,
+
+
+	/* TODO: comment this. */
+	MC_DRV_KMOD_IOCTL_DAEMON_LOCK_WSM_L2 = 222,
+	MC_DRV_KMOD_IOCTL_DAEMON_UNLOCK_WSM_L2 = 223,
+
+	/**
+	 * Return kernel driver version.
+	 * Parameter: mc_ioctl_get_version_params
+	 */
+	MC_DRV_KMOD_IOCTL_GET_VERSION = 224,
+};
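+
+/*
+ * Illustrative sketch (not part of the driver): querying the kernel module
+ * version from user space with the command numbers above. Plain numbers are
+ * used because the commands are not (yet) encoded with the _IOWR macros;
+ * error handling is omitted for brevity.
+ *
+ *	struct mc_ioctl_get_version_params params;
+ *	int fd = open(MC_DRV_MOD_DEVNODE_FULLPATH, O_RDWR);
+ *
+ *	if (ioctl(fd, MC_DRV_KMOD_IOCTL_GET_VERSION, &params) == 0)
+ *		printf("kernel module version 0x%08x\n",
+ *		       params.out.kernel_module_version);
+ */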
+
+
+#endif /* _MC_DRV_MODULEAPI_H_ */
+/** @} */
diff --git a/drivers/gud/mobicore_driver/public/mc_kernel_api.h b/drivers/gud/mobicore_driver/public/mc_kernel_api.h
new file mode 100644
index 0000000..fdfc618
--- /dev/null
+++ b/drivers/gud/mobicore_driver/public/mc_kernel_api.h
@@ -0,0 +1,100 @@
+/** @addtogroup MCD_MCDIMPL_KMOD_KAPI Mobicore Driver Module API inside Kernel.
+ * @ingroup  MCD_MCDIMPL_KMOD
+ * @{
+ * Interface to Mobicore Driver Kernel Module inside Kernel.
+ * @file
+ *
+ * Interface to be used by module MobiCoreKernelAPI.
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2010-2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MOBICORE_KERNELMODULE_API_H_
+#define _MOBICORE_KERNELMODULE_API_H_
+
+struct mc_instance;
+
+/**
+ * Initialize a new mobicore API instance object
+ *
+ * @return Instance or NULL if no allocation was possible.
+ */
+struct mc_instance *mobicore_open(
+	void
+);
+
+/**
+ * Release a mobicore instance object and all objects related to it
+ * @param instance instance
+ * @return 0 on success or a negative error code
+ */
+int mobicore_release(
+	struct mc_instance	*instance
+);
+
+/**
+ * Allocate a contiguous WSM buffer for the given instance
+ * @param instance
+ * @param requested_size	size of the buffer
+ * @param handle		pointer where the handle will be stored
+ * @param kernel_virt_addr	pointer for the kernel virtual address
+ * @param phys_addr		pointer for the physical address
+ *
+ * @return 0 if no error
+ *
+ */
+int mobicore_allocate_wsm(
+	struct mc_instance	*instance,
+	unsigned long		requested_size,
+	uint32_t		*handle,
+	void			**kernel_virt_addr,
+	void			**phys_addr
+);
+
+/**
+ * Free a WSM buffer allocated with mobicore_allocate_wsm
+ * @param instance
+ * @param handle		handle of the buffer
+ *
+ * @return 0 if no error
+ *
+ */
+int mobicore_free(
+	struct mc_instance	*instance,
+	uint32_t		handle
+);
+
+/**
+ * Map a virtual memory buffer structure to Mobicore
+ * @param instance
+ * @param addr		address of the buffer (NB: it must be kernel virtual!)
+ * @param len		buffer length
+ * @param handle	pointer to handle
+ * @param phys_wsm_l2_table	pointer to physical L2 table(?)
+ *
+ * @return 0 if no error
+ *
+ */
+int mobicore_map_vmem(
+	struct mc_instance	*instance,
+	void			*addr,
+	uint32_t		len,
+	uint32_t		*handle,
+	void			**phys_wsm_l2_table
+);
+
+/**
+ * Unmap a virtual memory buffer from mobicore
+ * @param instance
+ * @param handle
+ *
+ * @return 0 if no error
+ *
+ */
+int mobicore_unmap_vmem(
+	struct mc_instance	*instance,
+	uint32_t		handle
+);
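+
+/*
+ * Illustrative sketch (not part of the driver): expected call sequence for an
+ * in-kernel user of this API. Buffer size and error handling are simplified
+ * assumptions for illustration only.
+ *
+ *	struct mc_instance *inst = mobicore_open();
+ *	uint32_t handle;
+ *	void *kva, *pa;
+ *
+ *	if (inst == NULL)
+ *		return -ENOMEM;
+ *	if (mobicore_allocate_wsm(inst, PAGE_SIZE, &handle, &kva, &pa) == 0) {
+ *		(use kva/pa as world shared memory here)
+ *		mobicore_free(inst, handle);
+ *	}
+ *	mobicore_release(inst);
+ */
+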
+#endif /* _MOBICORE_KERNELMODULE_API_H_ */
+/** @} */
diff --git a/drivers/gud/mobicore_driver/public/version.h b/drivers/gud/mobicore_driver/public/version.h
new file mode 100644
index 0000000..9b2dbca
--- /dev/null
+++ b/drivers/gud/mobicore_driver/public/version.h
@@ -0,0 +1,36 @@
+/** @addtogroup MCD_MCDIMPL_KMOD
+ * @{
+ * <!-- Copyright Giesecke & Devrient GmbH 2010-2012 -->
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MC_DRV_VERSION_H_
+#define _MC_DRV_VERSION_H_
+
+#define MCDRVMODULEAPI_VERSION_MAJOR 0
+#define MCDRVMODULEAPI_VERSION_MINOR 1
+
+#endif /* _MC_DRV_VERSION_H_ */
diff --git a/drivers/gud/mobicore_kernelapi/clientlib.c b/drivers/gud/mobicore_kernelapi/clientlib.c
new file mode 100644
index 0000000..13826f2
--- /dev/null
+++ b/drivers/gud/mobicore_kernelapi/clientlib.c
@@ -0,0 +1,1093 @@
+/**
+ * MobiCore KernelApi module
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/netlink.h>
+#include <net/sock.h>
+#include <net/net_namespace.h>
+#include <linux/list.h>
+
+#include "public/mobicore_driver_api.h"
+#include "public/mobicore_driver_cmd.h"
+#include "device.h"
+#include "session.h"
+
+/* device list */
+LIST_HEAD(devices);
+
+/*----------------------------------------------------------------------------*/
+static struct mcore_device_t *resolve_device_id(
+	uint32_t device_id
+) {
+	struct mcore_device_t *tmp;
+	struct list_head *pos;
+
+	/* Get mcore_device_t for device_id */
+	list_for_each(pos, &devices) {
+		tmp = list_entry(pos, struct mcore_device_t, list);
+		if (tmp->device_id == device_id)
+			return tmp;
+	}
+	return NULL;
+}
+
+
+/*----------------------------------------------------------------------------*/
+static void add_device(
+	struct mcore_device_t *device
+) {
+	list_add_tail(&(device->list), &devices);
+}
+
+
+/*----------------------------------------------------------------------------*/
+static bool remove_device(
+	uint32_t device_id
+) {
+	struct mcore_device_t *tmp;
+	struct list_head *pos, *q;
+
+	list_for_each_safe(pos, q, &devices) {
+		tmp = list_entry(pos, struct mcore_device_t, list);
+		if (tmp->device_id == device_id) {
+			list_del(pos);
+			mcore_device_cleanup(tmp);
+			return true;
+		}
+	}
+	return false;
+}
+
+
+/*----------------------------------------------------------------------------*/
+enum mc_result mc_open_device(
+	uint32_t device_id
+) {
+	enum mc_result mc_result = MC_DRV_OK;
+	struct connection *dev_con = NULL;
+
+	MCDRV_DBG_VERBOSE("===%s()===", __func__);
+
+	/* Enter critical section */
+
+	do {
+		struct mcore_device_t *device = resolve_device_id(device_id);
+		if (device != NULL) {
+			MCDRV_DBG_ERROR("Device %d already opened", device_id);
+			mc_result = MC_DRV_ERR_INVALID_OPERATION;
+			break;
+		}
+
+		/* Open new connection to device */
+		dev_con = connection_new();
+		if (!connection_connect(dev_con, MC_DAEMON_PID)) {
+			MCDRV_DBG_ERROR(
+				"Could not setup netlink connection to PID %u",
+				MC_DAEMON_PID);
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+			break;
+		}
+
+		/* Forward device open to the daemon and read result */
+		struct mc_drv_cmd_open_device_t mc_drv_cmd_open_device = {
+			/* C++ does not support C99 designated initializers */
+			/* .header = */ {
+			/* .command_id = */ MC_DRV_CMD_OPEN_DEVICE
+			},
+		/* .payload = */ {
+		/* .device_id = */ device_id
+			}
+		};
+
+		int len = connection_write_data(
+				dev_con,
+				&mc_drv_cmd_open_device,
+				sizeof(struct mc_drv_cmd_open_device_t));
+		if (len < 0) {
+			MCDRV_DBG_ERROR("CMD_OPEN_DEVICE writeCmd failed "
+				"ret=%d", len);
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+			break;
+		}
+
+		struct mc_drv_response_header_t  rsp_header;
+		len = connection_read_datablock(
+					dev_con,
+					&rsp_header,
+					sizeof(rsp_header));
+		if (len != sizeof(rsp_header)) {
+			MCDRV_DBG_ERROR("CMD_OPEN_DEVICE readRsp failed "
+				"ret=%d", len);
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+			break;
+		}
+		if (rsp_header.response_id != MC_DRV_RSP_OK) {
+			MCDRV_DBG_ERROR("CMD_OPEN_DEVICE failed, respId=%d",
+							rsp_header.response_id);
+			switch (rsp_header.response_id) {
+			case MC_DRV_RSP_PAYLOAD_LENGTH_ERROR:
+				mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+				break;
+			case MC_DRV_INVALID_DEVICE_NAME:
+				mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+				break;
+			case MC_DRV_RSP_DEVICE_ALREADY_OPENED:
+			default:
+				mc_result = MC_DRV_ERR_INVALID_OPERATION;
+				break;
+			}
+			break;
+		}
+
+		/* there is no payload to read */
+
+		device = mcore_device_create(device_id, dev_con);
+		if (!mcore_device_open(device, MC_DRV_MOD_DEVNODE_FULLPATH)) {
+			mcore_device_cleanup(device);
+			MCDRV_DBG_ERROR("could not open device file: %s",
+				MC_DRV_MOD_DEVNODE_FULLPATH);
+			mc_result = MC_DRV_ERR_INVALID_DEVICE_FILE;
+			break;
+		}
+
+		add_device(device);
+
+	} while (false);
+
+	if (mc_result != MC_DRV_OK)
+		connection_cleanup(dev_con);
+
+	/* Exit critical section */
+
+	return mc_result;
+}
+EXPORT_SYMBOL(mc_open_device);
+
+/*----------------------------------------------------------------------------*/
+enum mc_result mc_close_device(
+	uint32_t device_id
+) {
+	enum mc_result mc_result = MC_DRV_OK;
+
+	MCDRV_DBG_VERBOSE("===%s()===", __func__);
+
+	/* Enter critical section */
+	do {
+		struct mcore_device_t *device = resolve_device_id(device_id);
+		if (device == NULL) {
+			MCDRV_DBG_ERROR("Device not found");
+			mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+			break;
+		}
+		struct connection *dev_con = device->connection;
+
+		/* Return if not all sessions have been closed */
+		if (mcore_device_has_sessions(device)) {
+			MCDRV_DBG_ERROR("cannot close with sessions pending");
+			mc_result = MC_DRV_ERR_SESSION_PENDING;
+			break;
+		}
+
+		struct mc_drv_cmd_close_device_t mc_drv_cmd_close_device = {
+			/* C++ does not support C99 designated initializers */
+			/* .header = */ {
+				/* .command_id = */ MC_DRV_CMD_CLOSE_DEVICE
+			}
+		};
+		int len = connection_write_data(
+				dev_con,
+				&mc_drv_cmd_close_device,
+				sizeof(struct mc_drv_cmd_close_device_t));
+		/* ignore error, but log details */
+		if (len < 0) {
+			MCDRV_DBG_ERROR("CMD_CLOSE_DEVICE writeCmd failed "
+				"ret=%d", len);
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+		}
+
+		struct mc_drv_response_header_t  rsp_header;
+		len = connection_read_datablock(
+					dev_con,
+					&rsp_header,
+					sizeof(rsp_header));
+		if (len != sizeof(rsp_header)) {
+			MCDRV_DBG_ERROR("CMD_CLOSE_DEVICE readResp failed "
+				"ret=%d", len);
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+			break;
+		}
+
+		if (rsp_header.response_id != MC_DRV_RSP_OK) {
+			MCDRV_DBG_ERROR("CMD_CLOSE_DEVICE failed, respId=%d",
+							rsp_header.response_id);
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+			break;
+		}
+
+		remove_device(device_id);
+
+	} while (false);
+
+	/* Exit critical section */
+
+	return mc_result;
+}
+EXPORT_SYMBOL(mc_close_device);
+
+/*----------------------------------------------------------------------------*/
+enum mc_result mc_open_session(
+	struct mc_session_handle *session,
+	const struct mc_uuid_t	*uuid,
+	uint8_t			*tci,
+	uint32_t		len
+) {
+	enum mc_result mc_result = MC_DRV_OK;
+
+	MCDRV_DBG_VERBOSE("===%s()===", __func__);
+
+	/* Enter critical section */
+
+	do {
+		if (session == NULL) {
+			MCDRV_DBG_ERROR("Session is null");
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+		if (uuid == NULL) {
+			MCDRV_DBG_ERROR("UUID is null");
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+		if (tci == NULL) {
+			MCDRV_DBG_ERROR("TCI is null");
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+		if (len > MC_MAX_TCI_LEN) {
+			MCDRV_DBG_ERROR("TCI length is longer than %d",
+				MC_MAX_TCI_LEN);
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+
+		/* Get the device associated with the given session */
+		struct mcore_device_t *device =
+				resolve_device_id(session->device_id);
+		if (device == NULL) {
+			MCDRV_DBG_ERROR("Device not found");
+			mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+			break;
+		}
+		struct connection *dev_con = device->connection;
+
+		/* Get the physical address of the given TCI */
+		struct wsm *wsm =
+			mcore_device_find_contiguous_wsm(device, tci);
+		if (wsm == NULL) {
+			MCDRV_DBG_ERROR("Could not resolve TCI physical address");
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+
+		if (wsm->len < len) {
+			MCDRV_DBG_ERROR("TCI length exceeds the allocated TCI buffer");
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+
+		/* Prepare open session command */
+		struct mc_drv_cmd_open_session_t cmdOpenSession = {
+			/* C++ does not support C99 designated initializers */
+			/* .header = */ {
+				/* .command_id = */ MC_DRV_CMD_OPEN_SESSION
+			},
+			/* .payload = */ {
+				/* .device_id = */ session->device_id,
+				/* .uuid = */ *uuid,
+				/* .tci = */ (uint32_t)wsm->phys_addr,
+				/* .len = */ len
+			}
+		};
+
+		/* Transmit command data */
+
+		int len = connection_write_data(
+						dev_con,
+						&cmdOpenSession,
+						sizeof(cmdOpenSession));
+		if (len != sizeof(cmdOpenSession)) {
+			MCDRV_DBG_ERROR("CMD_OPEN_SESSION writeData failed "
+				"ret=%d", len);
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+			break;
+		}
+
+		/* Read command response */
+
+		/* read header first */
+		struct mc_drv_response_header_t rsp_header;
+		len = connection_read_datablock(
+					dev_con,
+					&rsp_header,
+					sizeof(rsp_header));
+		if (len != sizeof(rsp_header)) {
+			MCDRV_DBG_ERROR("CMD_OPEN_SESSION readResp failed "
+				"ret=%d", len);
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+			break;
+		}
+
+		if (rsp_header.response_id != MC_DRV_RSP_OK) {
+			MCDRV_DBG_ERROR("CMD_OPEN_SESSION failed, respId=%d",
+							rsp_header.response_id);
+			switch (rsp_header.response_id) {
+			case MC_DRV_RSP_TRUSTLET_NOT_FOUND:
+				mc_result = MC_DRV_ERR_INVALID_DEVICE_FILE;
+				break;
+			case MC_DRV_RSP_PAYLOAD_LENGTH_ERROR:
+			case MC_DRV_RSP_DEVICE_NOT_OPENED:
+			case MC_DRV_RSP_FAILED:
+			default:
+				mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+				break;
+			}
+			break;
+		}
+
+		/* read payload */
+		struct mc_drv_rsp_open_session_payload_t
+						rsp_open_session_payload;
+		len = connection_read_datablock(
+					dev_con,
+					&rsp_open_session_payload,
+					sizeof(rsp_open_session_payload));
+		if (len != sizeof(rsp_open_session_payload)) {
+			MCDRV_DBG_ERROR("CMD_OPEN_SESSION readPayload failed "
+				"ret=%d", len);
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+			break;
+		}
+
+		/* Register session with handle */
+		session->session_id = rsp_open_session_payload.session_id;
+
+		/* Set up second channel for notifications */
+		struct connection *session_connection = connection_new();
+		/*TODO: no real need to connect here? */
+		if (!connection_connect(session_connection, MC_DAEMON_PID)) {
+			MCDRV_DBG_ERROR(
+				"Could not setup netlink connection to PID %u",
+				MC_DAEMON_PID);
+			connection_cleanup(session_connection);
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+			break;
+		}
+
+		/* TODO: CONTINUE HERE!!!! FIX RW RETURN HANDLING!!!! */
+
+		/* Write command to use channel for notifications */
+		struct mc_drv_cmd_nqconnect_t cmd_nqconnect = {
+			/* C++ does not support C99 designated initializers */
+			/* .header = */ {
+				/* .command_id = */ MC_DRV_CMD_NQ_CONNECT
+			},
+			/* .payload = */ {
+				/* .device_id =  */ session->device_id,
+				/* .session_id = */ session->session_id,
+				/* .device_session_id = */
+				rsp_open_session_payload.device_session_id,
+				/* .session_magic = */
+					rsp_open_session_payload.session_magic
+			}
+		};
+		connection_write_data(session_connection,
+			&cmd_nqconnect,
+			sizeof(cmd_nqconnect));
+
+		/* Read command response, header first */
+		len = connection_read_datablock(
+					session_connection,
+					&rsp_header,
+					sizeof(rsp_header));
+		if (len != sizeof(rsp_header)) {
+			MCDRV_DBG_ERROR("CMD_NQ_CONNECT readRsp failed "
+				"ret=%d", len);
+			connection_cleanup(session_connection);
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+			break;
+		}
+
+		if (rsp_header.response_id != MC_DRV_RSP_OK) {
+			MCDRV_DBG_ERROR("CMD_NQ_CONNECT failed, respId=%d",
+					rsp_header.response_id);
+			connection_cleanup(session_connection);
+			mc_result = MC_DRV_ERR_NQ_FAILED;
+			break;
+		}
+
+		/* there is no payload. */
+
+		/* Session established, new session object must be created */
+		mcore_device_create_new_session(
+			device,
+			session->session_id,
+			session_connection);
+
+	} while (false);
+
+	/* Exit critical section */
+
+	return mc_result;
+}
+EXPORT_SYMBOL(mc_open_session);
+
+/*----------------------------------------------------------------------------*/
+enum mc_result mc_close_session(
+	struct mc_session_handle *session
+) {
+	enum mc_result mc_result = MC_DRV_OK;
+
+	MCDRV_DBG_VERBOSE("===%s()===", __func__);
+
+	/* Enter critical section */
+
+	do {
+		if (session == NULL) {
+			MCDRV_DBG_ERROR("Session is null");
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+
+		struct mcore_device_t  *device =
+					resolve_device_id(session->device_id);
+		if (device == NULL) {
+			MCDRV_DBG_ERROR("Device not found");
+			mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+			break;
+		}
+		struct connection  *dev_con = device->connection;
+
+		struct session  *nq_session =
+		 mcore_device_resolve_session_id(device, session->session_id);
+		if (nq_session == NULL) {
+			MCDRV_DBG_ERROR("Session not found");
+			mc_result = MC_DRV_ERR_UNKNOWN_SESSION;
+			break;
+		}
+
+		/* Write close session command */
+		struct mc_drv_cmd_close_session_t cmd_close_session = {
+			/* C++ does not support C99 designated initializers */
+			/* .header = */ {
+				/* .command_id = */ MC_DRV_CMD_CLOSE_SESSION
+			},
+			/* .payload = */ {
+				/* .session_id = */ session->session_id,
+			}
+		};
+		connection_write_data(
+			dev_con,
+			&cmd_close_session,
+			sizeof(cmd_close_session));
+
+		/* Read command response */
+		struct mc_drv_response_header_t rsp_header;
+		int len = connection_read_datablock(
+						dev_con,
+						&rsp_header,
+						sizeof(rsp_header));
+		if (len != sizeof(rsp_header)) {
+			MCDRV_DBG_ERROR("CMD_CLOSE_SESSION readRsp failed "
+				"ret=%d", len);
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+			break;
+		}
+
+		if (rsp_header.response_id != MC_DRV_RSP_OK) {
+			MCDRV_DBG_ERROR("CMD_CLOSE_SESSION failed, respId=%d",
+							rsp_header.response_id);
+			mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+			break;
+		}
+
+		mcore_device_remove_session(device, session->session_id);
+		mc_result = MC_DRV_OK;
+
+	} while (false);
+
+	/* Exit critical section */
+
+	return mc_result;
+}
+EXPORT_SYMBOL(mc_close_session);
+
+/*----------------------------------------------------------------------------*/
+enum mc_result mc_notify(
+	struct mc_session_handle   *session
+) {
+	enum mc_result mc_result = MC_DRV_OK;
+
+	MCDRV_DBG_VERBOSE("===%s()===", __func__);
+
+	do {
+		if (session == NULL) {
+			MCDRV_DBG_ERROR("Session is null");
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+
+		struct mcore_device_t *device =
+					resolve_device_id(session->device_id);
+		if (device == NULL) {
+			MCDRV_DBG_ERROR("Device not found");
+			mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+			break;
+		}
+		struct connection *dev_con = device->connection;
+
+		struct session  *nqsession =
+		 mcore_device_resolve_session_id(device, session->session_id);
+		if (nqsession == NULL) {
+			MCDRV_DBG_ERROR("Session not found");
+			mc_result = MC_DRV_ERR_UNKNOWN_SESSION;
+			break;
+		}
+
+		struct mc_drv_cmd_notify_t cmd_notify = {
+			/* C++ does not support C99 designated initializers */
+			/* .header = */ {
+				/* .command_id = */ MC_DRV_CMD_NOTIFY
+			},
+			/* .payload = */ {
+				/* .session_id = */ session->session_id,
+			}
+		};
+
+		connection_write_data(
+			dev_con,
+			&cmd_notify,
+			sizeof(cmd_notify));
+
+		/* Daemon will not return a response */
+
+	} while (false);
+
+	return mc_result;
+}
+EXPORT_SYMBOL(mc_notify);
+
+/*----------------------------------------------------------------------------*/
+enum mc_result mc_wait_notification(
+	struct mc_session_handle  *session,
+	int32_t			timeout
+) {
+	enum mc_result mc_result = MC_DRV_OK;
+
+	MCDRV_DBG_VERBOSE("===%s()===", __func__);
+
+	do {
+		if (session == NULL) {
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+
+		struct mcore_device_t  *device =
+					resolve_device_id(session->device_id);
+		if (device == NULL) {
+			MCDRV_DBG_ERROR("Device not found");
+			mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+			break;
+		}
+
+		struct session  *nq_session =
+		 mcore_device_resolve_session_id(device, session->session_id);
+		if (nq_session == NULL) {
+			MCDRV_DBG_ERROR("Session not found");
+			mc_result = MC_DRV_ERR_UNKNOWN_SESSION;
+			break;
+		}
+
+		struct connection *nqconnection =
+					nq_session->notification_connection;
+		uint32_t count = 0;
+
+		/* Read notification queue till it's empty */
+		for (;;) {
+			struct notification notification;
+			ssize_t num_read = connection_read_data(
+				nqconnection,
+				&notification,
+				sizeof(notification),
+				timeout);
+			/* Exit on timeout in the first run. Later runs have
+			 * the timeout set to 0.
+			 * -2 means there is no more data. */
+			if (count == 0 && num_read == -2) {
+				MCDRV_DBG_ERROR("read timeout");
+				mc_result = MC_DRV_ERR_TIMEOUT;
+				break;
+			}
+			/* After the first notification the queue will be
+			 * drained, thus we set no timeout for the
+			 * following reads. */
+			timeout = 0;
+
+			if (num_read != sizeof(struct notification)) {
+				if (count == 0) {
+					/* failure in first read, notify it */
+					mc_result = MC_DRV_ERR_NOTIFICATION;
+					MCDRV_DBG_ERROR(
+					"read notification failed, "
+					"%i bytes received", (int)num_read);
+					break;
+				} else {
+					/* Read of the n-th notification
+					   failed/timeout. We don't tell the
+					   caller, as we got valid notifications
+					   before. */
+					mc_result = MC_DRV_OK;
+					break;
+				}
+			}
+
+			count++;
+			MCDRV_DBG_VERBOSE("readNq count=%d, SessionID=%d, "
+				"Payload=%d", count,
+				notification.session_id, notification.payload);
+
+			if (notification.payload != 0) {
+				/* Session end point died -> store exit code */
+				session_set_error_info(nq_session,
+					notification.payload);
+
+				mc_result = MC_DRV_INFO_NOTIFICATION;
+				break;
+			}
+		} /* for(;;) */
+
+	} while (false);
+
+	return mc_result;
+}
+EXPORT_SYMBOL(mc_wait_notification);
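+
+/*
+ * Illustrative usage sketch (not part of this file): a typical caller-side
+ * sequence for the session and notification API above, assumed to run inside
+ * some kernel client function.  The device id 0, the all-zero UUID and the
+ * 4 KiB TCI size are placeholder assumptions for this example only.
+ *
+ *	struct mc_uuid_t uuid = { { 0 } };
+ *	struct mc_session_handle session;
+ *	uint8_t *tci = NULL;
+ *	uint32_t device_id = 0;
+ *
+ *	memset(&session, 0, sizeof(session));
+ *	session.device_id = device_id;
+ *
+ *	if (mc_open_device(device_id) != MC_DRV_OK)
+ *		return;
+ *	if (mc_malloc_wsm(device_id, 0, 4096, &tci, 0) == MC_DRV_OK &&
+ *	    mc_open_session(&session, &uuid, tci, 4096) == MC_DRV_OK) {
+ *		mc_notify(&session);
+ *		mc_wait_notification(&session, -1);
+ *		mc_close_session(&session);
+ *	}
+ *	if (tci != NULL)
+ *		mc_free_wsm(device_id, tci);
+ *	mc_close_device(device_id);
+ */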
+
+/*----------------------------------------------------------------------------*/
+enum mc_result mc_malloc_wsm(
+	uint32_t	device_id,
+	uint32_t	align,
+	uint32_t	len,
+	uint8_t		**wsm,
+	uint32_t	wsm_flags
+) {
+	enum mc_result mc_result = MC_DRV_ERR_UNKNOWN;
+
+	MCDRV_DBG_VERBOSE("===%s()===", __func__);
+
+	/* Enter critical section */
+
+	do {
+		struct mcore_device_t *device = resolve_device_id(device_id);
+		if (device == NULL) {
+			MCDRV_DBG_ERROR("Device not found");
+			mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+			break;
+		}
+		if (wsm == NULL) {
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+
+		struct wsm *wsm_stack =
+			mcore_device_allocate_contiguous_wsm(device, len);
+		if (wsm_stack == NULL) {
+			MCDRV_DBG_ERROR("Allocation of WSM failed");
+			mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+			break;
+		}
+
+		*wsm = (uint8_t *)wsm_stack->virt_addr;
+		mc_result = MC_DRV_OK;
+
+	} while (false);
+
+	/* Exit critical section */
+
+	return mc_result;
+}
+EXPORT_SYMBOL(mc_malloc_wsm);
+
+/*----------------------------------------------------------------------------*/
+enum mc_result mc_free_wsm(
+	uint32_t	device_id,
+	uint8_t		*wsm
+) {
+	enum mc_result mc_result = MC_DRV_ERR_UNKNOWN;
+	struct mcore_device_t *device;
+
+
+	MCDRV_DBG_VERBOSE("===%s()===", __func__);
+
+	/* Enter critical section */
+
+	do {
+
+		/* Get the device associated with the given session */
+		device = resolve_device_id(device_id);
+		if (device == NULL) {
+			MCDRV_DBG_ERROR("Device not found");
+			mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+			break;
+		}
+
+		/* find WSM object */
+		struct wsm *wsm_stack =
+			mcore_device_find_contiguous_wsm(device, wsm);
+		if (wsm_stack == NULL) {
+			MCDRV_DBG_ERROR("unknown address");
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+
+		/* Free the given virtual address */
+		if (!mcore_device_free_contiguous_wsm(device, wsm_stack)) {
+			MCDRV_DBG_ERROR("Free of virtual address failed");
+			mc_result = MC_DRV_ERR_FREE_MEMORY_FAILED;
+			break;
+		}
+		mc_result = MC_DRV_OK;
+
+	} while (false);
+
+	/* Exit critical section */
+
+	return mc_result;
+}
+EXPORT_SYMBOL(mc_free_wsm);
+
+/*----------------------------------------------------------------------------*/
+enum mc_result mc_map(
+	struct mc_session_handle	*session_handle,
+	void				*buf,
+	uint32_t			buf_len,
+	struct mc_bulk_map		*map_info
+) {
+	enum mc_result mc_result = MC_DRV_ERR_UNKNOWN;
+
+	MCDRV_DBG_VERBOSE("===%s()===", __func__);
+
+	/* Enter critical section */
+
+	do {
+		if (session_handle == NULL) {
+			MCDRV_DBG_ERROR("session_handle is null");
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+		if (map_info == NULL) {
+			MCDRV_DBG_ERROR("map_info is null");
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+		if (buf == NULL) {
+			MCDRV_DBG_ERROR("buf is null");
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+
+		/* Determine device the session belongs to */
+		struct mcore_device_t  *device = resolve_device_id(
+						session_handle->device_id);
+		if (device == NULL) {
+			MCDRV_DBG_ERROR("Device not found");
+			mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+			break;
+		}
+		struct connection *dev_con = device->connection;
+
+		/* Get session */
+		struct session  *session =
+		mcore_device_resolve_session_id(device,
+						session_handle->session_id);
+		if (session == NULL) {
+			MCDRV_DBG_ERROR("Session not found");
+			mc_result = MC_DRV_ERR_UNKNOWN_SESSION;
+			break;
+		}
+
+		/* Register the bulk buffer with the Kernel Module and keep
+		   track of it in the session */
+		struct bulk_buffer_descriptor *bulk_buf = session_add_bulk_buf(
+			session, buf, buf_len);
+		if (bulk_buf == NULL) {
+			MCDRV_DBG_ERROR("Error mapping bulk buffer");
+			mc_result = MC_DRV_ERR_BULK_MAPPING;
+			break;
+		}
+
+		/* Prepare map command */
+		struct mc_drv_cmd_map_bulk_mem_t mc_drv_cmd_map_bulk_mem = {
+			/* C++ does not support C99 designated initializers */
+			/* .header = */ {
+				/* .command_id = */ MC_DRV_CMD_MAP_BULK_BUF
+			},
+			/* .payload = */ {
+				/* .session_id = */ session->session_id,
+				/* .phys_addr_l2; = */
+					(uint32_t)bulk_buf->phys_addr_wsm_l2,
+				/* .offset_payload = */
+					(uint32_t)(bulk_buf->virt_addr) & 0xFFF,
+				/* .len_bulk_mem = */ bulk_buf->len
+			}
+		};
+
+		/* Transmit map command to MobiCore device */
+		connection_write_data(
+			dev_con,
+			&mc_drv_cmd_map_bulk_mem,
+			sizeof(mc_drv_cmd_map_bulk_mem));
+
+		/* Read command response */
+		struct mc_drv_response_header_t rsp_header;
+		int len = connection_read_datablock(
+						dev_con,
+						&rsp_header,
+						sizeof(rsp_header));
+		if (len != sizeof(rsp_header)) {
+			MCDRV_DBG_ERROR("CMD_MAP_BULK_BUF readRsp failed, "
+				"ret=%d", len);
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+			break;
+		}
+
+		if (rsp_header.response_id != MC_DRV_RSP_OK) {
+			MCDRV_DBG_ERROR("CMD_MAP_BULK_BUF failed, respId=%d",
+							rsp_header.response_id);
+			/* REV We ignore Daemon Error code because client cannot
+			   handle it anyhow. */
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+
+			/* Unregister mapped bulk buffer from Kernel Module and
+			   remove mapped bulk buffer from session maintenance */
+			if (!session_remove_bulk_buf(session, buf)) {
+				/* Removing of bulk buffer not possible */
+				MCDRV_DBG_ERROR("Unregistering of bulk memory "
+					"from Kernel Module failed");
+			}
+			break;
+		}
+
+		struct mc_drv_rsp_map_bulk_mem_payload_t
+						rsp_map_bulk_mem_payload;
+		connection_read_datablock(
+			dev_con,
+			&rsp_map_bulk_mem_payload,
+			sizeof(rsp_map_bulk_mem_payload));
+
+		/* Set mapping info for Trustlet */
+		map_info->secure_virt_addr =
+			(void *)(rsp_map_bulk_mem_payload.secure_virtual_adr);
+		map_info->secure_virt_len = buf_len;
+		mc_result = MC_DRV_OK;
+
+	} while (false);
+
+	/* Exit critical section */
+
+	return mc_result;
+}
+EXPORT_SYMBOL(mc_map);
+
+/*----------------------------------------------------------------------------*/
+enum mc_result mc_unmap(
+	struct mc_session_handle	*session_handle,
+	void				*buf,
+	struct mc_bulk_map		*map_info
+) {
+	enum mc_result mc_result = MC_DRV_ERR_UNKNOWN;
+
+	MCDRV_DBG_VERBOSE("===%s()===", __func__);
+
+	/* Enter critical section */
+
+	do {
+		if (session_handle == NULL) {
+			MCDRV_DBG_ERROR("session_handle is null");
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+		if (map_info == NULL) {
+			MCDRV_DBG_ERROR("map_info is null");
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+		if (buf == NULL) {
+			MCDRV_DBG_ERROR("buf is null");
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+
+		/* Determine device the session belongs to */
+		struct mcore_device_t  *device =
+			resolve_device_id(session_handle->device_id);
+		if (device == NULL) {
+			MCDRV_DBG_ERROR("Device not found");
+			mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+			break;
+		}
+		struct connection  *dev_con = device->connection;
+
+		/* Get session */
+		struct session  *session =
+			mcore_device_resolve_session_id(device,
+						session_handle->session_id);
+		if (session == NULL) {
+			MCDRV_DBG_ERROR("Session not found");
+			mc_result = MC_DRV_ERR_UNKNOWN_SESSION;
+			break;
+		}
+
+		/* Prepare unmap command */
+		struct mc_drv_cmd_unmap_bulk_mem_t cmd_unmap_bulk_mem = {
+				/* .header = */ {
+					/* .command_id = */
+						MC_DRV_CMD_UNMAP_BULK_BUF
+				},
+				/* .payload = */ {
+					/* .session_id = */ session->session_id,
+					/* .secure_virtual_adr = */
+					(uint32_t)(map_info->secure_virt_addr),
+					/* .len_bulk_mem =
+						map_info->secure_virt_len*/
+				}
+			};
+
+		connection_write_data(
+			dev_con,
+			&cmd_unmap_bulk_mem,
+			sizeof(cmd_unmap_bulk_mem));
+
+		/* Read command response */
+		struct mc_drv_response_header_t rsp_header;
+		int len = connection_read_datablock(
+						dev_con,
+						&rsp_header,
+						sizeof(rsp_header));
+		if (len != sizeof(rsp_header)) {
+			MCDRV_DBG_ERROR("CMD_UNMAP_BULK_BUF readRsp failed, "
+				"ret=%d", len);
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+			break;
+		}
+
+		if (rsp_header.response_id != MC_DRV_RSP_OK) {
+			MCDRV_DBG_ERROR("CMD_UNMAP_BULK_BUF failed, respId=%d",
+							rsp_header.response_id);
+			/* REV We ignore Daemon Error code because client
+			   cannot handle it anyhow. */
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+			break;
+		}
+
+		struct mc_drv_rsp_unmap_bulk_mem_payload_t
+						rsp_unmap_bulk_mem_payload;
+		connection_read_datablock(
+			dev_con,
+			&rsp_unmap_bulk_mem_payload,
+			sizeof(rsp_unmap_bulk_mem_payload));
+
+		/* REV axh: what about check the payload? */
+
+		/* Unregister mapped bulk buffer from Kernel Module and
+		 * remove mapped bulk buffer from session maintenance */
+		if (!session_remove_bulk_buf(session, buf)) {
+			/* Removing of bulk buffer not possible */
+			MCDRV_DBG_ERROR("Unregistering of bulk memory from "
+							"Kernel Module failed");
+			mc_result = MC_DRV_ERR_BULK_UNMAPPING;
+			break;
+		}
+
+		mc_result = MC_DRV_OK;
+
+	} while (false);
+
+	/* Exit critical section */
+
+	return mc_result;
+}
+EXPORT_SYMBOL(mc_unmap);
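+
+/*
+ * Illustrative usage sketch (not part of this file): mapping a bulk buffer
+ * into an already opened session and unmapping it again.  "session" is a
+ * handle previously filled by mc_open_session(); the buffer, its PAGE_SIZE
+ * length and the missing error handling are assumptions for this example.
+ * The secure address returned in map_info.secure_virt_addr would typically
+ * be passed to the trustlet via the TCI.
+ *
+ *	struct mc_bulk_map map_info;
+ *	uint8_t *bulk_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ *
+ *	if (bulk_buf != NULL &&
+ *	    mc_map(&session, bulk_buf, PAGE_SIZE, &map_info) == MC_DRV_OK)
+ *		mc_unmap(&session, bulk_buf, &map_info);
+ *
+ *	kfree(bulk_buf);
+ */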
+
+/*----------------------------------------------------------------------------*/
+enum mc_result mc_get_session_error_code(
+	struct mc_session_handle   *session,
+	int32_t			 *last_error
+) {
+	enum mc_result mc_result = MC_DRV_OK;
+
+	MCDRV_DBG_VERBOSE("===%s()===", __func__);
+
+	do {
+		if (session == NULL || last_error == NULL) {
+			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+			break;
+		}
+
+		/* Get device */
+		struct mcore_device_t *device =
+					resolve_device_id(session->device_id);
+		if (device == NULL) {
+			MCDRV_DBG_ERROR("Device not found");
+			mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+			break;
+		}
+
+		/* Get session */
+		struct session *nqsession =
+		 mcore_device_resolve_session_id(device, session->session_id);
+		if (nqsession == NULL) {
+			MCDRV_DBG_ERROR("Session not found");
+			mc_result = MC_DRV_ERR_UNKNOWN_SESSION;
+			break;
+		}
+
+		/* get session error code from session */
+		*last_error = session_get_last_err(nqsession);
+
+	} while (false);
+
+	return mc_result;
+}
+EXPORT_SYMBOL(mc_get_session_error_code);
+
+/*----------------------------------------------------------------------------*/
+enum mc_result mc_driver_ctrl(
+	enum mc_driver_ctrl  param,
+	uint8_t		 *data,
+	uint32_t		len
+) {
+	MCDRV_DBG_WARN("not implemented");
+	return MC_DRV_ERR_NOT_IMPLEMENTED;
+}
+EXPORT_SYMBOL(mc_driver_ctrl);
+
+/*----------------------------------------------------------------------------*/
+enum mc_result mc_manage(
+	uint32_t  device_id,
+	uint8_t   *data,
+	uint32_t  len
+) {
+	MCDRV_DBG_WARN("not implemented");
+	return MC_DRV_ERR_NOT_IMPLEMENTED;
+}
+EXPORT_SYMBOL(mc_manage);
+
diff --git a/drivers/gud/mobicore_kernelapi/common.h b/drivers/gud/mobicore_kernelapi/common.h
new file mode 100644
index 0000000..2a73474
--- /dev/null
+++ b/drivers/gud/mobicore_kernelapi/common.h
@@ -0,0 +1,97 @@
+/**
+ *
+ * Common data types
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009 - 2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef COMMON_H
+#define COMMON_H
+
+#include "connection.h"
+#include "mcinq.h"
+
+void mcapi_insert_connection(
+	struct connection *connection
+);
+
+void mcapi_remove_connection(
+	uint32_t seq
+);
+
+unsigned int mcapi_unique_id(
+	void
+);
+
+
+#define MC_DAEMON_PID 0xFFFFFFFF
+#define MC_DRV_MOD_DEVNODE_FULLPATH "/dev/mobicore"
+
+/* dummy function helper macro. */
+#define DUMMY_FUNCTION()	do {} while (0)
+
+#define MCDRV_ERROR(txt, ...) \
+	printk(KERN_ERR "mcKernelApi %s() ### ERROR: " txt, \
+		__func__, \
+		##__VA_ARGS__)
+
+#if defined(DEBUG)
+
+/* #define DEBUG_VERBOSE */
+#if defined(DEBUG_VERBOSE)
+#define MCDRV_DBG_VERBOSE		  MCDRV_DBG
+#else
+#define MCDRV_DBG_VERBOSE(...)	 DUMMY_FUNCTION()
+#endif
+
+#define MCDRV_DBG(txt, ...) \
+	printk(KERN_INFO "mcKernelApi %s(): " txt, \
+		__func__, \
+		##__VA_ARGS__)
+
+#define MCDRV_DBG_WARN(txt, ...) \
+	printk(KERN_WARNING "mcKernelApi %s() WARNING: " txt, \
+		__func__, \
+		##__VA_ARGS__)
+
+#define MCDRV_DBG_ERROR(txt, ...) \
+	printk(KERN_ERR "mcKernelApi %s() ### ERROR: " txt, \
+		__func__, \
+		##__VA_ARGS__)
+
+
+#define MCDRV_ASSERT(cond) \
+	do { \
+		if (unlikely(!(cond))) { \
+			panic("mcKernelApi Assertion failed: %s:%d\n", \
+				__FILE__, __LINE__); \
+		} \
+	} while (0)
+
+#elif defined(NDEBUG)
+
+#define MCDRV_DBG_VERBOSE(...)	DUMMY_FUNCTION()
+#define MCDRV_DBG(...)		DUMMY_FUNCTION()
+#define MCDRV_DBG_WARN(...)	DUMMY_FUNCTION()
+#define MCDRV_DBG_ERROR(...)	DUMMY_FUNCTION()
+
+#define MCDRV_ASSERT(...)	DUMMY_FUNCTION()
+
+#else
+#error "Define DEBUG or NDEBUG"
+#endif /* [not] defined(DEBUG) */
+
+
+#define LOG_I MCDRV_DBG_VERBOSE
+#define LOG_W MCDRV_DBG_WARN
+#define LOG_E MCDRV_DBG_ERROR
+
+
+#define assert(expr) MCDRV_ASSERT(expr)
+
+#endif /* COMMON_H */
+
+/** @} */
diff --git a/drivers/gud/mobicore_kernelapi/connection.c b/drivers/gud/mobicore_kernelapi/connection.c
new file mode 100644
index 0000000..9048ae8
--- /dev/null
+++ b/drivers/gud/mobicore_kernelapi/connection.c
@@ -0,0 +1,229 @@
+/** @addtogroup MCD_MCDIMPL_DAEMON_SRV
+ * @{
+ * @file
+ *
+ * Connection data.
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009 - 2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/netlink.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/semaphore.h>
+#include <linux/time.h>
+#include <net/sock.h>
+#include <net/net_namespace.h>
+
+#include "connection.h"
+#include "common.h"
+
+/* Define the initial state of the Data Available Semaphore */
+#define SEM_NO_DATA_AVAILABLE 0
+
+/*----------------------------------------------------------------------------*/
+struct connection *connection_new(
+	void
+) {
+	struct connection *conn = kzalloc(sizeof(struct connection),
+					GFP_KERNEL);
+	conn->sequence_magic = mcapi_unique_id();
+	mutex_init(&conn->data_lock);
+	/* No data available */
+	sema_init(&conn->data_available_sem, SEM_NO_DATA_AVAILABLE);
+
+	mcapi_insert_connection(conn);
+	return conn;
+}
+
+/*----------------------------------------------------------------------------*/
+struct connection *connection_create(
+	int	 socket_descriptor,
+	pid_t   dest
+) {
+	struct connection *conn = connection_new();
+
+	conn->peer_pid = dest;
+	return conn;
+}
+
+
+/*----------------------------------------------------------------------------*/
+void connection_cleanup(
+	struct connection *conn
+) {
+	if (!conn)
+		return;
+
+	kfree_skb(conn->skb);
+
+	mcapi_remove_connection(conn->sequence_magic);
+	kfree(conn);
+}
+
+
+/*----------------------------------------------------------------------------*/
+bool connection_connect(
+	struct connection *conn,
+	pid_t		dest
+) {
+	/* Nothing to connect */
+	conn->peer_pid = dest;
+	return true;
+}
+
+/*----------------------------------------------------------------------------*/
+size_t connection_readDataMsg(
+	struct connection *conn,
+	void *buffer,
+	uint32_t len
+) {
+	size_t ret = -1;
+	MCDRV_DBG_VERBOSE("reading connection data %u, connection data left %u",
+			len, conn->data_len);
+	/* trying to read more than the data left */
+	if (len > conn->data_len) {
+		ret = conn->data_len;
+		memcpy(buffer, conn->data_start, conn->data_len);
+		conn->data_len = 0;
+	} else {
+		ret = len;
+		memcpy(buffer, conn->data_start, len);
+		conn->data_len -= len;
+		conn->data_start += len;
+	}
+
+	if (conn->data_len == 0)	{
+		conn->data_start = NULL;
+		kfree_skb(conn->skb);
+		conn->skb = NULL;
+	}
+	MCDRV_DBG_VERBOSE("read %u",  ret);
+	return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+size_t connection_read_datablock(
+	struct connection *conn,
+	void		 *buffer,
+	uint32_t	 len
+) {
+	return connection_read_data(conn, buffer, len, -1);
+}
+
+
+/*----------------------------------------------------------------------------*/
+size_t connection_read_data(
+	struct connection *conn,
+	void		*buffer,
+	uint32_t	len,
+	int32_t		timeout
+) {
+	size_t ret = 0;
+
+	MCDRV_ASSERT(buffer != NULL);
+	MCDRV_ASSERT(conn->socket_descriptor != NULL);
+
+	MCDRV_DBG_VERBOSE("read data len = %u for PID = %u",
+						len, conn->sequence_magic);
+	do {
+		/* Wait until data is available or timeout
+		   msecs_to_jiffies(-1) -> wait forever for the sem */
+		if (down_timeout(&(conn->data_available_sem),
+				  msecs_to_jiffies(timeout))) {
+			MCDRV_DBG_VERBOSE("Timeout reading the data sem");
+			ret = -2;
+			break;
+		}
+
+		if (mutex_lock_interruptible(&(conn->data_lock))) {
+			MCDRV_DBG_ERROR("interrupted reading the data sem");
+			ret = -1;
+			break;
+		}
+		/* Have data, use it */
+		if (conn->data_len > 0)
+			ret = connection_readDataMsg(conn, buffer, len);
+
+		mutex_unlock(&(conn->data_lock));
+
+		/* There is still some data left */
+		if (conn->data_len > 0)
+			up(&conn->data_available_sem);
+	} while (0);
+
+	return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+size_t connection_write_data(
+	struct connection *conn,
+	void		 *buffer,
+	uint32_t	 len
+) {
+	struct sk_buff *skb = NULL;
+	struct nlmsghdr *nlh;
+	int ret = 0;
+
+	MCDRV_DBG_VERBOSE("buffer length %u from pid %u\n",
+		  len,  conn->sequence_magic);
+	do {
+		skb = nlmsg_new(NLMSG_SPACE(len), GFP_KERNEL);
+		if (!skb) {
+			ret = -1;
+			break;
+		}
+
+		nlh = nlmsg_put(skb, 0, conn->sequence_magic, 2,
+					  NLMSG_LENGTH(len), NLM_F_REQUEST);
+		if (!nlh) {
+			ret = -1;
+			break;
+		}
+		memcpy(NLMSG_DATA(nlh), buffer, len);
+
+		netlink_unicast(conn->socket_descriptor, skb,
+						conn->peer_pid, MSG_DONTWAIT);
+		ret = len;
+	} while (0);
+
+	if (ret < 0 && skb != NULL)
+		kfree_skb(skb);
+
+	return ret;
+}
+
+int connection_process(
+	struct connection *conn,
+	struct sk_buff *skb
+)
+{
+	int ret = 0;
+	do {
+		if (mutex_lock_interruptible(&(conn->data_lock))) {
+			MCDRV_DBG_ERROR("Interrupted getting data semaphore!");
+			ret = -1;
+			break;
+		}
+
+		kfree_skb(conn->skb);
+
+		/* Get a reference to the incoming skb */
+		conn->skb = skb_get(skb);
+		if (conn->skb) {
+			conn->data_msg = nlmsg_hdr(conn->skb);
+			conn->data_len = NLMSG_PAYLOAD(conn->data_msg, 0);
+			conn->data_start = NLMSG_DATA(conn->data_msg);
+			up(&(conn->data_available_sem));
+		}
+		mutex_unlock(&(conn->data_lock));
+		ret = 0;
+	} while (0);
+	return ret;
+}
+/** @} */
diff --git a/drivers/gud/mobicore_kernelapi/connection.h b/drivers/gud/mobicore_kernelapi/connection.h
new file mode 100644
index 0000000..0b468e6
--- /dev/null
+++ b/drivers/gud/mobicore_kernelapi/connection.h
@@ -0,0 +1,122 @@
+/** @addtogroup MCD_MCDIMPL_DAEMON_SRV
+ * @{
+ * @file
+ *
+ * Connection data.
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009 - 2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef CONNECTION_H_
+#define CONNECTION_H_
+
+#include <linux/semaphore.h>
+
+#include <stddef.h>
+#include <stdbool.h>
+
+#define MAX_PAYLOAD_SIZE 128
+
+struct connection {
+	struct sock *socket_descriptor; /**< Netlink socket */
+	uint32_t sequence_magic; /**< Unique magic used to match requests and answers */
+
+	struct nlmsghdr *data_msg;
+	uint32_t data_len; /**< How much connection data is left */
+	void *data_start; /**< Start pointer of remaining data */
+	struct sk_buff *skb;
+
+	struct mutex data_lock; /**< Data protection lock */
+	struct semaphore data_available_sem; /**< Signalled when data arrives */
+
+	pid_t self_pid; /**< PID used for the local end of the connection */
+	pid_t peer_pid; /**< Remote PID for connection */
+
+	struct list_head list; /**< The list param for using the kernel lists*/
+};
+
+struct connection *connection_new(
+	void
+);
+
+struct connection *connection_create(
+	int		  socket_descriptor,
+	pid_t		dest
+);
+
+void connection_cleanup(
+	struct connection *conn
+);
+
+/**
+  * Connect to the destination.
+  *
+  * @param conn	Connection object.
+  * @param dest	Destination PID.
+  * @return true on success.
+  */
+bool connection_connect(
+	struct connection *conn,
+	pid_t		dest
+);
+
+
+/**
+  * Read bytes from the connection.
+  *
+  * @param buffer	Pointer to destination buffer.
+  * @param len	   Number of bytes to read.
+  * @return Number of bytes read.
+  */
+size_t connection_read_datablock(
+	struct connection *conn,
+	void		 *buffer,
+	uint32_t	 len
+);
+/**
+  * Read bytes from the connection.
+  *
+  * @param buffer	Pointer to destination buffer.
+  * @param len	   Number of bytes to read.
+  * @param timeout   Timeout in milliseconds
+  * @return Number of bytes read.
+  * @return -1 if select() failed (returned -1)
+  * @return -2 if no data available, i.e. timeout
+  */
+size_t connection_read_data(
+	struct connection *conn,
+	void		 *buffer,
+	uint32_t	 len,
+	int32_t	  timeout
+);
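+
+/**
+  * Illustrative usage sketch (not part of the API): reading one notification
+  * with a 100 ms timeout.  As in clientlib.c, the size_t return value is kept
+  * in an ssize_t so the -1/-2 error codes can be detected; "conn" and the
+  * handle_*() helpers are placeholders for this example only, and
+  * struct notification comes from mcinq.h.
+  *
+  *	struct notification msg;
+  *	ssize_t n = connection_read_data(conn, &msg, sizeof(msg), 100);
+  *
+  *	if (n == -2)
+  *		handle_timeout();
+  *	else if (n == sizeof(msg))
+  *		handle_notification(&msg);
+  */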
+
+/**
+  * Write bytes to the connection.
+  *
+  * @param buffer	Pointer to source buffer.
+  * @param len		Number of bytes to write.
+  * @return Number of bytes written.
+  */
+size_t connection_write_data(
+	struct connection *conn,
+	void		 *buffer,
+	uint32_t	  len
+);
+
+/**
+ * Process an incoming netlink message for this connection.
+ *
+ * @param conn	Connection object.
+ * @param skb	Socket buffer carrying the incoming message.
+ * @return 0 on success, -1 if the connection data lock could not be taken.
+ */
+int connection_process(
+	struct connection *conn,
+	struct sk_buff *skb
+);
+
+#endif /* CONNECTION_H_ */
+
+/** @} */
diff --git a/drivers/gud/mobicore_kernelapi/device.c b/drivers/gud/mobicore_kernelapi/device.c
new file mode 100644
index 0000000..dbeee6a
--- /dev/null
+++ b/drivers/gud/mobicore_kernelapi/device.c
@@ -0,0 +1,257 @@
+/** @addtogroup MCD_IMPL_LIB
+ * @{
+ * @file
+ *
+ * Client library device management.
+ *
+ * Device and Trustlet Session management Functions.
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009 - 2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "mc_kernel_api.h"
+#include "public/mobicore_driver_api.h"
+
+#include "device.h"
+#include "common.h"
+
+/*----------------------------------------------------------------------------*/
+struct wsm *wsm_create(
+	void	*virt_addr,
+	uint32_t  len,
+	uint32_t  handle,
+	void	*phys_addr /* = NULL; this may be unknown, so it can be omitted. */
+	)
+{
+	struct wsm *wsm = kzalloc(sizeof(struct wsm), GFP_KERNEL);
+	wsm->virt_addr = virt_addr;
+	wsm->len = len;
+	wsm->handle = handle;
+	wsm->phys_addr = phys_addr;
+	return wsm;
+}
+
+
+/*----------------------------------------------------------------------------*/
+struct mcore_device_t *mcore_device_create(
+	uint32_t  device_id,
+	struct connection  *connection
+) {
+	struct mcore_device_t *dev =
+			kzalloc(sizeof(struct mcore_device_t), GFP_KERNEL);
+	dev->device_id = device_id;
+	dev->connection = connection;
+
+	INIT_LIST_HEAD(&dev->session_vector);
+	INIT_LIST_HEAD(&dev->wsm_l2_vector);
+
+	return dev;
+}
+
+
+/*----------------------------------------------------------------------------*/
+void mcore_device_cleanup(
+	struct mcore_device_t *dev
+) {
+	struct session *tmp;
+	struct wsm *wsm;
+	struct list_head *pos, *q;
+
+	/* Delete all session objects. Usually this should not be needed
+	 * as closeDevice() requires that all sessions are closed beforehand. */
+	list_for_each_safe(pos, q, &dev->session_vector) {
+		tmp = list_entry(pos, struct session, list);
+		list_del(pos);
+		session_cleanup(tmp);
+	}
+
+	/* Free all allocated WSM descriptors */
+	list_for_each_safe(pos, q, &dev->wsm_l2_vector) {
+		wsm = list_entry(pos, struct wsm, list);
+		/* mcKMod_free(dev->instance, wsm->handle); */
+		list_del(pos);
+		kfree(wsm);
+	}
+	connection_cleanup(dev->connection);
+
+	mcore_device_close(dev);
+	kfree(dev);
+}
+
+
+/*----------------------------------------------------------------------------*/
+bool mcore_device_open(
+	struct mcore_device_t   *dev,
+	const char *deviceName
+) {
+	dev->instance = mobicore_open();
+	return (dev->instance != NULL);
+}
+
+
+/*----------------------------------------------------------------------------*/
+void mcore_device_close(
+	struct mcore_device_t *dev
+) {
+	mobicore_release(dev->instance);
+}
+
+
+/*----------------------------------------------------------------------------*/
+bool mcore_device_has_sessions(
+	struct mcore_device_t *dev
+) {
+	return !list_empty(&dev->session_vector);
+}
+
+
+/*----------------------------------------------------------------------------*/
+bool mcore_device_create_new_session(
+	struct mcore_device_t	*dev,
+	uint32_t		session_id,
+	struct connection	*connection
+) {
+	/* Check if session_id already exists */
+	if (mcore_device_resolve_session_id(dev, session_id)) {
+		MCDRV_DBG_ERROR(" session %u already exists", session_id);
+		return false;
+	}
+	struct session *session = session_create(session_id, dev->instance,
+						connection);
+	list_add_tail(&(session->list), &(dev->session_vector));
+	return true;
+}
+
+
+/*----------------------------------------------------------------------------*/
+bool mcore_device_remove_session(
+	struct mcore_device_t *dev,
+	uint32_t session_id
+) {
+	bool ret = false;
+	struct session *tmp;
+	struct list_head *pos, *q;
+
+	list_for_each_safe(pos, q, &dev->session_vector) {
+		tmp = list_entry(pos, struct session, list);
+		if (tmp->session_id == session_id) {
+			list_del(pos);
+			session_cleanup(tmp);
+			ret = true;
+			break;
+		}
+	}
+	return ret;
+}
+
+
+/*----------------------------------------------------------------------------*/
+struct session *mcore_device_resolve_session_id(
+	struct mcore_device_t *dev,
+	uint32_t session_id
+) {
+	struct session  *ret = NULL;
+	struct session *tmp;
+	struct list_head *pos;
+
+
+	/* Get session for session_id */
+	list_for_each(pos, &dev->session_vector) {
+		tmp = list_entry(pos, struct session, list);
+		if (tmp->session_id == session_id) {
+			ret = tmp;
+			break;
+		}
+	}
+	return ret;
+}
+
+
+/*----------------------------------------------------------------------------*/
+struct wsm *mcore_device_allocate_contiguous_wsm(
+	struct mcore_device_t *dev,
+	uint32_t len
+) {
+	struct wsm *wsm = NULL;
+	do {
+		if (len == 0)
+			break;
+
+		/* Allocate shared memory */
+		void	*virt_addr;
+		uint32_t  handle;
+		void	*phys_addr;
+		int ret = mobicore_allocate_wsm(dev->instance,
+						len,
+						&handle,
+						&virt_addr,
+						&phys_addr);
+		if (ret != 0)
+			break;
+
+		/* Register (vaddr,paddr) with device */
+		wsm = wsm_create(virt_addr, len, handle, phys_addr);
+
+		list_add_tail(&(wsm->list), &(dev->wsm_l2_vector));
+
+	} while (0);
+
+	/* Return pointer to the allocated memory */
+	return wsm;
+}
+
+
+/*----------------------------------------------------------------------------*/
+bool mcore_device_free_contiguous_wsm(
+	struct mcore_device_t  *dev,
+	struct wsm   *wsm
+) {
+	bool ret = false;
+	struct wsm *tmp;
+	struct list_head *pos;
+
+	list_for_each(pos, &dev->wsm_l2_vector) {
+		tmp = list_entry(pos, struct wsm, list);
+		if (tmp == wsm) {
+			ret = true;
+			break;
+		}
+	}
+
+	if (ret) {
+		MCDRV_DBG_VERBOSE("freeWsm virt_addr=0x%p, handle=%d",
+				wsm->virt_addr, wsm->handle);
+
+		/* ignore return code */
+		mobicore_free(dev->instance, wsm->handle);
+
+		list_del(pos);
+		kfree(wsm);
+	}
+	return ret;
+}
+
+
+/*----------------------------------------------------------------------------*/
+struct wsm *mcore_device_find_contiguous_wsm(
+	struct mcore_device_t *dev,
+	void   *virt_addr
+) {
+	struct wsm *wsm;
+	struct list_head *pos;
+
+	list_for_each(pos, &dev->wsm_l2_vector) {
+		wsm = list_entry(pos, struct wsm, list);
+		if (virt_addr == wsm->virt_addr)
+			return wsm;
+	}
+
+	return NULL;
+}
+
+/** @} */
diff --git a/drivers/gud/mobicore_kernelapi/device.h b/drivers/gud/mobicore_kernelapi/device.h
new file mode 100644
index 0000000..f40d993
--- /dev/null
+++ b/drivers/gud/mobicore_kernelapi/device.h
@@ -0,0 +1,139 @@
+/** @addtogroup MCD_IMPL_LIB
+ * @{
+ * @file
+ *
+ * Client library device management.
+ *
+ * Device and Trustlet Session management Functions.
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009 - 2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef DEVICE_H_
+#define DEVICE_H_
+
+#include <linux/list.h>
+
+#include "connection.h"
+#include "session.h"
+#include "wsm.h"
+
+
+struct mcore_device_t {
+	struct list_head session_vector; /**< MobiCore Trustlet session
+				associated with the device */
+	struct list_head	 wsm_l2_vector; /**< WSM L2 Table  */
+
+	uint32_t		device_id; /**< Device identifier */
+	struct connection	*connection; /**< The device connection */
+	struct mc_instance  *instance; /**< MobiCore Driver instance */
+
+	struct list_head list; /**< The list param for using the kernel lists*/
+};
+
+struct mcore_device_t *mcore_device_create(
+	uint32_t	  device_id,
+	struct connection  *connection
+);
+
+void mcore_device_cleanup(
+	struct mcore_device_t *dev
+);
+
+/**
+  * Open the device.
+  * @param deviceName Name of the kernel module's device file.
+  * @return true if the device has been opened successfully.
+  */
+bool mcore_device_open(
+	struct mcore_device_t   *dev,
+	const char *deviceName
+);
+
+/**
+  * Closes the device.
+  */
+void mcore_device_close(
+	struct mcore_device_t *dev
+);
+
+/**
+  * Check if the device has open sessions.
+  * @return true if the device has one or more open sessions.
+  */
+bool mcore_device_has_sessions(
+	struct mcore_device_t *dev
+);
+
+/**
+  * Add a session to the device.
+  * @param session_id session ID
+  * @param connection session connection
+  */
+bool mcore_device_create_new_session(
+	struct mcore_device_t	  *dev,
+	uint32_t	session_id,
+	struct connection  *connection
+);
+
+/**
+  * Remove the specified session from the device.
+  * The session object will be destroyed and all resources associated with it
+  * will be freed.
+  *
+  * @param session_id Session ID of the session to remove.
+  * @return true if a session has been found and removed.
+  */
+bool mcore_device_remove_session(
+	struct mcore_device_t *dev,
+	uint32_t session_id
+);
+
+/**
+  * Get a session object for a given session ID.
+  * @param session_id Identifier of a previously opened session.
+  * @return Session object if available or NULL if no session has been found.
+  */
+struct session *mcore_device_resolve_session_id(
+	struct mcore_device_t *dev,
+	uint32_t session_id
+);
+
+/**
+  * Allocate a block of contiguous WSM.
+  * @param len Length of the block to allocate, in bytes.
+  * @return The WSM object describing the allocated memory or NULL if no
+  * memory is available.
+  */
+struct wsm *mcore_device_allocate_contiguous_wsm(
+	struct mcore_device_t *dev,
+	uint32_t len
+);
+
+/**
+  * Free a block of contiguous WSM that was allocated for this device.
+  * @param wsm The WSM object to free.
+  * @return true if the WSM object was found and freed.
+  */
+bool mcore_device_free_contiguous_wsm(
+	struct mcore_device_t  *dev,
+	struct wsm *wsm
+);
+
+/**
+  * Get a WSM object for a given virtual address.
+  * @param vaddr The virtual address which has been allocated with mc_malloc_wsm()
+  * in advance.
+  * @return the WSM object or NULL if no address has been found.
+  */
+struct wsm *mcore_device_find_contiguous_wsm(
+	struct mcore_device_t *dev,
+	void   *virt_addr
+);
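+
+/**
+  * Illustrative usage sketch (not part of the API): allocating, looking up
+  * and freeing a contiguous WSM block.  "dev" is assumed to be a device
+  * obtained from mcore_device_create() and opened with mcore_device_open().
+  *
+  *	struct wsm *wsm = mcore_device_allocate_contiguous_wsm(dev, 4096);
+  *
+  *	if (wsm != NULL &&
+  *	    mcore_device_find_contiguous_wsm(dev, wsm->virt_addr) == wsm)
+  *		mcore_device_free_contiguous_wsm(dev, wsm);
+  */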
+
+#endif /* DEVICE_H_ */
+
+/** @} */
diff --git a/drivers/gud/mobicore_kernelapi/include/mcinq.h b/drivers/gud/mobicore_kernelapi/include/mcinq.h
new file mode 100644
index 0000000..3cb82be
--- /dev/null
+++ b/drivers/gud/mobicore_kernelapi/include/mcinq.h
@@ -0,0 +1,125 @@
+/** @addtogroup NQ
+ * @{
+ * Notifications inform the MobiCore runtime environment that information is
+ * pending in a WSM buffer.
+ * The Trustlet Connector (TLC) and the corresponding trustlet also utilize
+ * this buffer to notify each other about new data within the
+ * Trustlet Connector Interface (TCI).
+ *
+ * The buffer is set up as a queue, which means that more than one
+ * notification can be written to the buffer before the switch to the other
+ * world is performed. Each side therefore facilitates an incoming and an
+ * outgoing queue for communication with the other side.
+ *
+ * Notifications hold the session ID, which is used to reference the
+ * communication partner in the other world.
+ * So if, e.g., the TLC in the normal world wants to notify its trustlet
+ * about new data in the TCI buffer, it writes a notification carrying the
+ * corresponding session ID into its outgoing queue (see the illustrative
+ * sketch after the queue structures below).
+ *
+ * @file
+ * Notification queue declarations.
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *	products derived from this software without specific prior
+ *	written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef NQ_H_
+#define NQ_H_
+
+/** \name NQ Size Defines
+ * Minimum and maximum count of elements in the notification queue.
+ * @{ */
+#define MIN_NQ_ELEM 1   /**< Minimum notification queue elements. */
+#define MAX_NQ_ELEM 64 /**< Maximum notification queue elements. */
+/** @} */
+
+/** \name NQ Length Defines
+ * Minimum and maximum notification queue length.
+ * @{ */
+/**< Minimum notification length (in bytes). */
+#define MIN_NQ_LEN (MIN_NQ_ELEM * sizeof(notification))
+/**< Maximum notification length (in bytes). */
+#define MAX_NQ_LEN (MAX_NQ_ELEM * sizeof(notification))
+/** @} */
+
+/** \name Session ID Defines
+ * Standard Session IDs.
+ * @{ */
+/**< MCP session ID is used when directly communicating with the MobiCore
+ * (e.g. for starting and stopping of trustlets). */
+#define SID_MCP	   0
+/**< Invalid session id is returned in case of an error. */
+#define SID_INVALID   0xffffffff
+/** @} */
+
+/** Notification data structure. */
+struct notification {
+	uint32_t session_id; /**< Session ID. */
+	int32_t payload;	/**< Additional notification information. */
+};
+
+/** Notification payload codes.
+ * 0 indicates a plain notification,
+ * a positive value is a termination reason from the task,
+ * a negative value is a termination reason from MobiCore.
+ * Possible negative values are given below.
+ */
+enum notification_payload {
+	/**< task terminated, but exit code is invalid */
+	ERR_INVALID_EXIT_CODE   = -1,
+	/**< task terminated due to session end, no exit code available */
+	ERR_SESSION_CLOSE	   = -2,
+	/**< task terminated due to invalid operation */
+	ERR_INVALID_OPERATION   = -3,
+	/**< session ID is unknown */
+	ERR_INVALID_SID		 = -4,
+	/**<  session is not active */
+	ERR_SID_NOT_ACTIVE	  = -5
+};
+
+/** Declaration of the notification queue header.
+ * layout as specified in the data structure specification.
+ */
+struct notification_queue_header {
+	uint32_t write_cnt;  /**< Write counter. */
+	uint32_t read_cnt;   /**< Read counter. */
+	uint32_t queue_size; /**< Queue size. */
+};
+
+/** Queue struct which defines a queue object.
+ * The queue struct is accessed by the queue<operation> type of
+ * function. elementCnt must be a power of two and the exponent must
+ * be smaller than the bit width of uint32_t (i.e. 32).
+ */
+struct notification_queue {
+	/**< Queue header. */
+	struct notification_queue_header hdr;
+	/**< Notification elements. */
+	struct notification notification[MIN_NQ_ELEM];
+};
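+
+/** Illustrative sketch (not part of this header): appending a notification to
+ * an outgoing queue.  The power-of-two ring indexing via write_cnt/read_cnt
+ * and the element-count meaning of queue_size are assumptions derived from
+ * the comments above, not a guaranteed part of the data structure
+ * specification; the function returns -1 when the queue is full.
+ *
+ *	static int nq_push(struct notification_queue *q,
+ *			   uint32_t session_id, int32_t payload)
+ *	{
+ *		uint32_t idx;
+ *
+ *		if (q->hdr.write_cnt - q->hdr.read_cnt >= q->hdr.queue_size)
+ *			return -1;
+ *		idx = q->hdr.write_cnt & (q->hdr.queue_size - 1);
+ *		q->notification[idx].session_id = session_id;
+ *		q->notification[idx].payload = payload;
+ *		q->hdr.write_cnt++;
+ *		return 0;
+ *	}
+ */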
+
+#endif /* NQ_H_ */
+
+/** @} */
diff --git a/drivers/gud/mobicore_kernelapi/include/mcuuid.h b/drivers/gud/mobicore_kernelapi/include/mcuuid.h
new file mode 100644
index 0000000..b72acb8
--- /dev/null
+++ b/drivers/gud/mobicore_kernelapi/include/mcuuid.h
@@ -0,0 +1,74 @@
+/**
+ * @addtogroup MC_UUID mcUuid - Universally Unique Identifier.
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2011-2012 -->
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *	products derived from this software without specific prior
+ *	written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * @ingroup  MC_DATA_TYPES
+ * @{
+ */
+
+#ifndef MC_UUID_H_
+#define MC_UUID_H_
+
+#define UUID_TYPE
+
+/** Universally Unique Identifier (UUID) according to ISO/IEC 11578. */
+struct mc_uuid_t {
+	uint8_t value[16]; /**< Value of the UUID. */
+};
+
+/** UUID value used as free marker in service provider containers. */
+#define MC_UUID_FREE_DEFINE \
+	{ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, \
+	  0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }
+
+static const struct mc_uuid_t MC_UUID_FREE = {
+	MC_UUID_FREE_DEFINE
+};
+
+/** Reserved UUID. */
+#define MC_UUID_RESERVED_DEFINE \
+	{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+	  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
+
+static const struct mc_uuid_t MC_UUID_RESERVED = {
+	MC_UUID_RESERVED_DEFINE
+};
+
+/** UUID for system applications. */
+#define MC_UUID_SYSTEM_DEFINE \
+	{ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, \
+	  0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE }
+
+static const struct mc_uuid_t MC_UUID_SYSTEM = {
+	MC_UUID_SYSTEM_DEFINE
+};
+
+#endif /* MC_UUID_H_ */
+
+/** @} */
+
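A short sketch of how the constants above are typically used, for instance to
test whether a service provider container slot is free; uuid_is_free() is an
illustrative helper, not part of this header.

#include <linux/string.h>
#include <linux/types.h>
#include "mcuuid.h"

static bool uuid_is_free(const struct mc_uuid_t *uuid)
{
	/* MC_UUID_FREE is all 0xFF, so a byte-wise compare is sufficient */
	return memcmp(uuid->value, MC_UUID_FREE.value,
		      sizeof(uuid->value)) == 0;
}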
diff --git a/drivers/gud/mobicore_kernelapi/main.c b/drivers/gud/mobicore_kernelapi/main.c
new file mode 100644
index 0000000..62997f7
--- /dev/null
+++ b/drivers/gud/mobicore_kernelapi/main.c
@@ -0,0 +1,181 @@
+/**
+ * MobiCore KernelApi module
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/netlink.h>
+#include <linux/kthread.h>
+#include <net/sock.h>
+
+#include <linux/list.h>
+
+#include "connection.h"
+#include "common.h"
+
+#define MC_DAEMON_NETLINK  17
+
+struct mc_kernelapi_ctx {
+	struct sock *sk;
+	struct list_head peers;
+	atomic_t counter;
+};
+
+struct mc_kernelapi_ctx *mod_ctx; /* = NULL; */
+
+/*----------------------------------------------------------------------------*/
+/* get a unique ID */
+unsigned int mcapi_unique_id(
+	void
+)
+{
+	return (unsigned int)atomic_inc_return(
+		&(mod_ctx->counter));
+}
+
+
+/*----------------------------------------------------------------------------*/
+static struct connection *mcapi_find_connection(
+	uint32_t seq
+)
+{
+	struct connection *tmp;
+	struct list_head *pos;
+
+	/* Get session for session_id */
+	list_for_each(pos, &mod_ctx->peers) {
+		tmp = list_entry(pos, struct connection, list);
+		if (tmp->sequence_magic == seq)
+			return tmp;
+	}
+
+	return NULL;
+}
+
+/*----------------------------------------------------------------------------*/
+void mcapi_insert_connection(
+	struct connection *connection
+)
+{
+	list_add_tail(&(connection->list), &(mod_ctx->peers));
+	connection->socket_descriptor = mod_ctx->sk;
+}
+
+void mcapi_remove_connection(
+	uint32_t seq
+)
+{
+	struct connection *tmp;
+	struct list_head *pos, *q;
+
+	/* Remove the connection matching the given sequence number. There
+	   should be at most one match, so we stop at the first hit. */
+	list_for_each_safe(pos, q, &mod_ctx->peers) {
+		tmp = list_entry(pos, struct connection, list);
+		if (tmp->sequence_magic == seq) {
+			list_del(pos);
+			break;
+		}
+	}
+}
+
+/*----------------------------------------------------------------------------*/
+static int mcapi_process(
+	struct sk_buff *skb,
+	struct nlmsghdr *nlh
+)
+{
+	struct connection *c;
+	int length;
+	int seq;
+	pid_t pid;
+	int ret;
+
+	pid = nlh->nlmsg_pid;
+	length = nlh->nlmsg_len;
+	seq = nlh->nlmsg_seq;
+	MCDRV_DBG_VERBOSE("nlmsg len %d type %d pid 0x%X seq %d\n",
+		   length, nlh->nlmsg_type, pid, seq);
+	do {
+		c = mcapi_find_connection(seq);
+		if (!c) {
+			MCDRV_ERROR("Invalid incomming connection - seq=%u!",
+				seq);
+			ret = -1;
+			break;
+		}
+
+		/* Pass the buffer to the appropriate connection */
+		connection_process(c, skb);
+
+		ret = 0;
+	} while (false);
+	return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+static void mcapi_callback(
+	struct sk_buff *skb
+)
+{
+	struct nlmsghdr *nlh = nlmsg_hdr(skb);
+	int len = skb->len;
+	int err = 0;
+
+	while (NLMSG_OK(nlh, len)) {
+		err = mcapi_process(skb, nlh);
+
+		/* if err or if this message says it wants a response */
+		if (err || (nlh->nlmsg_flags & NLM_F_ACK))
+			netlink_ack(skb, nlh, err);
+
+		nlh = NLMSG_NEXT(nlh, len);
+	}
+}
+
+/*----------------------------------------------------------------------------*/
+static int __init mcapi_init(void)
+{
+	printk(KERN_INFO "Mobicore API module initialized!\n");
+
+	mod_ctx = kzalloc(sizeof(struct mc_kernelapi_ctx), GFP_KERNEL);
+	if (mod_ctx == NULL)
+		return -ENOMEM;
+
+	/* create the netlink socket used by the daemon interface */
+	mod_ctx->sk = netlink_kernel_create(&init_net, MC_DAEMON_NETLINK, 0,
+					mcapi_callback, NULL, THIS_MODULE);
+
+	if (!mod_ctx->sk) {
+		MCDRV_ERROR("registering the receive handler failed");
+		kfree(mod_ctx);
+		mod_ctx = NULL;
+		return -EFAULT;
+	}
+
+	INIT_LIST_HEAD(&mod_ctx->peers);
+	return 0;
+}
+
+static void __exit mcapi_exit(void)
+{
+	printk(KERN_INFO "Unloading Mobicore API module.\n");
+
+	if (mod_ctx->sk != NULL) {
+		netlink_kernel_release(mod_ctx->sk);
+		mod_ctx->sk = NULL;
+	}
+	kfree(mod_ctx);
+	mod_ctx = NULL;
+}
+
+module_init(mcapi_init);
+module_exit(mcapi_exit);
+
+MODULE_AUTHOR("Giesecke & Devrient GmbH");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MobiCore API driver");
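The module above registers netlink protocol MC_DAEMON_NETLINK (17) and
dispatches each message by its nlmsg_seq to a connection registered via
mcapi_insert_connection(). For context, a user-space daemon would reach this
socket roughly as sketched below; the actual message layout is defined by the
daemon protocol headers, so this is only an illustration.

#include <sys/socket.h>
#include <linux/netlink.h>
#include <string.h>
#include <unistd.h>

static int mc_daemon_open_netlink(void)
{
	struct sockaddr_nl addr;
	int fd = socket(AF_NETLINK, SOCK_DGRAM, 17 /* MC_DAEMON_NETLINK */);

	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	addr.nl_pid = getpid();	/* unicast address of this process */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}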
diff --git a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_api.h b/drivers/gud/mobicore_kernelapi/public/mobicore_driver_api.h
new file mode 100644
index 0000000..ccfb2e5
--- /dev/null
+++ b/drivers/gud/mobicore_kernelapi/public/mobicore_driver_api.h
@@ -0,0 +1,483 @@
+/**
+ * @defgroup MCD_API					MobiCore Driver API
+ * @addtogroup MCD_API
+ * @{
+ *
+ * @if DOXYGEN_MCDRV_API
+ *   @mainpage MobiCore Driver API.
+ * @endif
+ *
+ * MobiCore Driver API.
+ *
+ * The MobiCore (MC) Driver API provides access functions to the MobiCore
+ * runtime environment and the contained Trustlets.
+ *
+ * @image html DoxyOverviewDrvApi500x.png
+ * @image latex DoxyOverviewDrvApi500x.png "MobiCore Overview" width=12cm
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009 - 2012 -->
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *	products derived from this software without specific prior
+ *	written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef MCDRIVER_H_
+#define MCDRIVER_H_
+
+#define __MC_CLIENT_LIB_API
+
+#include "mcuuid.h"
+
+/**
+ * Return values of MobiCore driver functions.
+ */
+enum mc_result {
+	/**< Function call succeeded. */
+	MC_DRV_OK		= 0,
+	/**< No notification available. */
+	MC_DRV_NO_NOTIFICATION	= 1,
+	/**< Error during notification on communication level. */
+	MC_DRV_ERR_NOTIFICATION	= 2,
+	/**< Function not implemented. */
+	MC_DRV_ERR_NOT_IMPLEMENTED = 3,
+	/**< No more resources available. */
+	MC_DRV_ERR_OUT_OF_RESOURCES = 4,
+	/**< Driver initialization failed. */
+	MC_DRV_ERR_INIT = 5,
+	/**< Unknown error. */
+	MC_DRV_ERR_UNKNOWN	= 6,
+	/**< The specified device is unknown. */
+	MC_DRV_ERR_UNKNOWN_DEVICE = 7,
+	/**< The specified session is unknown.*/
+	MC_DRV_ERR_UNKNOWN_SESSION = 8,
+	/**< The specified operation is not allowed. */
+	MC_DRV_ERR_INVALID_OPERATION = 9,
+	/**< The response header from the MC is invalid. */
+	MC_DRV_ERR_INVALID_RESPONSE = 10,
+	/**< Function call timed out. */
+	MC_DRV_ERR_TIMEOUT = 11,
+	/**< Cannot allocate additional memory. */
+	MC_DRV_ERR_NO_FREE_MEMORY = 12,
+	/**< Free memory failed. */
+	MC_DRV_ERR_FREE_MEMORY_FAILED = 13,
+	/**< Still some open sessions pending. */
+	MC_DRV_ERR_SESSION_PENDING = 14,
+	/**< MC daemon not reachable */
+	MC_DRV_ERR_DAEMON_UNREACHABLE = 15,
+	/**< The device file of the kernel module could not be opened. */
+	MC_DRV_ERR_INVALID_DEVICE_FILE = 16,
+	/**< Invalid parameter. */
+	MC_DRV_ERR_INVALID_PARAMETER	= 17,
+	/**< Unspecified error from the kernel module. */
+	MC_DRV_ERR_KERNEL_MODULE = 18,
+	/**< Error during mapping of additional bulk memory to session. */
+	MC_DRV_ERR_BULK_MAPPING = 19,
+	/**< Error during unmapping of additional bulk memory to session. */
+	MC_DRV_ERR_BULK_UNMAPPING = 20,
+	/**< Notification received, exit code available. */
+	MC_DRV_INFO_NOTIFICATION = 21,
+	/**< Set up of NWd connection failed. */
+	MC_DRV_ERR_NQ_FAILED = 22
+};
+
+
+/**
+ * Driver control command.
+ */
+enum mc_driver_ctrl {
+	MC_CTRL_GET_VERSION = 1 /**< Return the driver version */
+};
+
+
+/** Structure of Session Handle, includes the Session ID and the Device ID the
+ * Session belongs to.
+ * The session handle will be used for session-based MobiCore communication.
+ * It will be passed to calls which address a communication end point in the
+ * MobiCore environment.
+ */
+struct mc_session_handle {
+	uint32_t session_id; /**< MobiCore session ID */
+	uint32_t device_id; /**< Device ID the session belongs to */
+};
+
+/** Information structure about additional mapped Bulk buffer between the
+ * Trustlet Connector (Nwd) and the Trustlet (Swd). This structure is
+ * initialized from a Trustlet Connector by calling mc_map().
+ * In order to use the memory within a Trustlet the Trustlet Connector has to
+ * inform the Trustlet with the content of this structure via the TCI.
+ */
+struct mc_bulk_map {
+	/**< The virtual address of the Bulk buffer regarding the address space
+	 * of the Trustlet, already includes a possible offset! */
+	void *secure_virt_addr;
+	uint32_t secure_virt_len; /**< Length of the mapped Bulk buffer */
+};
+
+
+/**< The default device ID */
+#define MC_DEVICE_ID_DEFAULT	0
+/**< Wait infinite for a response of the MC. */
+#define MC_INFINITE_TIMEOUT	((int32_t)(-1))
+/**< Do not wait for a response of the MC. */
+#define MC_NO_TIMEOUT		0
+/**< TCI/DCI must not exceed 1MiB */
+#define MC_MAX_TCI_LEN		0x100000
+
+
+
+/** Open a new connection to a MobiCore device.
+ *
+ * mc_open_device() initializes all device specific resources required to
+ * communicate with a MobiCore instance located on the specified device in the
+ * system. If the device does not exist the function will return
+ * MC_DRV_ERR_UNKNOWN_DEVICE.
+ *
+ * @param [in] device_id Identifier for the MobiCore device to be used.
+ * MC_DEVICE_ID_DEFAULT refers to the default device.
+ *
+ * @return MC_DRV_OK if operation has been successfully completed.
+ * @return MC_DRV_ERR_INVALID_OPERATION if device already opened.
+ * @return MC_DRV_ERR_DAEMON_UNREACHABLE when problems with daemon occur.
+ * @return MC_DRV_ERR_UNKNOWN_DEVICE when device_id is unknown.
+ * @return MC_DRV_ERR_INVALID_DEVICE_FILE if kernel module under
+ * /dev/mobicore cannot be opened
+ *
+ * Uses a Mutex.
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_open_device(
+	uint32_t device_id
+);
+
+/** Close the connection to a MobiCore device.
+ * When closing a device, active sessions have to be closed beforehand.
+ * Resources associated with the device will be released.
+ * The device may be opened again after it has been closed.
+ *
+ * @param [in] device_id Identifier for the MobiCore device.
+ * MC_DEVICE_ID_DEFAULT refers to the default device.
+ *
+ * @return MC_DRV_OK if operation has been successfully completed.
+ * @return MC_DRV_ERR_UNKNOWN_DEVICE when device id is invalid.
+ * @return MC_DRV_ERR_SESSION_PENDING when a session is still open.
+ * @return MC_DRV_ERR_DAEMON_UNREACHABLE when problems with daemon occur.
+ *
+ * Uses a Mutex.
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_close_device(
+	uint32_t device_id
+);
+
+/** Open a new session to a Trustlet. The trustlet with the given UUID has
+ * to be available in the flash filesystem.
+ *
+ * Write MCP open message to buffer and notify MobiCore about the availability
+ * of a new command.
+ * Waits until MobiCore responds with the new session ID (stored in the MCP
+ * buffer).
+ *
+ * @param [in,out] session On success, the session data will be returned.
+ * Note that session.device_id has to be the device id of an opened device.
+ * @param [in] uuid UUID of the Trustlet to be opened.
+ * @param [in] tci TCI buffer for communicating with the trustlet.
+ * @param [in] tci_len Length of the TCI buffer. Maximum allowed value
+ * is MC_MAX_TCI_LEN.
+ *
+ * @return MC_DRV_OK if operation has been successfully completed.
+ * @return MC_DRV_ERR_INVALID_PARAMETER if session parameter is invalid.
+ * @return MC_DRV_ERR_UNKNOWN_DEVICE when device id is invalid.
+ * @return MC_DRV_ERR_DAEMON_UNREACHABLE when problems with daemon socket occur.
+ * @return MC_DRV_ERR_UNKNOWN_DEVICE when daemon returns an error.
+ *
+ * Uses a Mutex.
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_open_session(
+	struct mc_session_handle  *session,
+	const struct mc_uuid_t *uuid,
+	uint8_t *tci,
+	uint32_t tci_len
+);
+
+/** Close a Trustlet session.
+ *
+ * Closes the specified MobiCore session. The call will block until the
+ * session has been closed.
+ *
+ * @pre Device device_id has to be opened in advance.
+ *
+ * @param [in] session Session to be closed.
+ *
+ * @return MC_DRV_OK if operation has been successfully completed.
+ * @return MC_DRV_ERR_INVALID_PARAMETER if session parameter is invalid.
+ * @return MC_DRV_ERR_UNKNOWN_SESSION when session id is invalid.
+ * @return MC_DRV_ERR_UNKNOWN_DEVICE when device id of session is invalid.
+ * @return MC_DRV_ERR_DAEMON_UNREACHABLE when problems with daemon occur.
+ * @return MC_DRV_ERR_INVALID_DEVICE_FILE when daemon cannot open trustlet file.
+ *
+ * Uses a Mutex.
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_close_session(
+	struct mc_session_handle *session
+);
+
+/** Notify a session.
+ * Notifies the session end point about available message data.
+ * If the session parameter is correct, notify will always succeed.
+ * Corresponding errors can only be received by mc_wait_notification().
+ * @pre A session has to be opened in advance.
+ *
+ * @param session The session to be notified.
+ *
+ * @return MC_DRV_OK if operation has been successfully completed.
+ * @return MC_DRV_ERR_INVALID_PARAMETER if session parameter is invalid.
+ * @return MC_DRV_ERR_UNKNOWN_SESSION when session id is invalid.
+ * @return MC_DRV_ERR_UNKNOWN_DEVICE when device id of session is invalid.
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_notify(
+	struct mc_session_handle *session
+);
+
+/** Wait for a notification.
+ *
+ * Wait for a notification issued by the MobiCore for a specific session.
+ * The timeout parameter specifies the number of milliseconds the call will wait
+ * for a notification.
+ * If the caller passes 0 as the timeout value the call returns immediately.
+ * If the timeout value is below 0 the call blocks until a notification for the
+ * session has been received.
+ *
+ * @attention If the timeout is below 0, the call blocks:
+ * the caller has to trust the other side to send a notification to wake it up
+ * again.
+ *
+ * @param [in] session The session the notification should correspond to.
+ * @param [in] timeout Time in milliseconds to wait
+ * (MC_NO_TIMEOUT : direct return, > 0 : milliseconds,
+ * MC_INFINITE_TIMEOUT : wait infinitely)
+ *
+ * @return MC_DRV_OK if notification is available.
+ * @return MC_DRV_ERR_TIMEOUT if no notification arrived in time.
+ * @return MC_DRV_INFO_NOTIFICATION if a problem with the session was
+ * encountered. Get more details with mc_get_session_error_code().
+ * @return MC_DRV_ERR_NOTIFICATION if a problem with the socket occurred.
+ * @return MC_DRV_ERR_INVALID_PARAMETER if a parameter is invalid.
+ * @return MC_DRV_ERR_UNKNOWN_SESSION when session id is invalid.
+ * @return MC_DRV_ERR_UNKNOWN_DEVICE when device id of session is invalid.
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_wait_notification(
+	struct mc_session_handle  *session,
+	int32_t			timeout
+);
+
+/**
+ * Allocate a block of world shared memory (WSM).
+ * The MC driver allocates a contiguous block of memory which can be used as
+ * WSM.
+ * This implies that the allocated memory is aligned according to the
+ * alignment parameter.
+ * Always returns a buffer of size WSM_SIZE aligned to 4K.
+ *
+ * @param [in]  device_id The ID of an opened device to retrieve the WSM from.
+ * @param [in]  align The alignment (number of pages) of the memory block
+ * (e.g. 0x00000001 for 4kb).
+ * @param [in]  len	Length of the block in bytes.
+ * @param [out] wsm Virtual address of the world shared memory block.
+ * @param [in]  wsm_flags Platform specific flags describing the memory to
+ * be allocated.
+ *
+ * @attention: align and wsm_flags are currently ignored
+ *
+ * @return MC_DRV_OK if operation has been successfully completed.
+ * @return MC_DRV_ERR_INVALID_PARAMETER if a parameter is invalid.
+ * @return MC_DRV_ERR_UNKNOWN_DEVICE when device id is invalid.
+ * @return MC_DRV_ERR_NO_FREE_MEMORY if no more contiguous memory is available
+ * in this size or for this process.
+ *
+ * Uses a Mutex.
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_malloc_wsm(
+	uint32_t  device_id,
+	uint32_t  align,
+	uint32_t  len,
+	uint8_t   **wsm,
+	uint32_t  wsm_flags
+);
+
+/**
+ * Free a block of world shared memory (WSM).
+ * The MC driver will free a block of world shared memory (WSM) previously
+ * allocated with mc_malloc_wsm(). The caller has to ensure that the address
+ * handed over to the driver is a valid WSM address.
+ *
+ * @param [in] device_id The ID to which the given address belongs.
+ * @param [in] wsm Address of WSM block to be freed.
+ *
+ * @return MC_DRV_OK if operation has been successfully completed.
+ * @return MC_DRV_ERR_INVALID_PARAMETER if a parameter is invalid.
+ * @return MC_DRV_ERR_UNKNOWN_DEVICE when device id is invalid.
+ * @return MC_DRV_ERR_FREE_MEMORY_FAILED on failures.
+ *
+ * Uses a Mutex.
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_free_wsm(
+	uint32_t  device_id,
+	uint8_t   *wsm
+);
+
+/**
+ * Map additional bulk buffer between a Trustlet Connector (TLC) and
+ * the Trustlet (TL) for a session.
+ * Memory allocated in user space of the TLC can be mapped as additional
+ * communication channel (besides TCI) to the Trustlet. Limitations of the
+ * Trustlet memory structure apply: only 6 chunks can be mapped, with a maximum
+ * chunk size of 1 MiB each.
+ *
+ * @attention It is up to the application layer (TLC) to inform the Trustlet
+ * about the additional mapped bulk memory.
+ *
+ * @param [in] session Session handle with information of the device_id and
+ * the session_id. The
+ * given buffer is mapped to the session specified in the sessionHandle.
+ * @param [in] buf Virtual address of a memory portion (relative to TLC)
+ * to be shared with the Trustlet, already includes a possible offset!
+ * @param [in] len length of buffer block in bytes.
+ * @param [out] map_info Information structure about the mapped Bulk buffer
+ * between the TLC (Nwd) and
+ * the TL (Swd).
+ *
+ * @return MC_DRV_OK if operation has been successfully completed.
+ * @return MC_DRV_ERR_INVALID_PARAMETER if a parameter is invalid.
+ * @return MC_DRV_ERR_UNKNOWN_SESSION when session id is invalid.
+ * @return MC_DRV_ERR_UNKNOWN_DEVICE when device id of session is invalid.
+ * @return MC_DRV_ERR_DAEMON_UNREACHABLE when problems with daemon occur.
+ * @return MC_DRV_ERR_BULK_MAPPING when buf is already used as a bulk buffer or
+ * when registering the buffer failed.
+ *
+ * Uses a Mutex.
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_map(
+	struct mc_session_handle	*session,
+	void				*buf,
+	uint32_t			len,
+	struct mc_bulk_map		*map_info
+);
+
+/**
+ * Remove additional mapped bulk buffer between Trustlet Connector (TLC)
+ * and the Trustlet (TL) for a session.
+ *
+ * @attention The bulk buffer will immediately be unmapped from the session
+ * context.
+ * @attention The application layer (TLC) must inform the TL about unmapping
+ * of the additional bulk memory before calling mc_unmap!
+ *
+ * @param [in] session Session handle with information of the device_id and
+ * the session_id. The given buffer is unmapped from the session specified
+ * in the sessionHandle.
+ * @param [in] buf Virtual address of a memory portion (relative to TLC)
+ * shared with the TL, already includes a possible offset!
+ * @param [in] map_info Information structure about the mapped Bulk buffer
+ * between the TLC (Nwd) and
+ * the TL (Swd).
+ * @attention The clientlib currently ignores the len field in map_info.
+ *
+ * @return MC_DRV_OK if operation has been successfully completed.
+ * @return MC_DRV_ERR_INVALID_PARAMETER if a parameter is invalid.
+ * @return MC_DRV_ERR_UNKNOWN_SESSION when session id is invalid.
+ * @return MC_DRV_ERR_UNKNOWN_DEVICE when device id of session is invalid.
+ * @return MC_DRV_ERR_DAEMON_UNREACHABLE when problems with daemon occur.
+ * @return MC_DRV_ERR_BULK_UNMAPPING when buf was not registered earlier
+ * or when unregistering failed.
+ *
+ * Uses a Mutex.
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_unmap(
+	struct mc_session_handle	*session,
+	void				*buf,
+	struct mc_bulk_map		*map_info
+);
+
+
+/**
+ * @attention: Not implemented.
+ * Execute driver specific command.
+ * mc_driver_ctrl() can be used to execute driver specific commands.
+ * Besides the control command MC_CTRL_GET_VERSION, commands are implementation
+ * specific.
+ * Please refer to the corresponding specification of the driver manufacturer.
+ *
+ * @param [in] param Command ID of the command to be executed.
+ * @param [in, out] data  Command data and response depending on command.
+ * @param [in] len Length of the data block.
+ *
+ * @return MC_DRV_ERR_NOT_IMPLEMENTED.
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_driver_ctrl(
+	enum mc_driver_ctrl  param,
+	uint8_t		 *data,
+	uint32_t	len
+);
+
+/**
+ * @attention: Not implemented.
+ * Execute application management command.
+ * mc_manage() shall be used to exchange application management commands with
+ * the MobiCore.
+ * The MobiCore Application Management Protocol is described in [MCAMP].
+ *
+ * @param [in] device_id Identifier for the MobiCore device to be used.
+ * NULL refers to the default device.
+ * @param [in, out] data  Command data/response data depending on command.
+ * @param [in] len Length of the data block.
+ *
+ * @return MC_DRV_ERR_NOT_IMPLEMENTED.
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_manage(
+	uint32_t  device_id,
+	uint8_t   *data,
+	uint32_t  len
+);
+
+/**
+ * Get additional error information of the last error that occurred on a session.
+ * After the request the stored error code will be deleted.
+ *
+ * @param [in] session Session handle with information of the device_id and
+ * the session_id.
+ * @param [out] last_error >0 Trustlet has terminated itself with this value,
+ * <0 Trustlet is dead because of an error within the MobiCore
+ * (e.g. Kernel exception).
+ * See also MCI definition.
+ *
+ * @return MC_DRV_OK if operation has been successfully completed.
+ * @return MC_DRV_ERR_INVALID_PARAMETER if a parameter is invalid.
+ * @return MC_DRV_ERR_UNKNOWN_SESSION when session id is invalid.
+ * @return MC_DRV_ERR_UNKNOWN_DEVICE when device id of session is invalid.
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_get_session_error_code(
+	struct mc_session_handle  *session,
+	int32_t			*last_error
+);
+
+#endif /** MCDRIVER_H_ */
+
+/** @} */
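
A typical client of this API follows the sequence below: open the device,
allocate world shared memory for the TCI, open a session to the Trustlet,
notify it and wait for its answer, then tear everything down. This is a
sketch only; the TCI size and the reduced error handling are illustrative.

#include <linux/string.h>
#include "mobicore_driver_api.h"

static enum mc_result tlc_roundtrip(const struct mc_uuid_t *uuid)
{
	struct mc_session_handle session;
	uint8_t *tci = NULL;
	enum mc_result ret;

	ret = mc_open_device(MC_DEVICE_ID_DEFAULT);
	if (ret != MC_DRV_OK)
		return ret;

	/* one page of world shared memory used as TCI */
	ret = mc_malloc_wsm(MC_DEVICE_ID_DEFAULT, 0, 4096, &tci, 0);
	if (ret != MC_DRV_OK)
		goto close_device;

	memset(&session, 0, sizeof(session));
	session.device_id = MC_DEVICE_ID_DEFAULT;

	ret = mc_open_session(&session, uuid, tci, 4096);
	if (ret != MC_DRV_OK)
		goto free_wsm;

	/* ... place a command for the Trustlet in the TCI here ... */

	ret = mc_notify(&session);
	if (ret == MC_DRV_OK)
		ret = mc_wait_notification(&session, MC_INFINITE_TIMEOUT);

	mc_close_session(&session);
free_wsm:
	mc_free_wsm(MC_DEVICE_ID_DEFAULT, tci);
close_device:
	mc_close_device(MC_DEVICE_ID_DEFAULT);
	return ret;
}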
diff --git a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h b/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
new file mode 100644
index 0000000..9ff7989
--- /dev/null
+++ b/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
@@ -0,0 +1,289 @@
+/** @addtogroup MCD_MCDIMPL_DAEMON
+ * @{
+ * @file
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009 - 2012 -->
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *	products derived from this software without specific prior
+ *	written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef MCDAEMON_H_
+#define MCDAEMON_H_
+
+
+
+
+#include "mcuuid.h"
+
+enum mc_drv_cmd_t {
+	MC_DRV_CMD_PING			= 0,
+	MC_DRV_CMD_GET_INFO		= 1,
+	MC_DRV_CMD_OPEN_DEVICE		= 2,
+	MC_DRV_CMD_CLOSE_DEVICE		= 3,
+	MC_DRV_CMD_NQ_CONNECT		= 4,
+	MC_DRV_CMD_OPEN_SESSION		= 5,
+	MC_DRV_CMD_CLOSE_SESSION	= 6,
+	MC_DRV_CMD_NOTIFY		= 7,
+	MC_DRV_CMD_MAP_BULK_BUF		= 8,
+	MC_DRV_CMD_UNMAP_BULK_BUF	= 9
+};
+
+
+enum mc_drv_rsp_t {
+	MC_DRV_RSP_OK				= 0,
+	MC_DRV_RSP_FAILED			= 1,
+	MC_DRV_RSP_DEVICE_NOT_OPENED		= 2,
+	MC_DRV_RSP_DEVICE_ALREADY_OPENED	= 3,
+	MC_DRV_RSP_COMMAND_NOT_ALLOWED		= 4,
+	MC_DRV_INVALID_DEVICE_NAME		= 5,
+	MC_DRV_RSP_MAP_BULK_ERRO		= 6,
+	MC_DRV_RSP_TRUSTLET_NOT_FOUND		= 7,
+	MC_DRV_RSP_PAYLOAD_LENGTH_ERROR		= 8,
+};
+
+
+struct mc_drv_command_header_t {
+	uint32_t  command_id;
+};
+
+struct mc_drv_response_header_t {
+	uint32_t  response_id;
+};
+
+#define MC_DEVICE_ID_DEFAULT	0 /**< The default device ID */
+
+
+/*****************************************************************************/
+struct mc_drv_cmd_open_device_payload_t {
+	uint32_t  device_id;
+};
+
+struct mc_drv_cmd_open_device_t {
+	struct mc_drv_command_header_t header;
+	struct mc_drv_cmd_open_device_payload_t payload;
+};
+
+
+struct mc_drv_rsp_open_device_payload_t {
+	/* empty */
+};
+
+struct mc_drv_rsp_open_device_t {
+	struct mc_drv_response_header_t header;
+	struct mc_drv_rsp_open_device_payload_t payload;
+};
+
+
+/*****************************************************************************/
+struct mc_drv_cmd_close_device_t {
+	struct mc_drv_command_header_t header;
+	/* no payload here because close has none.
+	   If we used an empty struct, C++ would still give it a non-zero size.
+	   This would write too much into the socket at write(cmd, sizeof(cmd)) */
+};
+
+
+struct mc_drv_rsp_close_device_payload_t {
+	/* empty */
+};
+
+struct mc_drv_rsp_close_device_t {
+	struct mc_drv_response_header_t header;
+	struct mc_drv_rsp_close_device_payload_t payload;
+};
+
+
+/*****************************************************************************/
+struct mc_drv_cmd_open_session_payload_t {
+	uint32_t device_id;
+	struct mc_uuid_t uuid;
+	uint32_t tci;
+	uint32_t len;
+};
+
+struct mc_drv_cmd_open_session_t {
+	struct mc_drv_command_header_t header;
+	struct mc_drv_cmd_open_session_payload_t payload;
+};
+
+
+struct mc_drv_rsp_open_session_payload_t {
+	uint32_t device_id;
+	uint32_t session_id;
+	uint32_t device_session_id;
+	uint32_t mc_result;
+	uint32_t session_magic;
+};
+
+struct mc_drv_rsp_open_session_t {
+	struct mc_drv_response_header_t header;
+	struct mc_drv_rsp_open_session_payload_t  payload;
+};
+
+
+/*****************************************************************************/
+struct mc_drv_cmd_close_session_payload_t {
+	uint32_t  session_id;
+};
+
+struct mc_drv_cmd_close_session_t {
+	struct mc_drv_command_header_t header;
+	struct mc_drv_cmd_close_session_payload_t payload;
+};
+
+
+struct mc_drv_rsp_close_session_payload_t {
+	/* empty */
+};
+
+struct mc_drv_rsp_close_session_t {
+	struct mc_drv_response_header_t header;
+	struct mc_drv_rsp_close_session_payload_t payload;
+};
+
+
+/*****************************************************************************/
+struct mc_drv_cmd_notify_payload_t {
+	uint32_t session_id;
+};
+
+struct mc_drv_cmd_notify_t {
+	struct mc_drv_command_header_t header;
+	struct mc_drv_cmd_notify_payload_t payload;
+};
+
+
+struct mc_drv_rsp_notify_payload_t {
+	/* empty */
+};
+
+struct mc_drv_rsp_notify_t {
+	struct mc_drv_response_header_t header;
+	struct mc_drv_rsp_notify_payload_t  payload;
+};
+
+
+/*****************************************************************************/
+struct mc_drv_cmd_map_bulk_mem_payload_t {
+	uint32_t session_id;
+	uint32_t phys_addr_l2;
+	uint32_t offset_payload;
+	uint32_t len_bulk_mem;
+};
+
+struct mc_drv_cmd_map_bulk_mem_t {
+	struct mc_drv_command_header_t header;
+	struct mc_drv_cmd_map_bulk_mem_payload_t  payload;
+};
+
+
+struct mc_drv_rsp_map_bulk_mem_payload_t {
+	uint32_t session_id;
+	uint32_t secure_virtual_adr;
+	uint32_t mc_result;
+};
+
+struct mc_drv_rsp_map_bulk_mem_t {
+	struct mc_drv_response_header_t header;
+	struct mc_drv_rsp_map_bulk_mem_payload_t  payload;
+};
+
+
+/*****************************************************************************/
+struct mc_drv_cmd_unmap_bulk_mem_payload_t {
+	uint32_t session_id;
+	uint32_t secure_virtual_adr;
+	uint32_t len_bulk_mem;
+};
+
+struct mc_drv_cmd_unmap_bulk_mem_t {
+	struct mc_drv_command_header_t header;
+	struct mc_drv_cmd_unmap_bulk_mem_payload_t payload;
+};
+
+
+struct mc_drv_rsp_unmap_bulk_mem_payload_t {
+	uint32_t response_id;
+	uint32_t session_id;
+	uint32_t mc_result;
+};
+
+struct mc_drv_rsp_unmap_bulk_mem_t {
+	struct mc_drv_response_header_t header;
+	struct mc_drv_rsp_unmap_bulk_mem_payload_t payload;
+};
+
+
+/*****************************************************************************/
+struct mc_drv_cmd_nqconnect_payload_t {
+	uint32_t device_id;
+	uint32_t session_id;
+	uint32_t device_session_id;
+	uint32_t session_magic; /* Random data */
+};
+
+struct mc_drv_cmd_nqconnect_t {
+	struct mc_drv_command_header_t header;
+	struct mc_drv_cmd_nqconnect_payload_t  payload;
+};
+
+
+struct mc_drv_rsp_nqconnect_payload_t {
+	/* empty; */
+};
+
+struct mc_drv_rsp_nqconnect_t {
+	struct mc_drv_response_header_t header;
+	struct mc_drv_rsp_nqconnect_payload_t payload;
+};
+
+
+/*****************************************************************************/
+union mc_drv_command_t {
+	struct mc_drv_command_header_t		header;
+	struct mc_drv_cmd_open_device_t		mc_drv_cmd_open_device;
+	struct mc_drv_cmd_close_device_t	mc_drv_cmd_close_device;
+	struct mc_drv_cmd_open_session_t	mc_drv_cmd_open_session;
+	struct mc_drv_cmd_close_session_t	mc_drv_cmd_close_session;
+	struct mc_drv_cmd_nqconnect_t		mc_drv_cmd_nqconnect;
+	struct mc_drv_cmd_notify_t		mc_drv_cmd_notify;
+	struct mc_drv_cmd_map_bulk_mem_t	mc_drv_cmd_map_bulk_mem;
+	struct mc_drv_cmd_unmap_bulk_mem_t	mc_drv_cmd_unmap_bulk_mem;
+};
+
+union mc_drv_response_t {
+	struct mc_drv_response_header_t		header;
+	struct mc_drv_rsp_open_device_t		mc_drv_rsp_open_device;
+	struct mc_drv_rsp_close_device_t	mc_drv_rsp_close_device;
+	struct mc_drv_rsp_open_session_t	mc_drv_rsp_open_session;
+	struct mc_drv_rsp_close_session_t	mc_drv_rsp_close_session;
+	struct mc_drv_rsp_nqconnect_t		mc_drv_rsp_nqconnect;
+	struct mc_drv_rsp_notify_t		mc_drv_rsp_notify;
+	struct mc_drv_rsp_map_bulk_mem_t	mc_drv_rsp_map_bulk_mem;
+	struct mc_drv_rsp_unmap_bulk_mem_t	mc_drv_rsp_unmap_bulk_mem;
+};
+
+#endif /* MCDAEMON_H_ */
+
+/** @} */
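
The structures above are plain marshalling records; a command is built by
filling the header with the command ID and the payload with its arguments.
A sketch for MC_DRV_CMD_OPEN_SESSION follows; how the record is transported
to the daemon (the socket write) is outside this header.

#include <linux/types.h>
#include "mobicore_driver_cmd.h"

static void build_open_session_cmd(struct mc_drv_cmd_open_session_t *cmd,
				   uint32_t device_id,
				   const struct mc_uuid_t *uuid,
				   uint32_t tci, uint32_t len)
{
	cmd->header.command_id = MC_DRV_CMD_OPEN_SESSION;
	cmd->payload.device_id = device_id;
	/* tci and len describe the TCI buffer as expected by the daemon */
	cmd->payload.uuid = *uuid;
	cmd->payload.tci = tci;
	cmd->payload.len = len;
}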
diff --git a/drivers/gud/mobicore_kernelapi/session.c b/drivers/gud/mobicore_kernelapi/session.c
new file mode 100644
index 0000000..e62b4b3
--- /dev/null
+++ b/drivers/gud/mobicore_kernelapi/session.c
@@ -0,0 +1,202 @@
+/** @addtogroup MCD_IMPL_LIB
+ * @{
+ * @file
+ * <!-- Copyright Giesecke & Devrient GmbH 2009 - 2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "mc_kernel_api.h"
+#include "public/mobicore_driver_api.h"
+
+#include "session.h"
+
+/*****************************************************************************/
+struct bulk_buffer_descriptor *bulk_buffer_descriptor_create(
+	void		*virt_addr,
+	uint32_t	len,
+	uint32_t	handle,
+	void		*phys_addr_wsm_l2
+) {
+	struct bulk_buffer_descriptor *desc =
+		kzalloc(sizeof(struct bulk_buffer_descriptor), GFP_KERNEL);
+	desc->virt_addr = virt_addr;
+	desc->len = len;
+	desc->handle = handle;
+	desc->phys_addr_wsm_l2 = phys_addr_wsm_l2;
+	return desc;
+}
+
+/*****************************************************************************/
+struct session *session_create(
+	uint32_t	session_id,
+	void		*instance,
+	struct connection *connection
+) {
+	struct session *session =
+			kzalloc(sizeof(struct session), GFP_KERNEL);
+	session->session_id = session_id;
+	session->instance = instance;
+	session->notification_connection = connection;
+
+	session->session_info.last_error = SESSION_ERR_NO;
+	session->session_info.state = SESSION_STATE_INITIAL;
+
+	INIT_LIST_HEAD(&(session->bulk_buffer_descriptors));
+	return session;
+}
+
+
+/*****************************************************************************/
+void session_cleanup(
+	struct session *session
+) {
+	struct bulk_buffer_descriptor  *bulk_buf_descr;
+	struct list_head *pos, *q;
+	int ret;
+
+	/* Unmap still mapped buffers */
+	list_for_each_safe(pos, q, &session->bulk_buffer_descriptors) {
+		bulk_buf_descr =
+			list_entry(pos, struct bulk_buffer_descriptor, list);
+
+		MCDRV_DBG_VERBOSE("Physical Address of L2 Table = 0x%X, "
+				"handle= %d",
+				(unsigned int)bulk_buf_descr->phys_addr_wsm_l2,
+				bulk_buf_descr->handle);
+
+		/* ignore any error, as we cannot do anything in this case. */
+		ret = mobicore_unmap_vmem(session->instance,
+						bulk_buf_descr->handle);
+		if (ret != 0)
+			MCDRV_DBG_ERROR("mobicore_unmap_vmem failed: %d", ret);
+
+		list_del(pos);
+		kfree(bulk_buf_descr);
+	}
+
+	/* Finally delete notification connection */
+	connection_cleanup(session->notification_connection);
+	kfree(session);
+}
+
+
+/*****************************************************************************/
+void session_set_error_info(
+	struct session *session,
+	int32_t   err
+) {
+	session->session_info.last_error = err;
+}
+
+
+/*****************************************************************************/
+int32_t session_get_last_err(
+	struct session *session
+) {
+	return session->session_info.last_error;
+}
+
+
+/*****************************************************************************/
+struct bulk_buffer_descriptor *session_add_bulk_buf(
+	struct session	*session,
+	void		*buf,
+	uint32_t	len
+) {
+	struct bulk_buffer_descriptor *bulk_buf_descr = NULL;
+	struct bulk_buffer_descriptor  *tmp;
+	struct list_head *pos;
+
+	/* Search the bulk buffer descriptors for an existing virtual address.
+	   At the moment a virtual address can only be added once. */
+	list_for_each(pos, &session->bulk_buffer_descriptors) {
+		tmp = list_entry(pos, struct bulk_buffer_descriptor, list);
+		if (tmp->virt_addr == buf)
+			return NULL;
+	}
+
+	do {
+		/* Prepare the interface structure for memory registration in
+		   Kernel Module */
+		void	*l2_table_phys;
+		uint32_t  handle;
+
+		int ret = mobicore_map_vmem(session->instance,
+					buf,
+					len,
+					&handle,
+					&l2_table_phys);
+
+		if (ret != 0) {
+			MCDRV_DBG_ERROR("mobicore_map_vmem failed, ret=%d",
+					ret);
+			break;
+		}
+
+		MCDRV_DBG_VERBOSE("Physical Address of L2 Table = 0x%X, "
+				"handle=%d",
+				(unsigned int)l2_table_phys,
+				handle);
+
+		/* Create new descriptor */
+		bulk_buf_descr = bulk_buffer_descriptor_create(
+							buf,
+							len,
+							handle,
+							l2_table_phys);
+
+		/* Add to vector of descriptors */
+		list_add_tail(&(bulk_buf_descr->list),
+			&(session->bulk_buffer_descriptors));
+	} while (0);
+
+	return bulk_buf_descr;
+}
+
+
+/*****************************************************************************/
+bool session_remove_bulk_buf(
+	struct session *session,
+	void	*virt_addr
+) {
+	bool ret = true;
+	struct bulk_buffer_descriptor  *bulk_buf_descr = NULL;
+	struct bulk_buffer_descriptor  *tmp;
+	struct list_head *pos, *q;
+
+	MCDRV_DBG_VERBOSE("Virtual Address = 0x%X", (unsigned int) virt_addr);
+
+	/* Search and remove bulk buffer descriptor */
+	list_for_each_safe(pos, q, &session->bulk_buffer_descriptors) {
+		tmp = list_entry(pos, struct bulk_buffer_descriptor, list);
+		if (tmp->virt_addr == virt_addr) {
+			bulk_buf_descr = tmp;
+			list_del(pos);
+			break;
+		}
+	}
+
+	if (bulk_buf_descr == NULL) {
+		MCDRV_DBG_ERROR("Virtual Address not found");
+		ret = false;
+	} else {
+		int err;
+
+		MCDRV_DBG_VERBOSE("WsmL2 phys=0x%X, handle=%d",
+			(unsigned int)bulk_buf_descr->phys_addr_wsm_l2,
+			bulk_buf_descr->handle);
+
+		/* ignore any error, as we cannot do anything about it here */
+		err = mobicore_unmap_vmem(session->instance,
+						bulk_buf_descr->handle);
+		if (err != 0)
+			MCDRV_DBG_ERROR("mobicore_unmap_vmem failed: %d", err);
+
+		kfree(bulk_buf_descr);
+	}
+
+	return ret;
+}
+
+/** @} */
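
Taken together, the functions above give a session object the following
lifecycle; the sketch assumes the connection and kernel-module instance
already exist and skips most error handling.

#include "session.h"

static int session_lifecycle_sketch(uint32_t session_id, void *instance,
				    struct connection *conn,
				    void *buf, uint32_t len)
{
	struct session *s;
	struct bulk_buffer_descriptor *desc;

	s = session_create(session_id, instance, conn);

	/* register an additional bulk buffer for this session */
	desc = session_add_bulk_buf(s, buf, len);
	if (desc == NULL)
		MCDRV_DBG_ERROR("could not map the bulk buffer");

	/* ... exchange data with the Trustlet ... */

	if (desc != NULL)
		session_remove_bulk_buf(s, buf);

	/* unmaps anything left over and frees the session */
	session_cleanup(s);
	return 0;
}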
diff --git a/drivers/gud/mobicore_kernelapi/session.h b/drivers/gud/mobicore_kernelapi/session.h
new file mode 100644
index 0000000..9a53740
--- /dev/null
+++ b/drivers/gud/mobicore_kernelapi/session.h
@@ -0,0 +1,136 @@
+/** @addtogroup MCD_IMPL_LIB
+ * @{
+ * @file
+ * <!-- Copyright Giesecke & Devrient GmbH 2009 - 2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef SESSION_H_
+#define SESSION_H_
+
+#include "common.h"
+
+#include <linux/list.h>
+#include "connection.h"
+
+
+struct bulk_buffer_descriptor {
+	void		*virt_addr;/**< The virtual address of the Bulk buffer*/
+	uint32_t	len;	  /**< Length of the Bulk buffer*/
+	uint32_t	handle;
+	void		*phys_addr_wsm_l2; /**< The physical address of the
+				L2 table of the Bulk buffer*/
+	struct list_head list; /**< The list param for using the kernel lists*/
+};
+
+struct bulk_buffer_descriptor *bulk_buffer_descriptor_create(
+	void	*virt_addr,
+	uint32_t  len,
+	uint32_t  handle,
+	void	*phys_addr_wsm_l2
+);
+
+/** Session states.
+ * Not used at the moment.
+ */
+enum session_state {
+	SESSION_STATE_INITIAL,
+	SESSION_STATE_OPEN,
+	SESSION_STATE_TRUSTLET_DEAD
+};
+
+#define SESSION_ERR_NO	  0 /**< No session error */
+
+/** Session information structure.
+ * The information structure is used to hold the state of the session, which
+ * will limit further actions for the session.
+ * Also the last error code will be stored till it's read.
+ */
+struct session_information {
+	enum session_state state;	   /**< Session state */
+	int32_t		last_error;	 /**< Last error of session */
+};
+
+
+struct session {
+	struct mc_instance		*instance;
+	/**< Descriptors of additional bulk buffer of a session */
+	struct list_head	bulk_buffer_descriptors;
+	/**< Information about the session */
+	struct session_information	 session_info;
+
+	uint32_t		 session_id;
+	struct connection	 *notification_connection;
+
+	/**< The list param for using the kernel lists*/
+	struct list_head list;
+};
+
+struct session *session_create(
+	uint32_t	 session_id,
+	void		 *instance,
+	struct connection *connection
+);
+
+void session_cleanup(
+	struct session *session
+);
+
+/**
+  * Add address information of additional bulk buffer memory to session and
+  * register virtual memory in kernel module.
+  *
+  * @attention The virtual address can only be added once. If the virtual
+  * address already exists, NULL is returned.
+  *
+  * @param buf The virtual address of bulk buffer.
+  * @param len Length of bulk buffer.
+  *
+  * @return On success the actual Bulk buffer descriptor with all address
+  * information is returned, NULL if an error occurs.
+  */
+struct bulk_buffer_descriptor *session_add_bulk_buf(
+	struct session *session,
+	void	*buf,
+	uint32_t  len
+);
+
+/**
+  * Remove address information of additional bulk buffer memory from session and
+  * unregister virtual memory in kernel module
+  *
+  * @param buf The virtual address of the bulk buffer.
+  *
+  * @return true on success.
+  */
+bool session_remove_bulk_buf(
+	struct session *session,
+	void  *buf
+);
+
+/**
+  * Set additional error information of the last error that occurred.
+  *
+  * @param err The actual error.
+  */
+void session_set_error_info(
+	struct session *session,
+	int32_t err
+);
+
+/**
+  * Get additional error information of the last error that occurred.
+  *
+  * @attention After request the information is set to SESSION_ERR_NO.
+  *
+  * @return Last stored error code or SESSION_ERR_NO.
+  */
+int32_t session_get_last_err(
+	struct session *session
+);
+
+#endif /* SESSION_H_ */
+
+/** @} */
diff --git a/drivers/gud/mobicore_kernelapi/wsm.h b/drivers/gud/mobicore_kernelapi/wsm.h
new file mode 100644
index 0000000..6877c53
--- /dev/null
+++ b/drivers/gud/mobicore_kernelapi/wsm.h
@@ -0,0 +1,35 @@
+/** @addtogroup MCD_MCDIMPL_DAEMON_SRV
+ * @{
+ * @file
+ *
+ * World shared memory definitions.
+ *
+ * <!-- Copyright Giesecke & Devrient GmbH 2009 - 2012 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef WSM_H_
+#define WSM_H_
+
+#include "common.h"
+#include <linux/list.h>
+
+struct wsm {
+	void *virt_addr;
+	uint32_t len;
+	uint32_t handle;
+	void *phys_addr;
+	struct list_head list;
+};
+
+struct wsm *wsm_create(
+	void	*virt_addr,
+	uint32_t  len,
+	uint32_t  handle,
+	void	*phys_addr /* = NULL; this may be unknown, so it can be omitted. */
+);
+#endif /* WSM_H_ */
+
+/** @} */
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
index e7bbfcb..03b3929 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
@@ -34,16 +34,6 @@
 	   sizeof(struct mpq_adapter_video_meta_data)))
 
 /*
- * The following threshold defines gap from end of ring-buffer
- * from which new PES payload will not be written to make
- * sure that the PES payload does not wrap-around at end of the
- * buffer. Instead, padding will be inserted and the new PES will
- * be written from the beginning of the buffer.
- * Setting this to 0 means no padding will be added.
- */
-#define VIDEO_WRAP_AROUND_THRESHOLD			(1024*1024+512*1024)
-
-/*
  * PCR/STC information length saved in ring-buffer.
  * PCR / STC are saved in ring-buffer in the following form:
  * <8 bit flags><64 bits of STC> <64bits of PCR>
@@ -51,13 +41,85 @@
  * The current flags that are defined:
  * 0x00000001: discontinuity_indicator
  */
-#define PCR_STC_LEN							17
+#define PCR_STC_LEN					17
 
 
 /* Number of demux devices, has default of linux configuration */
 static int mpq_demux_device_num = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
 module_param(mpq_demux_device_num, int, S_IRUGO);
 
+/**
+ * Maximum allowed framing pattern size
+ */
+#define MPQ_MAX_PATTERN_SIZE				6
+
+/**
+ * Number of patterns to look for when doing framing, per video standard
+ */
+#define MPQ_MPEG2_PATTERN_NUM				5
+#define MPQ_H264_PATTERN_NUM				4
+#define MPQ_VC1_PATTERN_NUM				3
+
+/*
+ * mpq_framing_pattern_lookup_params - framing pattern lookup parameters.
+ *
+ * @pattern: the byte pattern to look for.
+ * @mask: the byte mask to use (same length as pattern).
+ * @size: the length of the pattern, in bytes.
+ * @type: the type of the pattern.
+ */
+struct mpq_framing_pattern_lookup_params {
+	u8 pattern[MPQ_MAX_PATTERN_SIZE];
+	u8 mask[MPQ_MAX_PATTERN_SIZE];
+	size_t size;
+	enum dmx_framing_pattern_type type;
+};
+
+/*
+ * Pre-defined video framing lookup pattern information.
+ * Note: the first pattern in each patterns database must
+ * be the Sequence Header (or equivalent SPS in H.264).
+ * The code assumes this is the case when prepending
+ * Sequence Header data in case it is required.
+ */
+static const struct mpq_framing_pattern_lookup_params
+		mpeg2_patterns[MPQ_MPEG2_PATTERN_NUM] = {
+	{{0x00, 0x00, 0x01, 0xB3}, {0xFF, 0xFF, 0xFF, 0xFF}, 4,
+			DMX_FRM_MPEG2_SEQUENCE_HEADER},
+	{{0x00, 0x00, 0x01, 0xB8}, {0xFF, 0xFF, 0xFF, 0xFF}, 4,
+			DMX_FRM_MPEG2_GOP_HEADER},
+	{{0x00, 0x00, 0x01, 0x00, 0x00, 0x08},
+			{0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38}, 6,
+			DMX_FRM_MPEG2_I_PIC},
+	{{0x00, 0x00, 0x01, 0x00, 0x00, 0x10},
+			{0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38}, 6,
+			DMX_FRM_MPEG2_P_PIC},
+	{{0x00, 0x00, 0x01, 0x00, 0x00, 0x18},
+			{0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38}, 6,
+			DMX_FRM_MPEG2_B_PIC}
+};
+
+static const struct mpq_framing_pattern_lookup_params
+		h264_patterns[MPQ_H264_PATTERN_NUM] = {
+	{{0x00, 0x00, 0x01, 0x07}, {0xFF, 0xFF, 0xFF, 0x1F}, 4,
+			DMX_FRM_H264_SPS},
+	{{0x00, 0x00, 0x01, 0x08}, {0xFF, 0xFF, 0xFF, 0x1F}, 4,
+			DMX_FRM_H264_PPS},
+	{{0x00, 0x00, 0x01, 0x05, 0x80}, {0xFF, 0xFF, 0xFF, 0x1F, 0x80}, 5,
+			DMX_FRM_H264_IDR_PIC},
+	{{0x00, 0x00, 0x01, 0x01, 0x80}, {0xFF, 0xFF, 0xFF, 0x1F, 0x80}, 5,
+			DMX_FRM_H264_NON_IDR_PIC}
+};
+
+static const struct mpq_framing_pattern_lookup_params
+		vc1_patterns[MPQ_VC1_PATTERN_NUM] = {
+	{{0x00, 0x00, 0x01, 0x0F}, {0xFF, 0xFF, 0xFF, 0xFF}, 4,
+			DMX_FRM_VC1_SEQUENCE_HEADER},
+	{{0x00, 0x00, 0x01, 0x0E}, {0xFF, 0xFF, 0xFF, 0xFF}, 4,
+			DMX_FRM_VC1_ENTRY_POINT_HEADER},
+	{{0x00, 0x00, 0x01, 0x0D}, {0xFF, 0xFF, 0xFF, 0xFF}, 4,
+			DMX_FRM_VC1_FRAME_START_CODE}
+};
 
 /* Global data-structure for managing demux devices */
 static struct
@@ -73,14 +135,13 @@
 		decoder_buffers[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES];
 
 	/*
-	 * Indicates whether we allow decoder's data to
-	 * wrap-around in the output buffer or padding is
-	 * inserted in such case.
+	 * Indicates whether the video decoder handles framing
+	 * or we are required to provide framing information
+	 * in the meta-data passed to the decoder.
 	 */
-	int decoder_data_wrap;
+	int decoder_framing;
 } mpq_dmx_info;
 
-
 /* Check that PES header is valid and that it is a video PES */
 static int mpq_dmx_is_valid_video_pes(struct pes_packet_header *pes_header)
 {
@@ -97,6 +158,321 @@
 	return 0;
 }
 
+/* Check if a framing pattern is a video frame pattern or a header pattern */
+static inline int mpq_dmx_is_video_frame(
+				enum dmx_indexing_video_standard standard,
+				enum dmx_framing_pattern_type pattern_type)
+{
+	switch (standard) {
+	case DMX_INDEXING_MPEG2:
+		if ((pattern_type == DMX_FRM_MPEG2_I_PIC) ||
+			(pattern_type == DMX_FRM_MPEG2_P_PIC) ||
+			(pattern_type == DMX_FRM_MPEG2_B_PIC))
+			return 1;
+		return 0;
+	case DMX_INDEXING_H264:
+		if ((pattern_type == DMX_FRM_H264_IDR_PIC) ||
+			(pattern_type == DMX_FRM_H264_NON_IDR_PIC))
+			return 1;
+		return 0;
+	case DMX_INDEXING_VC1:
+		if (pattern_type == DMX_FRM_VC1_FRAME_START_CODE)
+			return 1;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * mpq_framing_pattern_lookup_results - framing lookup results
+ *
+ * @offset: The offset in the buffer where the pattern was found.
+ * If a pattern is found using a prefix (i.e. started on the
+ * previous buffer), offset is zero.
+ * @type: the type of the pattern found.
+ * @used_prefix_size: the prefix size that was used to find this pattern
+ */
+struct mpq_framing_pattern_lookup_results {
+	struct {
+		u32 offset;
+		enum dmx_framing_pattern_type type;
+		u32 used_prefix_size;
+	} info[MPQ_MAX_FOUND_PATTERNS];
+};
+
+/*
+ * Check if two patterns are identical, taking mask into consideration.
+ * @pattern1: the first byte pattern to compare.
+ * @pattern2: the second byte pattern to compare.
+ * @mask: the bit mask to use.
+ * @pattern_size: the length of both patterns and the mask, in bytes.
+ *
+ * Return: 1 if patterns match, 0 otherwise.
+ */
+static inline int mpq_dmx_patterns_match(const u8 *pattern1, const u8 *pattern2,
+					const u8 *mask, size_t pattern_size)
+{
+	int i;
+
+	/*
+	 * Assumption: it is OK to access pattern1, pattern2 and mask.
+	 * This function performs no sanity checks to keep things fast.
+	 */
+
+	for (i = 0; i < pattern_size; i++)
+		if ((pattern1[i] & mask[i]) != (pattern2[i] & mask[i]))
+			return 0;
+
+	return 1;
+}
+
+/*
+ * mpq_dmx_framing_pattern_search -
+ * search for framing patterns in a given buffer.
+ *
+ * Optimized version: first search for a common substring, e.g. 0x00 0x00 0x01.
+ * If this string is found, go over all the given patterns (all must start
+ * with this string) and search for their ending in the buffer.
+ *
+ * Assumption: the patterns we look for do not spread over more than two
+ * buffers.
+ *
+ * @patterns: the full patterns information to look for.
+ * @patterns_num: the number of patterns to look for.
+ * @buf: the buffer to search.
+ * @buf_size: the size of the buffer to search. We search the entire buffer.
+ * @prefix_size_masks: a bit mask (per pattern) of possible prefix sizes to use
+ * when searching for a pattern that started at the last buffer.
+ * Updated in this function for use in the next lookup.
+ * @results: lookup results (offset, type, used_prefix_size) per found pattern,
+ * up to MPQ_MAX_FOUND_PATTERNS.
+ *
+ * Return:
+ *   Number of patterns found (up to MPQ_MAX_FOUND_PATTERNS).
+ *   0 if pattern was not found.
+ *   Negative error value on failure.
+ */
+static int mpq_dmx_framing_pattern_search(
+		const struct mpq_framing_pattern_lookup_params *patterns,
+		int patterns_num,
+		const u8 *buf,
+		size_t buf_size,
+		struct mpq_framing_prefix_size_masks *prefix_size_masks,
+		struct mpq_framing_pattern_lookup_results *results)
+{
+	int i, j;
+	unsigned int current_size;
+	u32 prefix;
+	int found = 0;
+	int start_offset = 0;
+	/* the starting common substring to look for */
+	u8 string[] = {0x00, 0x00, 0x01};
+	/* the mask for the starting string */
+	u8 string_mask[] = {0xFF, 0xFF, 0xFF};
+	/* the size of the starting string (in bytes) */
+	size_t string_size = 3;
+
+	/* sanity checks - can be commented out for optimization purposes */
+	if ((patterns == NULL) || (patterns_num <= 0) || (buf == NULL)) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	memset(results, 0, sizeof(struct mpq_framing_pattern_lookup_results));
+
+	/*
+	 * handle prefix - disregard string, simply check all patterns,
+	 * looking for a matching suffix at the very beginning of the buffer.
+	 */
+	for (j = 0; (j < patterns_num) && !found; j++) {
+		prefix = prefix_size_masks->size_mask[j];
+		current_size = 32;
+		while (prefix) {
+			if (prefix & (0x1 << (current_size - 1))) {
+				/*
+				 * check that we don't look further
+				 * than buf_size boundary
+				 */
+				if ((int)(patterns[j].size - current_size) >
+						buf_size)
+					break;
+
+				if (mpq_dmx_patterns_match(
+					(patterns[j].pattern + current_size),
+					buf, (patterns[j].mask + current_size),
+					(patterns[j].size - current_size))) {
+
+					MPQ_DVB_DBG_PRINT(
+						"%s: Found matching pattern"
+						"using prefix of size %d\n",
+						__func__, current_size);
+					/*
+					 * pattern found using prefix at the
+					 * very beginning of the buffer, so
+					 * offset is 0, but we already zeroed
+					 * everything in the beginning of the
+					 * function. that's why the next line
+					 * is commented.
+					 */
+					/* results->info[found].offset = 0; */
+					results->info[found].type =
+							patterns[j].type;
+					results->info[found].used_prefix_size =
+							current_size;
+					found++;
+					/*
+					 * save offset to start looking from
+					 * in the buffer, to avoid reusing the
+					 * data of a pattern we already found.
+					 */
+					start_offset = (patterns[j].size -
+							current_size);
+
+					if (found >= MPQ_MAX_FOUND_PATTERNS)
+						goto next_prefix_lookup;
+					/*
+					 * we don't want to search for the same
+					 * pattern with several possible prefix
+					 * sizes if we have already found it,
+					 * so we break from the inner loop.
+					 * since we incremented 'found', we
+					 * will not search for additional
+					 * patterns using a prefix - that would
+					 * imply ambiguous patterns where one
+					 * pattern can be included in another.
+					 * the for loop will exit.
+					 */
+					break;
+				}
+			}
+			/* clear the bit that was just tested */
+			prefix &= ~(0x1 << (current_size - 1));
+			current_size--;
+		}
+	}
+
+	/*
+	 * Search buffer for entire pattern, starting with the string.
+	 * Note the external for loop does not execute if buf_size is
+	 * smaller than string_size (the cast to int is required, since
+	 * size_t is unsigned).
+	 */
+	for (i = start_offset; i < (int)(buf_size - string_size + 1); i++) {
+		if (mpq_dmx_patterns_match(string, (buf + i), string_mask,
+							string_size)) {
+			/* now search for patterns: */
+			for (j = 0; j < patterns_num; j++) {
+				/* avoid overflow to next buffer */
+				if ((i + patterns[j].size) > buf_size)
+					continue;
+
+				if (mpq_dmx_patterns_match(
+					(patterns[j].pattern + string_size),
+					(buf + i + string_size),
+					(patterns[j].mask + string_size),
+					(patterns[j].size - string_size))) {
+
+					results->info[found].offset = i;
+					results->info[found].type =
+						patterns[j].type;
+					/*
+					 * save offset to start next prefix
+					 * lookup, to avoid reusing the data
+					 * of any pattern we already found.
+					 */
+					if ((i + patterns[j].size) >
+							start_offset)
+						start_offset = (i +
+							patterns[j].size);
+					/*
+					 * did not use a prefix to find this
+					 * pattern, but we zeroed everything
+					 * in the beginning of the function.
+					 * So no need to zero used_prefix_size
+					 * for results->info[found]
+					 */
+
+					found++;
+					if (found >= MPQ_MAX_FOUND_PATTERNS)
+						goto next_prefix_lookup;
+					/*
+					 * theoretically we don't have to break
+					 * here, but we don't want to search
+					 * for the other matching patterns on
+					 * the very same place in the
+					 * buffer. That would mean the
+					 * (pattern & mask) combinations are
+					 * not unique. So we break from inner
+					 * loop and move on to the next place
+					 * in the buffer.
+					 */
+					break;
+				}
+			}
+		}
+	}
+
+next_prefix_lookup:
+	/* check for possible prefix sizes for the next buffer */
+	for (j = 0; j < patterns_num; j++) {
+		prefix_size_masks->size_mask[j] = 0;
+		for (i = 1; i < patterns[j].size; i++) {
+			/*
+			 * avoid looking outside of the buffer
+			 * or reusing previously used data.
+			 */
+			if (i > (buf_size - start_offset))
+				break;
+
+			if (mpq_dmx_patterns_match(patterns[j].pattern,
+					(buf + buf_size - i),
+					patterns[j].mask, i)) {
+				prefix_size_masks->size_mask[j] |=
+						(1 << (i - 1));
+			}
+		}
+	}
+
+	return found;
+}
+
+/*
+ * mpq_dmx_get_pattern_params -
+ * get a pointer to the relevant pattern parameters structure,
+ * based on the video parameters.
+ *
+ * @video_params: the video parameters (e.g. video standard).
+ * @patterns: a pointer to a pointer to the pattern parameters,
+ * updated by this function.
+ * @patterns_num: number of patterns, updated by this function.
+ */
+static inline int mpq_dmx_get_pattern_params(
+		struct dmx_indexing_video_params *video_params,
+		const struct mpq_framing_pattern_lookup_params **patterns,
+		int *patterns_num)
+{
+	switch (video_params->standard) {
+	case DMX_INDEXING_MPEG2:
+		*patterns = mpeg2_patterns;
+		*patterns_num = MPQ_MPEG2_PATTERN_NUM;
+		break;
+	case DMX_INDEXING_H264:
+		*patterns = h264_patterns;
+		*patterns_num = MPQ_H264_PATTERN_NUM;
+		break;
+	case DMX_INDEXING_VC1:
+		*patterns = vc1_patterns;
+		*patterns_num = MPQ_VC1_PATTERN_NUM;
+		break;
+	default:
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		*patterns = NULL;
+		*patterns_num = 0;
+		return -EINVAL;
+	}
+
+	return 0;
+}
 
 /* Extend dvb-demux debugfs with HW statistics */
 void mpq_dmx_init_hw_statistics(struct mpq_demux *mpq_demux)
@@ -199,8 +575,12 @@
 	mpq_dmx_info.devices = NULL;
 	mpq_dmx_info.ion_client = NULL;
 
-	/* TODO: the following should be set based on the decoder */
-	mpq_dmx_info.decoder_data_wrap = 0;
+	/*
+	 * TODO: the following should be set based on the decoder:
+	 * 0 means the decoder doesn't handle framing, so framing
+	 * is done by demux. 1 means the decoder handles framing.
+	 */
+	mpq_dmx_info.decoder_framing = 0;
 
 	/* Allocate memory for all MPQ devices */
 	mpq_dmx_info.devices =
@@ -339,9 +719,16 @@
 	int i;
 	int dvr_index;
 	int dmx_index;
-	struct mpq_demux *mpq_demux = (struct mpq_demux *)demux->priv;
+	struct dvb_demux *dvb_demux = (struct dvb_demux *)demux->priv;
+	struct mpq_demux *mpq_demux;
 
-	if ((mpq_dmx_info.devices == NULL) || (mpq_demux == NULL)) {
+	if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL)) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	mpq_demux = (struct mpq_demux *)dvb_demux->priv;
+	if (mpq_demux == NULL) {
 		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
 		return -EINVAL;
 	}
@@ -404,13 +791,27 @@
 
 	if (feed_data == NULL) {
 		MPQ_DVB_ERR_PRINT(
-			"%s: FAILED to private video feed data\n",
+			"%s: FAILED to allocate private video feed data\n",
 			__func__);
 
 		ret = -ENOMEM;
 		goto init_failed;
 	}
 
+	/* get and store framing information if required */
+	if (!mpq_dmx_info.decoder_framing) {
+		mpq_dmx_get_pattern_params(&feed->indexing_params,
+				&feed_data->patterns, &feed_data->patterns_num);
+		if (feed_data->patterns == NULL) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: FAILED to get framing pattern parameters\n",
+				__func__);
+
+			ret = -EINVAL;
+			goto init_failed_free_priv_data;
+		}
+	}
+
 	/* Allocate packet buffer holding the meta-data */
 	packet_buffer = vmalloc(VIDEO_META_DATA_BUFFER_SIZE);
 
@@ -430,12 +831,7 @@
 	 * flag set.
 	 */
 
-	if (mpq_dmx_info.decoder_data_wrap)
-		actual_buffer_size =
-			feed->buffer_size;
-	else
-		actual_buffer_size =
-			feed->buffer_size + VIDEO_WRAP_AROUND_THRESHOLD;
+	actual_buffer_size = feed->buffer_size;
 
 	actual_buffer_size += (SZ_4K - 1);
 	actual_buffer_size &= ~(SZ_4K - 1);
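+	/*
+	 * The two lines above round actual_buffer_size up to the next
+	 * multiple of SZ_4K, so the allocated size is always 4 KB aligned
+	 * (e.g. a request of 100 KB + 1 byte becomes 104 KB).
+	 */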
@@ -551,6 +947,14 @@
 	feed->pusi_seen = 0;
 	feed->peslen = 0;
 	feed_data->fullness_wait_cancel = 0;
+	feed_data->last_framing_match_address = 0;
+	feed_data->last_framing_match_type = DMX_FRM_UNKNOWN;
+	feed_data->found_sequence_header_pattern = 0;
+	memset(&feed_data->prefix_size, 0,
+			sizeof(struct mpq_framing_prefix_size_masks));
+	feed_data->first_pattern_offset = 0;
+	feed_data->first_prefix_size = 0;
+	feed_data->write_pts_dts = 0;
 
 	spin_lock(&mpq_demux->feed_lock);
 	feed->priv = (void *)feed_data;
@@ -669,7 +1073,6 @@
 
 	if (mpq_dmx_is_video_feed(feed)) {
 		int ret;
-		int gap;
 		struct mpq_video_feed_info *feed_data;
 		struct dvb_ringbuffer *video_buff;
 
@@ -686,16 +1089,6 @@
 		video_buff =
 			&feed_data->video_buffer->raw_data;
 
-		/*
-		 * If we are now starting new PES and the
-		 * PES payload may wrap-around, extra padding
-		 * needs to be pushed into the buffer.
-		 */
-		gap = video_buff->size - video_buff->pwrite;
-		if ((!mpq_dmx_info.decoder_data_wrap) &&
-			(gap < VIDEO_WRAP_AROUND_THRESHOLD))
-			required_space += gap;
-
 		ret = 0;
 		if ((feed_data != NULL) &&
 			(!feed_data->fullness_wait_cancel) &&
@@ -795,13 +1188,206 @@
 }
 EXPORT_SYMBOL(mpq_dmx_decoder_fullness_abort);
 
-int mpq_dmx_process_video_packet(
+
+static inline int mpq_dmx_parse_mandatory_pes_header(
+				struct dvb_demux_feed *feed,
+				struct mpq_video_feed_info *feed_data,
+				struct pes_packet_header *pes_header,
+				const u8 *buf,
+				u32 *ts_payload_offset,
+				int *bytes_avail)
+{
+	int left_size, copy_len;
+
+	if (feed_data->pes_header_offset < PES_MANDATORY_FIELDS_LEN) {
+		left_size =
+			PES_MANDATORY_FIELDS_LEN -
+			feed_data->pes_header_offset;
+
+		copy_len = (left_size > *bytes_avail) ?
+					*bytes_avail :
+					left_size;
+
+		memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+				(buf + *ts_payload_offset),
+				copy_len);
+
+		feed_data->pes_header_offset += copy_len;
+
+		if (left_size > *bytes_avail)
+			return -EINVAL;
+
+		/* else - we have beginning of PES header */
+		*bytes_avail -= left_size;
+		*ts_payload_offset += left_size;
+
+		/* Make sure the PES packet is valid */
+		if (mpq_dmx_is_valid_video_pes(pes_header) < 0) {
+			/*
+			 * Since the new PES header parsing
+			 * failed, reset pusi_seen to drop all
+			 * data until next PUSI
+			 */
+			feed->pusi_seen = 0;
+			feed_data->pes_header_offset = 0;
+
+			MPQ_DVB_ERR_PRINT(
+				"%s: invalid packet\n",
+				__func__);
+
+			return -EINVAL;
+		}
+
+		feed_data->pes_header_left_bytes =
+			pes_header->pes_header_data_length;
+	}
+
+	return 0;
+}
+
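+/*
+ * Note: PES_MANDATORY_FIELDS_LEN covers only the fixed part of the PES
+ * header (start-code prefix, stream id, packet length, the two flag
+ * bytes and the header-data length).  Optional fields that may follow,
+ * such as PTS and DTS, are collected by
+ * mpq_dmx_parse_remaining_pes_header() below.
+ */
+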
+static inline int mpq_dmx_parse_remaining_pes_header(
+				struct dvb_demux_feed *feed,
+				struct mpq_video_feed_info *feed_data,
+				struct pes_packet_header *pes_header,
+				const u8 *buf,
+				u32 *ts_payload_offset,
+				int *bytes_avail)
+{
+	int left_size, copy_len;
+
+	/* Remaining header bytes that need to be processed? */
+	if (!feed_data->pes_header_left_bytes)
+		return 0;
+
+	/* Did we capture the PTS value (if it exists)? */
+	if ((*bytes_avail != 0) &&
+		(feed_data->pes_header_offset <
+		 (PES_MANDATORY_FIELDS_LEN+5)) &&
+		((pes_header->pts_dts_flag == 2) ||
+		 (pes_header->pts_dts_flag == 3))) {
+
+		/* 5 more bytes should be there */
+		left_size =
+			PES_MANDATORY_FIELDS_LEN + 5 -
+			feed_data->pes_header_offset;
+
+		copy_len = (left_size > *bytes_avail) ?
+					*bytes_avail :
+					left_size;
+
+		memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+			(buf + *ts_payload_offset),
+			copy_len);
+
+		feed_data->pes_header_offset += copy_len;
+		feed_data->pes_header_left_bytes -= copy_len;
+
+		if (left_size > *bytes_avail)
+			return -EINVAL;
+
+		/* else - we have the PTS */
+		*bytes_avail -= copy_len;
+		*ts_payload_offset += copy_len;
+		feed_data->write_pts_dts = 1;
+	}
+
+	/* Did we capture the DTS value (if it exists)? */
+	if ((*bytes_avail != 0) &&
+		(feed_data->pes_header_offset <
+		 (PES_MANDATORY_FIELDS_LEN+10)) &&
+		(pes_header->pts_dts_flag == 3)) {
+
+		/* 5 more bytes should be there */
+		left_size =
+			PES_MANDATORY_FIELDS_LEN + 10 -
+			feed_data->pes_header_offset;
+
+		copy_len = (left_size > *bytes_avail) ?
+					*bytes_avail :
+					left_size;
+
+		memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+			(buf + *ts_payload_offset),
+			copy_len);
+
+		feed_data->pes_header_offset += copy_len;
+		feed_data->pes_header_left_bytes -= copy_len;
+
+		if (left_size > *bytes_avail)
+			return -EINVAL;
+
+		/* else - we have the DTS */
+		*bytes_avail -= copy_len;
+		*ts_payload_offset += copy_len;
+		feed_data->write_pts_dts = 1;
+	}
+
+	/* Any more header bytes?! */
+	if (feed_data->pes_header_left_bytes >= *bytes_avail) {
+		feed_data->pes_header_left_bytes -= *bytes_avail;
+		return -EINVAL;
+	}
+
+	/* Got PES header, process payload */
+	*bytes_avail -= feed_data->pes_header_left_bytes;
+	*ts_payload_offset += feed_data->pes_header_left_bytes;
+	feed_data->pes_header_left_bytes = 0;
+
+	return 0;
+}
+
+static inline void mpq_dmx_get_pts_dts(struct mpq_video_feed_info *feed_data,
+				struct pes_packet_header *pes_header,
+				struct mpq_adapter_video_meta_data *meta_data,
+				enum dmx_packet_type packet_type)
+{
+	struct dmx_pts_dts_info *info;
+
+	if (packet_type == DMX_PES_PACKET)
+		info = &(meta_data->info.pes.pts_dts_info);
+	else
+		info = &(meta_data->info.framing.pts_dts_info);
+
+	if (feed_data->write_pts_dts) {
+		if ((pes_header->pts_dts_flag == 2) ||
+			(pes_header->pts_dts_flag == 3)) {
+			info->pts_exist = 1;
+
+			info->pts =
+				((u64)pes_header->pts_1 << 30) |
+				((u64)pes_header->pts_2 << 22) |
+				((u64)pes_header->pts_3 << 15) |
+				((u64)pes_header->pts_4 << 7) |
+				(u64)pes_header->pts_5;
+		} else {
+			info->pts_exist = 0;
+			info->pts = 0;
+		}
+
+		if (pes_header->pts_dts_flag == 3) {
+			info->dts_exist = 1;
+
+			info->dts =
+				((u64)pes_header->dts_1 << 30) |
+				((u64)pes_header->dts_2 << 22) |
+				((u64)pes_header->dts_3 << 15) |
+				((u64)pes_header->dts_4 << 7) |
+				(u64)pes_header->dts_5;
+		} else {
+			info->dts_exist = 0;
+			info->dts = 0;
+		}
+	} else {
+		info->pts_exist = 0;
+		info->dts_exist = 0;
+	}
+}
+
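+/*
+ * The 33-bit PES PTS/DTS value is reassembled above from five bit-fields
+ * of 3, 8, 7, 8 and 7 bits (the marker bits in between are skipped by
+ * the struct layout), hence the shifts of 30, 22, 15 and 7.  For
+ * example, a PTS of 90000 (one second at the 90 kHz PES clock)
+ * corresponds to pts_1..pts_5 = {0x0, 0x0, 0x02, 0xBF, 0x10}.
+ */
+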
+static int mpq_dmx_process_video_packet_framing(
 			struct dvb_demux_feed *feed,
 			const u8 *buf)
 {
 	int bytes_avail;
-	int left_size;
-	int copy_len;
 	u32 ts_payload_offset;
 	struct mpq_video_feed_info *feed_data;
 	const struct ts_packet_header *ts_header;
@@ -809,13 +1395,18 @@
 	struct pes_packet_header *pes_header;
 	struct mpq_demux *mpq_demux;
 
-	mpq_demux =
-		(struct mpq_demux *)feed->demux->priv;
+	struct mpq_framing_pattern_lookup_results framing_res;
+	int found_patterns = 0;
+	int first_pattern = 0;
+	int i;
+	u32 pattern_addr = 0;
+	int is_video_frame = 0;
+
+	mpq_demux = (struct mpq_demux *)feed->demux->priv;
 
 	spin_lock(&mpq_demux->feed_lock);
 
-	feed_data =
-		(struct mpq_video_feed_info *)feed->priv;
+	feed_data = (struct mpq_video_feed_info *)feed->priv;
 
 	if (unlikely(feed_data == NULL)) {
 		spin_unlock(&mpq_demux->feed_lock);
@@ -824,13 +1415,11 @@
 
 	ts_header = (const struct ts_packet_header *)buf;
 
-	stream_buffer =
-			feed_data->video_buffer;
+	stream_buffer = feed_data->video_buffer;
 
-	pes_header =
-			&feed_data->pes_header;
+	pes_header = &feed_data->pes_header;
 
-/*	printk("TS packet: %X %X %X %X %X%X %X %X %X\n",
+	/* MPQ_DVB_DBG_PRINT("TS packet: %X %X %X %X %X%X %X %X %X\n",
 		ts_header->sync_byte,
 		ts_header->transport_error_indicator,
 		ts_header->payload_unit_start_indicator,
@@ -839,7 +1428,318 @@
 		ts_header->pid_lsb,
 		ts_header->transport_scrambling_control,
 		ts_header->adaptation_field_control,
-		ts_header->continuity_counter);*/
+		ts_header->continuity_counter); */
+
+	/* Make sure this TS packet has a payload and not scrambled */
+	if ((ts_header->sync_byte != 0x47) ||
+		(ts_header->adaptation_field_control == 0) ||
+		(ts_header->adaptation_field_control == 2) ||
+		(ts_header->transport_scrambling_control)) {
+		/* continue to next packet */
+		spin_unlock(&mpq_demux->feed_lock);
+		return 0;
+	}
+
+	if (ts_header->payload_unit_start_indicator) { /* PUSI? */
+		if (feed->pusi_seen) { /* Did we see PUSI before? */
+			/*
+			 * Double check that we are not in the middle of
+			 * parsing the previous PES header.
+			 */
+			if (feed_data->pes_header_left_bytes != 0) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: received PUSI "
+					"while handling PES header "
+					"of previous PES\n",
+					__func__);
+			}
+
+			feed->peslen = 0;
+			feed_data->pes_header_offset = 0;
+			feed_data->pes_header_left_bytes =
+				PES_MANDATORY_FIELDS_LEN;
+			feed_data->write_pts_dts = 0;
+		} else {
+			feed->pusi_seen = 1;
+		}
+	}
+
+	/*
+	 * Parse PES data only if PUSI was encountered,
+	 * otherwise the data is dropped
+	 */
+	if (!feed->pusi_seen) {
+		spin_unlock(&mpq_demux->feed_lock);
+		return 0; /* drop and wait for next packets */
+	}
+
+	ts_payload_offset = sizeof(struct ts_packet_header);
+
+	/* Skip adaptation field if exists */
+	if (ts_header->adaptation_field_control == 3)
+		ts_payload_offset += buf[ts_payload_offset] + 1;
+
+	/* 188 bytes: the size of a TS packet including the TS packet header */
+	bytes_avail = 188 - ts_payload_offset;
+
+	/* Get the mandatory fields of the video PES header */
+	if (mpq_dmx_parse_mandatory_pes_header(feed, feed_data,
+						pes_header, buf,
+						&ts_payload_offset,
+						&bytes_avail)) {
+		spin_unlock(&mpq_demux->feed_lock);
+		return 0;
+	}
+
+	if (mpq_dmx_parse_remaining_pes_header(feed, feed_data,
+						pes_header, buf,
+						&ts_payload_offset,
+						&bytes_avail)) {
+		spin_unlock(&mpq_demux->feed_lock);
+		return 0;
+	}
+
+	/*
+	 * If we reached here,
+	 * then we are now at the PES payload data
+	 */
+	if (bytes_avail == 0) {
+		spin_unlock(&mpq_demux->feed_lock);
+		return 0;
+	}
+
+	/*
+	 * the decoder requires demux to do framing,
+	 * so search for the patterns now.
+	 */
+	found_patterns = mpq_dmx_framing_pattern_search(
+				feed_data->patterns,
+				feed_data->patterns_num,
+				(buf + ts_payload_offset),
+				bytes_avail,
+				&feed_data->prefix_size,
+				&framing_res);
+
+	if (!(feed_data->found_sequence_header_pattern)) {
+		for (i = 0; i < found_patterns; i++) {
+			if ((framing_res.info[i].type ==
+				DMX_FRM_MPEG2_SEQUENCE_HEADER) ||
+			    (framing_res.info[i].type ==
+				DMX_FRM_H264_SPS) ||
+			    (framing_res.info[i].type ==
+				DMX_FRM_VC1_SEQUENCE_HEADER)) {
+
+				MPQ_DVB_DBG_PRINT(
+					"%s: Found Sequence Pattern, buf %p, "
+					"i = %d, offset = %d, type = %d\n",
+					__func__, buf, i,
+					framing_res.info[i].offset,
+					framing_res.info[i].type);
+
+				first_pattern = i;
+				feed_data->found_sequence_header_pattern = 1;
+				ts_payload_offset +=
+					framing_res.info[i].offset;
+				bytes_avail -= framing_res.info[i].offset;
+
+				if (framing_res.info[i].used_prefix_size) {
+					feed_data->first_prefix_size =
+						framing_res.info[i].
+							used_prefix_size;
+				}
+				/*
+				 * if this is the first pattern we write,
+				 * no need to take offset into account since we
+				 * dropped all data before it (so effectively
+				 * offset is 0).
+				 * we save the first pattern offset and take
+				 * it into consideration for the rest of the
+				 * patterns found in this buffer.
+				 */
+				feed_data->first_pattern_offset =
+					framing_res.info[i].offset;
+				break;
+			}
+		}
+	}
+
+	/*
+	 * If decoder requires demux to do framing,
+	 * pass data to decoder only after sequence header
+	 * or equivalent is found. Otherwise the data is dropped.
+	 */
+	if (!(feed_data->found_sequence_header_pattern)) {
+		spin_unlock(&mpq_demux->feed_lock);
+		return 0;
+	}
+
+	/*
+	 * write prefix used to find first Sequence pattern, if needed.
+	 * feed_data->patterns[0].pattern always contains the Sequence
+	 * pattern.
+	 */
+	if (feed_data->first_prefix_size) {
+		if (mpq_streambuffer_data_write(stream_buffer,
+					(feed_data->patterns[0].pattern),
+					feed_data->first_prefix_size) < 0) {
+			mpq_demux->decoder_tsp_drop_count++;
+			spin_unlock(&mpq_demux->feed_lock);
+			return 0;
+		}
+		feed_data->first_prefix_size = 0;
+	}
+	/* write data to payload buffer */
+	if (mpq_streambuffer_data_write(stream_buffer,
+					(buf + ts_payload_offset),
+					bytes_avail) < 0) {
+		mpq_demux->decoder_tsp_drop_count++;
+	} else {
+		struct mpq_streambuffer_packet_header packet;
+		struct mpq_adapter_video_meta_data meta_data;
+
+		feed->peslen += bytes_avail;
+
+		meta_data.packet_type = DMX_FRAMING_INFO_PACKET;
+		packet.user_data_len =
+				sizeof(struct mpq_adapter_video_meta_data);
+
+		for (i = first_pattern; i < found_patterns; i++) {
+			if (feed_data->last_framing_match_address) {
+				is_video_frame = mpq_dmx_is_video_frame(
+					feed->indexing_params.standard,
+					feed_data->last_framing_match_type);
+				if (is_video_frame == 1) {
+					mpq_dmx_get_pts_dts(feed_data,
+						pes_header,
+						&meta_data,
+						DMX_FRAMING_INFO_PACKET);
+				} else {
+					meta_data.info.framing.
+						pts_dts_info.pts_exist = 0;
+					meta_data.info.framing.
+						pts_dts_info.dts_exist = 0;
+				}
+				/*
+				 * writing meta-data that includes
+				 * framing information
+				 */
+				meta_data.info.framing.pattern_type =
+					feed_data->last_framing_match_type;
+				packet.raw_data_addr =
+					feed_data->last_framing_match_address;
+
+				pattern_addr = feed_data->pes_payload_address +
+					framing_res.info[i].offset -
+					framing_res.info[i].used_prefix_size;
+
+				if ((pattern_addr -
+					feed_data->first_pattern_offset) <
+					feed_data->last_framing_match_address) {
+					/* wraparound case */
+					packet.raw_data_len =
+						(pattern_addr -
+						feed_data->
+						   last_framing_match_address +
+						stream_buffer->raw_data.size) -
+						feed_data->first_pattern_offset;
+				} else {
+					packet.raw_data_len =
+					  pattern_addr -
+					  feed_data->
+						last_framing_match_address -
+					  feed_data->first_pattern_offset;
+				}
+
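+				/*
+				 * raw_data_len now holds the number of
+				 * payload bytes between the previous
+				 * framing match and the current one,
+				 * taking ring-buffer wrap-around into
+				 * account.
+				 */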
+				MPQ_DVB_DBG_PRINT("Writing Packet: "
+					"addr = 0x%X, len = %d, type = %d, "
+					"isPts = %d, isDts = %d\n",
+					packet.raw_data_addr,
+					packet.raw_data_len,
+					meta_data.info.framing.pattern_type,
+					meta_data.info.framing.
+						pts_dts_info.pts_exist,
+					meta_data.info.framing.
+						pts_dts_info.dts_exist);
+
+				if (mpq_streambuffer_pkt_write(stream_buffer,
+						&packet,
+						(u8 *)&meta_data) < 0) {
+							MPQ_DVB_ERR_PRINT(
+								"%s: "
+								"Couldn't write packet. "
+								"Should never happen\n",
+								__func__);
+				} else {
+					if (is_video_frame == 1)
+						feed_data->write_pts_dts = 0;
+				}
+			}
+
+			/* save the last match for next time */
+			feed_data->last_framing_match_type =
+					framing_res.info[i].type;
+
+			feed_data->last_framing_match_address =
+				(feed_data->pes_payload_address +
+				framing_res.info[i].offset -
+				framing_res.info[i].used_prefix_size -
+				feed_data->first_pattern_offset);
+		}
+		/*
+		 * the first pattern offset is needed only for the group of
+		 * patterns that are found and written with the first pattern.
+		 */
+		feed_data->first_pattern_offset = 0;
+
+		feed_data->pes_payload_address =
+			(u32)stream_buffer->raw_data.data +
+			stream_buffer->raw_data.pwrite;
+	}
+
+	spin_unlock(&mpq_demux->feed_lock);
+
+	return 0;
+}
+
+static int mpq_dmx_process_video_packet_no_framing(
+			struct dvb_demux_feed *feed,
+			const u8 *buf)
+{
+	int bytes_avail;
+	u32 ts_payload_offset;
+	struct mpq_video_feed_info *feed_data;
+	const struct ts_packet_header *ts_header;
+	struct mpq_streambuffer *stream_buffer;
+	struct pes_packet_header *pes_header;
+	struct mpq_demux *mpq_demux;
+
+	mpq_demux = (struct mpq_demux *)feed->demux->priv;
+
+	spin_lock(&mpq_demux->feed_lock);
+
+	feed_data = (struct mpq_video_feed_info *)feed->priv;
+
+	if (unlikely(feed_data == NULL)) {
+		spin_unlock(&mpq_demux->feed_lock);
+		return 0;
+	}
+
+	ts_header = (const struct ts_packet_header *)buf;
+
+	stream_buffer =	feed_data->video_buffer;
+
+	pes_header = &feed_data->pes_header;
+
+	/* MPQ_DVB_DBG_PRINT("TS packet: %X %X %X %X %X%X %X %X %X\n",
+		ts_header->sync_byte,
+		ts_header->transport_error_indicator,
+		ts_header->payload_unit_start_indicator,
+		ts_header->transport_priority,
+		ts_header->pid_msb,
+		ts_header->pid_lsb,
+		ts_header->transport_scrambling_control,
+		ts_header->adaptation_field_control,
+		ts_header->continuity_counter); */
 
 	/* Make sure this TS packet has a payload and not scrambled */
 	if ((ts_header->sync_byte != 0x47) ||
@@ -869,46 +1769,15 @@
 
 				packet.raw_data_len = feed->peslen;
 
-				if ((!mpq_dmx_info.decoder_data_wrap) &&
-					((feed_data->pes_payload_address +
-					feed->peslen) >
-					((u32)stream_buffer->raw_data.data +
-					stream_buffer->raw_data.size)))
-					MPQ_DVB_ERR_PRINT(
-						"%s: "
-						"Video data has wrapped-around!\n",
-						__func__);
-
 				packet.user_data_len =
 					sizeof(struct
 						mpq_adapter_video_meta_data);
 
-				if ((pes_header->pts_dts_flag == 2) ||
-					(pes_header->pts_dts_flag == 3))
-					meta_data.pts_exist = 1;
-				else
-					meta_data.pts_exist = 0;
+				mpq_dmx_get_pts_dts(feed_data, pes_header,
+							&meta_data,
+							DMX_PES_PACKET);
 
-				meta_data.pts =
-					((u64)pes_header->pts_1 << 30) |
-					((u64)pes_header->pts_2 << 22) |
-					((u64)pes_header->pts_3 << 15) |
-					((u64)pes_header->pts_4 << 7) |
-					(u64)pes_header->pts_5;
-
-				if (pes_header->pts_dts_flag == 3)
-					meta_data.dts_exist = 1;
-				else
-					meta_data.dts_exist = 0;
-
-				meta_data.dts =
-					((u64)pes_header->dts_1 << 30) |
-					((u64)pes_header->dts_2 << 22) |
-					((u64)pes_header->dts_3 << 15) |
-					((u64)pes_header->dts_4 << 7) |
-					(u64)pes_header->dts_5;
-
-				meta_data.is_padding = 0;
+				meta_data.packet_type = DMX_PES_PACKET;
 
 				if (mpq_streambuffer_pkt_write(
 						stream_buffer,
@@ -919,6 +1788,8 @@
 						"Couldn't write packet. "
 						"Should never happen\n",
 						__func__);
+				else
+					feed_data->write_pts_dts = 0;
 			} else {
 				MPQ_DVB_ERR_PRINT(
 					"%s: received PUSI"
@@ -956,137 +1827,24 @@
 	if (ts_header->adaptation_field_control == 3)
 		ts_payload_offset += buf[ts_payload_offset] + 1;
 
+	/* 188 bytes: size of a TS packet including the TS packet header */
 	bytes_avail = 188 - ts_payload_offset;
 
-	/* Got the mandatory fields of the video PES header? */
-	if (feed_data->pes_header_offset < PES_MANDATORY_FIELDS_LEN) {
-		left_size =
-			PES_MANDATORY_FIELDS_LEN -
-			feed_data->pes_header_offset;
-
-		copy_len = (left_size > bytes_avail) ?
-					bytes_avail :
-					left_size;
-
-		memcpy((u8 *)pes_header+feed_data->pes_header_offset,
-				buf+ts_payload_offset,
-				copy_len);
-
-		feed_data->pes_header_offset += copy_len;
-
-		if (left_size > bytes_avail) {
-			spin_unlock(&mpq_demux->feed_lock);
-			return 0;
-		}
-
-		/* else - we have beginning of PES header */
-		bytes_avail -= left_size;
-		ts_payload_offset += left_size;
-
-		/* Make sure the PES packet is valid */
-		if (mpq_dmx_is_valid_video_pes(pes_header) < 0) {
-			/*
-			 * Since the new PES header parsing
-			 * failed, reset pusi_seen to drop all
-			 * data until next PUSI
-			 */
-			feed->pusi_seen = 0;
-			feed_data->pes_header_offset = 0;
-
-			MPQ_DVB_ERR_PRINT(
-				"%s: invalid packet\n",
-				__func__);
-
-			spin_unlock(&mpq_demux->feed_lock);
-			return 0;
-		}
-
-		feed_data->pes_header_left_bytes =
-			pes_header->pes_header_data_length;
+	/* Get the mandatory fields of the video PES header */
+	if (mpq_dmx_parse_mandatory_pes_header(feed, feed_data,
+						pes_header, buf,
+						&ts_payload_offset,
+						&bytes_avail)) {
+		spin_unlock(&mpq_demux->feed_lock);
+		return 0;
 	}
 
-	/* Remainning header bytes that need to be processed? */
-	if (feed_data->pes_header_left_bytes) {
-		/* Did we capture the PTS value (if exist)? */
-		if ((bytes_avail != 0) &&
-			(feed_data->pes_header_offset <
-			 (PES_MANDATORY_FIELDS_LEN+5)) &&
-			((pes_header->pts_dts_flag == 2) ||
-			 (pes_header->pts_dts_flag == 3))) {
-
-			/* 5 more bytes should be there */
-			left_size =
-				PES_MANDATORY_FIELDS_LEN +
-				5 -
-				feed_data->pes_header_offset;
-
-			copy_len = (left_size > bytes_avail) ?
-						bytes_avail :
-						left_size;
-
-			memcpy((u8 *)pes_header+
-				feed_data->pes_header_offset,
-				buf+ts_payload_offset,
-				copy_len);
-
-			feed_data->pes_header_offset += copy_len;
-			feed_data->pes_header_left_bytes -= copy_len;
-
-			if (left_size > bytes_avail) {
-				spin_unlock(&mpq_demux->feed_lock);
-				return 0;
-			}
-
-			/* else - we have the PTS */
-			bytes_avail -= copy_len;
-			ts_payload_offset += copy_len;
-		}
-
-		/* Did we capture the DTS value (if exist)? */
-		if ((bytes_avail != 0) &&
-			(feed_data->pes_header_offset <
-			 (PES_MANDATORY_FIELDS_LEN+10)) &&
-			(pes_header->pts_dts_flag == 3)) {
-
-			/* 5 more bytes should be there */
-			left_size =
-				PES_MANDATORY_FIELDS_LEN +
-				10 -
-				feed_data->pes_header_offset;
-
-			copy_len = (left_size > bytes_avail) ?
-						bytes_avail :
-						left_size;
-
-			memcpy((u8 *)pes_header+
-				feed_data->pes_header_offset,
-				buf+ts_payload_offset,
-				copy_len);
-
-			feed_data->pes_header_offset += copy_len;
-			feed_data->pes_header_left_bytes -= copy_len;
-
-			if (left_size > bytes_avail) {
-				spin_unlock(&mpq_demux->feed_lock);
-				return 0;
-			}
-
-			/* else - we have the DTS */
-			bytes_avail -= copy_len;
-			ts_payload_offset += copy_len;
-		}
-
-		/* Any more header bytes?! */
-		if (feed_data->pes_header_left_bytes >= bytes_avail) {
-			feed_data->pes_header_left_bytes -= bytes_avail;
-			spin_unlock(&mpq_demux->feed_lock);
-			return 0;
-		}
-
-		/* Got PES header, process payload */
-		bytes_avail -= feed_data->pes_header_left_bytes;
-		ts_payload_offset += feed_data->pes_header_left_bytes;
-		feed_data->pes_header_left_bytes = 0;
+	if (mpq_dmx_parse_remaining_pes_header(feed, feed_data,
+						pes_header, buf,
+						&ts_payload_offset,
+						&bytes_avail)) {
+		spin_unlock(&mpq_demux->feed_lock);
+		return 0;
 	}
 
 	/*
@@ -1098,56 +1856,6 @@
 		return 0;
 	}
 
-	if (feed->peslen == 0) { /* starting new PES */
-		/* gap till end of the buffer */
-		int gap =
-			stream_buffer->raw_data.size -
-			stream_buffer->raw_data.pwrite;
-
-		if ((!mpq_dmx_info.decoder_data_wrap) &&
-			(gap < VIDEO_WRAP_AROUND_THRESHOLD)) {
-			struct mpq_streambuffer_packet_header packet;
-			struct mpq_adapter_video_meta_data meta_data;
-
-			/*
-			 * Do not start writting new PES from
-			 * this location to prevent possible
-			 * wrap-around of the payload, fill padding instead.
-			 */
-
-			/* push a packet with padding indication */
-			meta_data.is_padding = 1;
-
-			packet.raw_data_len = gap;
-			packet.user_data_len =
-				sizeof(struct mpq_adapter_video_meta_data);
-			packet.raw_data_addr =
-				feed_data->pes_payload_address;
-
-			if (mpq_streambuffer_data_write_deposit(
-						stream_buffer,
-						gap) < 0) {
-				MPQ_DVB_ERR_PRINT(
-					"%s: mpq_streambuffer_data_write_deposit "
-					"failed!\n",
-					__func__);
-			} else if (mpq_streambuffer_pkt_write(
-							stream_buffer,
-							&packet,
-							(u8 *)&meta_data) < 0) {
-				MPQ_DVB_ERR_PRINT(
-					"%s: "
-					"Couldn't write packet. "
-					"Should never happen\n",
-					__func__);
-			} else {
-				feed_data->pes_payload_address =
-					(u32)stream_buffer->raw_data.data +
-					stream_buffer->raw_data.pwrite;
-			}
-		}
-	}
-
 	if (mpq_streambuffer_data_write(
 				stream_buffer,
 				buf+ts_payload_offset,
@@ -1160,8 +1868,17 @@
 
 	return 0;
 }
-EXPORT_SYMBOL(mpq_dmx_process_video_packet);
 
+int mpq_dmx_process_video_packet(
+			struct dvb_demux_feed *feed,
+			const u8 *buf)
+{
+	if (mpq_dmx_info.decoder_framing)
+		return mpq_dmx_process_video_packet_no_framing(feed, buf);
+	else
+		return mpq_dmx_process_video_packet_framing(feed, buf);
+}
+EXPORT_SYMBOL(mpq_dmx_process_video_packet);
 
 int mpq_dmx_process_pcr_packet(
 			struct dvb_demux_feed *feed,
@@ -1217,9 +1934,9 @@
 		(((u64)adaptation_field->program_clock_reference_ext_1) << 8) +
 		adaptation_field->program_clock_reference_ext_2;
 
-	stc = buf[189] << 16;
-	stc += buf[190] << 8;
-	stc += buf[191];
+	stc = buf[190] << 16;
+	stc += buf[189] << 8;
+	stc += buf[188];
 	stc *= 256; /* convert from 105.47 KHZ to 27MHz */
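+	/*
+	 * The STC appended after the 188-byte TS packet runs at
+	 * 27 MHz / 256 (about 105.47 kHz), so multiplying by 256
+	 * rescales it to the same 27 MHz domain as the PCR above.
+	 */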
 
 	output[0] = adaptation_field->discontinuity_indicator;
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.h b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.h
index d90bd89..0275b14 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.h
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.h
@@ -23,16 +23,16 @@
 #include "mpq_adapter.h"
 
 
-/**
- * Total number of filters per demux,
- * including section and PES feeds
- */
-#define MPQ_DMX_MAX_NUM_OF_FILTERS			64
+/* Max number of open() requests that can be made on a demux device */
+#define MPQ_MAX_DMX_FILES				128
+
 
 /**
  * TSIF alias name length
  */
-#define TSIF_NAME_LENGTH					10
+#define TSIF_NAME_LENGTH				10
+
+#define MPQ_MAX_FOUND_PATTERNS				5
 
 /**
  * struct mpq_demux - mpq demux information
@@ -252,6 +252,17 @@
 } __packed;
 
 /*
+ * mpq_framing_prefix_size_masks - possible prefix sizes.
+ *
+ * @size_mask: a bit mask (per pattern) of possible prefix sizes to use
+ * when searching for a pattern that started in the last buffer.
+ * Updated in mpq_dmx_framing_pattern_search for use in the next lookup
+ */
+struct mpq_framing_prefix_size_masks {
+	u32 size_mask[MPQ_MAX_FOUND_PATTERNS];
+};
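+
+/*
+ * For example, if the tail of the previous buffer could have been either
+ * the first byte or the first three bytes of pattern j, then
+ * size_mask[j] would be (1 << 0) | (1 << 2) = 0x5, and the next search
+ * will try prefix sizes of 1 and 3 bytes for that pattern.
+ */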
+
+/*
  * mpq_video_feed_info - private data used for video feed.
  *
  * @plugin_data: Underlying plugin's own private data.
@@ -270,6 +281,30 @@
  * @payload_buff_handle: ION handle for the allocated payload buffer
  * @stream_interface: The ID of the video stream interface registered
  * with this stream buffer.
+ * @patterns: pointer to the framing patterns to look for.
+ * @patterns_num: number of framing patterns.
+ * @last_framing_match_address: Used for saving the raw data address of
+ * the previous pattern match found in this video feed.
+ * @last_framing_match_type: Used for saving the type of
+ * the previous pattern match found in this video feed.
+ * @found_sequence_header_pattern: Flag used to note that an MPEG-2
+ * Sequence Header, H.264 SPS or VC-1 Sequence Header pattern
+ * (whichever is relevant according to the video standard) had already
+ * been found.
+ * @prefix_size: a bit mask representing the size(s) of possible prefixes
+ * to the pattern, already found in the previous buffer. If bit 0 is set,
+ * a prefix of size 1 was found. If bit 1 is set, a prefix of size 2 was
+ * found, etc. This supports a prefix size of up to 32, which is more
+ * than we need. The search function updates prefix_size as needed
+ * for the next buffer search.
+ * @first_pattern_offset: used to save the offset of the first pattern written
+ * to the stream buffer.
+ * @first_prefix_size: used to save the prefix size used to find the first
+ * pattern written to the stream buffer.
+ * @write_pts_dts: Flag used to decide whether to write PTS/DTS information
+ * (if it is available in the PES header) in the meta-data passed
+ * to the video decoder. PTS/DTS information is written in the first
+ * packet after it is available.
  */
 struct mpq_video_feed_info {
 	void *plugin_data;
@@ -281,6 +316,15 @@
 	int fullness_wait_cancel;
 	struct ion_handle *payload_buff_handle;
 	enum mpq_adapter_stream_if stream_interface;
+	const struct mpq_framing_pattern_lookup_params *patterns;
+	int patterns_num;
+	u32 last_framing_match_address;
+	enum dmx_framing_pattern_type last_framing_match_type;
+	int found_sequence_header_pattern;
+	struct mpq_framing_prefix_size_masks prefix_size;
+	u32 first_pattern_offset;
+	u32 first_prefix_size;
+	int write_pts_dts;
 };
 
 /**
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
index 5894a65..bfd6b96 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
@@ -18,12 +18,18 @@
 #include "mpq_dvb_debug.h"
 #include "mpq_dmx_plugin_common.h"
 
+
 /* TSIF HW configuration: */
 #define TSIF_COUNT				2
+
+/* Max number of section filters */
+#define DMX_TSIF_MAX_SECTION_FILTER_NUM	64
+
 /* When TSIF driver notifies demux that new packets are received */
 #define DMX_TSIF_PACKETS_IN_CHUNK_DEF		16
 #define DMX_TSIF_CHUNKS_IN_BUF			8
 #define DMX_TSIF_TIME_LIMIT			10000
+
 /* TSIF_DRIVER_MODE: 3 means manual control from debugfs. use 1 normally. */
 #define DMX_TSIF_DRIVER_MODE_DEF		1
 
@@ -542,6 +548,42 @@
 	return 0;
 }
 
+/**
+ * Returns demux capabilities of TSIF plugin
+ *
+ * @demux: demux device
+ * @caps: Returned capabilities
+ *
+ * Return     error code
+ */
+static int mpq_tsif_dmx_get_caps(struct dmx_demux *demux,
+				struct dmx_caps *caps)
+{
+	struct dvb_demux *dvb_demux = (struct dvb_demux *)demux->priv;
+
+	if ((dvb_demux == NULL) || (caps == NULL)) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid parameters\n",
+			__func__);
+
+		return -EINVAL;
+	}
+
+	caps->caps = DMX_CAP_PULL_MODE | DMX_CAP_VIDEO_DECODER_DATA;
+	caps->num_decoders = MPQ_ADAPTER_MAX_NUM_OF_INTERFACES;
+	caps->num_demux_devices = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+	caps->num_pid_filters = dvb_demux->feednum;
+	caps->num_section_filters = dvb_demux->filternum;
+	caps->num_section_filters_per_pid = dvb_demux->filternum;
+	caps->section_filter_length = DMX_FILTER_SIZE;
+	caps->num_demod_inputs = TSIF_COUNT;
+	caps->num_memory_inputs = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+	caps->max_bitrate = 144;
+	caps->demod_input_max_bitrate = 72;
+	caps->memory_input_max_bitrate = 72;
+
+	return 0;
+}
 
 /**
  * Initialize a single demux device.
@@ -570,8 +612,8 @@
 
 	/* Set dvb-demux "virtual" function pointers */
 	mpq_demux->demux.priv = (void *)mpq_demux;
-	mpq_demux->demux.filternum = MPQ_DMX_MAX_NUM_OF_FILTERS;
-	mpq_demux->demux.feednum = MPQ_DMX_MAX_NUM_OF_FILTERS;
+	mpq_demux->demux.filternum = DMX_TSIF_MAX_SECTION_FILTER_NUM;
+	mpq_demux->demux.feednum = MPQ_MAX_DMX_FILES;
 	mpq_demux->demux.start_feed = mpq_tsif_dmx_start_filtering;
 	mpq_demux->demux.stop_feed = mpq_tsif_dmx_stop_filtering;
 	mpq_demux->demux.write_to_decoder = mpq_tsif_dmx_write_to_decoder;
@@ -593,14 +635,16 @@
 	}
 
 	/* Now initailize the dmx-dev object */
-	mpq_demux->dmxdev.filternum = MPQ_DMX_MAX_NUM_OF_FILTERS;
+	mpq_demux->dmxdev.filternum = MPQ_MAX_DMX_FILES;
 	mpq_demux->dmxdev.demux = &mpq_demux->demux.dmx;
 	mpq_demux->dmxdev.capabilities =
 		DMXDEV_CAP_DUPLEX |
 		DMXDEV_CAP_PULL_MODE |
-		DMXDEV_CAP_PCR_EXTRACTION;
+		DMXDEV_CAP_PCR_EXTRACTION |
+		DMXDEV_CAP_INDEXING;
 
 	mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
+	mpq_demux->dmxdev.demux->get_caps = mpq_tsif_dmx_get_caps;
 
 	result = dvb_dmxdev_init(&mpq_demux->dmxdev, mpq_adapter);
 	if (result < 0) {
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
index 406ae52..a552fdf 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
@@ -18,9 +18,12 @@
 #include "mpq_dmx_plugin_common.h"
 
 
-#define TSIF_COUNT						2
+#define TSIF_COUNT					2
 
-#define TSPP_FILTERS_COUNT				16
+#define TSPP_MAX_PID_FILTER_NUM		16
+
+/* Max number of section filters */
+#define TSPP_MAX_SECTION_FILTER_NUM		64
 
 /* For each TSIF we allocate two pipes, one for PES and one for sections */
 #define TSPP_PES_CHANNEL				0
@@ -44,8 +47,12 @@
 
 #define TSPP_RAW_TTS_SIZE				192
 
-/* Size of single descriptor */
-#define TSPP_BUFFER_SIZE				(TSPP_RAW_TTS_SIZE * 35)
+/* Size of a single descriptor.
+ * Assuming a 20 Mbit/sec stream, with 200 packets
+ * per descriptor there would be about 68 descriptors per
+ * second, meaning about 68 interrupts per second.
+ */
+#define TSPP_BUFFER_SIZE			(TSPP_RAW_TTS_SIZE * 200)
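+
+/*
+ * Rough arithmetic behind the estimate above: 20 Mbit/s is about
+ * 2.5 MB/s, i.e. roughly 13000 TS packets of TSPP_RAW_TTS_SIZE (192)
+ * bytes per second; at 200 packets per descriptor that is on the order
+ * of 65-70 descriptor interrupts per second.
+ */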
 
 /* Number of descriptors, total size: TSPP_BUFFER_SIZE*TSPP_BUFFER_COUNT */
 #define TSPP_BUFFER_COUNT				(16)
@@ -100,7 +107,7 @@
 		struct {
 			int pid;
 			int ref_count;
-		} filters[TSPP_FILTERS_COUNT];
+		} filters[TSPP_MAX_PID_FILTER_NUM];
 
 		/* workqueue that processes TS packets from specific TSIF */
 		struct workqueue_struct *workqueue;
@@ -136,11 +143,11 @@
 	int i;
 
 	if (TSPP_IS_PES_CHANNEL(channel_id)) {
-		for (i = 0; i < TSPP_FILTERS_COUNT; i++)
+		for (i = 0; i < TSPP_MAX_PID_FILTER_NUM; i++)
 			if (mpq_dmx_tspp_info.tsif[tsif].filters[i].pid == -1)
 				return i;
 	} else {
-		for (i = TSPP_FILTERS_COUNT-1; i >= 0; i--)
+		for (i = TSPP_MAX_PID_FILTER_NUM-1; i >= 0; i--)
 			if (mpq_dmx_tspp_info.tsif[tsif].filters[i].pid == -1)
 				return i;
 	}
@@ -160,7 +167,7 @@
 {
 	int i;
 
-	for (i = 0; i < TSPP_FILTERS_COUNT; i++)
+	for (i = 0; i < TSPP_MAX_PID_FILTER_NUM; i++)
 		if (mpq_dmx_tspp_info.tsif[tsif].filters[i].pid == pid)
 			return i;
 
@@ -640,6 +647,7 @@
 					const u8 *buf,
 					size_t len)
 {
+
 	/*
 	 * It is assumed that this function is called once for each
 	 * TS packet of the relevant feed.
@@ -658,6 +666,43 @@
 	return 0;
 }
 
+/**
+ * Returns demux capabilities of TSPPv1 plugin
+ *
+ * @demux: demux device
+ * @caps: Returned capabilities
+ *
+ * Return     error code
+ */
+static int mpq_tspp_dmx_get_caps(struct dmx_demux *demux,
+				struct dmx_caps *caps)
+{
+	struct dvb_demux *dvb_demux = (struct dvb_demux *)demux->priv;
+
+	if ((dvb_demux == NULL) || (caps == NULL)) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid parameters\n",
+			__func__);
+
+		return -EINVAL;
+	}
+
+	caps->caps = DMX_CAP_PULL_MODE | DMX_CAP_VIDEO_DECODER_DATA;
+	caps->num_decoders = MPQ_ADAPTER_MAX_NUM_OF_INTERFACES;
+	caps->num_demux_devices = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+	caps->num_pid_filters = TSPP_MAX_PID_FILTER_NUM;
+	caps->num_section_filters = dvb_demux->filternum;
+	caps->num_section_filters_per_pid = dvb_demux->filternum;
+	caps->section_filter_length = DMX_FILTER_SIZE;
+	caps->num_demod_inputs = TSIF_COUNT;
+	caps->num_memory_inputs = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+	caps->max_bitrate = 144;
+	caps->demod_input_max_bitrate = 72;
+	caps->memory_input_max_bitrate = 72;
+
+	return 0;
+}
+
 static int mpq_tspp_dmx_init(
 			struct dvb_adapter *mpq_adapter,
 			struct mpq_demux *mpq_demux)
@@ -677,8 +722,8 @@
 
 	/* Set dvb-demux "virtual" function pointers */
 	mpq_demux->demux.priv = (void *)mpq_demux;
-	mpq_demux->demux.filternum = MPQ_DMX_MAX_NUM_OF_FILTERS;
-	mpq_demux->demux.feednum = MPQ_DMX_MAX_NUM_OF_FILTERS;
+	mpq_demux->demux.filternum = TSPP_MAX_SECTION_FILTER_NUM;
+	mpq_demux->demux.feednum = MPQ_MAX_DMX_FILES;
 	mpq_demux->demux.start_feed = mpq_tspp_dmx_start_filtering;
 	mpq_demux->demux.stop_feed = mpq_tspp_dmx_stop_filtering;
 	mpq_demux->demux.write_to_decoder = mpq_tspp_dmx_write_to_decoder;
@@ -700,14 +745,16 @@
 	}
 
 	/* Now initailize the dmx-dev object */
-	mpq_demux->dmxdev.filternum = MPQ_DMX_MAX_NUM_OF_FILTERS;
+	mpq_demux->dmxdev.filternum = MPQ_MAX_DMX_FILES;
 	mpq_demux->dmxdev.demux = &mpq_demux->demux.dmx;
 	mpq_demux->dmxdev.capabilities =
 		DMXDEV_CAP_DUPLEX |
 		DMXDEV_CAP_PULL_MODE |
-		DMXDEV_CAP_PCR_EXTRACTION;
+		DMXDEV_CAP_PCR_EXTRACTION |
+		DMXDEV_CAP_INDEXING;
 
 	mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
+	mpq_demux->dmxdev.demux->get_caps = mpq_tspp_dmx_get_caps;
 
 	result = dvb_dmxdev_init(&mpq_demux->dmxdev, mpq_adapter);
 	if (result < 0) {
@@ -753,7 +800,7 @@
 		INIT_WORK(&mpq_dmx_tspp_info.tsif[i].section_work.work,
 				  mpq_dmx_tspp_work);
 
-		for (j = 0; j < TSPP_FILTERS_COUNT; j++) {
+		for (j = 0; j < TSPP_MAX_PID_FILTER_NUM; j++) {
 			mpq_dmx_tspp_info.tsif[i].filters[j].pid = -1;
 			mpq_dmx_tspp_info.tsif[i].filters[j].ref_count = 0;
 		}
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v2.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v2.c
index d3c2c50..6c484a0 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v2.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v2.c
@@ -16,6 +16,17 @@
 #include "mpq_dmx_plugin_common.h"
 
 
+#define TSIF_COUNT				2
+
+#define BAM_INPUT_COUNT			4
+
+/* Max number of PID filters */
+#define TSPP_MAX_PID_FILTER_NUM		128
+
+/* Max number of section filters */
+#define TSPP_MAX_SECTION_FILTER_NUM		64
+
+
 static int mpq_tspp_dmx_start_filtering(struct dvb_demux_feed *feed)
 {
 	MPQ_DVB_DBG_PRINT(
@@ -41,6 +52,44 @@
 }
 
 /**
+ * Returns demux capabilities of TSPPv2 plugin
+ *
+ * @demux: demux device
+ * @caps: Returned capabilities
+ *
+ * Return     error code
+ */
+static int mpq_tspp_dmx_get_caps(struct dmx_demux *demux,
+				struct dmx_caps *caps)
+{
+	struct dvb_demux *dvb_demux = (struct dvb_demux *)demux->priv;
+
+	if ((dvb_demux == NULL) || (caps == NULL)) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid parameters\n",
+			__func__);
+
+		return -EINVAL;
+	}
+
+	caps->caps = DMX_CAP_PULL_MODE | DMX_CAP_VIDEO_INDEXING |
+		DMX_CAP_VIDEO_DECODER_DATA;
+	caps->num_decoders = MPQ_ADAPTER_MAX_NUM_OF_INTERFACES;
+	caps->num_demux_devices = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+	caps->num_pid_filters = TSPP_MAX_PID_FILTER_NUM;
+	caps->num_section_filters = dvb_demux->filternum;
+	caps->num_section_filters_per_pid = dvb_demux->filternum;
+	caps->section_filter_length = DMX_FILTER_SIZE;
+	caps->num_demod_inputs = TSIF_COUNT;
+	caps->num_memory_inputs = BAM_INPUT_COUNT;
+	caps->max_bitrate = 320;
+	caps->demod_input_max_bitrate = 96;
+	caps->memory_input_max_bitrate = 80;
+
+	return 0;
+}
+
+/**
  * Initialize a single demux device.
  *
  * @mpq_adapter: MPQ DVB adapter
@@ -67,8 +116,8 @@
 
 	/* Set dvb-demux "virtual" function pointers */
 	mpq_demux->demux.priv = (void *)mpq_demux;
-	mpq_demux->demux.filternum = MPQ_DMX_MAX_NUM_OF_FILTERS;
-	mpq_demux->demux.feednum = MPQ_DMX_MAX_NUM_OF_FILTERS;
+	mpq_demux->demux.filternum = TSPP_MAX_SECTION_FILTER_NUM;
+	mpq_demux->demux.feednum = MPQ_MAX_DMX_FILES;
 	mpq_demux->demux.start_feed = mpq_tspp_dmx_start_filtering;
 	mpq_demux->demux.stop_feed = mpq_tspp_dmx_stop_filtering;
 	mpq_demux->demux.write_to_decoder = NULL;
@@ -84,14 +133,16 @@
 	}
 
 	/* Now initailize the dmx-dev object */
-	mpq_demux->dmxdev.filternum = MPQ_DMX_MAX_NUM_OF_FILTERS;
+	mpq_demux->dmxdev.filternum = MPQ_MAX_DMX_FILES;
 	mpq_demux->dmxdev.demux = &mpq_demux->demux.dmx;
 	mpq_demux->dmxdev.capabilities =
 		DMXDEV_CAP_DUPLEX |
 		DMXDEV_CAP_PULL_MODE |
-		DMXDEV_CAP_PCR_EXTRACTION;
+		DMXDEV_CAP_PCR_EXTRACTION |
+		DMXDEV_CAP_INDEXING;
 
 	mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
+	mpq_demux->dmxdev.demux->get_caps = mpq_tspp_dmx_get_caps;
 
 	result = dvb_dmxdev_init(&mpq_demux->dmxdev, mpq_adapter);
 	if (result < 0) {
diff --git a/drivers/media/dvb/mpq/include/mpq_adapter.h b/drivers/media/dvb/mpq/include/mpq_adapter.h
index c720f91..c9b2441 100644
--- a/drivers/media/dvb/mpq/include/mpq_adapter.h
+++ b/drivers/media/dvb/mpq/include/mpq_adapter.h
@@ -37,15 +37,38 @@
 };
 
 
-/** The meta-data used for video interface */
-struct mpq_adapter_video_meta_data {
-	/**
-	 * Indication whether this packet is just a padding packet.
-	 * In this case packet should be just disposed along
-	 * with the padding in the raw-data buffer.
-	 */
-	int is_padding;
+enum dmx_framing_pattern_type {
+	/* MPEG-2 */
+	DMX_FRM_MPEG2_SEQUENCE_HEADER,
+	DMX_FRM_MPEG2_GOP_HEADER,
+	DMX_FRM_MPEG2_I_PIC,
+	DMX_FRM_MPEG2_P_PIC,
+	DMX_FRM_MPEG2_B_PIC,
+	/* H.264 */
+	DMX_FRM_H264_SPS,
+	DMX_FRM_H264_PPS,
+	/* H.264 First Coded slice of an IDR Picture */
+	DMX_FRM_H264_IDR_PIC,
+	/* H.264 First Coded slice of a non-IDR Picture */
+	DMX_FRM_H264_NON_IDR_PIC,
+	/* VC-1 Sequence Header*/
+	DMX_FRM_VC1_SEQUENCE_HEADER,
+	/* VC-1 Entry Point Header (Advanced Profile only) */
+	DMX_FRM_VC1_ENTRY_POINT_HEADER,
+	/* VC-1 Frame Start Code */
+	DMX_FRM_VC1_FRAME_START_CODE,
+	/* Unknown or invalid framing information */
+	DMX_FRM_UNKNOWN
+};
 
+enum dmx_packet_type {
+	DMX_PADDING_PACKET,
+	DMX_PES_PACKET,
+	DMX_FRAMING_INFO_PACKET,
+	DMX_EOS_PACKET
+};
+
+struct dmx_pts_dts_info {
 	/** Indication whether PTS exist */
 	int pts_exist;
 
@@ -57,6 +80,30 @@
 
 	/** DTS value associated with the PES data if any */
 	u64 dts;
+};
+
+struct dmx_framing_packet_info {
+	/** framing pattern type */
+	enum dmx_framing_pattern_type pattern_type;
+	/** PTS/DTS information */
+	struct dmx_pts_dts_info pts_dts_info;
+};
+
+struct dmx_pes_packet_info {
+	/** PTS/DTS information */
+	struct dmx_pts_dts_info pts_dts_info;
+};
+
+/** The meta-data used for video interface */
+struct mpq_adapter_video_meta_data {
+	/** meta-data packet type */
+	enum dmx_packet_type packet_type;
+
+	/** packet-type specific information */
+	union {
+		struct dmx_framing_packet_info framing;
+		struct dmx_pes_packet_info pes;
+	} info;
 } __packed;
 
 
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
index fd72638..3f0f8de 100644
--- a/drivers/media/radio/radio-iris.c
+++ b/drivers/media/radio/radio-iris.c
@@ -1590,8 +1590,8 @@
 
 	if (status)
 		return;
-
-	iris_q_event(radio, IRIS_EVT_RADIO_DISABLED);
+	if (radio->mode != FM_CALIB)
+		iris_q_event(radio, IRIS_EVT_RADIO_DISABLED);
 
 	radio_hci_req_complete(hdev, status);
 }
@@ -1629,8 +1629,8 @@
 
 	if (rsp->status)
 		return;
-
-	iris_q_event(radio, IRIS_EVT_RADIO_READY);
+	if (radio->mode != FM_CALIB)
+		iris_q_event(radio, IRIS_EVT_RADIO_READY);
 
 	radio_hci_req_complete(hdev, rsp->status);
 }
@@ -2455,22 +2455,26 @@
 	int retval = 0x00;
 
 	cal_mode = PROCS_CALIB_MODE;
+	radio->mode = FM_CALIB;
 	retval = hci_cmd(HCI_FM_ENABLE_RECV_CMD,
 			radio->fm_hdev);
 	if (retval < 0) {
 		FMDERR("Enable failed before calibration %x", retval);
+		radio->mode = FM_OFF;
 		return retval;
 	}
 	retval = radio_hci_request(radio->fm_hdev, hci_fm_do_cal_req,
 		(unsigned long)cal_mode, RADIO_HCI_TIMEOUT);
 	if (retval < 0) {
 		FMDERR("Do Process calibration failed %x", retval);
+		radio->mode = FM_RECV;
 		return retval;
 	}
 	retval = hci_cmd(HCI_FM_DISABLE_RECV_CMD,
 			radio->fm_hdev);
 	if (retval < 0)
 		FMDERR("Disable Failed after calibration %d", retval);
+	radio->mode = FM_OFF;
 	return retval;
 }
 static int iris_vidioc_g_ctrl(struct file *file, void *priv,
@@ -2880,12 +2884,12 @@
 		case FM_TRANS:
 			retval = hci_cmd(HCI_FM_ENABLE_TRANS_CMD,
 							 radio->fm_hdev);
-			radio->mode = FM_TRANS;
 			if (retval < 0) {
 				FMDERR("Error while enabling TRANS FM"
 							" %d\n", retval);
 				return retval;
 			}
+			radio->mode = FM_TRANS;
 			retval = hci_cmd(HCI_FM_GET_TX_CONFIG, radio->fm_hdev);
 			if (retval < 0)
 				FMDERR("get frequency failed %d\n", retval);
@@ -2895,17 +2899,23 @@
 			case FM_RECV:
 				retval = hci_cmd(HCI_FM_DISABLE_RECV_CMD,
 						radio->fm_hdev);
-				if (retval < 0)
+				if (retval < 0) {
 					FMDERR("Err on disable recv FM"
 						   " %d\n", retval);
+					return retval;
+				}
+				radio->mode = FM_OFF;
 				break;
 			case FM_TRANS:
 				retval = hci_cmd(HCI_FM_DISABLE_TRANS_CMD,
 						radio->fm_hdev);
 
-				if (retval < 0)
+				if (retval < 0) {
 					FMDERR("Err disabling trans FM"
 						" %d\n", retval);
+					return retval;
+				}
+				radio->mode = FM_OFF;
 				break;
 			default:
 				retval = -EINVAL;
diff --git a/drivers/media/video/msm/gemini/msm_gemini_hw.h b/drivers/media/video/msm/gemini/msm_gemini_hw.h
index e1702a5..233f082 100644
--- a/drivers/media/video/msm/gemini/msm_gemini_hw.h
+++ b/drivers/media/video/msm/gemini/msm_gemini_hw.h
@@ -15,8 +15,8 @@
 
 #include <media/msm_gemini.h>
 #include "msm_gemini_hw_reg.h"
-#include <mach/msm_subsystem_map.h>
 #include <linux/ion.h>
+#include <mach/iommu_domains.h>
 
 struct msm_gemini_hw_buf {
 	struct msm_gemini_buf vbuf;
diff --git a/drivers/media/video/msm/gemini/msm_gemini_platform.c b/drivers/media/video/msm/gemini/msm_gemini_platform.c
index 1ebc2f1..06b2aac 100644
--- a/drivers/media/video/msm/gemini/msm_gemini_platform.c
+++ b/drivers/media/video/msm/gemini/msm_gemini_platform.c
@@ -17,7 +17,7 @@
 #include <linux/io.h>
 #include <linux/android_pmem.h>
 #include <mach/camera.h>
-#include <mach/msm_subsystem_map.h>
+#include <mach/iommu_domains.h>
 
 #include "msm_gemini_platform.h"
 #include "msm_gemini_sync.h"
diff --git a/drivers/media/video/msm/msm.c b/drivers/media/video/msm/msm.c
index 034cbc5..f5f9c0b 100644
--- a/drivers/media/video/msm/msm.c
+++ b/drivers/media/video/msm/msm.c
@@ -1768,7 +1768,6 @@
 	pcam->use_count++;
 	D("%s use_count %d\n", __func__, pcam->use_count);
 	if (pcam->use_count == 1) {
-		struct msm_cam_server_queue *queue;
 		int ges_evt = MSM_V4L2_GES_CAM_OPEN;
 		pcam->server_queue_idx = server_q_idx;
 		queue = &g_server_dev.server_queue[server_q_idx];
@@ -1863,14 +1862,15 @@
 	}
 msm_cam_server_open_session_failed:
 	if (pcam->use_count == 1) {
-		queue->queue_active = 0;
-		msm_drain_eventq(&queue->eventData_q);
-		kfree(queue->ctrl_data);
-		queue->ctrl_data = NULL;
-		msm_queue_drain(&queue->ctrl_q, list_control);
-		msm_drain_eventq(&queue->eventData_q);
-		queue = NULL;
-
+		if (queue != NULL) {
+			queue->queue_active = 0;
+			msm_drain_eventq(&queue->eventData_q);
+			kfree(queue->ctrl_data);
+			queue->ctrl_data = NULL;
+			msm_queue_drain(&queue->ctrl_q, list_control);
+			msm_drain_eventq(&queue->eventData_q);
+			queue = NULL;
+		}
 		pcam->dev_inst[i] = NULL;
 		pcam->use_count = 0;
 	}
diff --git a/drivers/media/video/msm/msm_vfe7x27a_v4l2.c b/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
index f9414a5..84943ed 100644
--- a/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
+++ b/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
@@ -396,7 +396,7 @@
 		void (*getevent)(void *ptr, size_t len))
 {
 	uint32_t evt_buf[3];
-	void *data;
+	void *data = NULL;
 	struct buf_info *outch = NULL;
 	uint32_t y_phy, cbcr_phy;
 	struct table_cmd *table_pending = NULL;
@@ -432,6 +432,7 @@
 				vfe_7x_ops(driver_data, MSG_OUTPUT_T,
 						len, getevent);
 			vfe2x_send_isp_msg(vfe2x_ctrl, MSG_ID_SNAPSHOT_DONE);
+			kfree(data);
 			return;
 		case MSG_OUTPUT_S:
 			outch = &vfe2x_ctrl->snap;
@@ -489,8 +490,10 @@
 					len = sizeof(fack);
 					msm_adsp_write(vfe_mod, QDSP_CMDQUEUE,
 						cmd_data, len);
-					kfree(data);
-					return;
+					if (!vfe2x_ctrl->zsl_mode) {
+						kfree(data);
+						return;
+					}
 				}
 			}
 			y_phy = ((struct vfe_endframe *)data)->y_address;
@@ -559,8 +562,10 @@
 					len = sizeof(fack);
 					msm_adsp_write(vfe_mod, QDSP_CMDQUEUE,
 						cmd_data, len);
-					kfree(data);
-					return;
+					if (!vfe2x_ctrl->zsl_mode) {
+						kfree(data);
+						return;
+					}
 				}
 			}
 			y_phy = ((struct vfe_endframe *)data)->y_address;
@@ -712,24 +717,30 @@
 				vfe2x_ctrl->update_pending = 0;
 			}
 			spin_unlock_irqrestore(&vfe2x_ctrl->table_lock, flags);
+			kfree(data);
 			return;
 		}
 		table_pending = list_first_entry(&vfe2x_ctrl->table_q,
 					struct table_cmd, list);
 		if (!table_pending) {
 			spin_unlock_irqrestore(&vfe2x_ctrl->table_lock, flags);
+			kfree(data);
 			return;
 		}
 		msm_adsp_write(vfe_mod, table_pending->queue,
 				table_pending->cmd, table_pending->size);
 		list_del(&table_pending->list);
 		kfree(table_pending->cmd);
+		kfree(table_pending);
 		vfe2x_ctrl->tableack_pending = 1;
 		spin_unlock_irqrestore(&vfe2x_ctrl->table_lock, flags);
 	} else if (!vfe2x_ctrl->tableack_pending) {
-		if (!list_empty(&vfe2x_ctrl->table_q))
+		if (!list_empty(&vfe2x_ctrl->table_q)) {
+			kfree(data);
 			return;
+		}
 	}
+	kfree(data);
 }
 
 static struct msm_adsp_ops vfe_7x_sync = {
@@ -1640,6 +1651,7 @@
 config_failure:
 	kfree(scfg);
 	kfree(axio);
+	kfree(sfcfg);
 	return rc;
 }
 
diff --git a/drivers/media/video/msm/sensors/msm_sensor.c b/drivers/media/video/msm/sensors/msm_sensor.c
index d163427..5b9eb31 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.c
+++ b/drivers/media/video/msm/sensors/msm_sensor.c
@@ -143,20 +143,9 @@
 int32_t msm_sensor_set_fps(struct msm_sensor_ctrl_t *s_ctrl,
 						struct fps_cfg *fps)
 {
-	uint16_t total_lines_per_frame;
-	int32_t rc = 0;
 	s_ctrl->fps_divider = fps->fps_div;
 
-	if (s_ctrl->curr_res != MSM_SENSOR_INVALID_RES) {
-		total_lines_per_frame = (uint16_t)
-			((s_ctrl->curr_frame_length_lines) *
-			s_ctrl->fps_divider/Q10);
-
-		rc = msm_camera_i2c_write(s_ctrl->sensor_i2c_client,
-			s_ctrl->sensor_output_reg_addr->frame_length_lines,
-			total_lines_per_frame, MSM_CAMERA_I2C_WORD_DATA);
-	}
-	return rc;
+	return 0;
 }
 
 int32_t msm_sensor_write_exp_gain1(struct msm_sensor_ctrl_t *s_ctrl,
diff --git a/drivers/media/video/msm/sensors/msm_sensor.h b/drivers/media/video/msm/sensors/msm_sensor.h
index 0e51409..556f036 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.h
+++ b/drivers/media/video/msm/sensors/msm_sensor.h
@@ -234,12 +234,6 @@
 int32_t msm_sensor_setting1(struct msm_sensor_ctrl_t *s_ctrl,
 			int update_type, int res);
 
-int32_t msm_sensor_setting2(struct msm_sensor_ctrl_t *s_ctrl,
-			int update_type, int res);
-
-int32_t msm_sensor_setting3(struct msm_sensor_ctrl_t *s_ctrl,
-			int update_type, int res);
-
 int msm_sensor_enable_debugfs(struct msm_sensor_ctrl_t *s_ctrl);
 
 long msm_sensor_subdev_ioctl(struct v4l2_subdev *sd,
diff --git a/drivers/media/video/msm/sensors/ov2720.c b/drivers/media/video/msm/sensors/ov2720.c
index 7531a26..40867fb 100644
--- a/drivers/media/video/msm/sensors/ov2720.c
+++ b/drivers/media/video/msm/sensors/ov2720.c
@@ -330,7 +330,7 @@
 	{0x4005, 0x08},
 	{0x404f, 0x84},
 	{0x4051, 0x00},
-	{0x5000, 0xff},
+	{0x5000, 0xcf},
 	{0x3a18, 0x00},
 	{0x3a19, 0x80},
 	{0x3503, 0x07},
@@ -427,7 +427,7 @@
 	{0x4005, 0x08},
 	{0x404f, 0x84},
 	{0x4051, 0x00},
-	{0x5000, 0xff},
+	{0x5000, 0xcf},
 	{0x3a18, 0x00},
 	{0x3a19, 0x80},
 	{0x3503, 0x07},
@@ -436,8 +436,8 @@
 	{0x5184, 0xb0},
 	{0x5185, 0xb0},
 	{0x370c, 0x0c},
-	{0x3035, 0x20},
-	{0x3036, 0x14},
+	{0x3035, 0x30},
+	{0x3036, 0x1e},
 	{0x3037, 0x21},
 	{0x303e, 0x19},
 	{0x3038, 0x06},
@@ -524,7 +524,7 @@
 	{0x4005, 0x08},
 	{0x404f, 0x84},
 	{0x4051, 0x00},
-	{0x5000, 0xff},
+	{0x5000, 0xcf},
 	{0x3a18, 0x00},
 	{0x3a19, 0x80},
 	{0x3503, 0x07},
diff --git a/drivers/media/video/msm/sensors/ov5647_v4l2.c b/drivers/media/video/msm/sensors/ov5647_v4l2.c
index 48f1d5d..aac2f2b 100644
--- a/drivers/media/video/msm/sensors/ov5647_v4l2.c
+++ b/drivers/media/video/msm/sensors/ov5647_v4l2.c
@@ -457,9 +457,8 @@
 
 	CDBG(KERN_ERR "snapshot exposure seting 0x%x, 0x%x, %d"
 		, gain, line, line);
-
 	s_ctrl->func_tbl->sensor_group_hold_on(s_ctrl);
-	if (line > 1964) {
+	if (line > 1964 && line <= 1968) {
 		msm_camera_i2c_write(s_ctrl->sensor_i2c_client,
 			s_ctrl->sensor_output_reg_addr->frame_length_lines,
 			(uint8_t)((line+4) >> 8),
diff --git a/drivers/media/video/msm_vidc/msm_vdec.c b/drivers/media/video/msm_vidc/msm_vdec.c
index 3011a2b..6de3f25 100644
--- a/drivers/media/video/msm_vidc/msm_vdec.c
+++ b/drivers/media/video/msm_vidc/msm_vdec.c
@@ -26,6 +26,7 @@
 #define MAX_NUM_OUTPUT_BUFFERS 6
 
 static const char *const mpeg_video_vidc_divx_format[] = {
+	"DIVX Format 3",
 	"DIVX Format 4",
 	"DIVX Format 5",
 	"DIVX Format 6",
@@ -153,8 +154,7 @@
 static u32 get_frame_size_nv12(int plane,
 					u32 height, u32 width)
 {
-	int stride = (width + 31) & (~31);
-	return height * stride * 3/2;
+	return (ALIGN(height, 32) * ALIGN(width, 32) * 3) / 2;
 }
 static u32 get_frame_size_nv21(int plane,
 					u32 height, u32 width)
@@ -217,6 +217,14 @@
 		.get_frame_size = get_frame_size_nv21,
 		.type = CAPTURE_PORT,
 	},
+	{
+		.name = "DIVX 311",
+		.description = "DIVX 311 compressed format",
+		.fourcc = V4L2_PIX_FMT_DIVX_311,
+		.num_planes = 1,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	}
 };
 
 int msm_vdec_streamon(struct msm_vidc_inst *inst, enum v4l2_buf_type i)
@@ -654,7 +662,7 @@
 				break;
 			}
 			if (property_id) {
-				pr_err("Control: HAL property=%d,ctrl_id=%d,ctrl_value=%d\n",
+				pr_err("Control: HAL property=%x,ctrl_id=%x,ctrl_value=%d\n",
 					   property_id,
 					   msm_vdec_ctrls[control_idx].id,
 					   control.value);
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.c b/drivers/media/video/msm_vidc/msm_vidc_common.c
index 31879b7..6b06943 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.c
@@ -149,7 +149,7 @@
 	enum command_response cmd)
 {
 	int rc = 0;
-	rc = wait_for_completion_timeout(
+	rc = wait_for_completion_interruptible_timeout(
 		&inst->completions[SESSION_MSG_INDEX(cmd)],
 		msecs_to_jiffies(HW_RESPONSE_TIMEOUT));
 	if (!rc) {
@@ -418,6 +418,33 @@
 		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOS)
 			vb->v4l2_buf.flags |= V4L2_BUF_FLAG_EOS;
 		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+	} else {
+		/*
+		 * FIXME:
+		 * Special handling for EOS case: if we sent a 0 length input
+		 * buf with EOS set, Venus doesn't return a valid output buffer.
+		 * So pick up a random buffer that's with us, and send it to
+		 * v4l2 client with EOS flag set.
+		 *
+		 * This would normally be OK unless client decides to send
+		 * frames even after EOS.
+		 *
+		 * This should be fixed in upcoming versions of firmware
+		 */
+		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOS
+			&& fill_buf_done->filled_len1 == 0) {
+			struct vb2_queue *q = &inst->vb2_bufq[CAPTURE_PORT];
+
+			if (!list_empty(&q->queued_list)) {
+				vb = list_first_entry(&q->queued_list,
+					struct vb2_buffer, queued_entry);
+				vb->v4l2_planes[0].bytesused = 0;
+				vb->v4l2_buf.flags |= V4L2_BUF_FLAG_EOS;
+				vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+			}
+
+		}
+
 	}
 }
 
@@ -599,8 +626,10 @@
 	case V4L2_PIX_FMT_VC1_ANNEX_L:
 		codec = HAL_VIDEO_CODEC_VC1;
 		break;
+	case V4L2_PIX_FMT_DIVX_311:
+		codec = HAL_VIDEO_CODEC_DIVX_311;
+		break;
 		/*HAL_VIDEO_CODEC_MVC
-		  HAL_VIDEO_CODEC_DIVX_311
 		  HAL_VIDEO_CODEC_DIVX
 		  HAL_VIDEO_CODEC_SPARK
 		  HAL_VIDEO_CODEC_VP6
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index f4c7878..1f33c2c 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -1793,7 +1793,7 @@
 
 static void vidc_hal_core_work_handler(struct work_struct *work)
 {
-	struct hal_device *device =	list_first_entry(
+	struct hal_device *device = list_first_entry(
 		&hal_ctxt.dev_head, struct hal_device, list);
 
 	HAL_MSG_INFO(" GOT INTERRUPT %s() ", __func__);
diff --git a/drivers/media/video/vcap_v4l2.c b/drivers/media/video/vcap_v4l2.c
index dd5bd0f..b5037e8 100644
--- a/drivers/media/video/vcap_v4l2.c
+++ b/drivers/media/video/vcap_v4l2.c
@@ -20,7 +20,6 @@
 #include <linux/platform_device.h>
 #include <linux/memory_alloc.h>
 
-#include <mach/msm_subsystem_map.h>
 #include <mach/board.h>
 #include <mach/gpio.h>
 #include <mach/irqs.h>
@@ -39,6 +38,7 @@
 #include <linux/interrupt.h>
 #include <mach/msm_bus.h>
 #include <mach/msm_bus_board.h>
+#include <mach/iommu_domains.h>
 
 #include <media/vcap_v4l2.h>
 #include <media/vcap_fmt.h>
diff --git a/drivers/media/video/videobuf2-msm-mem.c b/drivers/media/video/videobuf2-msm-mem.c
index 7782955..186195d 100644
--- a/drivers/media/video/videobuf2-msm-mem.c
+++ b/drivers/media/video/videobuf2-msm-mem.c
@@ -29,8 +29,8 @@
 #include <media/videobuf2-msm-mem.h>
 #include <media/msm_camera.h>
 #include <mach/memory.h>
-#include <mach/msm_subsystem_map.h>
 #include <media/videobuf2-core.h>
+#include <mach/iommu_domains.h>
 
 #define MAGIC_PMEM 0x0733ac64
 #define MAGIC_CHECK(is, should)               \
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 44228a6..e4935ae 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -59,6 +59,18 @@
 #define INAND_CMD38_ARG_SECTRIM1 0x81
 #define INAND_CMD38_ARG_SECTRIM2 0x88
 
+#define MMC_SANITIZE_REQ_TIMEOUT 240000 /* msec */
+#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
+			(req->cmd_flags & REQ_META)) && \
+			(rq_data_dir(req) == WRITE))
+#define PACKED_CMD_VER		0x01
+#define PACKED_CMD_WR		0x02
+#define MMC_BLK_UPDATE_STOP_REASON(stats, reason)			\
+	do {								\
+		if (stats->enabled)					\
+			stats->pack_stop_reason[reason]++;		\
+	} while (0)
+
 static DEFINE_MUTEX(block_mutex);
 
 /*
@@ -107,6 +119,7 @@
 	 */
 	unsigned int	part_curr;
 	struct device_attribute force_ro;
+	struct device_attribute num_wr_reqs_to_start_packing;
 };
 
 static DEFINE_MUTEX(open_lock);
@@ -121,9 +134,21 @@
 	MMC_BLK_ECC_ERR,
 };
 
+enum {
+	MMC_PACKED_N_IDX = -1,
+	MMC_PACKED_N_ZERO,
+	MMC_PACKED_N_SINGLE,
+};
+
 module_param(perdev_minors, int, 0444);
 MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
 
+static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
+{
+	mqrq->packed_cmd = MMC_PACKED_NONE;
+	mqrq->packed_num = MMC_PACKED_N_ZERO;
+}
+
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
 	struct mmc_blk_data *md;
@@ -193,6 +218,38 @@
 	return ret;
 }
 
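+/*
+ * sysfs interface for the number of consecutive write requests required
+ * before write packing is enabled on this queue.
+ */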
+static ssize_t
+num_wr_reqs_to_start_packing_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	int num_wr_reqs_to_start_packing;
+	int ret;
+
+	num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;
+
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);
+
+	mmc_blk_put(md);
+	return ret;
+}
+
+static ssize_t
+num_wr_reqs_to_start_packing_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	int value;
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+	sscanf(buf, "%d", &value);
+	if (value >= 0)
+		md->queue.num_wr_reqs_to_start_packing = value;
+
+	mmc_blk_put(md);
+	return count;
+}
+
 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 {
 	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -802,18 +859,11 @@
 	unsigned int from, nr, arg;
 	int err = 0, type = MMC_BLK_SECDISCARD;
 
-	if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
+	if (!(mmc_can_secure_erase_trim(card))) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
 
-	/* The sanitize operation is supported at v4.5 only */
-	if (mmc_can_sanitize(card)) {
-		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				EXT_CSD_SANITIZE_START, 1, 0);
-		goto out;
-	}
-
 	from = blk_rq_pos(req);
 	nr = blk_rq_sectors(req);
 
@@ -856,6 +906,47 @@
 	return err ? 0 : 1;
 }
 
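+/*
+ * Issue an eMMC SANITIZE operation by switching EXT_CSD_SANITIZE_START via
+ * CMD6, then complete the block layer request with the outcome.
+ */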
+static int mmc_blk_issue_sanitize_rq(struct mmc_queue *mq,
+				      struct request *req)
+{
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+	int err = 0;
+
+	BUG_ON(!card);
+	BUG_ON(!card->host);
+
+	if (!(mmc_can_sanitize(card) &&
+	     (card->host->caps2 & MMC_CAP2_SANITIZE))) {
+			pr_warning("%s: %s - SANITIZE is not supported\n",
+				   mmc_hostname(card->host), __func__);
+			err = -EOPNOTSUPP;
+			goto out;
+	}
+
+	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
+		mmc_hostname(card->host), __func__);
+
+	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					EXT_CSD_SANITIZE_START, 1,
+					MMC_SANITIZE_REQ_TIMEOUT);
+
+	if (err)
+		pr_err("%s: %s - mmc_switch() with "
+		       "EXT_CSD_SANITIZE_START failed. err=%d\n",
+		       mmc_hostname(card->host), __func__, err);
+
+	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
+					     __func__);
+
+out:
+	spin_lock_irq(&md->lock);
+	__blk_end_request(req, err, blk_rq_bytes(req));
+	spin_unlock_irq(&md->lock);
+
+	return err ? 0 : 1;
+}
+
 static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -989,12 +1080,60 @@
 	if (!brq->data.bytes_xfered)
 		return MMC_BLK_RETRY;
 
+	if (mq_mrq->packed_cmd != MMC_PACKED_NONE) {
+		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
+			return MMC_BLK_PARTIAL;
+		else
+			return MMC_BLK_SUCCESS;
+	}
+
 	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
 		return MMC_BLK_PARTIAL;
 
 	return MMC_BLK_SUCCESS;
 }
 
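+/*
+ * Extended error check for packed commands: in addition to the normal check,
+ * query the card status and, if EXT_CSD reports an indexed packed failure,
+ * record the failing request index so the transfer can resume from there.
+ */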
+static int mmc_blk_packed_err_check(struct mmc_card *card,
+				    struct mmc_async_req *areq)
+{
+	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+			mmc_active);
+	struct request *req = mq_rq->req;
+	int err, check, status;
+	u8 ext_csd[512];
+
+	check = mmc_blk_err_check(card, areq);
+	err = get_card_status(card, &status, 0);
+	if (err) {
+		pr_err("%s: error %d sending status command\n",
+				req->rq_disk->disk_name, err);
+		return MMC_BLK_ABORT;
+	}
+
+	if (status & R1_EXP_EVENT) {
+		err = mmc_send_ext_csd(card, ext_csd);
+		if (err) {
+			pr_err("%s: error %d sending ext_csd\n",
+					req->rq_disk->disk_name, err);
+			return MMC_BLK_ABORT;
+		}
+
+		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
+					EXT_CSD_PACKED_FAILURE) &&
+				(ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+				 EXT_CSD_PACKED_GENERIC_ERROR)) {
+			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+					EXT_CSD_PACKED_INDEXED_ERROR) {
+				mq_rq->packed_fail_idx =
+				  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
+				return MMC_BLK_PARTIAL;
+			}
+		}
+	}
+
+	return check;
+}
+
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 			       struct mmc_card *card,
 			       int disable_multi,
@@ -1135,10 +1274,286 @@
 	mmc_queue_bounce_pre(mqrq);
 }
 
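+/*
+ * Decide whether write packing should be enabled for this queue: count
+ * consecutive write requests and enable packing once the count exceeds
+ * num_wr_reqs_to_start_packing; a read request disables packing and resets
+ * the counter.
+ */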
+static void mmc_blk_write_packing_control(struct mmc_queue *mq,
+					  struct request *req)
+{
+	struct mmc_host *host = mq->card->host;
+	int data_dir;
+
+	if (!(host->caps2 & MMC_CAP2_PACKED_WR))
+		return;
+
+	/*
+	 * If the host does not support packing control, it must not affect
+	 * write packing; therefore write packing is enabled unconditionally.
+	 */
+	if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
+		mq->wr_packing_enabled = true;
+		return;
+	}
+
+	if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
+		if (mq->num_of_potential_packed_wr_reqs >
+				mq->num_wr_reqs_to_start_packing)
+			mq->wr_packing_enabled = true;
+		return;
+	}
+
+	data_dir = rq_data_dir(req);
+
+	if (data_dir == READ) {
+		mq->num_of_potential_packed_wr_reqs = 0;
+		mq->wr_packing_enabled = false;
+		return;
+	} else if (data_dir == WRITE) {
+		mq->num_of_potential_packed_wr_reqs++;
+	}
+
+	if (mq->num_of_potential_packed_wr_reqs >
+			mq->num_wr_reqs_to_start_packing)
+		mq->wr_packing_enabled = true;
+
+}
+
+struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
+{
+	if (!card)
+		return NULL;
+
+	return &card->wr_pack_stats;
+}
+EXPORT_SYMBOL(mmc_blk_get_packed_statistics);
+
+void mmc_blk_init_packed_statistics(struct mmc_card *card)
+{
+	int max_num_of_packed_reqs = 0;
+
+	if (!card || !card->wr_pack_stats.packing_events)
+		return;
+
+	max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
+
+	spin_lock(&card->wr_pack_stats.lock);
+	memset(card->wr_pack_stats.packing_events, 0,
+		(max_num_of_packed_reqs + 1) *
+	       sizeof(*card->wr_pack_stats.packing_events));
+	memset(&card->wr_pack_stats.pack_stop_reason, 0,
+		sizeof(card->wr_pack_stats.pack_stop_reason));
+	card->wr_pack_stats.enabled = true;
+	spin_unlock(&card->wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
+
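+/*
+ * Try to pack subsequent requests from the block layer queue together with
+ * the current request.  Requests are chained onto packed_list until a
+ * request with a different data direction, a flush/discard, a reliable
+ * write, or a host sector/segment limit stops the packing.  Returns the
+ * number of packed requests, or 0 if the request must be issued on its own.
+ */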
+static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
+{
+	struct request_queue *q = mq->queue;
+	struct mmc_card *card = mq->card;
+	struct request *cur = req, *next = NULL;
+	struct mmc_blk_data *md = mq->data;
+	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
+	unsigned int req_sectors = 0, phys_segments = 0;
+	unsigned int max_blk_count, max_phys_segs;
+	u8 put_back = 0;
+	u8 max_packed_rw = 0;
+	u8 reqs = 0;
+	struct mmc_wr_pack_stats *stats = &card->wr_pack_stats;
+
+	mmc_blk_clear_packed(mq->mqrq_cur);
+
+	if (!(md->flags & MMC_BLK_CMD23) ||
+			!card->ext_csd.packed_event_en)
+		goto no_packed;
+
+	if (!mq->wr_packing_enabled)
+		goto no_packed;
+
+	if ((rq_data_dir(cur) == WRITE) &&
+			(card->host->caps2 & MMC_CAP2_PACKED_WR))
+		max_packed_rw = card->ext_csd.max_packed_writes;
+
+	if (max_packed_rw == 0)
+		goto no_packed;
+
+	if (mmc_req_rel_wr(cur) &&
+			(md->flags & MMC_BLK_REL_WR) &&
+			!en_rel_wr) {
+		goto no_packed;
+	}
+
+	max_blk_count = min(card->host->max_blk_count,
+			card->host->max_req_size >> 9);
+	if (unlikely(max_blk_count > 0xffff))
+		max_blk_count = 0xffff;
+
+	max_phys_segs = queue_max_segments(q);
+	req_sectors += blk_rq_sectors(cur);
+	phys_segments += cur->nr_phys_segments;
+
+	if (rq_data_dir(cur) == WRITE) {
+		req_sectors++;
+		phys_segments++;
+	}
+
+	spin_lock(&stats->lock);
+
+	while (reqs < max_packed_rw - 1) {
+		spin_lock_irq(q->queue_lock);
+		next = blk_fetch_request(q);
+		spin_unlock_irq(q->queue_lock);
+		if (!next) {
+			MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
+			break;
+		}
+
+		if (next->cmd_flags & REQ_DISCARD ||
+				next->cmd_flags & REQ_FLUSH) {
+			MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
+			put_back = 1;
+			break;
+		}
+
+		if (rq_data_dir(cur) != rq_data_dir(next)) {
+			MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
+			put_back = 1;
+			break;
+		}
+
+		if (mmc_req_rel_wr(next) &&
+				(md->flags & MMC_BLK_REL_WR) &&
+				!en_rel_wr) {
+			MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
+			put_back = 1;
+			break;
+		}
+
+		req_sectors += blk_rq_sectors(next);
+		if (req_sectors > max_blk_count) {
+			MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SECTORS);
+			put_back = 1;
+			break;
+		}
+
+		phys_segments += next->nr_phys_segments;
+		if (phys_segments > max_phys_segs) {
+			MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
+			put_back = 1;
+			break;
+		}
+
+		if (rq_data_dir(next) == WRITE)
+			mq->num_of_potential_packed_wr_reqs++;
+		list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list);
+		cur = next;
+		reqs++;
+	}
+
+	if (put_back) {
+		spin_lock_irq(q->queue_lock);
+		blk_requeue_request(q, next);
+		spin_unlock_irq(q->queue_lock);
+	}
+
+	if (stats->enabled) {
+		if (reqs + 1 <= card->ext_csd.max_packed_writes)
+			stats->packing_events[reqs + 1]++;
+		if (reqs + 1 == max_packed_rw)
+			MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD);
+	}
+
+	spin_unlock(&stats->lock);
+
+	if (reqs > 0) {
+		list_add(&req->queuelist, &mq->mqrq_cur->packed_list);
+		mq->mqrq_cur->packed_num = ++reqs;
+		return reqs;
+	}
+
+no_packed:
+	mmc_blk_clear_packed(mq->mqrq_cur);
+	return 0;
+}
+
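+/*
+ * Prepare a packed WRITE request: the first header word carries the packed
+ * command version, the write direction and the number of entries, followed
+ * by a CMD23/CMD25 argument pair for every request on packed_list.
+ */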
+static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+					struct mmc_card *card,
+					struct mmc_queue *mq)
+{
+	struct mmc_blk_request *brq = &mqrq->brq;
+	struct request *req = mqrq->req;
+	struct request *prq;
+	struct mmc_blk_data *md = mq->data;
+	bool do_rel_wr;
+	u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
+	u8 i = 1;
+
+	mqrq->packed_cmd = MMC_PACKED_WRITE;
+	mqrq->packed_blocks = 0;
+	mqrq->packed_fail_idx = MMC_PACKED_N_IDX;
+
+	memset(packed_cmd_hdr, 0, sizeof(mqrq->packed_cmd_hdr));
+	packed_cmd_hdr[0] = (mqrq->packed_num << 16) |
+		(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+
+	/*
+	 * Argument for each entry of the packed group
+	 */
+	list_for_each_entry(prq, &mqrq->packed_list, queuelist) {
+		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
+		/* Argument of CMD23 */
+		packed_cmd_hdr[(i * 2)] =
+			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
+			blk_rq_sectors(prq);
+		/* Argument of CMD18 or CMD25 */
+		packed_cmd_hdr[(i * 2) + 1] =
+			mmc_card_blockaddr(card) ?
+			blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+		mqrq->packed_blocks += blk_rq_sectors(prq);
+		i++;
+	}
+
+	memset(brq, 0, sizeof(struct mmc_blk_request));
+	brq->mrq.cmd = &brq->cmd;
+	brq->mrq.data = &brq->data;
+	brq->mrq.sbc = &brq->sbc;
+	brq->mrq.stop = &brq->stop;
+
+	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (mqrq->packed_blocks + 1);
+	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
+	brq->cmd.arg = blk_rq_pos(req);
+	if (!mmc_card_blockaddr(card))
+		brq->cmd.arg <<= 9;
+	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+	brq->data.blksz = 512;
+	brq->data.blocks = mqrq->packed_blocks + 1;
+	brq->data.flags |= MMC_DATA_WRITE;
+
+	brq->stop.opcode = MMC_STOP_TRANSMISSION;
+	brq->stop.arg = 0;
+	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+
+	mmc_set_data_timeout(&brq->data, card);
+
+	brq->data.sg = mqrq->sg;
+	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+	mqrq->mmc_active.mrq = &brq->mrq;
+	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+	mmc_queue_bounce_pre(mqrq);
+}
+
 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
 			   struct mmc_blk_request *brq, struct request *req,
 			   int ret)
 {
+	struct mmc_queue_req *mq_rq;
+	mq_rq = container_of(brq, struct mmc_queue_req, brq);
+
 	/*
 	 * If this is an SD card and we're writing, we can first
 	 * mark the known good sectors as ok.
@@ -1157,13 +1572,48 @@
 			spin_unlock_irq(&md->lock);
 		}
 	} else {
-		spin_lock_irq(&md->lock);
-		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
-		spin_unlock_irq(&md->lock);
+		if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
+			spin_lock_irq(&md->lock);
+			ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
+			spin_unlock_irq(&md->lock);
+		}
 	}
 	return ret;
 }
 
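+/*
+ * Complete the requests of a packed group that were transferred successfully.
+ * If the card reported an indexed failure, stop at the failed request and
+ * return 1 so that the caller retries from that index.
+ */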
+static int mmc_blk_end_packed_req(struct mmc_queue *mq,
+				  struct mmc_queue_req *mq_rq)
+{
+	struct mmc_blk_data *md = mq->data;
+	struct request *prq;
+	int idx = mq_rq->packed_fail_idx, i = 0;
+	int ret = 0;
+
+	while (!list_empty(&mq_rq->packed_list)) {
+		prq = list_entry_rq(mq_rq->packed_list.next);
+		if (idx == i) {
+			/* retry from error index */
+			mq_rq->packed_num -= idx;
+			mq_rq->req = prq;
+			ret = 1;
+
+			if (mq_rq->packed_num == MMC_PACKED_N_SINGLE) {
+				list_del_init(&prq->queuelist);
+				mmc_blk_clear_packed(mq_rq);
+			}
+			return ret;
+		}
+		list_del_init(&prq->queuelist);
+		spin_lock_irq(&md->lock);
+		__blk_end_request(prq, 0, blk_rq_bytes(prq));
+		spin_unlock_irq(&md->lock);
+		i++;
+	}
+
+	mmc_blk_clear_packed(mq_rq);
+	return ret;
+}
+
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -1172,15 +1622,24 @@
 	int ret = 1, disable_multi = 0, retry = 0, type;
 	enum mmc_blk_status status;
 	struct mmc_queue_req *mq_rq;
-	struct request *req;
+	struct request *req, *prq;
 	struct mmc_async_req *areq;
+	const u8 packed_num = 2;
+	u8 reqs = 0;
 
 	if (!rqc && !mq->mqrq_prev->req)
 		return 0;
 
+	if (rqc)
+		reqs = mmc_blk_prep_packed_list(mq, rqc);
+
 	do {
 		if (rqc) {
-			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+			if (reqs >= packed_num)
+				mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
+						card, mq);
+			else
+				mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
 			areq = &mq->mqrq_cur->mmc_active;
 		} else
 			areq = NULL;
@@ -1194,6 +1653,13 @@
 		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
 		mmc_queue_bounce_post(mq_rq);
 
+		/*
+		 * Check BKOPS urgency from each R1 response
+		 */
+		if (mmc_card_mmc(card) &&
+			(brq->cmd.resp[0] & R1_EXCEPTION_EVENT))
+			mmc_card_set_check_bkops(card);
+
 		switch (status) {
 		case MMC_BLK_SUCCESS:
 		case MMC_BLK_PARTIAL:
@@ -1201,10 +1667,17 @@
 			 * A block was successfully transferred.
 			 */
 			mmc_blk_reset_success(md, type);
-			spin_lock_irq(&md->lock);
-			ret = __blk_end_request(req, 0,
+
+			if (mq_rq->packed_cmd != MMC_PACKED_NONE) {
+				ret = mmc_blk_end_packed_req(mq, mq_rq);
+				break;
+			} else {
+				spin_lock_irq(&md->lock);
+				ret = __blk_end_request(req, 0,
 						brq->data.bytes_xfered);
-			spin_unlock_irq(&md->lock);
+				spin_unlock_irq(&md->lock);
+			}
+
 			/*
 			 * If the blk_end_request function returns non-zero even
 			 * though all data has been transferred and no errors
@@ -1237,7 +1710,8 @@
 			err = mmc_blk_reset(md, card->host, type);
 			if (!err)
 				break;
-			if (err == -ENODEV)
+			if (err == -ENODEV ||
+				mq_rq->packed_cmd != MMC_PACKED_NONE)
 				goto cmd_abort;
 			/* Fall through */
 		}
@@ -1264,27 +1738,66 @@
 		}
 
 		if (ret) {
-			/*
-			 * In case of a incomplete request
-			 * prepare it again and resend.
-			 */
-			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
-			mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+			if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
+				/*
+				 * In case of an incomplete request,
+				 * prepare it again and resend it.
+				 */
+				mmc_blk_rw_rq_prep(mq_rq, card,
+						disable_multi, mq);
+				mmc_start_req(card->host,
+						&mq_rq->mmc_active, NULL);
+			} else {
+				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
+				mmc_start_req(card->host,
+						&mq_rq->mmc_active, NULL);
+			}
 		}
 	} while (ret);
 
 	return 1;
 
  cmd_abort:
-	spin_lock_irq(&md->lock);
-	if (mmc_card_removed(card))
-		req->cmd_flags |= REQ_QUIET;
-	while (ret)
-		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
-	spin_unlock_irq(&md->lock);
+	if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
+		spin_lock_irq(&md->lock);
+		if (mmc_card_removed(card))
+			req->cmd_flags |= REQ_QUIET;
+		while (ret)
+			ret = __blk_end_request(req, -EIO,
+					blk_rq_cur_bytes(req));
+		spin_unlock_irq(&md->lock);
+	} else {
+		while (!list_empty(&mq_rq->packed_list)) {
+			prq = list_entry_rq(mq_rq->packed_list.next);
+			list_del_init(&prq->queuelist);
+			spin_lock_irq(&md->lock);
+			__blk_end_request(prq, -EIO, blk_rq_bytes(prq));
+			spin_unlock_irq(&md->lock);
+		}
+		mmc_blk_clear_packed(mq_rq);
+	}
 
  start_new_req:
 	if (rqc) {
+		/*
+		 * If the current request is packed, it needs to be put back.
+		 */
+		if (mq->mqrq_cur->packed_cmd != MMC_PACKED_NONE) {
+			while (!list_empty(&mq->mqrq_cur->packed_list)) {
+				prq = list_entry_rq(
+					mq->mqrq_cur->packed_list.prev);
+				if (prq->queuelist.prev !=
+						&mq->mqrq_cur->packed_list) {
+					list_del_init(&prq->queuelist);
+					spin_lock_irq(mq->queue->queue_lock);
+					blk_requeue_request(mq->queue, prq);
+					spin_unlock_irq(mq->queue->queue_lock);
+				} else {
+					list_del_init(&prq->queuelist);
+				}
+			}
+			mmc_blk_clear_packed(mq->mqrq_cur);
+		}
 		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
 		mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
 	}
@@ -1323,7 +1836,14 @@
 		goto out;
 	}
 
-	if (req && req->cmd_flags & REQ_DISCARD) {
+	mmc_blk_write_packing_control(mq, req);
+
+	if (req && req->cmd_flags & REQ_SANITIZE) {
+		/* complete ongoing async transfer before issuing sanitize */
+		if (card->host && card->host->areq)
+			mmc_blk_issue_rw_rq(mq, NULL);
+		ret = mmc_blk_issue_sanitize_rq(mq, req);
+	} else if (req && req->cmd_flags & REQ_DISCARD) {
 		/* complete ongoing async transfer before issuing discard */
 		if (card->host->areq)
 			mmc_blk_issue_rw_rq(mq, NULL);
@@ -1559,6 +2079,8 @@
 static void mmc_blk_remove_req(struct mmc_blk_data *md)
 {
 	if (md) {
+		device_remove_file(disk_to_dev(md->disk),
+				   &md->num_wr_reqs_to_start_packing);
 		if (md->disk->flags & GENHD_FL_UP) {
 			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
 
@@ -1597,9 +2119,27 @@
 	md->force_ro.attr.name = "force_ro";
 	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
 	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
-	if (ret)
+	if (ret) {
 		del_gendisk(md->disk);
+		goto out;
+	}
 
+	md->num_wr_reqs_to_start_packing.show =
+		num_wr_reqs_to_start_packing_show;
+	md->num_wr_reqs_to_start_packing.store =
+		num_wr_reqs_to_start_packing_store;
+	sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
+	md->num_wr_reqs_to_start_packing.attr.name =
+		"num_wr_reqs_to_start_packing";
+	md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
+	ret = device_create_file(disk_to_dev(md->disk),
+				 &md->num_wr_reqs_to_start_packing);
+	if (ret) {
+		device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+		del_gendisk(md->disk);
+	}
+
+out:
 	return ret;
 }
 
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 73f63c9..c3a718a 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -25,6 +25,13 @@
 #define MMC_QUEUE_SUSPENDED	(1 << 0)
 
 /*
+ * The default number of requests that triggers write packing was determined
+ * based on benchmark tests, to keep read latency as low as possible while
+ * maintaining high write throughput.
+ */
+#define DEFAULT_NUM_REQS_TO_START_PACK 17
+
+/*
  * Prepare a MMC request. This just filters out odd stuff.
  */
 static int mmc_prep_request(struct request_queue *q, struct request *req)
@@ -67,6 +74,9 @@
 		spin_unlock_irq(q->queue_lock);
 
 		if (req || mq->mqrq_prev->req) {
+			if (mmc_card_doing_bkops(mq->card))
+				mmc_interrupt_bkops(mq->card);
+
 			set_current_state(TASK_RUNNING);
 			mq->issue_fn(mq, req);
 		} else {
@@ -74,6 +84,8 @@
 				set_current_state(TASK_RUNNING);
 				break;
 			}
+
+			mmc_start_bkops(mq->card);
 			up(&mq->thread_sem);
 			schedule();
 			down(&mq->thread_sem);
@@ -146,10 +158,15 @@
 	/* granularity must not be greater than max. discard */
 	if (card->pref_erase > max_discard)
 		q->limits.discard_granularity = 0;
-	if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
+	if (mmc_can_secure_erase_trim(card))
 		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
 }
 
+static void mmc_queue_setup_sanitize(struct request_queue *q)
+{
+	queue_flag_set_unlocked(QUEUE_FLAG_SANITIZE, q);
+}
+
 /**
  * mmc_init_queue - initialise a queue structure.
  * @mq: mmc queue
@@ -178,15 +195,21 @@
 
 	memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
 	memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
+	INIT_LIST_HEAD(&mqrq_cur->packed_list);
+	INIT_LIST_HEAD(&mqrq_prev->packed_list);
 	mq->mqrq_cur = mqrq_cur;
 	mq->mqrq_prev = mqrq_prev;
 	mq->queue->queuedata = mq;
+	mq->num_wr_reqs_to_start_packing = DEFAULT_NUM_REQS_TO_START_PACK;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
 
+	if ((mmc_can_sanitize(card) && (host->caps2 & MMC_CAP2_SANITIZE)))
+		mmc_queue_setup_sanitize(mq->queue);
+
 #ifdef CONFIG_MMC_BLOCK_BOUNCE
 	if (host->max_segs == 1) {
 		unsigned int bouncesz;
@@ -378,6 +401,35 @@
 	}
 }
 
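+/*
+ * Map the packed command header and every request on packed_list into a
+ * single scatterlist for the host controller.
+ */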
+static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
+					    struct mmc_queue_req *mqrq,
+					    struct scatterlist *sg)
+{
+	struct scatterlist *__sg;
+	unsigned int sg_len = 0;
+	struct request *req;
+	enum mmc_packed_cmd cmd;
+
+	cmd = mqrq->packed_cmd;
+
+	if (cmd == MMC_PACKED_WRITE) {
+		__sg = sg;
+		sg_set_buf(__sg, mqrq->packed_cmd_hdr,
+				sizeof(mqrq->packed_cmd_hdr));
+		sg_len++;
+		__sg->page_link &= ~0x02;
+	}
+
+	__sg = sg + sg_len;
+	list_for_each_entry(req, &mqrq->packed_list, queuelist) {
+		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
+		__sg = sg + (sg_len - 1);
+		(__sg++)->page_link &= ~0x02;
+	}
+	sg_mark_end(sg + (sg_len - 1));
+	return sg_len;
+}
+
 /*
  * Prepare the sg list(s) to be handed of to the host driver
  */
@@ -388,12 +440,19 @@
 	struct scatterlist *sg;
 	int i;
 
-	if (!mqrq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+	if (!mqrq->bounce_buf) {
+		if (!list_empty(&mqrq->packed_list))
+			return mmc_queue_packed_map_sg(mq, mqrq, mqrq->sg);
+		else
+			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+	}
 
 	BUG_ON(!mqrq->bounce_sg);
 
-	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+	if (!list_empty(&mqrq->packed_list))
+		sg_len = mmc_queue_packed_map_sg(mq, mqrq, mqrq->bounce_sg);
+	else
+		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
 
 	mqrq->bounce_sg_len = sg_len;
 
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index d2a1eb4..6c29e0e 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -12,6 +12,11 @@
 	struct mmc_data		data;
 };
 
+enum mmc_packed_cmd {
+	MMC_PACKED_NONE = 0,
+	MMC_PACKED_WRITE,
+};
+
 struct mmc_queue_req {
 	struct request		*req;
 	struct mmc_blk_request	brq;
@@ -20,6 +25,12 @@
 	struct scatterlist	*bounce_sg;
 	unsigned int		bounce_sg_len;
 	struct mmc_async_req	mmc_active;
+	struct list_head	packed_list;
+	u32			packed_cmd_hdr[128];
+	unsigned int		packed_blocks;
+	enum mmc_packed_cmd	packed_cmd;
+	int		packed_fail_idx;
+	u8		packed_num;
 };
 
 struct mmc_queue {
@@ -33,6 +44,9 @@
 	struct mmc_queue_req	mqrq[2];
 	struct mmc_queue_req	*mqrq_cur;
 	struct mmc_queue_req	*mqrq_prev;
+	bool			wr_packing_enabled;
+	int			num_of_potential_packed_wr_reqs;
+	int			num_wr_reqs_to_start_packing;
 };
 
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 84983e0..bca06a5 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -246,6 +246,8 @@
 	card->dev.release = mmc_release_card;
 	card->dev.type = type;
 
+	spin_lock_init(&card->wr_pack_stats.lock);
+
 	return card;
 }
 
@@ -359,6 +361,8 @@
 		device_del(&card->dev);
 	}
 
+	kfree(card->wr_pack_stats.packing_events);
+
 	put_device(&card->dev);
 }
 
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 15ddd83..e0217d0 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -40,6 +40,12 @@
 #include "sd_ops.h"
 #include "sdio_ops.h"
 
+/*
+ * Background operations can take a long time, depending on the housekeeping
+ * operations the card has to perform.
+ */
+#define MMC_BKOPS_MAX_TIMEOUT    (4 * 60 * 1000) /* max time to wait in ms */
+
 static struct workqueue_struct *workqueue;
 
 /*
@@ -223,6 +229,74 @@
 	host->ops->request(host, mrq);
 }
 
+/**
+ *	mmc_start_bkops - start BKOPS for supported cards
+ *	@card: MMC card to start BKOPS
+ *
+ *	Start background operations whenever requested.
+ *	When the urgent BKOPS bit is set in an R1 command response,
+ *	background operations should be started immediately.
+ */
+void mmc_start_bkops(struct mmc_card *card)
+{
+	int err;
+	unsigned long flags;
+	int timeout;
+
+	BUG_ON(!card);
+	if (!card->ext_csd.bkops_en || !(card->host->caps2 & MMC_CAP2_BKOPS))
+		return;
+
+	if (mmc_card_check_bkops(card)) {
+		spin_lock_irqsave(&card->host->lock, flags);
+		mmc_card_clr_check_bkops(card);
+		spin_unlock_irqrestore(&card->host->lock, flags);
+		if (mmc_is_exception_event(card, EXT_CSD_URGENT_BKOPS))
+			if (card->ext_csd.raw_bkops_status)
+				mmc_card_set_need_bkops(card);
+	}
+
+	/*
+	 * If the card is already doing BKOPS, or the need-for-BKOPS flag is
+	 * not set, there is nothing to do.
+	 */
+	if (mmc_card_doing_bkops(card) || !mmc_card_need_bkops(card))
+		return;
+
+	mmc_claim_host(card->host);
+
+	timeout = (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) ?
+		MMC_BKOPS_MAX_TIMEOUT : 0;
+
+	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+			EXT_CSD_BKOPS_START, 1, timeout);
+	if (err) {
+		pr_warning("%s: error %d starting bkops\n",
+			   mmc_hostname(card->host), err);
+		mmc_card_clr_need_bkops(card);
+		goto out;
+	}
+
+	spin_lock_irqsave(&card->host->lock, flags);
+	mmc_card_clr_need_bkops(card);
+
+	/*
+	 * For urgent BKOPS status (LEVEL_2 and above) BKOPS is executed
+	 * synchronously; otherwise the operation remains in progress.
+	 */
+	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2)
+		mmc_card_set_check_bkops(card);
+	else
+		mmc_card_set_doing_bkops(card);
+
+	spin_unlock_irqrestore(&card->host->lock, flags);
+out:
+	mmc_release_host(card->host);
+}
+EXPORT_SYMBOL(mmc_start_bkops);
+
 static void mmc_wait_done(struct mmc_request *mrq)
 {
 	complete(&mrq->completion);
@@ -451,6 +525,69 @@
 EXPORT_SYMBOL(mmc_wait_for_cmd);
 
 /**
+ *	mmc_interrupt_bkops - interrupt ongoing BKOPS
+ *	@card: MMC card to check BKOPS
+ *
+ *	Send HPI command to interrupt ongoing background operations,
+ *	to allow rapid servicing of foreground operations, e.g. reads and
+ *	writes. Wait until the card comes out of the programming state
+ *	to avoid errors in servicing read/write requests.
+ */
+int mmc_interrupt_bkops(struct mmc_card *card)
+{
+	int err = 0;
+	unsigned long flags;
+
+	BUG_ON(!card);
+
+	err = mmc_interrupt_hpi(card);
+
+	spin_lock_irqsave(&card->host->lock, flags);
+	mmc_card_clr_doing_bkops(card);
+	spin_unlock_irqrestore(&card->host->lock, flags);
+
+	return err;
+}
+EXPORT_SYMBOL(mmc_interrupt_bkops);
+
+int mmc_read_bkops_status(struct mmc_card *card)
+{
+	int err;
+	u8 ext_csd[512];
+
+	mmc_claim_host(card->host);
+	err = mmc_send_ext_csd(card, ext_csd);
+	mmc_release_host(card->host);
+	if (err)
+		return err;
+
+	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
+	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
+
+	return 0;
+}
+EXPORT_SYMBOL(mmc_read_bkops_status);
+
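+/*
+ * Check whether the exception event given by @value is reported in the
+ * card's EXT_CSD exception status; for eMMC 4.41 (rev 5) the R1 exception
+ * event is treated as URGENT_BKOPS.
+ */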
+int mmc_is_exception_event(struct mmc_card *card, unsigned int value)
+{
+	int err;
+
+	err = mmc_read_bkops_status(card);
+	if (err) {
+		pr_err("%s: Didn't read bkops status : %d\n",
+		       mmc_hostname(card->host), err);
+		return 0;
+	}
+
+	/* In eMMC 4.41, R1_EXCEPTION_EVENT is URGENT_BKOPS */
+	if (card->ext_csd.rev == 5)
+		return 1;
+
+	return (card->ext_csd.raw_exception_status & value) ? 1 : 0;
+}
+EXPORT_SYMBOL(mmc_is_exception_event);
+
+/**
  *	mmc_set_data_timeout - set the timeout for a data command
  *	@data: data phase for command
  *	@card: the MMC card associated with the data transfer
@@ -2418,8 +2555,12 @@
 				err = -EBUSY;
 
 		if (!err) {
-			if (host->bus_ops->suspend)
+			if (host->bus_ops->suspend) {
+				if (mmc_card_doing_bkops(host->card))
+					mmc_interrupt_bkops(host->card);
+
 				err = host->bus_ops->suspend(host);
+			}
 			if (!(host->card && mmc_card_sdio(host->card)))
 				mmc_do_release_host(host);
 
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index c1b0405..0efcf9d 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -287,6 +287,164 @@
 	.llseek		= default_llseek,
 };
 
+static int mmc_wr_pack_stats_open(struct inode *inode, struct file *filp)
+{
+	struct mmc_card *card = inode->i_private;
+
+	filp->private_data = card;
+	card->wr_pack_stats.print_in_read = 1;
+	return 0;
+}
+
+#define TEMP_BUF_SIZE 256
+static ssize_t mmc_wr_pack_stats_read(struct file *filp, char __user *ubuf,
+				size_t cnt, loff_t *ppos)
+{
+	struct mmc_card *card = filp->private_data;
+	struct mmc_wr_pack_stats *pack_stats;
+	int i;
+	int max_num_of_packed_reqs = 0;
+	char *temp_buf;
+
+	if (!card)
+		return cnt;
+
+	if (!card->wr_pack_stats.print_in_read)
+		return 0;
+
+	if (!card->wr_pack_stats.enabled) {
+		pr_info("%s: write packing statistics are disabled\n",
+			 mmc_hostname(card->host));
+		goto exit;
+	}
+
+	pack_stats = &card->wr_pack_stats;
+
+	if (!pack_stats->packing_events) {
+		pr_info("%s: NULL packing_events\n", mmc_hostname(card->host));
+		goto exit;
+	}
+
+	max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
+
+	temp_buf = kmalloc(TEMP_BUF_SIZE, GFP_KERNEL);
+	if (!temp_buf)
+		goto exit;
+
+	spin_lock(&pack_stats->lock);
+
+	snprintf(temp_buf, TEMP_BUF_SIZE, "%s: write packing statistics:\n",
+		mmc_hostname(card->host));
+	strlcat(ubuf, temp_buf, cnt);
+
+	for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
+		if (pack_stats->packing_events[i]) {
+			snprintf(temp_buf, TEMP_BUF_SIZE,
+				 "%s: Packed %d reqs - %d times\n",
+				mmc_hostname(card->host), i,
+				pack_stats->packing_events[i]);
+			strlcat(ubuf, temp_buf, cnt);
+		}
+	}
+
+	snprintf(temp_buf, TEMP_BUF_SIZE,
+		 "%s: stopped packing due to the following reasons:\n",
+		 mmc_hostname(card->host));
+	strlcat(ubuf, temp_buf, cnt);
+
+	if (pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: exceed max num of segments\n",
+			 mmc_hostname(card->host),
+			 pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]);
+		strlcat(ubuf, temp_buf, cnt);
+	}
+	if (pack_stats->pack_stop_reason[EXCEEDS_SECTORS]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: exceed max num of sectors\n",
+			mmc_hostname(card->host),
+			pack_stats->pack_stop_reason[EXCEEDS_SECTORS]);
+		strlcat(ubuf, temp_buf, cnt);
+	}
+	if (pack_stats->pack_stop_reason[WRONG_DATA_DIR]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: wrong data direction\n",
+			mmc_hostname(card->host),
+			pack_stats->pack_stop_reason[WRONG_DATA_DIR]);
+		strlcat(ubuf, temp_buf, cnt);
+	}
+	if (pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: flush or discard\n",
+			mmc_hostname(card->host),
+			pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]);
+		strlcat(ubuf, temp_buf, cnt);
+	}
+	if (pack_stats->pack_stop_reason[EMPTY_QUEUE]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: empty queue\n",
+			mmc_hostname(card->host),
+			pack_stats->pack_stop_reason[EMPTY_QUEUE]);
+		strlcat(ubuf, temp_buf, cnt);
+	}
+	if (pack_stats->pack_stop_reason[REL_WRITE]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: rel write\n",
+			mmc_hostname(card->host),
+			pack_stats->pack_stop_reason[REL_WRITE]);
+		strlcat(ubuf, temp_buf, cnt);
+	}
+	if (pack_stats->pack_stop_reason[THRESHOLD]) {
+		snprintf(temp_buf, TEMP_BUF_SIZE,
+			 "%s: %d times: Threshold\n",
+			mmc_hostname(card->host),
+			pack_stats->pack_stop_reason[THRESHOLD]);
+		strlcat(ubuf, temp_buf, cnt);
+	}
+
+	spin_unlock(&pack_stats->lock);
+
+	kfree(temp_buf);
+
+	pr_info("%s", ubuf);
+
+exit:
+	if (card->wr_pack_stats.print_in_read == 1) {
+		card->wr_pack_stats.print_in_read = 0;
+		return strnlen(ubuf, cnt);
+	}
+
+	return 0;
+}
+
+static ssize_t mmc_wr_pack_stats_write(struct file *filp,
+				       const char __user *ubuf, size_t cnt,
+				       loff_t *ppos)
+{
+	struct mmc_card *card = filp->private_data;
+	int value;
+
+	if (!card)
+		return cnt;
+
+	sscanf(ubuf, "%d", &value);
+	if (value) {
+		mmc_blk_init_packed_statistics(card);
+	} else {
+		spin_lock(&card->wr_pack_stats.lock);
+		card->wr_pack_stats.enabled = false;
+		spin_unlock(&card->wr_pack_stats.lock);
+	}
+
+	return cnt;
+}
+
+static const struct file_operations mmc_dbg_wr_pack_stats_fops = {
+	.open		= mmc_wr_pack_stats_open,
+	.read		= mmc_wr_pack_stats_read,
+	.write		= mmc_wr_pack_stats_write,
+};
+
 void mmc_add_card_debugfs(struct mmc_card *card)
 {
 	struct mmc_host	*host = card->host;
@@ -319,6 +477,12 @@
 					&mmc_dbg_ext_csd_fops))
 			goto err;
 
+	if (mmc_card_mmc(card) && (card->ext_csd.rev >= 6) &&
+	    (card->host->caps2 & MMC_CAP2_PACKED_WR))
+		if (!debugfs_create_file("wr_pack_stats", S_IRUSR, root, card,
+					 &mmc_dbg_wr_pack_stats_fops))
+			goto err;
+
 	return;
 
 err:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 8fce9a6..169fe68 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -425,6 +425,24 @@
 	}
 
 	if (card->ext_csd.rev >= 5) {
+		/* check whether the eMMC card support BKOPS */
+		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+			card->ext_csd.bkops = 1;
+			card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
+			card->ext_csd.raw_bkops_status =
+				ext_csd[EXT_CSD_BKOPS_STATUS];
+			if (!card->ext_csd.bkops_en &&
+				card->host->caps2 & MMC_CAP2_INIT_BKOPS) {
+				err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					EXT_CSD_BKOPS_EN, 1, 0);
+				if (err)
+					pr_warning("%s: Enabling BKOPS failed\n",
+						mmc_hostname(card->host));
+				else
+					card->ext_csd.bkops_en = 1;
+			}
+		}
+
 		/* check whether the eMMC card supports HPI */
 		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
 			card->ext_csd.hpi = 1;
@@ -1205,6 +1223,24 @@
 		} else {
 			card->ext_csd.packed_event_en = 1;
 		}
+
+	}
+
+	if (!oldcard) {
+		if ((host->caps2 & MMC_CAP2_PACKED_CMD) &&
+		    (card->ext_csd.max_packed_writes > 0)) {
+			/*
+			 * Keep the statistics indexed by the number of packed
+			 * requests (1 to max_packed_writes).
+			 */
+			card->wr_pack_stats.packing_events = kzalloc(
+				(card->ext_csd.max_packed_writes + 1) *
+				sizeof(*card->wr_pack_stats.packing_events),
+				GFP_KERNEL);
+			if (!card->wr_pack_stats.packing_events)
+				goto free_card;
+		}
 	}
 
 	if (!oldcard)
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 2438176..c4cecba 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -334,6 +334,7 @@
 	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
 			ext_csd, 512);
 }
+EXPORT_SYMBOL_GPL(mmc_send_ext_csd);
 
 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
 {
@@ -391,13 +392,22 @@
 		  (index << 16) |
 		  (value << 8) |
 		  set;
-	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+	cmd.flags = MMC_CMD_AC;
+	if (index == EXT_CSD_BKOPS_START &&
+	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
+		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
+	else
+		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
 	cmd.cmd_timeout_ms = timeout_ms;
 
 	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
 	if (err)
 		return err;
 
+	/* No need to check card status in case of BKOPS switch */
+	if (index == EXT_CSD_BKOPS_START)
+		return 0;
+
 	mmc_delay(1);
 	/* Must check status to be sure of no errors */
 	do {
@@ -556,14 +566,14 @@
 {
 	struct mmc_command cmd = {0};
 	unsigned int opcode;
-	unsigned int flags;
+	unsigned int flags = MMC_CMD_AC;
 	int err;
 
 	opcode = card->ext_csd.hpi_cmd;
 	if (opcode == MMC_STOP_TRANSMISSION)
-		flags = MMC_RSP_R1 | MMC_CMD_AC;
+		flags |= MMC_RSP_R1B;
 	else if (opcode == MMC_SEND_STATUS)
-		flags = MMC_RSP_R1 | MMC_CMD_AC;
+		flags |= MMC_RSP_R1;
 
 	cmd.opcode = opcode;
 	cmd.arg = card->rca << 16 | 1;
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 717f1d3..40f0f33 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -1614,6 +1614,14 @@
 		cmd->error = -EILSEQ;
 	}
 
+	if (!cmd->error) {
+		if (cmd->cmd_timeout_ms > host->curr.req_tout_ms) {
+			host->curr.req_tout_ms = cmd->cmd_timeout_ms;
+			mod_timer(&host->req_tout_timer, (jiffies +
+				  msecs_to_jiffies(host->curr.req_tout_ms)));
+		}
+	}
+
 	if (!cmd->data || cmd->error) {
 		if (host->curr.data && host->dma.sg &&
 			host->is_dma_mode)
@@ -1949,7 +1957,7 @@
 msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
 	struct msmsdcc_host *host = mmc_priv(mmc);
-	unsigned long		flags, timeout;
+	unsigned long		flags;
 
 	/*
 	 * Get the SDIO AL client out of LPM.
@@ -2006,15 +2014,16 @@
 	 * Set timeout value to 10 secs (or more in case of buggy cards)
 	 */
 	if ((mmc->card) && (mmc->card->quirks & MMC_QUIRK_INAND_DATA_TIMEOUT))
-		timeout = 20000;
+		host->curr.req_tout_ms = 20000;
 	else
-		timeout = MSM_MMC_REQ_TIMEOUT;
+		host->curr.req_tout_ms = MSM_MMC_REQ_TIMEOUT;
 	/*
 	 * Kick the software request timeout timer here with the timeout
 	 * value identified above
 	 */
 	mod_timer(&host->req_tout_timer,
-			(jiffies + msecs_to_jiffies(timeout)));
+			(jiffies +
+			 msecs_to_jiffies(host->curr.req_tout_ms)));
 
 	host->curr.mrq = mrq;
 	if (mrq->data && (mrq->data->flags & MMC_DATA_WRITE)) {
@@ -4535,10 +4544,11 @@
 	}
 
 	pr_info("%s: got_dataend=%d, prog_enable=%d,"
-		" wait_for_auto_prog_done=%d, got_auto_prog_done=%d\n",
-		mmc_hostname(host->mmc), host->curr.got_dataend,
-		host->prog_enable, host->curr.wait_for_auto_prog_done,
-		host->curr.got_auto_prog_done);
+		" wait_for_auto_prog_done=%d, got_auto_prog_done=%d,"
+		" req_tout_ms=%d\n", mmc_hostname(host->mmc),
+		host->curr.got_dataend, host->prog_enable,
+		host->curr.wait_for_auto_prog_done,
+		host->curr.got_auto_prog_done, host->curr.req_tout_ms);
 	msmsdcc_print_rpm_info(host);
 }
 
@@ -5009,7 +5019,11 @@
 		mmc->caps |= (MMC_CAP_SET_XPC_330 | MMC_CAP_SET_XPC_300 |
 				MMC_CAP_SET_XPC_180);
 
+	mmc->caps2 |= MMC_CAP2_PACKED_WR;
+	mmc->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
 	mmc->caps2 |= (MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_DETECT_ON_ERR);
+	mmc->caps2 |= MMC_CAP2_SANITIZE;
+
 	if (pdev->dev.of_node) {
 		if (of_get_property((&pdev->dev)->of_node,
 					"qcom,sdcc-hs200", NULL))
@@ -5020,6 +5034,8 @@
 		mmc->caps |= MMC_CAP_NONREMOVABLE;
 	mmc->caps |= MMC_CAP_SDIO_IRQ;
 
+	mmc->caps2 |= MMC_CAP2_INIT_BKOPS | MMC_CAP2_BKOPS;
+
 	if (plat->is_sdio_al_client)
 		mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
 
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index 78c12c1..14677c6 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -297,6 +297,7 @@
 	int			got_auto_prog_done;
 	bool			use_wr_data_pend;
 	int			user_pages;
+	u32			req_tout_ms;
 };
 
 struct msmsdcc_sps_ep_conn_data {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8aab269..44041b8 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -3466,6 +3466,15 @@
 	  for routing IP packets within the MSM using
 	  BAM as a physical transport.
 
+config MSM_RMNET_SMUX
+	bool "RMNET SMUX Driver"
+	depends on N_SMUX
+	help
+	  Implements RMNET over the SMUX interface.
+	  RMNET provides a virtual ethernet interface
+	  for routing IP packets within the MSM using
+	  HSUART as a physical transport.
+
 config MSM_RMNET_DEBUG
 	bool "MSM RMNET debug interface"
 	depends on MSM_RMNET
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 7b3cd59..7373a61 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -304,6 +304,7 @@
 obj-$(CONFIG_MSM_RMNET) += msm_rmnet.o
 obj-$(CONFIG_MSM_RMNET_SDIO) += msm_rmnet_sdio.o
 obj-$(CONFIG_MSM_RMNET_BAM) += msm_rmnet_bam.o
+obj-$(CONFIG_MSM_RMNET_SMUX) += msm_rmnet_smux.o
 
 obj-$(CONFIG_NIU) += niu.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
diff --git a/drivers/net/msm_rmnet_smux.c b/drivers/net/msm_rmnet_smux.c
new file mode 100644
index 0000000..70e7182
--- /dev/null
+++ b/drivers/net/msm_rmnet_smux.c
@@ -0,0 +1,944 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * RMNET SMUX Module.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/wakelock.h>
+#include <linux/if_arp.h>
+#include <linux/msm_rmnet.h>
+#include <linux/platform_device.h>
+#include <linux/smux.h>
+#include <linux/ip.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+
+/* Debug message support */
+static int msm_rmnet_smux_debug_mask;
+module_param_named(debug_enable, msm_rmnet_smux_debug_mask,
+				   int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DEBUG_MASK_LVL0 (1U << 0)
+#define DEBUG_MASK_LVL1 (1U << 1)
+#define DEBUG_MASK_LVL2 (1U << 2)
+
+#define DBG(m, x...) do {			   \
+		if (msm_rmnet_smux_debug_mask & m) \
+			pr_info(x);		   \
+} while (0)
+
+#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
+#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
+#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
+
+/* Configure device instances */
+#define RMNET_SMUX_DEVICE_COUNT (1)
+
+/* allow larger frames */
+#define RMNET_DATA_LEN 2000
+
+#define DEVICE_ID_INVALID   -1
+
+#define DEVICE_INACTIVE     0x00
+#define DEVICE_ACTIVE       0x01
+
+#define HEADROOM_FOR_SMUX    8 /* for mux header */
+#define HEADROOM_FOR_QOS     8
+#define TAILROOM             8 /* for padding by mux layer */
+
+struct rmnet_private {
+	struct net_device_stats stats;
+	uint32_t ch_id;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+	ktime_t last_packet;
+	unsigned long wakeups_xmit;
+	unsigned long wakeups_rcv;
+	unsigned long timeout_us;
+#endif
+	spinlock_t lock;
+	struct tasklet_struct tsklt;
+	/* IOCTL specified mode (protocol, QoS header) */
+	u32 operation_mode;
+	uint8_t device_state;
+	uint8_t in_reset;
+};
+
+static struct net_device *netdevs[RMNET_SMUX_DEVICE_COUNT];
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+static unsigned long timeout_us;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*
+ * If early suspend is enabled, two timeout values are used: one while the
+ * screen is on (the default) and one while the screen is off.
+ */
+static unsigned long timeout_suspend_us;
+static struct device *rmnet0;
+
+/* Set timeout in us when the screen is off. */
+static ssize_t timeout_suspend_store(struct device *d,
+				     struct device_attribute *attr,
+				     const char *buf, size_t n)
+{
+	timeout_suspend_us = strict_strtoul(buf, NULL, 10);
+	return n;
+}
+
+static ssize_t timeout_suspend_show(struct device *d,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%lu\n",
+			(unsigned long) timeout_suspend_us);
+}
+
+static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
+				   timeout_suspend_store);
+
+static void rmnet_early_suspend(struct early_suspend *handler)
+{
+	if (rmnet0) {
+		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+		p->timeout_us = timeout_suspend_us;
+	}
+}
+
+static void rmnet_late_resume(struct early_suspend *handler)
+{
+	if (rmnet0) {
+		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+		p->timeout_us = timeout_us;
+	}
+}
+
+static struct early_suspend rmnet_power_suspend = {
+	.suspend = rmnet_early_suspend,
+	.resume = rmnet_late_resume,
+};
+
+static int __init rmnet_late_init(void)
+{
+	register_early_suspend(&rmnet_power_suspend);
+	return 0;
+}
+
+late_initcall(rmnet_late_init);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
+static int rmnet_cause_wakeup(struct rmnet_private *p)
+{
+	int ret = 0;
+	ktime_t now;
+	if (p->timeout_us == 0)	/* Check if disabled */
+		return 0;
+
+	/* Use real (wall) time. */
+	now = ktime_get_real();
+
+	if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
+		ret = 1;
+
+	p->last_packet = now;
+	return ret;
+}
+
+static ssize_t wakeups_xmit_show(struct device *d,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	return snprintf(buf, PAGE_SIZE, "%lu\n", p->wakeups_xmit);
+}
+
+DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);
+
+static ssize_t wakeups_rcv_show(struct device *d,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	return snprintf(buf, PAGE_SIZE, "%lu\n", p->wakeups_rcv);
+}
+
+DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);
+
+/* Set timeout in us. */
+static ssize_t timeout_store(struct device *d,
+			     struct device_attribute *attr,
+			     const char *buf, size_t n)
+{
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	p->timeout_us = timeout_us = strict_strtoul(buf, NULL, 10);
+#else
+/* If using early suspend/resume hooks do not write the value on store. */
+	timeout_us = strict_strtoul(buf, NULL, 10);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+	return n;
+}
+
+static ssize_t timeout_show(struct device *d,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	return snprintf(buf, PAGE_SIZE, "%lu\n", timeout_us);
+}
+
+DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
+#endif /* CONFIG_MSM_RMNET_DEBUG */
+
+/* Forward declaration */
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+
+
+
+static int count_this_packet(void *_hdr, int len)
+{
+	struct ethhdr *hdr = _hdr;
+
+	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
+		return 0;
+
+	return 1;
+}
+
+static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+	__be16 protocol = 0;
+
+	skb->dev = dev;
+
+	/* Determine L3 protocol */
+	switch (skb->data[0] & 0xf0) {
+	case 0x40:
+		protocol = htons(ETH_P_IP);
+		break;
+	case 0x60:
+		protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
+			   dev->name, skb->data[0] & 0xf0);
+		/* skb will be dropped in upper layer for unknown protocol */
+	}
+	return protocol;
+}
+
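+/*
+ * SMUX read completion callback: deliver the received skb to the network
+ * stack, setting the protocol according to the current operating mode.
+ */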
+static void smux_read_done(void *rcv_dev, const void *meta_data)
+{
+	struct rmnet_private *p;
+	struct net_device *dev = rcv_dev;
+	u32 opmode;
+	unsigned long flags;
+	struct sk_buff *skb = NULL;
+	const struct smux_meta_read  *read_meta_info = meta_data;
+
+	if (!dev || !read_meta_info) {
+		DBG1("%s: invalid read_done callback received", __func__);
+		return;
+	}
+
+	p = netdev_priv(dev);
+
+	skb = (struct sk_buff *) read_meta_info->pkt_priv;
+
+	if (!skb || skb->dev != dev) {
+		DBG1("%s: ERR:skb pointer NULL in READ_DONE CALLBACK",
+		      __func__);
+		return;
+	}
+
+	/* Handle Rx frame format */
+	spin_lock_irqsave(&p->lock, flags);
+	opmode = p->operation_mode;
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	if (RMNET_IS_MODE_IP(opmode)) {
+		/* Driver in IP mode */
+		skb->protocol =
+		rmnet_ip_type_trans(skb, dev);
+	} else {
+		/* Driver in Ethernet mode */
+		skb->protocol =
+		eth_type_trans(skb, dev);
+	}
+	if (RMNET_IS_MODE_IP(opmode) ||
+		count_this_packet(skb->data, skb->len)) {
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		p->wakeups_rcv +=
+		rmnet_cause_wakeup(p);
+#endif
+		p->stats.rx_packets++;
+		p->stats.rx_bytes += skb->len;
+	}
+	DBG2("[%s] Rx packet #%lu len=%d\n",
+		 dev->name, p->stats.rx_packets,
+		 skb->len);
+	/* Deliver to network stack */
+	netif_rx(skb);
+
+	return;
+}
+
+static void smux_write_done(void *dev, const void *meta_data)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	u32 opmode;
+	struct sk_buff *skb = NULL;
+	const struct smux_meta_write  *write_meta_info = meta_data;
+	unsigned long flags;
+
+	if (!dev || !write_meta_info) {
+		DBG1("%s: ERR:invalid WRITE_DONE callback received", __func__);
+		return;
+	}
+
+	skb = (struct sk_buff *) write_meta_info->pkt_priv;
+
+	if (!skb) {
+		DBG1("%s: ERR:skb pointer NULL in WRITE_DONE"
+		     " CALLBACK", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&p->lock, flags);
+	opmode = p->operation_mode;
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	DBG1("%s: write complete\n", __func__);
+	if (RMNET_IS_MODE_IP(opmode) ||
+		count_this_packet(skb->data, skb->len)) {
+		p->stats.tx_packets++;
+		p->stats.tx_bytes += skb->len;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		p->wakeups_xmit += rmnet_cause_wakeup(p);
+#endif
+	}
+	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
+		 ((struct net_device *)(dev))->name, p->stats.tx_packets,
+		 skb->len, skb->mark);
+	dev_kfree_skb_any(skb);
+	if (netif_queue_stopped(dev) &&
+		msm_smux_is_ch_low(p->ch_id)) {
+		DBG0("%s: Low WM hit, waking queue=%p\n",
+			 __func__, skb);
+		netif_wake_queue(dev);
+	}
+}
+
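+/*
+ * SMUX event callback: tracks channel connect/disconnect state and
+ * dispatches read/write completions, failures and watermark events.
+ */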
+void rmnet_smux_notify(void *priv, int event_type, const void *metadata)
+{
+	struct rmnet_private *p;
+	struct net_device *dev;
+	unsigned long flags;
+	struct sk_buff *skb = NULL;
+	u32 opmode;
+	const struct smux_meta_disconnected *ssr_info;
+	const struct smux_meta_read *read_meta_info;
+	const struct smux_meta_write *write_meta_info = metadata;
+
+
+	if (!priv) {
+		DBG0("%s: priv(cookie) NULL, ignoring notification: %d\n",
+		     __func__, event_type);
+		return;
+	}
+
+	switch (event_type) {
+	case SMUX_CONNECTED:
+		p = netdev_priv(priv);
+		dev = priv;
+
+		DBG0("[%s] SMUX_CONNECTED event dev:%s\n", __func__, dev->name);
+
+		netif_carrier_on(dev);
+		netif_start_queue(dev);
+
+		spin_lock_irqsave(&p->lock, flags);
+		p->device_state = DEVICE_ACTIVE;
+		spin_unlock_irqrestore(&p->lock, flags);
+		break;
+
+	case SMUX_DISCONNECTED:
+		p = netdev_priv(priv);
+		dev = priv;
+		ssr_info = metadata;
+
+		DBG0("[%s] SMUX_DISCONNECTED event dev:%s\n",
+		      __func__, dev->name);
+
+		if (ssr_info && ssr_info->is_ssr == 1)
+			DBG0("SSR detected on :%s\n", dev->name);
+
+		netif_carrier_off(dev);
+		netif_stop_queue(dev);
+
+		spin_lock_irqsave(&p->lock, flags);
+		p->device_state = DEVICE_INACTIVE;
+		spin_unlock_irqrestore(&p->lock, flags);
+		break;
+
+	case SMUX_READ_DONE:
+		smux_read_done(priv, metadata);
+		break;
+
+	case SMUX_READ_FAIL:
+		p = netdev_priv(priv);
+		dev = priv;
+		read_meta_info = metadata;
+
+		if (!dev || !read_meta_info) {
+			DBG1("%s: ERR:invalid read failed callback"
+			     " received", __func__);
+			return;
+		}
+
+		skb = (struct sk_buff *) read_meta_info->pkt_priv;
+
+		if (!skb) {
+			DBG1("%s: ERR:skb pointer NULL in read fail"
+			     " CALLBACK", __func__);
+			return;
+		}
+
+		DBG0("%s: read failed\n", __func__);
+
+		opmode = p->operation_mode;
+
+		if (RMNET_IS_MODE_IP(opmode) ||
+		    count_this_packet(skb->data, skb->len))
+			p->stats.rx_dropped++;
+
+		dev_kfree_skb_any(skb);
+		break;
+
+	case SMUX_WRITE_DONE:
+		smux_write_done(priv, metadata);
+		break;
+
+	case SMUX_WRITE_FAIL:
+		p = netdev_priv(priv);
+		dev = priv;
+		write_meta_info = metadata;
+
+		if (!dev || !write_meta_info) {
+			DBG1("%s: ERR:invalid WRITE_FAIL"
+			     " callback received", __func__);
+			return;
+		}
+
+		skb = (struct sk_buff *) write_meta_info->pkt_priv;
+
+		if (!skb) {
+			DBG1("%s: ERR:skb pointer NULL in"
+			     " WRITE_FAIL CALLBACK", __func__);
+			return;
+		}
+
+		DBG0("%s: write failed\n", __func__);
+
+		opmode = p->operation_mode;
+
+		if (RMNET_IS_MODE_IP(opmode) ||
+		    count_this_packet(skb->data, skb->len)) {
+			p->stats.tx_dropped++;
+		}
+
+		dev_kfree_skb_any(skb);
+		break;
+
+	case SMUX_LOW_WM_HIT:
+		dev = priv;
+		DBG0("[%s] Low WM hit dev:%s\n", __func__, dev->name);
+		netif_start_queue(dev);
+		break;
+
+	case SMUX_HIGH_WM_HIT:
+		dev = priv;
+		DBG0("[%s] High WM hit dev:%s\n", __func__, dev->name);
+		netif_stop_queue(dev);
+		break;
+
+	default:
+		dev = priv;
+		DBG0("[%s] Invalid event:%d received on"
+		     " dev: %s\n", __func__, event_type, dev->name);
+		break;
+	}
+
+	return;
+}
+
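+/*
+ * SMUX receive-buffer callback: allocate an skb of the requested size and
+ * return its data area to the mux layer.
+ */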
+int get_rx_buffers(void *priv, void **pkt_priv, void **buffer, int size)
+{
+	struct net_device *dev = (struct net_device *) priv;
+	struct sk_buff *skb = NULL;
+	void *ptr = NULL;
+
+	DBG0("[%s] dev:%s\n", __func__, dev->name);
+	skb = __dev_alloc_skb(size, GFP_ATOMIC);
+	if (skb == NULL) {
+		DBG0("%s: unable to alloc skb\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* TODO skb_reserve(skb, NET_IP_ALIGN); for ethernet mode */
+	/* Populate some params now. */
+	skb->dev = dev;
+	ptr = skb_put(skb, size);
+
+	skb_set_network_header(skb, 0);
+
+	/* done with skb setup, return the buffer pointer. */
+	*pkt_priv = skb;
+	*buffer = ptr;
+
+	return 0;
+}
+
+static int __rmnet_open(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+
+	DBG0("[%s] __rmnet_open()\n", dev->name);
+
+	if (p->device_state == DEVICE_ACTIVE) {
+		return 0;
+	} else {
+		DBG0("[%s] Platform inactive\n", dev->name);
+		return -ENODEV;
+	}
+}
+
+static int rmnet_open(struct net_device *dev)
+{
+	int rc = 0;
+
+	DBG0("[%s] rmnet_open()\n", dev->name);
+
+	rc = __rmnet_open(dev);
+
+	if (rc == 0)
+		netif_start_queue(dev);
+
+	return rc;
+}
+
+static int rmnet_stop(struct net_device *dev)
+{
+	DBG0("[%s] rmnet_stop()\n", dev->name);
+
+	netif_stop_queue(dev);
+	return 0;
+}
+
+static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
+		return -EINVAL;
+
+	DBG0("[%s] MTU change: old=%d new=%d\n",
+		 dev->name, dev->mtu, new_mtu);
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+
+static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	int smux_ret;
+	struct QMI_QOS_HDR_S *qmih;
+	u32 opmode;
+	unsigned long flags;
+
+	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
+	spin_lock_irqsave(&p->lock, flags);
+	opmode = p->operation_mode;
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	if (RMNET_IS_MODE_QOS(opmode)) {
+		qmih = (struct QMI_QOS_HDR_S *)
+			   skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
+		qmih->version = 1;
+		qmih->flags = 0;
+		qmih->flow_id = skb->mark;
+	}
+
+	dev->trans_start = jiffies;
+
+	/* if write() succeeds, skb access is unsafe in this process */
+	smux_ret = msm_smux_write(p->ch_id, skb, skb->data, skb->len);
+
+	if (smux_ret != 0 && smux_ret != -EAGAIN) {
+		pr_err("[%s] %s: write returned error %d",
+			   dev->name, __func__, smux_ret);
+		return -EPERM;
+	}
+
+	return smux_ret;
+}
+
+static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	int ret = 0;
+
+	if (netif_queue_stopped(dev) || (p->device_state == DEVICE_INACTIVE)) {
+		pr_err("[%s]fatal: rmnet_xmit called when "
+			   "netif_queue is stopped", dev->name);
+		return 0;
+	}
+
+	ret = _rmnet_xmit(skb, dev);
+
+	if (ret == -EPERM) {
+		/* Do not stop the queue here.
+		 * It would lead to an irrecoverable state.
+		 */
+		ret = NETDEV_TX_BUSY;
+		goto exit;
+	}
+
+	if (msm_smux_is_ch_full(p->ch_id) || (ret == -EAGAIN)) {
+		/*
+		 * EAGAIN means we attempted to overflow the high watermark.
+		 * Clearly the queue is not stopped like it should be, so
+		 * stop it and return BUSY to the TCP/IP framework.  It will
+		 * retry this packet once the queue is restarted, which
+		 * happens when the low watermark is hit.
+		 */
+		netif_stop_queue(dev);
+		ret = NETDEV_TX_BUSY;
+		goto exit;
+	}
+exit:
+	return ret;
+}
+
+static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	return &p->stats;
+}
+
+static void rmnet_set_multicast_list(struct net_device *dev)
+{
+}
+
+static void rmnet_tx_timeout(struct net_device *dev)
+{
+	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
+}
+
+static const struct net_device_ops rmnet_ops_ether = {
+	.ndo_open = rmnet_open,
+	.ndo_stop = rmnet_stop,
+	.ndo_start_xmit = rmnet_xmit,
+	.ndo_get_stats = rmnet_get_stats,
+	.ndo_set_multicast_list = rmnet_set_multicast_list,
+	.ndo_tx_timeout = rmnet_tx_timeout,
+	.ndo_do_ioctl = rmnet_ioctl,
+	.ndo_change_mtu = rmnet_change_mtu,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+};
+
+static const struct net_device_ops rmnet_ops_ip = {
+	.ndo_open = rmnet_open,
+	.ndo_stop = rmnet_stop,
+	.ndo_start_xmit = rmnet_xmit,
+	.ndo_get_stats = rmnet_get_stats,
+	.ndo_set_multicast_list = rmnet_set_multicast_list,
+	.ndo_tx_timeout = rmnet_tx_timeout,
+	.ndo_do_ioctl = rmnet_ioctl,
+	.ndo_change_mtu = rmnet_change_mtu,
+	.ndo_set_mac_address = 0,
+	.ndo_validate_addr = 0,
+};
+
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	u32 old_opmode = p->operation_mode;
+	unsigned long flags;
+	int prev_mtu = dev->mtu;
+	int rc = 0;
+
+	/* Process IOCTL command */
+	switch (cmd) {
+	case RMNET_IOCTL_SET_LLP_ETHERNET:	/* Set Ethernet protocol   */
+		/* Perform Ethernet config only if in IP mode currently*/
+		if (p->operation_mode & RMNET_MODE_LLP_IP) {
+			ether_setup(dev);
+			random_ether_addr(dev->dev_addr);
+			dev->mtu = prev_mtu;
+
+			dev->netdev_ops = &rmnet_ops_ether;
+			spin_lock_irqsave(&p->lock, flags);
+			p->operation_mode &= ~RMNET_MODE_LLP_IP;
+			p->operation_mode |= RMNET_MODE_LLP_ETH;
+			spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): "
+				 "set Ethernet protocol mode\n",
+				 dev->name);
+		}
+		break;
+
+	case RMNET_IOCTL_SET_LLP_IP:		/* Set RAWIP protocol      */
+		/* Perform IP config only if in Ethernet mode currently*/
+		if (p->operation_mode & RMNET_MODE_LLP_ETH) {
+
+			/* Undo config done in ether_setup() */
+			dev->header_ops         = 0;  /* No header */
+			dev->type               = ARPHRD_RAWIP;
+			dev->hard_header_len    = 0;
+			dev->mtu                = prev_mtu;
+			dev->addr_len           = 0;
+			dev->flags              &= ~(IFF_BROADCAST |
+						     IFF_MULTICAST);
+
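+			/* Reserve headroom for SMUX framing and the optional
+			 * QMI QoS header added on transmit.
+			 */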
+			dev->needed_headroom = HEADROOM_FOR_SMUX +
+							HEADROOM_FOR_QOS;
+			dev->needed_tailroom = TAILROOM;
+			dev->netdev_ops = &rmnet_ops_ip;
+			spin_lock_irqsave(&p->lock, flags);
+			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
+			p->operation_mode |= RMNET_MODE_LLP_IP;
+			spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): "
+				 "set IP protocol mode\n",
+				 dev->name);
+		}
+		break;
+
+	case RMNET_IOCTL_GET_LLP:	/* Get link protocol state */
+		ifr->ifr_ifru.ifru_data =
+		(void *)(p->operation_mode &
+				 (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
+		break;
+
+	case RMNET_IOCTL_SET_QOS_ENABLE:	/* Set QoS header enabled */
+		spin_lock_irqsave(&p->lock, flags);
+		p->operation_mode |= RMNET_MODE_QOS;
+		spin_unlock_irqrestore(&p->lock, flags);
+		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
+			 dev->name);
+		break;
+
+	case RMNET_IOCTL_SET_QOS_DISABLE:	/* Set QoS header disabled */
+		spin_lock_irqsave(&p->lock, flags);
+		p->operation_mode &= ~RMNET_MODE_QOS;
+		spin_unlock_irqrestore(&p->lock, flags);
+		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
+			 dev->name);
+		break;
+
+	case RMNET_IOCTL_GET_QOS:	/* Get QoS header state */
+		ifr->ifr_ifru.ifru_data =
+		(void *)(p->operation_mode & RMNET_MODE_QOS);
+		break;
+
+	case RMNET_IOCTL_GET_OPMODE:	/* Get operation mode */
+		ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
+		break;
+
+	case RMNET_IOCTL_OPEN:		/* Open transport port */
+		rc = __rmnet_open(dev);
+		DBG0("[%s] rmnet_ioctl(): open transport port\n",
+			 dev->name);
+		break;
+
+	case RMNET_IOCTL_CLOSE:		/* Close transport port */
+		DBG0("[%s] rmnet_ioctl(): close transport port\n",
+			 dev->name);
+		break;
+
+	default:
+		pr_err("[%s] error: rmnet_ioct called for unsupported cmd[%d]",
+			   dev->name, cmd);
+		return -EINVAL;
+	}
+
+	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
+		 dev->name, __func__, cmd, old_opmode, p->operation_mode);
+	return rc;
+}
+
+static void __init rmnet_setup(struct net_device *dev)
+{
+	/* Using Ethernet mode by default */
+	dev->netdev_ops = &rmnet_ops_ether;
+	ether_setup(dev);
+
+	/* set this after calling ether_setup */
+	dev->mtu = RMNET_DATA_LEN;
+	dev->needed_headroom = HEADROOM_FOR_SMUX + HEADROOM_FOR_QOS;
+	dev->needed_tailroom = TAILROOM;
+	random_ether_addr(dev->dev_addr);
+
+	dev->watchdog_timeo = 1000;	/* 1000 jiffies (10 s at HZ=100) */
+}
+
+
+static int smux_rmnet_probe(struct platform_device *pdev)
+{
+	int i;
+	int r;
+	struct rmnet_private *p;
+
+	for (i = 0; i < RMNET_SMUX_DEVICE_COUNT; i++) {
+		p = netdev_priv(netdevs[i]);
+
+		if ((p != NULL) && (p->device_state == DEVICE_INACTIVE)) {
+			r =  msm_smux_open(p->ch_id,
+					   netdevs[i],
+					   rmnet_smux_notify,
+					   get_rx_buffers);
+
+			if (r < 0) {
+				DBG0("%s: ch=%d open failed with rc %d\n",
+				      __func__, p->ch_id, r);
+			}
+		}
+	}
+	return 0;
+}
+
+static int smux_rmnet_remove(struct platform_device *pdev)
+{
+	int i;
+	int r;
+	struct rmnet_private *p;
+
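+	/* Close the SMUX channel of every active device and take the
+	 * corresponding network interface down.
+	 */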
+	for (i = 0; i < RMNET_SMUX_DEVICE_COUNT; i++) {
+		p = netdev_priv(netdevs[i]);
+
+		if ((p != NULL) && (p->device_state == DEVICE_ACTIVE)) {
+			r =  msm_smux_close(p->ch_id);
+
+			if (r < 0) {
+				DBG0("%s: ch=%d close failed with rc %d\n",
+				     __func__, p->ch_id, r);
+				continue;
+			}
+			netif_carrier_off(netdevs[i]);
+			netif_stop_queue(netdevs[i]);
+		}
+	}
+	return 0;
+}
+
+
+static struct platform_driver smux_rmnet_driver = {
+	.probe  = smux_rmnet_probe,
+	.remove = smux_rmnet_remove,
+	.driver = {
+		.name = "SMUX_RMNET",
+		.owner = THIS_MODULE,
+	},
+};
+
+
+static int __init rmnet_init(void)
+{
+	int ret;
+	struct device *d;
+	struct net_device *dev;
+	struct rmnet_private *p;
+	unsigned n;
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+	timeout_us = 0;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	timeout_suspend_us = 0;
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* CONFIG_MSM_RMNET_DEBUG */
+
+	for (n = 0; n < RMNET_SMUX_DEVICE_COUNT; n++) {
+		dev = alloc_netdev(sizeof(struct rmnet_private),
+				   "rmnet_smux%d", rmnet_setup);
+
+		if (!dev) {
+			pr_err("%s: no memory for netdev %d\n", __func__, n);
+			return -ENOMEM;
+		}
+
+		netdevs[n] = dev;
+		d = &(dev->dev);
+		p = netdev_priv(dev);
+		/* Initial config uses Ethernet */
+		p->operation_mode = RMNET_MODE_LLP_ETH;
+		p->ch_id = n;
+		p->in_reset = 0;
+		spin_lock_init(&p->lock);
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		p->timeout_us = timeout_us;
+		p->wakeups_xmit = p->wakeups_rcv = 0;
+#endif
+
+		ret = register_netdev(dev);
+		if (ret) {
+			pr_err("%s: unable to register netdev"
+				   " %d rc=%d\n", __func__, n, ret);
+			free_netdev(dev);
+			return ret;
+		}
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		if (device_create_file(d, &dev_attr_timeout))
+			continue;
+		if (device_create_file(d, &dev_attr_wakeups_xmit))
+			continue;
+		if (device_create_file(d, &dev_attr_wakeups_rcv))
+			continue;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (device_create_file(d, &dev_attr_timeout_suspend))
+			continue;
+
+		/* Only care about rmnet0 for suspend/resume timeout hooks. */
+		if (n == 0)
+			rmnet0 = d;
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* CONFIG_MSM_RMNET_DEBUG */
+
+	}
+
+	ret = platform_driver_register(&smux_rmnet_driver);
+	if (ret) {
+		pr_err("%s: registration failed n=%d rc=%d\n",
+			   __func__, n, ret);
+		return ret;
+	}
+	return 0;
+}
+
+module_init(rmnet_init);
+MODULE_DESCRIPTION("MSM RMNET SMUX TRANSPORT");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/sps/bam.c b/drivers/platform/msm/sps/bam.c
index d7edb82..cf98f68 100644
--- a/drivers/platform/msm/sps/bam.c
+++ b/drivers/platform/msm/sps/bam.c
@@ -649,6 +649,8 @@
 	u32 cfg_bits = 0xffffffff & ~(1 << 11);
 	u32 ver = 0;
 
+	SPS_DBG2("sps:%s:bam=0x%x(va).ee=%d.", __func__, (u32) base, ee);
+
 	ver = bam_read_reg_field(base, REVISION, BAM_REVISION);
 
 	if ((ver < BAM_MIN_VERSION) || (ver > BAM_MAX_VERSION)) {
@@ -743,6 +745,8 @@
 	u32 mask;
 	u32 pipe;
 
+	SPS_DBG2("sps:%s:bam=0x%x(va).", __func__, (u32) base);
+
 	/*
 	 * Discover the hardware version number and the number of pipes
 	 * supported by this BAM
@@ -782,6 +786,8 @@
 {
 	u32 ver = 0;
 
+	SPS_DBG2("sps:%s:bam=0x%x(va).", __func__, (u32) base);
+
 	if (!bam_read_reg_field(base, CTRL, BAM_EN)) {
 		SPS_ERR("sps:%s:bam 0x%x(va) is not enabled.\n",
 				__func__, (u32) base);
@@ -813,6 +819,8 @@
  */
 void bam_exit(void *base, u32 ee)
 {
+	SPS_DBG2("sps:%s:bam=0x%x(va).ee=%d.", __func__, (u32) base, ee);
+
 	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), BAM_IRQ, 0);
 
 	bam_write_reg(base, IRQ_EN, 0);
@@ -899,6 +907,8 @@
 int bam_pipe_init(void *base, u32 pipe,	struct bam_pipe_parameters *param,
 					u32 ee)
 {
+	SPS_DBG2("sps:%s:bam=0x%x(va).pipe=%d.", __func__, (u32) base, pipe);
+
 	/* Reset the BAM pipe */
 	bam_write_reg(base, P_RST(pipe), 1);
 	/* No delay needed */
@@ -967,6 +977,8 @@
  */
 void bam_pipe_exit(void *base, u32 pipe, u32 ee)
 {
+	SPS_DBG2("sps:%s:bam=0x%x(va).pipe=%d.", __func__, (u32) base, pipe);
+
 	bam_write_reg(base, P_IRQ_EN(pipe), 0);
 
 	/* Disable the Pipe Interrupt at the BAM level */
@@ -982,6 +994,8 @@
  */
 void bam_pipe_enable(void *base, u32 pipe)
 {
+	SPS_DBG2("sps:%s:bam=0x%x(va).pipe=%d.", __func__, (u32) base, pipe);
+
 	bam_write_reg_field(base, P_CTRL(pipe), P_EN, 1);
 }
 
@@ -991,6 +1005,8 @@
  */
 void bam_pipe_disable(void *base, u32 pipe)
 {
+	SPS_DBG2("sps:%s:bam=0x%x(va).pipe=%d.", __func__, (u32) base, pipe);
+
 	bam_write_reg_field(base, P_CTRL(pipe), P_EN, 0);
 }
 
@@ -1010,6 +1026,8 @@
 void bam_pipe_set_irq(void *base, u32 pipe, enum bam_enable irq_en,
 		      u32 src_mask, u32 ee)
 {
+	SPS_DBG2("sps:%s:bam=0x%x(va).pipe=%d.", __func__, (u32) base, pipe);
+
 	bam_write_reg(base, P_IRQ_EN(pipe), src_mask);
 	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), (1 << pipe), irq_en);
 }
@@ -1287,7 +1305,7 @@
 		"BAM_P_IRQ_STTS: 0x%x\n"
 		"BAM_P_IRQ_STTS_P_TRNSFR_END_IRQ: 0x%x\n"
 		"BAM_P_IRQ_STTS_P_PRCSD_DESC_IRQ: 0x%x\n"
-		"BAM_P_IRQ_EN: %d\n"
+		"BAM_P_IRQ_EN: 0x%x\n"
 		"BAM_P_PRDCR_SDBNDn_BAM_P_BYTES_FREE: 0x%x (%d)\n"
 		"BAM_P_CNSMR_SDBNDn_BAM_P_BYTES_AVAIL: 0x%x (%d)\n"
 		"BAM_P_SW_DESC_OFST: 0x%x\n"
@@ -1335,4 +1353,50 @@
 		bam_read_reg_field(base, P_EVNT_GEN_TRSHLD(pipe),
 					P_EVNT_GEN_TRSHLD_P_TRSHLD));
 }
+
+/* output descriptor FIFO of a pipe */
+void print_bam_pipe_desc_fifo(void *virt_addr, u32 pipe_index)
+{
+	void *base = virt_addr;
+	u32 pipe = pipe_index;
+	u32 desc_fifo_addr;
+	u32 desc_fifo_size;
+	u32 *desc_fifo;
+	int i;
+
+	if (base == NULL)
+		return;
+
+	desc_fifo_addr = bam_read_reg(base, P_DESC_FIFO_ADDR(pipe));
+	desc_fifo_size = bam_read_reg_field(base, P_FIFO_SIZES(pipe),
+						P_DESC_FIFO_SIZE);
+
+	if (desc_fifo_addr == 0) {
+		SPS_ERR("sps:%s:desc FIFO address of Pipe %d is NULL.\n",
+			__func__, pipe);
+		return;
+	} else if (desc_fifo_size == 0) {
+		SPS_ERR("sps:%s:desc FIFO size of Pipe %d is 0.\n",
+			__func__, pipe);
+		return;
+	}
+
+	SPS_INFO("\nsps:----- descriptor FIFO of Pipe %d -----\n", pipe);
+
+	SPS_INFO("BAM_P_DESC_FIFO_ADDR: 0x%x\n"
+		"BAM_P_DESC_FIFO_SIZE: 0x%x (%d)\n\n",
+		desc_fifo_addr, desc_fifo_size, desc_fifo_size);
+
+	desc_fifo = (u32 *) phys_to_virt(desc_fifo_addr);
+
+	SPS_INFO("-------------------- begin of FIFO --------------------\n");
+
+	for (i = 0; i < desc_fifo_size; i += 0x10)
+		SPS_INFO("addr 0x%x: 0x%x, 0x%x, 0x%x, 0x%x.\n",
+			desc_fifo_addr + i,
+			desc_fifo[i / 4], desc_fifo[(i / 4) + 1],
+			desc_fifo[(i / 4) + 2], desc_fifo[(i / 4) + 3]);
+
+	SPS_INFO("--------------------  end of FIFO  --------------------\n");
+}
 #endif
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
index 0fe8f2a..fbaea09 100644
--- a/drivers/platform/msm/sps/sps.c
+++ b/drivers/platform/msm/sps/sps.c
@@ -327,6 +327,26 @@
 		print_bam_pipe_selected_reg(vir_addr, 4);
 		print_bam_pipe_selected_reg(vir_addr, 5);
 		break;
+	case 6: /* output desc FIFO of all active pipes */
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_desc_fifo(vir_addr, i);
+		break;
+	case 7: /* output desc FIFO of some pipes */
+		print_bam_pipe_desc_fifo(vir_addr, 4);
+		print_bam_pipe_desc_fifo(vir_addr, 5);
+		break;
+	case 8: /* output selected registers and valid desc FIFO of all pipes */
+		for (i = 0; i < num_pipes; i++) {
+			print_bam_pipe_selected_reg(vir_addr, i);
+			print_bam_pipe_desc_fifo(vir_addr, i);
+		}
+		break;
+	case 9: /* output selected registers and desc FIFO of some pipes */
+		print_bam_pipe_selected_reg(vir_addr, 4);
+		print_bam_pipe_desc_fifo(vir_addr, 4);
+		print_bam_pipe_selected_reg(vir_addr, 5);
+		print_bam_pipe_desc_fifo(vir_addr, 5);
+		break;
 	default:
 		pr_info("sps:no dump option is chosen yet.");
 	}
@@ -455,7 +475,7 @@
 	struct sps_bam_props bamdma_props = {0};
 #endif
 
-	SPS_DBG("sps:sps_device_init");
+	SPS_DBG2("sps:%s.", __func__);
 
 	success = false;
 
@@ -581,6 +601,8 @@
  */
 static int sps_client_init(struct sps_pipe *client)
 {
+	SPS_DBG("sps:%s.", __func__);
+
 	if (client == NULL)
 		return -EINVAL;
 
@@ -609,6 +631,8 @@
  */
 static int sps_client_de_init(struct sps_pipe *client)
 {
+	SPS_DBG("sps:%s.", __func__);
+
 	if (client->client_state != SPS_STATE_DISCONNECT) {
 		SPS_ERR("sps:De-init client in connected state: 0x%x",
 				   client->client_state);
@@ -637,6 +661,8 @@
 {
 	struct sps_bam *bam;
 
+	SPS_DBG("sps:%s.", __func__);
+
 	list_for_each_entry(bam, &sps->bams_q, list) {
 		if (bam->props.phys_addr == phys_addr)
 			return bam;
@@ -662,6 +688,13 @@
 {
 	struct sps_bam *bam;
 
+	SPS_DBG("sps:%s.", __func__);
+
+	if (handle == NULL) {
+		SPS_ERR("sps:%s:handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	list_for_each_entry(bam, &sps->bams_q, list) {
 		if (bam->props.phys_addr == phys_addr) {
 			*handle = (u32) bam;
@@ -692,6 +725,8 @@
 int sps_setup_bam2bam_fifo(struct sps_mem_buffer *mem_buffer,
 		  u32 addr, u32 size, int use_offset)
 {
+	SPS_DBG("sps:%s.", __func__);
+
 	if ((mem_buffer == NULL) || (size == 0)) {
 		SPS_ERR("sps:invalid buffer address or size.");
 		return SPS_ERROR;
@@ -741,6 +776,8 @@
 {
 	struct sps_bam *bam;
 
+	SPS_DBG("sps:%s.", __func__);
+
 	if (h == SPS_DEV_HANDLE_MEM || h == SPS_DEV_HANDLE_INVALID)
 		return NULL;
 
@@ -816,6 +853,16 @@
 	struct sps_bam *bam;
 	int result;
 
+	SPS_DBG2("sps:%s.", __func__);
+
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (connect == NULL) {
+		SPS_ERR("sps:%s:connection is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	if (sps == NULL)
 		return -ENODEV;
 
@@ -902,6 +949,8 @@
 	struct sps_bam *bam;
 	int result;
 
+	SPS_DBG2("sps:%s.", __func__);
+
 	if (pipe == NULL) {
 		SPS_ERR("sps:Invalid pipe.");
 		return SPS_ERROR;
@@ -959,6 +1008,14 @@
 
 	SPS_DBG2("sps:%s.", __func__);
 
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (reg == NULL) {
+		SPS_ERR("sps:%s:registered event is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	if (sps == NULL)
 		return -ENODEV;
 
@@ -993,6 +1050,11 @@
 
 	SPS_DBG2("sps:%s.", __func__);
 
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	bam = sps_bam_lock(pipe);
 	if (bam == NULL)
 		return SPS_ERROR;
@@ -1017,6 +1079,11 @@
 
 	SPS_DBG2("sps:%s.", __func__);
 
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	bam = sps_bam_lock(pipe);
 	if (bam == NULL)
 		return SPS_ERROR;
@@ -1041,6 +1108,14 @@
 
 	SPS_DBG("sps:%s.", __func__);
 
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (transfer == NULL) {
+		SPS_ERR("sps:%s:transfer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	bam = sps_bam_lock(pipe);
 	if (bam == NULL)
 		return SPS_ERROR;
@@ -1066,6 +1141,11 @@
 
 	SPS_DBG("sps:%s.", __func__);
 
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	if ((flags & SPS_IOVEC_FLAG_NWD) &&
 		!(flags & (SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_CMD))) {
 		SPS_ERR("sps:NWD is only valid with EOT or CMD.\n");
@@ -1119,6 +1199,14 @@
 
 	SPS_DBG("sps:%s.", __func__);
 
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (notify == NULL) {
+		SPS_ERR("sps:%s:event_notify is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	bam = sps_bam_lock(pipe);
 	if (bam == NULL)
 		return SPS_ERROR;
@@ -1142,6 +1230,14 @@
 
 	SPS_DBG("sps:%s.", __func__);
 
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (empty == NULL) {
+		SPS_ERR("sps:%s:result pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	bam = sps_bam_lock(pipe);
 	if (bam == NULL)
 		return SPS_ERROR;
@@ -1165,6 +1261,14 @@
 
 	SPS_DBG("sps:%s.", __func__);
 
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (count == NULL) {
+		SPS_ERR("sps:%s:result pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	bam = sps_bam_lock(pipe);
 	if (bam == NULL)
 		return SPS_ERROR;
@@ -1187,6 +1291,11 @@
 
 	SPS_DBG2("sps:%s: dev = 0x%x", __func__, dev);
 
+	if (dev == 0) {
+		SPS_ERR("sps:%s:device handle should not be 0.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	mutex_lock(&sps->lock);
 	/* Search for the target BAM device */
 	bam = sps_h2bam(dev);
@@ -1219,8 +1328,13 @@
 {
 	struct sps_pipe *pipe = h;
 
-	if (config == NULL) {
-		SPS_ERR("sps:Config pointer is NULL");
+	SPS_DBG("sps:%s.", __func__);
+
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (config == NULL) {
+		SPS_ERR("sps:%s:config pointer is NULL.\n", __func__);
 		return SPS_ERROR;
 	}
 
@@ -1243,6 +1357,14 @@
 
 	SPS_DBG("sps:%s.", __func__);
 
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (config == NULL) {
+		SPS_ERR("sps:%s:config pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	bam = sps_bam_lock(pipe);
 	if (bam == NULL)
 		return SPS_ERROR;
@@ -1268,6 +1390,16 @@
 	struct sps_bam *bam;
 	int result;
 
+	SPS_DBG("sps:%s.", __func__);
+
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (connect == NULL) {
+		SPS_ERR("sps:%s:connection is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	if (owner != SPS_OWNER_REMOTE) {
 		SPS_ERR("sps:Unsupported ownership state: %d", owner);
 		return SPS_ERROR;
@@ -1309,6 +1441,13 @@
 int sps_alloc_mem(struct sps_pipe *h, enum sps_mem mem,
 		  struct sps_mem_buffer *mem_buffer)
 {
+	SPS_DBG("sps:%s.", __func__);
+
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	if (sps == NULL)
 		return -ENODEV;
 
@@ -1340,6 +1479,13 @@
  */
 int sps_free_mem(struct sps_pipe *h, struct sps_mem_buffer *mem_buffer)
 {
+	SPS_DBG("sps:%s.", __func__);
+
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	if (mem_buffer == NULL || mem_buffer->phys_base == SPS_ADDR_INVALID) {
 		SPS_ERR("sps:invalid memory to free");
 		return SPS_ERROR;
@@ -1364,6 +1510,14 @@
 
 	SPS_DBG("sps:%s.", __func__);
 
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (desc_num == NULL) {
+		SPS_ERR("sps:%s:result pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	bam = sps_bam_lock(pipe);
 	if (bam == NULL)
 		return SPS_ERROR;
@@ -1390,6 +1544,16 @@
 	int ok;
 	int result;
 
+	SPS_DBG2("sps:%s.", __func__);
+
+	if (bam_props == NULL) {
+		SPS_ERR("sps:%s:bam_props is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (dev_handle == NULL) {
+		SPS_ERR("sps:%s:device handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	if (sps == NULL)
 		return SPS_ERROR;
 
@@ -1399,9 +1563,6 @@
 		return -EAGAIN;
 	}
 
-	if (bam_props == NULL || dev_handle == NULL)
-		return SPS_ERROR;
-
 	/* Check BAM parameters */
 	manage = bam_props->manage & SPS_BAM_MGR_ACCESS_MASK;
 	if (manage != SPS_BAM_MGR_NONE) {
@@ -1532,6 +1693,13 @@
 {
 	struct sps_bam *bam;
 
+	SPS_DBG2("sps:%s.", __func__);
+
+	if (dev_handle == 0) {
+		SPS_ERR("sps:%s:device handle should not be 0.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	bam = sps_h2bam(dev_handle);
 	if (bam == NULL) {
 		SPS_ERR("sps:did not find a BAM for this handle");
@@ -1578,13 +1746,16 @@
 	struct sps_bam *bam;
 	int result;
 
-	if (h == NULL || iovec == NULL) {
-		SPS_ERR("sps:invalid pipe or iovec");
+	SPS_DBG("sps:%s.", __func__);
+
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (iovec == NULL) {
+		SPS_ERR("sps:%s:iovec pointer is NULL.\n", __func__);
 		return SPS_ERROR;
 	}
 
-	SPS_DBG("sps:%s.", __func__);
-
 	bam = sps_bam_lock(pipe);
 	if (bam == NULL)
 		return SPS_ERROR;
@@ -1611,8 +1782,14 @@
 
 	SPS_DBG("sps:%s.", __func__);
 
-	if (h == NULL || timer_ctrl == NULL) {
-		SPS_ERR("sps:invalid pipe or timer ctrl");
+	if (h == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (timer_ctrl == NULL) {
+		SPS_ERR("sps:%s:timer_ctrl pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (timer_result == NULL) {
+		SPS_ERR("sps:%s:result pointer is NULL.\n", __func__);
 		return SPS_ERROR;
 	}
 
@@ -1637,6 +1814,8 @@
 {
 	struct sps_pipe *ctx = NULL;
 
+	SPS_DBG("sps:%s.", __func__);
+
 	ctx = kzalloc(sizeof(struct sps_pipe), GFP_KERNEL);
 	if (ctx == NULL) {
 		SPS_ERR("sps:Fail to allocate pipe context.");
@@ -1657,6 +1836,13 @@
 {
 	int res;
 
+	SPS_DBG("sps:%s.", __func__);
+
+	if (ctx == NULL) {
+		SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
 	res = sps_client_de_init(ctx);
 
 	if (res == 0)
@@ -1674,6 +1860,8 @@
 	struct resource *resource;
 	struct msm_sps_platform_data *pdata;
 
+	SPS_DBG("sps:%s.", __func__);
+
 	pdata = pdev->dev.platform_data;
 
 	if (pdata == NULL) {
@@ -1736,6 +1924,8 @@
 #ifdef CONFIG_SPS_SUPPORT_BAMDMA
 	struct resource *resource;
 
+	SPS_DBG("sps:%s.", __func__);
+
 	if (of_property_read_u32((&pdev->dev)->of_node,
 				"qcom,bam-dma-res-pipes",
 				&sps->bamdma_restricted_pipes)) {
@@ -1786,7 +1976,7 @@
 {
 	int ret;
 
-	SPS_DBG("sps:msm_sps_probe.");
+	SPS_DBG2("sps:%s.", __func__);
 
 	if (pdev->dev.of_node) {
 		if (get_device_tree_data(pdev)) {
diff --git a/drivers/platform/msm/sps/sps_bam.c b/drivers/platform/msm/sps/sps_bam.c
index e0289ad..245ccd2 100644
--- a/drivers/platform/msm/sps/sps_bam.c
+++ b/drivers/platform/msm/sps/sps_bam.c
@@ -221,7 +221,7 @@
 				    IRQF_TRIGGER_HIGH, "sps", dev);
 
 		if (result) {
-			SPS_ERR("sps:Failed to register BAM 0x%x IRQ %d",
+			SPS_ERR("sps:Failed to enable BAM 0x%x IRQ %d",
 				BAM_ID(dev), dev->props.irq);
 			return SPS_ERROR;
 		}
diff --git a/drivers/platform/msm/sps/spsi.h b/drivers/platform/msm/sps/spsi.h
index 8e4907b..e8ab832 100644
--- a/drivers/platform/msm/sps/spsi.h
+++ b/drivers/platform/msm/sps/spsi.h
@@ -193,6 +193,9 @@
 
 /* output the content of selected BAM pipe registers */
 void print_bam_pipe_selected_reg(void *, u32);
+
+/* output descriptor FIFO of a pipe */
+void print_bam_pipe_desc_fifo(void *, u32);
 #endif
 
 /**
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index b1a16bb..2c81f84 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -258,7 +258,6 @@
 	int				thermal_levels;
 	struct delayed_work		update_heartbeat_work;
 	struct delayed_work		eoc_work;
-	struct work_struct		unplug_ovp_fet_open_work;
 	struct delayed_work		unplug_check_work;
 	struct delayed_work		vin_collapse_check_work;
 	struct wake_lock		eoc_wake_lock;
@@ -1998,11 +1997,8 @@
 
 #define WRITE_BANK_4		0xC0
 #define USB_OVP_DEBOUNCE_TIME 0x06
-static void unplug_ovp_fet_open_worker(struct work_struct *work)
+static void unplug_ovp_fet_open(struct pm8921_chg_chip *chip)
 {
-	struct pm8921_chg_chip *chip = container_of(work,
-				struct pm8921_chg_chip,
-				unplug_ovp_fet_open_work);
 	int chg_gone, usb_chg_plugged_in;
 	int count = 0;
 
@@ -2348,7 +2344,7 @@
 		/* run the worker directly */
 		pr_debug(" ver5 step: chg_gone=%d, usb_valid = %d\n",
 						chg_gone, usb_chg_plugged_in);
-		schedule_work(&chip->unplug_ovp_fet_open_work);
+		unplug_ovp_fet_open(chip);
 	}
 
 	if (!(reg_loop & VIN_ACTIVE_BIT)) {
@@ -2453,8 +2449,7 @@
 static irqreturn_t chg_gone_irq_handler(int irq, void *data)
 {
 	struct pm8921_chg_chip *chip = data;
-	u8 reg;
-	int rc, chg_gone, usb_chg_plugged_in;
+	int chg_gone, usb_chg_plugged_in;
 
 	usb_chg_plugged_in = is_usb_chg_plugged_in(chip);
 	chg_gone = pm_chg_get_rt_status(chip, CHG_GONE_IRQ);
@@ -2462,14 +2457,6 @@
 	pr_debug("chg_gone=%d, usb_valid = %d\n", chg_gone, usb_chg_plugged_in);
 	pr_debug("Chg gone fsm_state=%d\n", pm_chg_get_fsm_state(data));
 
-	rc = pm8xxx_readb(chip->dev->parent, CHG_CNTRL_3, &reg);
-	if (rc)
-		pr_err("Failed to read CHG_CNTRL_3 rc=%d\n", rc);
-
-	if (reg & CHG_USB_SUSPEND_BIT)
-		return IRQ_HANDLED;
-	schedule_work(&chip->unplug_ovp_fet_open_work);
-
 	power_supply_changed(&chip->batt_psy);
 	power_supply_changed(&chip->usb_psy);
 	return IRQ_HANDLED;
@@ -3885,8 +3872,6 @@
 	INIT_DELAYED_WORK(&chip->eoc_work, eoc_worker);
 	INIT_DELAYED_WORK(&chip->vin_collapse_check_work,
 						vin_collapse_check_worker);
-	INIT_WORK(&chip->unplug_ovp_fet_open_work,
-					unplug_ovp_fet_open_worker);
 	INIT_DELAYED_WORK(&chip->unplug_check_work, unplug_check_worker);
 
 	rc = request_irqs(chip, pdev);
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 70131fa..ccb8012 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -29,14 +29,17 @@
  *
  */
 
-#include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/oom.h>
-#include <linux/sched.h>
-#include <linux/notifier.h>
+#include <linux/kobject.h>
 #include <linux/memory.h>
 #include <linux/memory_hotplug.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
 
 static uint32_t lowmem_debug_level = 2;
 static int lowmem_adj[6] = {
@@ -54,9 +57,12 @@
 };
 static int lowmem_minfree_size = 4;
 
+static size_t lowmem_minfree_notif_trigger;
+
 static unsigned int offlining;
 static struct task_struct *lowmem_deathpending;
 static unsigned long lowmem_deathpending_timeout;
+static struct kobject *lowmem_kobj;
 
 #define lowmem_print(level, x...)			\
 	do {						\
@@ -108,6 +114,29 @@
 
 
 
+static void lowmem_notify_killzone_approach(void);
+
+static inline void get_free_ram(int *other_free, int *other_file)
+{
+	struct zone *zone;
+	*other_free = global_page_state(NR_FREE_PAGES);
+	*other_file = global_page_state(NR_FILE_PAGES) -
+						global_page_state(NR_SHMEM);
+
+	if (offlining) {
+		/* Discount all free space in the section being offlined */
+		for_each_zone(zone) {
+			 if (zone_idx(zone) == ZONE_MOVABLE) {
+				*other_free -= zone_page_state(zone,
+						NR_FREE_PAGES);
+				lowmem_print(4, "lowmem_shrink discounted "
+					"%lu pages in movable zone\n",
+					zone_page_state(zone, NR_FREE_PAGES));
+			}
+		}
+	}
+}
+
 static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
 {
 	struct task_struct *p;
@@ -119,23 +148,8 @@
 	int selected_tasksize = 0;
 	int selected_oom_adj;
 	int array_size = ARRAY_SIZE(lowmem_adj);
-	int other_free = global_page_state(NR_FREE_PAGES);
-	int other_file = global_page_state(NR_FILE_PAGES) -
-						global_page_state(NR_SHMEM);
-	struct zone *zone;
-
-	if (offlining) {
-		/* Discount all free space in the section being offlined */
-		for_each_zone(zone) {
-			 if (zone_idx(zone) == ZONE_MOVABLE) {
-				other_free -= zone_page_state(zone,
-						NR_FREE_PAGES);
-				lowmem_print(4, "lowmem_shrink discounted "
-					"%lu pages in movable zone\n",
-					zone_page_state(zone, NR_FREE_PAGES));
-			}
-		}
-	}
+	int other_free;
+	int other_file;
 	/*
 	 * If we already have a death outstanding, then
 	 * bail out right away; indicating to vmscan
@@ -147,6 +161,13 @@
 	    time_before_eq(jiffies, lowmem_deathpending_timeout))
 		return 0;
 
+	get_free_ram(&other_free, &other_file);
+
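+	/* Notify userspace through sysfs when free memory falls below the
+	 * configured notification trigger.
+	 */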
+	if (other_free < lowmem_minfree_notif_trigger &&
+			other_file < lowmem_minfree_notif_trigger) {
+		lowmem_notify_killzone_approach();
+	}
+
 	if (lowmem_adj_size < array_size)
 		array_size = lowmem_adj_size;
 	if (lowmem_minfree_size < array_size)
@@ -228,18 +249,91 @@
 	.seeks = DEFAULT_SEEKS * 16
 };
 
+static void lowmem_notify_killzone_approach(void)
+{
+	lowmem_print(3, "notification trigger activated\n");
+	sysfs_notify(lowmem_kobj, NULL, "notify_trigger_active");
+}
+
+static ssize_t lowmem_notify_trigger_active_show(struct kobject *k,
+		struct kobj_attribute *attr, char *buf)
+{
+	int other_free, other_file;
+	get_free_ram(&other_free, &other_file);
+	if (other_free < lowmem_minfree_notif_trigger &&
+			other_file < lowmem_minfree_notif_trigger)
+		return snprintf(buf, 3, "1\n");
+	else
+		return snprintf(buf, 3, "0\n");
+}
+
+static struct kobj_attribute lowmem_notify_trigger_active_attr =
+	__ATTR(notify_trigger_active, S_IRUGO,
+			lowmem_notify_trigger_active_show, NULL);
+
+static struct attribute *lowmem_default_attrs[] = {
+	&lowmem_notify_trigger_active_attr.attr,
+	NULL,
+};
+
+static ssize_t lowmem_show(struct kobject *k, struct attribute *attr, char *buf)
+{
+	struct kobj_attribute *kobj_attr;
+	kobj_attr = container_of(attr, struct kobj_attribute, attr);
+	return kobj_attr->show(k, kobj_attr, buf);
+}
+
+static const struct sysfs_ops lowmem_ops = {
+	.show = lowmem_show,
+};
+
+static void lowmem_kobj_release(struct kobject *kobj)
+{
+	/* Nothing to be done here */
+}
+
+static struct kobj_type lowmem_kobj_type = {
+	.release = lowmem_kobj_release,
+	.sysfs_ops = &lowmem_ops,
+	.default_attrs = lowmem_default_attrs,
+};
+
 static int __init lowmem_init(void)
 {
+	int rc;
 	task_free_register(&task_nb);
 	register_shrinker(&lowmem_shrinker);
 #ifdef CONFIG_MEMORY_HOTPLUG
 	hotplug_memory_notifier(lmk_hotplug_callback, 0);
 #endif
+
+	lowmem_kobj = kzalloc(sizeof(*lowmem_kobj), GFP_KERNEL);
+	if (!lowmem_kobj) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	rc = kobject_init_and_add(lowmem_kobj, &lowmem_kobj_type,
+			mm_kobj, "lowmemkiller");
+	if (rc)
+		goto err_kobj;
+
 	return 0;
+
+err_kobj:
+	kfree(lowmem_kobj);
+
+err:
+	unregister_shrinker(&lowmem_shrinker);
+	task_free_unregister(&task_nb);
+
+	return rc;
 }
 
 static void __exit lowmem_exit(void)
 {
+	kobject_put(lowmem_kobj);
+	kfree(lowmem_kobj);
 	unregister_shrinker(&lowmem_shrinker);
 	task_free_unregister(&task_nb);
 }
@@ -250,6 +344,8 @@
 module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
 			 S_IRUGO | S_IWUSR);
 module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
+module_param_named(notify_trigger, lowmem_minfree_notif_trigger, uint,
+			 S_IRUGO | S_IWUSR);
 
 module_init(lowmem_init);
 module_exit(lowmem_exit);
diff --git a/drivers/tty/n_smux.c b/drivers/tty/n_smux.c
index 7ba54fe..8d2a16e 100644
--- a/drivers/tty/n_smux.c
+++ b/drivers/tty/n_smux.c
@@ -2707,7 +2707,6 @@
 	int i;
 	int tmp;
 	unsigned long flags;
-	int ret = 0;
 
 	if (!smux.is_initialized)
 		return -ENODEV;
@@ -2716,14 +2715,14 @@
 	if (smux.ld_open_count) {
 		pr_err("%s: %p multiple instances not supported\n",
 			__func__, tty);
-		ret = -EEXIST;
-		goto out;
+		spin_unlock_irqrestore(&smux.lock_lha0, flags);
+		return -EEXIST;
 	}
 
 	++smux.ld_open_count;
 	if (tty->ops->write == NULL) {
-		ret = -EINVAL;
-		goto out;
+		spin_unlock_irqrestore(&smux.lock_lha0, flags);
+		return -EINVAL;
 	}
 
 	/* connect to TTY */
@@ -2742,6 +2741,7 @@
 	} else {
 		spin_unlock(&smux.tx_lock_lha2);
 	}
+	spin_unlock_irqrestore(&smux.lock_lha0, flags);
 
 	/* register platform devices */
 	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
@@ -2750,10 +2750,7 @@
 			pr_err("%s: error %d registering device %s\n",
 				   __func__, tmp, smux_devs[i].name);
 	}
-
-out:
-	spin_unlock_irqrestore(&smux.lock_lha0, flags);
-	return ret;
+	return 0;
 }
 
 static void smuxld_close(struct tty_struct *tty)
@@ -2765,16 +2762,15 @@
 	if (smux.ld_open_count <= 0) {
 		pr_err("%s: invalid ld count %d\n", __func__,
 			smux.ld_open_count);
-		goto out;
+		spin_unlock_irqrestore(&smux.lock_lha0, flags);
+		return;
 	}
+	spin_unlock_irqrestore(&smux.lock_lha0, flags);
 
 	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i)
 		platform_device_unregister(&smux_devs[i]);
 
 	--smux.ld_open_count;
-
-out:
-	spin_unlock_irqrestore(&smux.lock_lha0, flags);
 }
 
 /**
diff --git a/drivers/tty/serial/msm_serial_hs_lite.c b/drivers/tty/serial/msm_serial_hs_lite.c
index 62d25cf..59104ed 100644
--- a/drivers/tty/serial/msm_serial_hs_lite.c
+++ b/drivers/tty/serial/msm_serial_hs_lite.c
@@ -123,7 +123,7 @@
 	{}
 };
 static struct dentry *debug_base;
-static inline void wait_for_xmitr(struct uart_port *port, int bits);
+static inline void wait_for_xmitr(struct uart_port *port);
 static int get_console_state(struct uart_port *port);
 static inline void msm_hsl_write(struct uart_port *port,
 				 unsigned int val, unsigned int off)
@@ -383,15 +383,16 @@
 
 	/* Handle x_char */
 	if (port->x_char) {
-		wait_for_xmitr(port, UARTDM_ISR_TX_READY_BMSK);
-		msm_hsl_write(port, tx_count + 1,
-			regmap[vid][UARTDM_NCF_TX]);
+		wait_for_xmitr(port);
+		msm_hsl_write(port, tx_count + 1, regmap[vid][UARTDM_NCF_TX]);
+		msm_hsl_read(port, regmap[vid][UARTDM_NCF_TX]);
 		msm_hsl_write(port, port->x_char, regmap[vid][UARTDM_TF]);
 		port->icount.tx++;
 		port->x_char = 0;
 	} else if (tx_count) {
-		wait_for_xmitr(port, UARTDM_ISR_TX_READY_BMSK);
+		wait_for_xmitr(port);
 		msm_hsl_write(port, tx_count, regmap[vid][UARTDM_NCF_TX]);
+		msm_hsl_read(port, regmap[vid][UARTDM_NCF_TX]);
 	}
 	if (!tx_count) {
 		msm_hsl_stop_tx(port);
@@ -1103,7 +1104,7 @@
 /*
  *  Wait for transmitter & holding register to empty
  *  Derived from wait_for_xmitr in 8250 serial driver by Russell King  */
-void wait_for_xmitr(struct uart_port *port, int bits)
+static void wait_for_xmitr(struct uart_port *port)
 {
 	struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
 	unsigned int vid = msm_hsl_port->ver_id;
@@ -1111,8 +1112,8 @@
 
 	if (!(msm_hsl_read(port, regmap[vid][UARTDM_SR]) &
 			UARTDM_SR_TXEMT_BMSK)) {
-		while ((msm_hsl_read(port, regmap[vid][UARTDM_ISR]) &
-					bits) != bits) {
+		while (!(msm_hsl_read(port, regmap[vid][UARTDM_ISR]) &
+			UARTDM_ISR_TX_READY_BMSK)) {
 			udelay(1);
 			touch_nmi_watchdog();
 			cpu_relax();
@@ -1130,7 +1131,7 @@
 {
 	unsigned int vid = UART_TO_MSM(port)->ver_id;
 
-	wait_for_xmitr(port, UARTDM_ISR_TX_READY_BMSK);
+	wait_for_xmitr(port);
 	msm_hsl_write(port, 1, regmap[vid][UARTDM_NCF_TX]);
 	/*
 	 * Dummy read to add 1 AHB clock delay to fix UART hardware bug.
diff --git a/drivers/tty/smux_ctl.c b/drivers/tty/smux_ctl.c
index 26a49a0..69adbf3 100644
--- a/drivers/tty/smux_ctl.c
+++ b/drivers/tty/smux_ctl.c
@@ -42,7 +42,6 @@
 #define SMUX_CTL_MAX_BUF_SIZE 2048
 #define SMUX_CTL_MODULE_NAME "smux_ctl"
 #define DEBUG
-#define DEBUG_LOOPBACK
 
 static int msm_smux_ctl_debug_mask;
 module_param_named(debug_mask, msm_smux_ctl_debug_mask,
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index 15fc0c1..92e95a6 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -61,6 +61,8 @@
 #include "u_rmnet_ctrl_smd.c"
 #include "u_ctrl_hsic.c"
 #include "u_data_hsic.c"
+#include "u_ctrl_hsuart.c"
+#include "u_data_hsuart.c"
 #include "f_serial.c"
 #include "f_acm.c"
 #include "f_adb.c"
@@ -200,7 +202,8 @@
 	u32 swfi_latency = 0;
 	static int last_vote = -1;
 
-	if (!pdata || vote == last_vote)
+	if (!pdata || vote == last_vote
+		|| !pdata->swfi_latency)
 		return;
 
 	swfi_latency = pdata->swfi_latency + 1;
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index b29ef82..a86fc22 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -3306,9 +3306,8 @@
 	udc->gadget.dev.parent   = dev;
 	udc->gadget.dev.release  = udc_release;
 
-	udc->transceiver = otg_get_transceiver();
-
 	if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
+		udc->transceiver = otg_get_transceiver();
 		if (udc->transceiver == NULL) {
 			retval = -ENODEV;
 			goto free_udc;
diff --git a/drivers/usb/gadget/f_mbim.c b/drivers/usb/gadget/f_mbim.c
index b2a013c..41a1777 100644
--- a/drivers/usb/gadget/f_mbim.c
+++ b/drivers/usb/gadget/f_mbim.c
@@ -216,7 +216,7 @@
 	.wMaxControlMessage =	cpu_to_le16(0x1000),
 	.bNumberFilters =	0x10,
 	.bMaxFilterSize =	0x80,
-	.wMaxSegmentSize =	cpu_to_le16(0x1000),
+	.wMaxSegmentSize =	cpu_to_le16(0xfe0),
 	.bmNetworkCapabilities = 0x20,
 };
 
diff --git a/drivers/usb/gadget/f_rmnet.c b/drivers/usb/gadget/f_rmnet.c
index f7230fe..414a7b9 100644
--- a/drivers/usb/gadget/f_rmnet.c
+++ b/drivers/usb/gadget/f_rmnet.c
@@ -55,9 +55,11 @@
 static unsigned int nr_rmnet_ports;
 static unsigned int no_ctrl_smd_ports;
 static unsigned int no_ctrl_hsic_ports;
+static unsigned int no_ctrl_hsuart_ports;
 static unsigned int no_data_bam_ports;
 static unsigned int no_data_bam2bam_ports;
 static unsigned int no_data_hsic_ports;
+static unsigned int no_data_hsuart_ports;
 static struct rmnet_ports {
 	enum transport_type		data_xport;
 	enum transport_type		ctrl_xport;
@@ -162,6 +164,8 @@
 	NULL,
 };
 
+static void frmnet_ctrl_response_available(struct f_rmnet *dev);
+
 /* ------- misc functions --------------------*/
 
 static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
@@ -232,12 +236,12 @@
 	int	port_idx;
 	int	i;
 
-	pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u"
-		" smd ports: %u ctrl hsic ports: %u"
+	pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u data hsuart ports: %u"
+		" smd ports: %u ctrl hsic ports: %u ctrl hsuart ports: %u"
 	" nr_rmnet_ports: %u\n",
 		__func__, no_data_bam_ports, no_data_bam2bam_ports,
-		no_data_hsic_ports, no_ctrl_smd_ports,
-		no_ctrl_hsic_ports, nr_rmnet_ports);
+		no_data_hsic_ports, no_data_hsuart_ports, no_ctrl_smd_ports,
+		no_ctrl_hsic_ports, no_ctrl_hsuart_ports, nr_rmnet_ports);
 
 	if (no_data_bam_ports || no_data_bam2bam_ports) {
 		ret = gbam_setup(no_data_bam_ports,
@@ -280,6 +284,34 @@
 		}
 	}
 
+	if (no_data_hsuart_ports) {
+		port_idx = ghsuart_data_setup(no_data_hsuart_ports,
+				USB_GADGET_RMNET);
+		if (port_idx < 0)
+			return port_idx;
+		for (i = 0; i < nr_rmnet_ports; i++) {
+			if (rmnet_ports[i].data_xport ==
+					USB_GADGET_XPORT_HSUART) {
+				rmnet_ports[i].data_xport_num = port_idx;
+				port_idx++;
+			}
+		}
+	}
+
+	if (no_ctrl_hsuart_ports) {
+		port_idx = ghsuart_ctrl_setup(no_ctrl_hsuart_ports,
+				USB_GADGET_RMNET);
+		if (port_idx < 0)
+			return port_idx;
+		for (i = 0; i < nr_rmnet_ports; i++) {
+			if (rmnet_ports[i].ctrl_xport ==
+					USB_GADGET_XPORT_HSUART) {
+				rmnet_ports[i].ctrl_xport_num = port_idx;
+				port_idx++;
+			}
+		}
+	}
+
 	return 0;
 }
 
@@ -312,6 +344,14 @@
 			return ret;
 		}
 		break;
+	case USB_GADGET_XPORT_HSUART:
+		ret = ghsuart_ctrl_connect(&dev->port, port_num);
+		if (ret) {
+			pr_err("%s: ghsuart_ctrl_connect failed: err:%d\n",
+					__func__, ret);
+			return ret;
+		}
+		break;
 	case USB_GADGET_XPORT_NONE:
 		break;
 	default:
@@ -342,6 +382,15 @@
 			return ret;
 		}
 		break;
+	case USB_GADGET_XPORT_HSUART:
+		ret = ghsuart_data_connect(&dev->port, port_num);
+		if (ret) {
+			pr_err("%s: ghsuart_data_connect failed: err:%d\n",
+					__func__, ret);
+			ghsuart_ctrl_disconnect(&dev->port, port_num);
+			return ret;
+		}
+		break;
 	case USB_GADGET_XPORT_NONE:
 		 break;
 	default:
@@ -371,6 +420,9 @@
 	case USB_GADGET_XPORT_HSIC:
 		ghsic_ctrl_disconnect(&dev->port, port_num);
 		break;
+	case USB_GADGET_XPORT_HSUART:
+		ghsuart_ctrl_disconnect(&dev->port, port_num);
+		break;
 	case USB_GADGET_XPORT_NONE:
 		break;
 	default:
@@ -388,6 +440,9 @@
 	case USB_GADGET_XPORT_HSIC:
 		ghsic_data_disconnect(&dev->port, port_num);
 		break;
+	case USB_GADGET_XPORT_HSUART:
+		ghsuart_data_disconnect(&dev->port, port_num);
+		break;
 	case USB_GADGET_XPORT_NONE:
 		break;
 	default:
@@ -501,6 +556,7 @@
 	struct f_rmnet			*dev = func_to_rmnet(f);
 	struct usb_composite_dev	*cdev = dev->cdev;
 	int				ret;
+	struct list_head *cpkt;
 
 	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);
 
@@ -537,6 +593,11 @@
 
 	atomic_set(&dev->online, 1);
 
+	/* In case notifications were aborted, but there are pending control
+	 * packets in the response queue, re-add the notifications.
+	 */
+	list_for_each(cpkt, &dev->cpkt_resp_q)
+		frmnet_ctrl_response_available(dev);
+
 	return ret;
 }
 
@@ -998,6 +1059,8 @@
 	no_data_bam2bam_ports = 0;
 	no_ctrl_hsic_ports = 0;
 	no_data_hsic_ports = 0;
+	no_ctrl_hsuart_ports = 0;
+	no_data_hsuart_ports = 0;
 }
 
 static int frmnet_init_port(const char *ctrl_name, const char *data_name)
@@ -1041,6 +1104,10 @@
 		rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports;
 		no_ctrl_hsic_ports++;
 		break;
+	case USB_GADGET_XPORT_HSUART:
+		rmnet_port->ctrl_xport_num = no_ctrl_hsuart_ports;
+		no_ctrl_hsuart_ports++;
+		break;
 	case USB_GADGET_XPORT_NONE:
 		break;
 	default:
@@ -1063,6 +1130,10 @@
 		rmnet_port->data_xport_num = no_data_hsic_ports;
 		no_data_hsic_ports++;
 		break;
+	case USB_GADGET_XPORT_HSUART:
+		rmnet_port->data_xport_num = no_data_hsuart_ports;
+		no_data_hsuart_ports++;
+		break;
 	case USB_GADGET_XPORT_NONE:
 		break;
 	default:
@@ -1084,6 +1155,8 @@
 	no_data_bam_ports = 0;
 	no_ctrl_hsic_ports = 0;
 	no_data_hsic_ports = 0;
+	no_ctrl_hsuart_ports = 0;
+	no_data_hsuart_ports = 0;
 
 	return ret;
 }
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 8d9f090..08a1712 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -27,7 +27,7 @@
  * CDC ACM driver.  However, for many purposes it's just as functional
  * if you can arrange appropriate host side drivers.
  */
-#define GSERIAL_NO_PORTS 2
+#define GSERIAL_NO_PORTS 3
 
 
 struct f_gser {
@@ -67,6 +67,7 @@
 static unsigned int no_sdio_ports;
 static unsigned int no_smd_ports;
 static unsigned int no_hsic_sports;
+static unsigned int no_hsuart_sports;
 static unsigned int nr_ports;
 
 static struct port_info {
@@ -249,9 +250,9 @@
 	int i;
 
 	pr_debug("%s: no_tty_ports: %u no_sdio_ports: %u"
-		" no_smd_ports: %u no_hsic_sports: %u nr_ports: %u\n",
+		" no_smd_ports: %u no_hsic_sports: %u no_hsuart_ports: %u nr_ports: %u\n",
 			__func__, no_tty_ports, no_sdio_ports, no_smd_ports,
-			no_hsic_sports, nr_ports);
+			no_hsic_sports, no_hsuart_sports, nr_ports);
 
 	if (no_tty_ports)
 		ret = gserial_setup(c->cdev->gadget, no_tty_ports);
@@ -278,6 +279,22 @@
 			return ret;
 		return 0;
 	}
+	if (no_hsuart_sports) {
+		port_idx = ghsuart_data_setup(no_hsuart_sports,
+					USB_GADGET_SERIAL);
+		if (port_idx < 0)
+			return port_idx;
+
+		for (i = 0; i < nr_ports; i++) {
+			if (gserial_ports[i].transport ==
+					USB_GADGET_XPORT_HSUART) {
+				gserial_ports[i].client_port_num = port_idx;
+				port_idx++;
+			}
+		}
+
+		return 0;
+	}
 	return ret;
 }
 
@@ -317,6 +334,14 @@
 			return ret;
 		}
 		break;
+	case USB_GADGET_XPORT_HSUART:
+		ret = ghsuart_data_connect(&gser->port, port_num);
+		if (ret) {
+			pr_err("%s: ghsuart_data_connect failed: err:%d\n",
+					__func__, ret);
+			return ret;
+		}
+		break;
 	default:
 		pr_err("%s: Un-supported transport: %s\n", __func__,
 				xport_to_str(gser->transport));
@@ -350,6 +375,9 @@
 		ghsic_ctrl_disconnect(&gser->port, port_num);
 		ghsic_data_disconnect(&gser->port, port_num);
 		break;
+	case USB_GADGET_XPORT_HSUART:
+		ghsuart_data_disconnect(&gser->port, port_num);
+		break;
 	default:
 		pr_err("%s: Un-supported transport:%s\n", __func__,
 				xport_to_str(gser->transport));
@@ -854,11 +882,13 @@
 	gser->port.func.disable = gser_disable;
 	gser->transport		= gserial_ports[port_num].transport;
 #ifdef CONFIG_MODEM_SUPPORT
-	/* We support only two ports for now */
+	/* We support only three ports for now */
 	if (port_num == 0)
 		gser->port.func.name = "modem";
-	else
+	else if (port_num == 1)
 		gser->port.func.name = "nmea";
+	else
+		gser->port.func.name = "modem2";
 	gser->port.func.setup = gser_setup;
 	gser->port.connect = gser_connect;
 	gser->port.get_dtr = gser_get_dtr;
@@ -910,6 +940,10 @@
 		/*client port number will be updated in gport_setup*/
 		no_hsic_sports++;
 		break;
+	case USB_GADGET_XPORT_HSUART:
+		/*client port number will be updated in gport_setup*/
+		no_hsuart_sports++;
+		break;
 	default:
 		pr_err("%s: Un-supported transport transport: %u\n",
 				__func__, gserial_ports[port_num].transport);
diff --git a/drivers/usb/gadget/msm72k_udc.c b/drivers/usb/gadget/msm72k_udc.c
index 863ddcd..0d53da0 100644
--- a/drivers/usb/gadget/msm72k_udc.c
+++ b/drivers/usb/gadget/msm72k_udc.c
@@ -2018,10 +2018,14 @@
 static int
 msm72k_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
 {
-	struct msm_endpoint *ept = to_msm_endpoint(_ep);
-	unsigned char ep_type =
-			desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+	struct msm_endpoint *ept;
+	unsigned char ep_type;
 
+	if (_ep == NULL || desc == NULL)
+		return -EINVAL;
+
+	ept = to_msm_endpoint(_ep);
+	ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
 	_ep->maxpacket = le16_to_cpu(desc->wMaxPacketSize);
 	config_ept(ept);
 	ept->wedged = 0;
diff --git a/drivers/usb/gadget/u_ctrl_hsuart.c b/drivers/usb/gadget/u_ctrl_hsuart.c
new file mode 100644
index 0000000..7102d81
--- /dev/null
+++ b/drivers/usb/gadget/u_ctrl_hsuart.c
@@ -0,0 +1,576 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/debugfs.h>
+#include <linux/smux.h>
+
+#include <mach/usb_gadget_xport.h>
+
+#define CH_OPENED 0
+#define CH_READY 1
+
+static unsigned int num_ctrl_ports;
+
+static const char *ghsuart_ctrl_names[] = {
+	"SMUX_RMNET_CTL_HSUART"
+};
+
+struct ghsuart_ctrl_port {
+	/* port */
+	unsigned port_num;
+	/* gadget */
+	enum gadget_type gtype;
+	spinlock_t port_lock;
+	void *port_usb;
+	/* work queue*/
+	struct workqueue_struct	*wq;
+	struct work_struct connect_w;
+	struct work_struct disconnect_w;
+	/*ctrl pkt response cb*/
+	int (*send_cpkt_response)(void *g, void *buf, size_t len);
+	void *ctxt;
+	unsigned int ch_id;
+	/* flow control bits */
+	unsigned long flags;
+	int (*send_pkt)(void *, void *, size_t actual);
+	/* Channel status */
+	unsigned long channel_sts;
+	/* control bits */
+	unsigned cbits_tomodem;
+	/* counters */
+	unsigned long to_modem;
+	unsigned long to_host;
+	unsigned long drp_cpkt_cnt;
+};
+
+static struct {
+	struct ghsuart_ctrl_port	*port;
+	struct platform_driver	pdrv;
+} ghsuart_ctrl_ports[NUM_HSUART_PORTS];
+
+static int ghsuart_ctrl_receive(void *dev, void *buf, size_t actual);
+
+static void smux_control_event(void *priv, int event_type, const void *metadata)
+{
+	struct grmnet		*gr = NULL;
+	struct ghsuart_ctrl_port	*port = priv;
+	void			*buf;
+	unsigned long		flags;
+	size_t			len;
+
+	switch (event_type) {
+	case SMUX_CONNECTED:
+		spin_lock_irqsave(&port->port_lock, flags);
+		if (!port->port_usb) {
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			return;
+		}
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		set_bit(CH_OPENED, &port->channel_sts);
+		if (port->gtype == USB_GADGET_RMNET) {
+			gr = port->port_usb;
+			if (gr && gr->connect)
+				gr->connect(gr);
+		}
+		break;
+	case SMUX_DISCONNECTED:
+		clear_bit(CH_OPENED, &port->channel_sts);
+		break;
+	case SMUX_READ_DONE:
+		len = ((struct smux_meta_read *)metadata)->len;
+		buf = ((struct smux_meta_read *)metadata)->buffer;
+		ghsuart_ctrl_receive(port, buf, len);
+		break;
+	case SMUX_READ_FAIL:
+		buf = ((struct smux_meta_read *)metadata)->buffer;
+		kfree(buf);
+		break;
+	case SMUX_WRITE_DONE:
+	case SMUX_WRITE_FAIL:
+		buf = ((struct smux_meta_write *)metadata)->buffer;
+		kfree(buf);
+		break;
+	case SMUX_LOW_WM_HIT:
+	case SMUX_HIGH_WM_HIT:
+	case SMUX_TIOCM_UPDATE:
+		break;
+	default:
+		pr_err("%s Event %d not supported\n", __func__, event_type);
+	};
+}
+
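+/* Buffer allocation callback used by SMUX to obtain receive buffers. */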
+static int rx_control_buffer(void *priv, void **pkt_priv, void **buffer,
+			int size)
+{
+	void *rx_buf;
+
+	rx_buf = kmalloc(size, GFP_KERNEL);
+	if (!rx_buf)
+		return -EAGAIN;
+	*buffer = rx_buf;
+	*pkt_priv = NULL;
+
+	return 0;
+}
+
+static int ghsuart_ctrl_receive(void *dev, void *buf, size_t actual)
+{
+	struct ghsuart_ctrl_port	*port = dev;
+	int retval = 0;
+
+	pr_debug_ratelimited("%s: read complete bytes read: %zu\n",
+			__func__, actual);
+
+	/* send it to USB here */
+	if (port && port->send_cpkt_response) {
+		retval = port->send_cpkt_response(port->port_usb, buf, actual);
+		port->to_host++;
+	}
+	kfree(buf);
+	return retval;
+}
+
+static int
+ghsuart_send_cpkt_tomodem(u8 portno, void *buf, size_t len)
+{
+	void			*cbuf;
+	struct ghsuart_ctrl_port	*port;
+	int			ret;
+
+	if (portno >= num_ctrl_ports) {
+		pr_err("%s: Invalid portno#%d\n", __func__, portno);
+		return -ENODEV;
+	}
+
+	port = ghsuart_ctrl_ports[portno].port;
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return -ENODEV;
+	}
+	/* drop cpkt if ch is not open */
+	if (!test_bit(CH_OPENED, &port->channel_sts)) {
+		port->drp_cpkt_cnt++;
+		return 0;
+	}
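+	/*
+	 * Copy the control packet; the copy is freed in the
+	 * SMUX_WRITE_DONE/SMUX_WRITE_FAIL event handler once the write
+	 * completes.
+	 */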
+	cbuf = kmalloc(len, GFP_ATOMIC);
+	if (!cbuf)
+		return -ENOMEM;
+
+	memcpy(cbuf, buf, len);
+
+	pr_debug("%s: ctrl_pkt:%d bytes\n", __func__, len);
+
+	ret = msm_smux_write(port->ch_id, port, (void *)cbuf, len);
+	if (ret < 0) {
+		pr_err_ratelimited("%s: write error:%d\n",
+				__func__, ret);
+		port->drp_cpkt_cnt++;
+		kfree(cbuf);
+		return ret;
+	}
+	port->to_modem++;
+
+	return 0;
+}
+
+static void
+ghsuart_send_cbits_tomodem(void *gptr, u8 portno, int cbits)
+{
+	struct ghsuart_ctrl_port	*port;
+
+	if (portno >= num_ctrl_ports || !gptr) {
+		pr_err("%s: Invalid portno#%d\n", __func__, portno);
+		return;
+	}
+
+	port = ghsuart_ctrl_ports[portno].port;
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	if (cbits == port->cbits_tomodem)
+		return;
+
+	port->cbits_tomodem = cbits;
+
+	if (!test_bit(CH_OPENED, &port->channel_sts))
+		return;
+
+	pr_debug("%s: ctrl_tomodem:%d\n", __func__, cbits);
+	/* Send the control bits to the Modem */
+	msm_smux_tiocm_set(port->ch_id, cbits, ~cbits);
+}
+
+static void ghsuart_ctrl_connect_w(struct work_struct *w)
+{
+	struct ghsuart_ctrl_port	*port =
+			container_of(w, struct ghsuart_ctrl_port, connect_w);
+	int			retval;
+
+	if (!port || !test_bit(CH_READY, &port->channel_sts))
+		return;
+
+	pr_debug("%s: port:%p\n", __func__, port);
+
+	retval = msm_smux_open(port->ch_id, port->ctxt, smux_control_event,
+				rx_control_buffer);
+	if (retval < 0) {
+		pr_err(" %s smux_open failed\n", __func__);
+		return;
+	}
+
+}
+
+int ghsuart_ctrl_connect(void *gptr, int port_num)
+{
+	struct ghsuart_ctrl_port	*port;
+	struct grmnet		*gr;
+	unsigned long		flags;
+
+	pr_debug("%s: port#%d\n", __func__, port_num);
+
+	if (port_num >= num_ctrl_ports || !gptr) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return -ENODEV;
+	}
+
+	port = ghsuart_ctrl_ports[port_num].port;
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	gr = gptr;
+	port->send_cpkt_response = gr->send_cpkt_response;
+	gr->send_encap_cmd = ghsuart_send_cpkt_tomodem;
+	gr->notify_modem = ghsuart_send_cbits_tomodem;
+
+	port->port_usb = gptr;
+	port->to_host = 0;
+	port->to_modem = 0;
+	port->drp_cpkt_cnt = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	if (test_bit(CH_READY, &port->channel_sts))
+		queue_work(port->wq, &port->connect_w);
+
+	return 0;
+}
+
+static void ghsuart_ctrl_disconnect_w(struct work_struct *w)
+{
+	struct ghsuart_ctrl_port	*port =
+			container_of(w, struct ghsuart_ctrl_port, disconnect_w);
+
+	if (!test_bit(CH_OPENED, &port->channel_sts))
+		return;
+
+	msm_smux_close(port->ch_id);
+	clear_bit(CH_OPENED, &port->channel_sts);
+}
+
+void ghsuart_ctrl_disconnect(void *gptr, int port_num)
+{
+	struct ghsuart_ctrl_port	*port;
+	struct grmnet		*gr = NULL;
+	unsigned long		flags;
+
+	pr_debug("%s: port#%d\n", __func__, port_num);
+
+	if (port_num >= num_ctrl_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return;
+	}
+
+	port = ghsuart_ctrl_ports[port_num].port;
+
+	if (!gptr || !port) {
+		pr_err("%s: grmnet port is null\n", __func__);
+		return;
+	}
+
+	gr = gptr;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	gr->send_encap_cmd = 0;
+	gr->notify_modem = 0;
+	port->cbits_tomodem = 0;
+	port->port_usb = 0;
+	port->send_cpkt_response = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	queue_work(port->wq, &port->disconnect_w);
+}
+
+static int ghsuart_ctrl_probe(struct platform_device *pdev)
+{
+	struct ghsuart_ctrl_port	*port;
+	unsigned long		flags;
+
+	pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+	port = ghsuart_ctrl_ports[pdev->id].port;
+	set_bit(CH_READY, &port->channel_sts);
+
+	/* if usb is online, start read */
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_usb)
+		queue_work(port->wq, &port->connect_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return 0;
+}
+
+static int ghsuart_ctrl_remove(struct platform_device *pdev)
+{
+	struct ghsuart_ctrl_port	*port;
+	struct grmnet		*gr = NULL;
+	unsigned long		flags;
+
+	pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+	port = ghsuart_ctrl_ports[pdev->id].port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		goto not_ready;
+	}
+
+	gr = port->port_usb;
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	if (gr && gr->disconnect)
+		gr->disconnect(gr);
+
+	clear_bit(CH_OPENED, &port->channel_sts);
+not_ready:
+	clear_bit(CH_READY, &port->channel_sts);
+
+	return 0;
+}
+
+static void ghsuart_ctrl_port_free(int portno)
+{
+	struct ghsuart_ctrl_port	*port = ghsuart_ctrl_ports[portno].port;
+	struct platform_driver	*pdrv = &ghsuart_ctrl_ports[portno].pdrv;
+
+	destroy_workqueue(port->wq);
+	if (pdrv)
+		platform_driver_unregister(pdrv);
+	kfree(port);
+}
+
+static int ghsuart_ctrl_port_alloc(int portno, enum gadget_type gtype)
+{
+	struct ghsuart_ctrl_port	*port;
+	struct platform_driver	*pdrv;
+	int err;
+
+	port = kzalloc(sizeof(struct ghsuart_ctrl_port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->wq = create_singlethread_workqueue(ghsuart_ctrl_names[portno]);
+	if (!port->wq) {
+		pr_err("%s: Unable to create workqueue:%s\n",
+			__func__, ghsuart_ctrl_names[portno]);
+		kfree(port);
+		return -ENOMEM;
+	}
+
+	port->port_num = portno;
+	port->gtype = gtype;
+
+	spin_lock_init(&port->port_lock);
+
+	INIT_WORK(&port->connect_w, ghsuart_ctrl_connect_w);
+	INIT_WORK(&port->disconnect_w, ghsuart_ctrl_disconnect_w);
+
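+	/* the control transport always uses the RMNET control SMUX channel */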
+	port->ch_id = SMUX_USB_RMNET_CTL_0;
+	port->ctxt = port;
+	port->send_pkt = ghsuart_ctrl_receive;
+	ghsuart_ctrl_ports[portno].port = port;
+
+	pdrv = &ghsuart_ctrl_ports[portno].pdrv;
+	pdrv->probe = ghsuart_ctrl_probe;
+	pdrv->remove = ghsuart_ctrl_remove;
+	pdrv->driver.name = ghsuart_ctrl_names[portno];
+	pdrv->driver.owner = THIS_MODULE;
+
+	err = platform_driver_register(pdrv);
+	if (unlikely(err < 0))
+		return err;
+	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
+
+	return 0;
+}
+
+int ghsuart_ctrl_setup(unsigned int num_ports, enum gadget_type gtype)
+{
+	int	first_port_id = num_ctrl_ports;
+	int	total_num_ports = num_ports + num_ctrl_ports;
+	int	i;
+	int	ret = 0;
+
+	if (!num_ports || total_num_ports > NUM_HSUART_PORTS) {
+		pr_err("%s: Invalid num of ports count:%d\n",
+				__func__, num_ports);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: requested ports:%d\n", __func__, num_ports);
+
+	for (i = first_port_id; i < (first_port_id + num_ports); i++) {
+
+		num_ctrl_ports++;
+		ret = ghsuart_ctrl_port_alloc(i, gtype);
+		if (ret) {
+			num_ctrl_ports--;
+			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+			goto free_ports;
+		}
+	}
+
+	return first_port_id;
+
+free_ports:
+	for (i = first_port_id; i < num_ctrl_ports; i++)
+		ghsuart_ctrl_port_free(i);
+	num_ctrl_ports = first_port_id;
+	return ret;
+}
+
+#define DEBUG_BUF_SIZE	1024
+static ssize_t ghsuart_ctrl_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct ghsuart_ctrl_port	*port;
+	char			*buf;
+	unsigned long		flags;
+	int			ret;
+	int			i;
+	int			temp = 0;
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (i = 0; i < num_ctrl_ports; i++) {
+		port = ghsuart_ctrl_ports[i].port;
+		if (!port)
+			continue;
+		spin_lock_irqsave(&port->port_lock, flags);
+
+		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
+				"#PORT:%d port: %p\n"
+				"to_usbhost:    %lu\n"
+				"to_modem:      %lu\n"
+				"cpkt_drp_cnt:  %lu\n"
+				"DTR:           %s\n",
+				i, port,
+				port->to_host, port->to_modem,
+				port->drp_cpkt_cnt,
+				port->cbits_tomodem ? "HIGH" : "LOW");
+
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static ssize_t ghsuart_ctrl_reset_stats(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct ghsuart_ctrl_port	*port;
+	int			i;
+	unsigned long		flags;
+
+	for (i = 0; i < num_ctrl_ports; i++) {
+		port = ghsuart_ctrl_ports[i].port;
+		if (!port)
+			continue;
+
+		spin_lock_irqsave(&port->port_lock, flags);
+		port->to_host = 0;
+		port->to_modem = 0;
+		port->drp_cpkt_cnt = 0;
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+	return count;
+}
+
+static const struct file_operations ghsuart_ctrl_stats_ops = {
+	.read = ghsuart_ctrl_read_stats,
+	.write = ghsuart_ctrl_reset_stats,
+};
+
+static struct dentry	*ghsuart_ctrl_dent;
+static int ghsuart_ctrl_debugfs_init(void)
+{
+	struct dentry	*ghsuart_ctrl_dfile;
+
+	ghsuart_ctrl_dent = debugfs_create_dir("ghsuart_ctrl_xport", 0);
+	if (!ghsuart_ctrl_dent || IS_ERR(ghsuart_ctrl_dent))
+		return -ENODEV;
+
+	ghsuart_ctrl_dfile =
+		debugfs_create_file("status", S_IRUGO | S_IWUSR,
+				ghsuart_ctrl_dent, 0, &ghsuart_ctrl_stats_ops);
+	if (!ghsuart_ctrl_dfile || IS_ERR(ghsuart_ctrl_dfile)) {
+		debugfs_remove(ghsuart_ctrl_dent);
+		ghsuart_ctrl_dent = NULL;
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static void ghsuart_ctrl_debugfs_exit(void)
+{
+	debugfs_remove_recursive(ghsuart_ctrl_dent);
+}
+
+static int __init ghsuart_ctrl_init(void)
+{
+	int ret;
+
+	ret = ghsuart_ctrl_debugfs_init();
+	if (ret) {
+		pr_debug("mode debugfs file is not available\n");
+		return ret;
+	}
+	return 0;
+}
+module_init(ghsuart_ctrl_init);
+
+static void __exit ghsuart_ctrl_exit(void)
+{
+	ghsuart_ctrl_debugfs_exit();
+}
+module_exit(ghsuart_ctrl_exit);
+
+MODULE_DESCRIPTION("HSUART control xport for RmNet");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/u_data_hsuart.c b/drivers/usb/gadget/u_data_hsuart.c
new file mode 100644
index 0000000..b2c57c4
--- /dev/null
+++ b/drivers/usb/gadget/u_data_hsuart.c
@@ -0,0 +1,1142 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/smux.h>
+
+#include <mach/usb_gadget_xport.h>
+
+static unsigned int num_data_ports;
+
+static const char *ghsuart_data_names[] = {
+	"SMUX_DUN_DATA_HSUART",
+	"SMUX_RMNET_DATA_HSUART"
+};
+
+#define DATA_BRIDGE_NAME_MAX_LEN		20
+
+#define GHSUART_DATA_RMNET_RX_Q_SIZE		10
+#define GHSUART_DATA_RMNET_TX_Q_SIZE		20
+#define GHSUART_DATA_SERIAL_RX_Q_SIZE		5
+#define GHSUART_DATA_SERIAL_TX_Q_SIZE		5
+#define GHSUART_DATA_RX_REQ_SIZE		2048
+#define GHSUART_DATA_TX_INTR_THRESHOLD		1
+
+/* from cdc-acm.h */
+#define ACM_CTRL_RTS		(1 << 1)	/* unused with full duplex */
+#define ACM_CTRL_DTR		(1 << 0)	/* host is ready for data r/w */
+#define ACM_CTRL_OVERRUN	(1 << 6)
+#define ACM_CTRL_PARITY		(1 << 5)
+#define ACM_CTRL_FRAMING	(1 << 4)
+#define ACM_CTRL_RI		(1 << 3)
+#define ACM_CTRL_BRK		(1 << 2)
+#define ACM_CTRL_DSR		(1 << 1)
+#define ACM_CTRL_DCD		(1 << 0)
+
+static unsigned int ghsuart_data_rmnet_tx_q_size = GHSUART_DATA_RMNET_TX_Q_SIZE;
+module_param(ghsuart_data_rmnet_tx_q_size, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int ghsuart_data_rmnet_rx_q_size = GHSUART_DATA_RMNET_RX_Q_SIZE;
+module_param(ghsuart_data_rmnet_rx_q_size, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int ghsuart_data_serial_tx_q_size =
+					GHSUART_DATA_SERIAL_TX_Q_SIZE;
+module_param(ghsuart_data_serial_tx_q_size, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int ghsuart_data_serial_rx_q_size =
+				GHSUART_DATA_SERIAL_RX_Q_SIZE;
+module_param(ghsuart_data_serial_rx_q_size, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int ghsuart_data_rx_req_size = GHSUART_DATA_RX_REQ_SIZE;
+module_param(ghsuart_data_rx_req_size, uint, S_IRUGO | S_IWUSR);
+
+unsigned int ghsuart_data_tx_intr_thld = GHSUART_DATA_TX_INTR_THRESHOLD;
+module_param(ghsuart_data_tx_intr_thld, uint, S_IRUGO | S_IWUSR);
+
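+/*
+ * channel_sts bits: CH_READY is set once the SMUX platform device has
+ * probed; CH_OPENED is set while the SMUX channel itself is open.
+ */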
+#define CH_OPENED 0
+#define CH_READY 1
+
+struct ghsuart_data_port {
+	/* port */
+	unsigned		port_num;
+
+	/* gadget */
+	atomic_t		connected;
+	struct usb_ep		*in;
+	struct usb_ep		*out;
+
+	enum gadget_type	gtype;
+	spinlock_t		port_lock;
+	void *port_usb;
+
+	/* data transfer queues */
+	unsigned int		tx_q_size;
+	struct list_head	tx_idle;
+	struct sk_buff_head	tx_skb_q;
+	spinlock_t		tx_lock;
+
+	unsigned int		rx_q_size;
+	struct list_head	rx_idle;
+	struct sk_buff_head	rx_skb_q;
+	spinlock_t		rx_lock;
+
+	/* work */
+	struct workqueue_struct	*wq;
+	struct work_struct	connect_w;
+	struct work_struct	disconnect_w;
+	struct work_struct	write_tomdm_w;
+	struct work_struct	write_tohost_w;
+	void *ctx;
+	unsigned int ch_id;
+	/* flow control bits */
+	unsigned long flags;
+	/* channel status */
+	unsigned long		channel_sts;
+
+	unsigned int		n_tx_req_queued;
+
+	/* control bits */
+	unsigned		cbits_tomodem;
+	unsigned		cbits_tohost;
+
+	/* counters */
+	unsigned long		to_modem;
+	unsigned long		to_host;
+	unsigned int		tomodem_drp_cnt;
+};
+
+static struct {
+	struct ghsuart_data_port	*port;
+	struct platform_driver	pdrv;
+} ghsuart_data_ports[NUM_HSUART_PORTS];
+
+static void ghsuart_data_start_rx(struct ghsuart_data_port *port);
+
+static void ghsuart_data_free_requests(struct usb_ep *ep,
+				 struct list_head *head)
+{
+	struct usb_request	*req;
+
+	while (!list_empty(head)) {
+		req = list_entry(head->next, struct usb_request, list);
+		list_del(&req->list);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+static int ghsuart_data_alloc_requests(struct usb_ep *ep,
+		struct list_head *head,
+		int num,
+		void (*cb)(struct usb_ep *ep, struct usb_request *),
+		gfp_t flags)
+{
+	int			i;
+	struct usb_request	*req;
+
+	pr_debug("%s: ep:%s head:%p num:%d cb:%p", __func__,
+			ep->name, head, num, cb);
+
+	for (i = 0; i < num; i++) {
+		req = usb_ep_alloc_request(ep, flags);
+		if (!req) {
+			pr_err("%s: req allocated:%d\n", __func__, i);
+			return list_empty(head) ? -ENOMEM : 0;
+		}
+		req->complete = cb;
+		list_add(&req->list, head);
+	}
+
+	return 0;
+}
+
+static void ghsuart_data_write_tohost(struct work_struct *w)
+{
+	unsigned long		flags;
+	struct sk_buff		*skb;
+	int			ret;
+	struct usb_request	*req;
+	struct usb_ep		*ep;
+	struct ghsuart_data_port	*port;
+
+	port = container_of(w, struct ghsuart_data_port, write_tohost_w);
+
+	if (!port || !atomic_read(&port->connected))
+		return;
+
+	spin_lock_irqsave(&port->tx_lock, flags);
+	ep = port->in;
+	if (!ep) {
+		spin_unlock_irqrestore(&port->tx_lock, flags);
+		return;
+	}
+
+	while (!list_empty(&port->tx_idle)) {
+		skb = __skb_dequeue(&port->tx_skb_q);
+		if (!skb)
+			break;
+
+		req = list_first_entry(&port->tx_idle, struct usb_request,
+				list);
+		req->context = skb;
+		req->buf = skb->data;
+		req->length = skb->len;
+
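+		/* Interrupt moderation: request a completion interrupt only
+		 * on every ghsuart_data_tx_intr_thld-th IN request.
+		 */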
+		port->n_tx_req_queued++;
+		if (port->n_tx_req_queued == ghsuart_data_tx_intr_thld) {
+			req->no_interrupt = 0;
+			port->n_tx_req_queued = 0;
+		} else {
+			req->no_interrupt = 1;
+		}
+
+		list_del(&req->list);
+
+		spin_unlock_irqrestore(&port->tx_lock, flags);
+		ret = usb_ep_queue(ep, req, GFP_KERNEL);
+		spin_lock_irqsave(&port->tx_lock, flags);
+		if (ret) {
+			pr_err("%s: usb epIn failed\n", __func__);
+			list_add(&req->list, &port->tx_idle);
+			dev_kfree_skb_any(skb);
+			break;
+		}
+		port->to_host++;
+	}
+	spin_unlock_irqrestore(&port->tx_lock, flags);
+}
+
+static void ghsuart_data_write_tomdm(struct work_struct *w)
+{
+	struct ghsuart_data_port	*port;
+	struct sk_buff		*skb;
+	unsigned long		flags;
+	int			ret;
+
+	port = container_of(w, struct ghsuart_data_port, write_tomdm_w);
+
+	if (!port || !atomic_read(&port->connected))
+		return;
+
+	spin_lock_irqsave(&port->rx_lock, flags);
+	if (test_bit(TX_THROTTLED, &port->flags)) {
+		spin_unlock_irqrestore(&port->rx_lock, flags);
+		return;
+	}
+
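+	/* Drain the UL skb queue into the SMUX channel; on -EAGAIN the skb
+	 * is requeued and the port stays throttled until SMUX reports a
+	 * low watermark.
+	 */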
+	while ((skb = __skb_dequeue(&port->rx_skb_q))) {
+		pr_debug("%s: port:%p tom:%lu pno:%d\n", __func__,
+				port, port->to_modem, port->port_num);
+
+		spin_unlock_irqrestore(&port->rx_lock, flags);
+		ret = msm_smux_write(port->ch_id, skb, skb->data, skb->len);
+		spin_lock_irqsave(&port->rx_lock, flags);
+		if (ret < 0) {
+			if (ret == -EAGAIN) {
+				/*flow control*/
+				set_bit(TX_THROTTLED, &port->flags);
+				__skb_queue_head(&port->rx_skb_q, skb);
+				break;
+			}
+			pr_err_ratelimited("%s: write error:%d\n",
+					__func__, ret);
+			port->tomodem_drp_cnt++;
+			dev_kfree_skb_any(skb);
+			break;
+		}
+		port->to_modem++;
+	}
+	spin_unlock_irqrestore(&port->rx_lock, flags);
+	ghsuart_data_start_rx(port);
+}
+
+static void ghsuart_data_epin_complete(struct usb_ep *ep,
+				struct usb_request *req)
+{
+	struct ghsuart_data_port	*port = ep->driver_data;
+	struct sk_buff		*skb = req->context;
+	int			status = req->status;
+
+	switch (status) {
+	case 0:
+		/* successful completion */
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		dev_kfree_skb_any(skb);
+		req->buf = 0;
+		usb_ep_free_request(ep, req);
+		return;
+	default:
+		pr_err("%s: data tx ep error %d\n", __func__, status);
+		break;
+	}
+
+	dev_kfree_skb_any(skb);
+
+	spin_lock(&port->tx_lock);
+	list_add_tail(&req->list, &port->tx_idle);
+	spin_unlock(&port->tx_lock);
+
+	queue_work(port->wq, &port->write_tohost_w);
+}
+
+static void
+ghsuart_data_epout_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct ghsuart_data_port	*port = ep->driver_data;
+	struct sk_buff		*skb = req->context;
+	int			status = req->status;
+	int			queue = 0;
+
+	switch (status) {
+	case 0:
+		skb_put(skb, req->actual);
+		queue = 1;
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* cable disconnection */
+		dev_kfree_skb_any(skb);
+		req->buf = 0;
+		usb_ep_free_request(ep, req);
+		return;
+	default:
+		pr_err_ratelimited("%s: %s response error %d, %d/%d\n",
+					__func__, ep->name, status,
+				req->actual, req->length);
+		dev_kfree_skb_any(skb);
+		spin_lock(&port->rx_lock);
+		list_add_tail(&req->list, &port->rx_idle);
+		spin_unlock(&port->rx_lock);
+		return;
+	}
+
+	spin_lock(&port->rx_lock);
+	if (queue) {
+		__skb_queue_tail(&port->rx_skb_q, skb);
+		list_add_tail(&req->list, &port->rx_idle);
+		queue_work(port->wq, &port->write_tomdm_w);
+	}
+	spin_unlock(&port->rx_lock);
+}
+
+static void ghsuart_data_start_rx(struct ghsuart_data_port *port)
+{
+	struct usb_request	*req;
+	struct usb_ep		*ep;
+	unsigned long		flags;
+	int			ret;
+	struct sk_buff		*skb;
+
+	pr_debug("%s: port:%p\n", __func__, port);
+	if (!port)
+		return;
+
+	spin_lock_irqsave(&port->rx_lock, flags);
+	ep = port->out;
+	if (!ep) {
+		spin_unlock_irqrestore(&port->rx_lock, flags);
+		return;
+	}
+
+	if (test_bit(TX_THROTTLED, &port->flags)) {
+		spin_unlock_irqrestore(&port->rx_lock, flags);
+		return;
+	}
+
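+	/* Queue OUT requests, each backed by a freshly allocated skb, until
+	 * the idle list is empty or an allocation fails.
+	 */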
+	while (atomic_read(&port->connected) && !list_empty(&port->rx_idle)) {
+
+		req = list_first_entry(&port->rx_idle,
+					struct usb_request, list);
+
+		skb = alloc_skb(ghsuart_data_rx_req_size, GFP_ATOMIC);
+		if (!skb)
+			break;
+		list_del(&req->list);
+		req->buf = skb->data;
+		req->length = ghsuart_data_rx_req_size;
+		req->context = skb;
+
+		spin_unlock_irqrestore(&port->rx_lock, flags);
+		ret = usb_ep_queue(ep, req, GFP_KERNEL);
+		spin_lock_irqsave(&port->rx_lock, flags);
+		if (ret) {
+			dev_kfree_skb_any(skb);
+
+			pr_err_ratelimited("%s: rx queue failed\n", __func__);
+
+			if (atomic_read(&port->connected))
+				list_add(&req->list, &port->rx_idle);
+			else
+				usb_ep_free_request(ep, req);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&port->rx_lock, flags);
+}
+
+static void ghsuart_data_start_io(struct ghsuart_data_port *port)
+{
+	unsigned long	flags;
+	struct usb_ep	*ep;
+	int		ret;
+
+	pr_debug("%s: port:%p\n", __func__, port);
+
+	if (!port)
+		return;
+
+	spin_lock_irqsave(&port->rx_lock, flags);
+	ep = port->out;
+	if (!ep) {
+		spin_unlock_irqrestore(&port->rx_lock, flags);
+		return;
+	}
+
+	ret = ghsuart_data_alloc_requests(ep, &port->rx_idle,
+		port->rx_q_size, ghsuart_data_epout_complete, GFP_ATOMIC);
+	if (ret) {
+		pr_err("%s: rx req allocation failed\n", __func__);
+		spin_unlock_irqrestore(&port->rx_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&port->rx_lock, flags);
+
+	spin_lock_irqsave(&port->tx_lock, flags);
+	ep = port->in;
+	if (!ep) {
+		spin_unlock_irqrestore(&port->tx_lock, flags);
+		return;
+	}
+
+	ret = ghsuart_data_alloc_requests(ep, &port->tx_idle,
+		port->tx_q_size, ghsuart_data_epin_complete, GFP_ATOMIC);
+	if (ret) {
+		pr_err("%s: tx req allocation failed\n", __func__);
+		ghsuart_data_free_requests(ep, &port->rx_idle);
+		spin_unlock_irqrestore(&port->tx_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&port->tx_lock, flags);
+
+	/* queue out requests */
+	ghsuart_data_start_rx(port);
+}
+
+static void ghsuart_dunctrl_status(void *ctxt, unsigned int ctrl_bits)
+{
+	struct ghsuart_data_port  *port = ctxt;
+	struct gserial          *gser;
+	unsigned long	flags;
+
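+	/* Mirror the modem's serial control lines to the USB host */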
+	pr_debug("%s - input control lines: dcd%c dsr%c break%c "
+	"ring%c framing%c parity%c overrun%c\n", __func__,
+	ctrl_bits & ACM_CTRL_DCD ? '+' : '-',
+	ctrl_bits & ACM_CTRL_DSR ? '+' : '-',
+	ctrl_bits & ACM_CTRL_BRK ? '+' : '-',
+	ctrl_bits & ACM_CTRL_RI  ? '+' : '-',
+	ctrl_bits & ACM_CTRL_FRAMING ? '+' : '-',
+	ctrl_bits & ACM_CTRL_PARITY ? '+' : '-',
+	ctrl_bits & ACM_CTRL_OVERRUN ? '+' : '-');
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->cbits_tohost = ctrl_bits;
+	gser = port->port_usb;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	if (gser && gser->send_modem_ctrl_bits)
+		gser->send_modem_ctrl_bits(gser, ctrl_bits);
+}
+
+static const char *event_string(int event_type)
+{
+	switch (event_type) {
+	case SMUX_CONNECTED:
+		return "SMUX_CONNECTED";
+	case SMUX_DISCONNECTED:
+		return "SMUX_DISCONNECTED";
+	case SMUX_READ_DONE:
+		return "SMUX_READ_DONE";
+	case SMUX_READ_FAIL:
+		return "SMUX_READ_FAIL";
+	case SMUX_WRITE_DONE:
+		return "SMUX_WRITE_DONE";
+	case SMUX_WRITE_FAIL:
+		return "SMUX_WRITE_FAIL";
+	case SMUX_HIGH_WM_HIT:
+		return "SMUX_HIGH_WM_HIT";
+	case SMUX_LOW_WM_HIT:
+		return "SMUX_LOW_WM_HIT";
+	case SMUX_TIOCM_UPDATE:
+		return "SMUX_TIOCM_UPDATE";
+	default:
+		return "UNDEFINED";
+	}
+}
+
+static void ghsuart_notify_event(void *priv, int event_type,
+				const void *metadata)
+{
+	struct ghsuart_data_port	*port = priv;
+	struct smux_meta_write *meta_write =
+				(struct smux_meta_write *) metadata;
+	struct smux_meta_read *meta_read =
+				(struct smux_meta_read *) metadata;
+	struct sk_buff		*skb;
+	unsigned long		flags;
+	unsigned int		cbits;
+	struct gserial		*gser;
+
+	pr_debug("%s: event type: %s ", __func__, event_string(event_type));
+	switch (event_type) {
+	case SMUX_CONNECTED:
+		set_bit(CH_OPENED, &port->channel_sts);
+		if (port->gtype == USB_GADGET_SERIAL) {
+			cbits = msm_smux_tiocm_get(port->ch_id);
+			if (cbits & ACM_CTRL_DCD) {
+				gser = port->port_usb;
+				if (gser && gser->connect)
+					gser->connect(gser);
+			}
+		}
+		ghsuart_data_start_io(port);
+		break;
+	case SMUX_DISCONNECTED:
+		clear_bit(CH_OPENED, &port->channel_sts);
+		break;
+	case SMUX_READ_DONE:
+		skb = meta_read->pkt_priv;
+		skb->data = meta_read->buffer;
+		skb->len = meta_read->len;
+		spin_lock_irqsave(&port->tx_lock, flags);
+		__skb_queue_tail(&port->tx_skb_q, skb);
+		spin_unlock_irqrestore(&port->tx_lock, flags);
+		queue_work(port->wq, &port->write_tohost_w);
+		break;
+	case SMUX_WRITE_DONE:
+		skb = meta_write->pkt_priv;
+		skb->data = meta_write->buffer;
+		dev_kfree_skb_any(skb);
+		queue_work(port->wq, &port->write_tomdm_w);
+		break;
+	case SMUX_READ_FAIL:
+		skb = meta_read->pkt_priv;
+		skb->data = meta_read->buffer;
+		dev_kfree_skb_any(skb);
+		break;
+	case SMUX_WRITE_FAIL:
+		skb = meta_write->pkt_priv;
+		skb->data = meta_write->buffer;
+		dev_kfree_skb_any(skb);
+		break;
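+	/* SMUX flow control: throttle UL writes on the high watermark and
+	 * resume them once the low watermark is hit.
+	 */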
+	case SMUX_HIGH_WM_HIT:
+		spin_lock_irqsave(&port->rx_lock, flags);
+		set_bit(TX_THROTTLED, &port->flags);
+		spin_unlock_irqrestore(&port->rx_lock, flags);
+		break;
+	case SMUX_LOW_WM_HIT:
+		spin_lock_irqsave(&port->rx_lock, flags);
+		clear_bit(TX_THROTTLED, &port->flags);
+		spin_unlock_irqrestore(&port->rx_lock, flags);
+		queue_work(port->wq, &port->write_tomdm_w);
+		break;
+	case SMUX_TIOCM_UPDATE:
+		if (port->gtype == USB_GADGET_SERIAL) {
+			cbits = msm_smux_tiocm_get(port->ch_id);
+			ghsuart_dunctrl_status(port, cbits);
+		}
+		break;
+	default:
+		pr_err("%s:wrong event recieved\n", __func__);
+	}
+}
+
+static int ghsuart_get_rx_buffer(void *priv, void **pkt_priv,
+			void **buffer, int size)
+{
+	struct sk_buff		*skb;
+
+	skb = alloc_skb(size, GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+	*pkt_priv = skb;
+	*buffer = skb->data;
+
+	return 0;
+}
+
+static void ghsuart_data_connect_w(struct work_struct *w)
+{
+	struct ghsuart_data_port	*port =
+		container_of(w, struct ghsuart_data_port, connect_w);
+	int			ret;
+
+	if (!port || !atomic_read(&port->connected) ||
+		!test_bit(CH_READY, &port->channel_sts))
+		return;
+
+	pr_debug("%s: port:%p\n", __func__, port);
+
+	ret = msm_smux_open(port->ch_id, port, &ghsuart_notify_event,
+				&ghsuart_get_rx_buffer);
+	if (ret) {
+		pr_err("%s: unable to open smux ch:%d err:%d\n",
+				__func__, port->ch_id, ret);
+		return;
+	}
+}
+
+static void ghsuart_data_disconnect_w(struct work_struct *w)
+{
+	struct ghsuart_data_port	*port =
+		container_of(w, struct ghsuart_data_port, disconnect_w);
+
+	if (!test_bit(CH_OPENED, &port->channel_sts))
+		return;
+
+	msm_smux_close(port->ch_id);
+	clear_bit(CH_OPENED, &port->channel_sts);
+}
+
+static void ghsuart_data_free_buffers(struct ghsuart_data_port *port)
+{
+	struct sk_buff	*skb;
+	unsigned long	flags;
+
+	if (!port)
+		return;
+
+	spin_lock_irqsave(&port->tx_lock, flags);
+	if (!port->in) {
+		spin_unlock_irqrestore(&port->tx_lock, flags);
+		return;
+	}
+
+	ghsuart_data_free_requests(port->in, &port->tx_idle);
+
+	while ((skb = __skb_dequeue(&port->tx_skb_q)))
+		dev_kfree_skb_any(skb);
+	spin_unlock_irqrestore(&port->tx_lock, flags);
+
+	spin_lock_irqsave(&port->rx_lock, flags);
+	if (!port->out) {
+		spin_unlock_irqrestore(&port->rx_lock, flags);
+		return;
+	}
+
+	ghsuart_data_free_requests(port->out, &port->rx_idle);
+
+	while ((skb = __skb_dequeue(&port->rx_skb_q)))
+		dev_kfree_skb_any(skb);
+	spin_unlock_irqrestore(&port->rx_lock, flags);
+}
+
+static int ghsuart_data_probe(struct platform_device *pdev)
+{
+	struct ghsuart_data_port *port;
+
+	pr_debug("%s: name:%s num_data_ports= %d\n",
+		__func__, pdev->name, num_data_ports);
+
+	if (pdev->id >= num_data_ports) {
+		pr_err("%s: invalid port: %d\n", __func__, pdev->id);
+		return -EINVAL;
+	}
+
+	port = ghsuart_data_ports[pdev->id].port;
+	set_bit(CH_READY, &port->channel_sts);
+
+	/* if usb is online, try opening bridge */
+	if (atomic_read(&port->connected))
+		queue_work(port->wq, &port->connect_w);
+
+	return 0;
+}
+
+/* mdm disconnect */
+static int ghsuart_data_remove(struct platform_device *pdev)
+{
+	struct ghsuart_data_port *port;
+	struct usb_ep	*ep_in;
+	struct usb_ep	*ep_out;
+	int ret;
+	struct gserial		*gser = NULL;
+	unsigned long	flags;
+
+	pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+	if (pdev->id >= num_data_ports) {
+		pr_err("%s: invalid port: %d\n", __func__, pdev->id);
+		return -EINVAL;
+	}
+
+	port = ghsuart_data_ports[pdev->id].port;
+
+	ep_in = port->in;
+	if (ep_in)
+		usb_ep_fifo_flush(ep_in);
+
+	ep_out = port->out;
+	if (ep_out)
+		usb_ep_fifo_flush(ep_out);
+
+	ghsuart_data_free_buffers(port);
+
+	if (port->gtype == USB_GADGET_SERIAL) {
+		spin_lock_irqsave(&port->port_lock, flags);
+		gser = port->port_usb;
+		port->cbits_tohost = 0;
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		if (gser && gser->disconnect)
+			gser->disconnect(gser);
+	}
+
+	ret = msm_smux_close(port->ch_id);
+	if (ret < 0)
+		pr_err("%s:Unable to close smux channel: %d\n",
+				__func__, port->ch_id);
+
+	clear_bit(CH_READY, &port->channel_sts);
+	clear_bit(CH_OPENED, &port->channel_sts);
+
+	return 0;
+}
+
+static void ghsuart_data_port_free(int portno)
+{
+	struct ghsuart_data_port	*port = ghsuart_data_ports[portno].port;
+	struct platform_driver	*pdrv = &ghsuart_data_ports[portno].pdrv;
+
+	destroy_workqueue(port->wq);
+	kfree(port);
+
+	if (pdrv)
+		platform_driver_unregister(pdrv);
+}
+
+static void
+ghsuart_send_controlbits_tomodem(void *gptr, u8 portno, int cbits)
+{
+	struct ghsuart_data_port	*port;
+
+	if (portno >= num_data_ports || !gptr) {
+		pr_err("%s: Invalid portno#%d\n", __func__, portno);
+		return;
+	}
+
+	port = ghsuart_data_ports[portno].port;
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	if (cbits == port->cbits_tomodem)
+		return;
+
+	port->cbits_tomodem = cbits;
+
+	if (!test_bit(CH_OPENED, &port->channel_sts))
+		return;
+
+	/* if DTR is high, update latest modem info to Host */
+	if (port->cbits_tomodem & ACM_CTRL_DTR) {
+		unsigned int i;
+
+		i = msm_smux_tiocm_get(port->ch_id);
+		ghsuart_dunctrl_status(port, i);
+	}
+
+	pr_debug("%s: ctrl_tomodem:%d\n", __func__, cbits);
+	/* Send the control bits to the Modem */
+	msm_smux_tiocm_set(port->ch_id, cbits, ~cbits);
+}
+
+static int ghsuart_data_port_alloc(unsigned port_num, enum gadget_type gtype)
+{
+	struct ghsuart_data_port	*port;
+	struct platform_driver	*pdrv;
+
+	port = kzalloc(sizeof(struct ghsuart_data_port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->wq = create_singlethread_workqueue(ghsuart_data_names[port_num]);
+	if (!port->wq) {
+		pr_err("%s: Unable to create workqueue:%s\n",
+			__func__, ghsuart_data_names[port_num]);
+		kfree(port);
+		return -ENOMEM;
+	}
+	port->port_num = port_num;
+
+	/* port initialization */
+	spin_lock_init(&port->port_lock);
+	spin_lock_init(&port->rx_lock);
+	spin_lock_init(&port->tx_lock);
+
+	INIT_WORK(&port->connect_w, ghsuart_data_connect_w);
+	INIT_WORK(&port->disconnect_w, ghsuart_data_disconnect_w);
+	INIT_WORK(&port->write_tohost_w, ghsuart_data_write_tohost);
+	INIT_WORK(&port->write_tomdm_w, ghsuart_data_write_tomdm);
+
+	INIT_LIST_HEAD(&port->tx_idle);
+	INIT_LIST_HEAD(&port->rx_idle);
+
+	skb_queue_head_init(&port->tx_skb_q);
+	skb_queue_head_init(&port->rx_skb_q);
+
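+	/* DUN (serial) ports use SMUX_USB_DUN_0; RMNET data ports use
+	 * SMUX_USB_RMNET_DATA_0.
+	 */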
+	port->gtype = gtype;
+	if (port->gtype == USB_GADGET_SERIAL)
+		port->ch_id = SMUX_USB_DUN_0;
+	else
+		port->ch_id = SMUX_USB_RMNET_DATA_0;
+	port->ctx = port;
+	ghsuart_data_ports[port_num].port = port;
+
+	pdrv = &ghsuart_data_ports[port_num].pdrv;
+	pdrv->probe = ghsuart_data_probe;
+	pdrv->remove = ghsuart_data_remove;
+	pdrv->driver.name = ghsuart_data_names[port_num];
+	pdrv->driver.owner = THIS_MODULE;
+
+	platform_driver_register(pdrv);
+
+	pr_debug("%s: port:%p portno:%d\n", __func__, port, port_num);
+
+	return 0;
+}
+
+void ghsuart_data_disconnect(void *gptr, int port_num)
+{
+	struct ghsuart_data_port	*port;
+	unsigned long		flags;
+	struct gserial		*gser = NULL;
+
+	pr_debug("%s: port#%d\n", __func__, port_num);
+
+	if (port_num >= num_data_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return;
+	}
+
+	port = ghsuart_data_ports[port_num].port;
+
+	if (!gptr || !port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	ghsuart_data_free_buffers(port);
+
+	/* disable endpoints */
+	if (port->in)
+		usb_ep_disable(port->in);
+
+	if (port->out)
+		usb_ep_disable(port->out);
+
+	atomic_set(&port->connected, 0);
+
+	if (port->gtype == USB_GADGET_SERIAL) {
+		gser = gptr;
+		spin_lock_irqsave(&port->port_lock, flags);
+		gser->notify_modem = 0;
+		port->cbits_tomodem = 0;
+		port->port_usb = 0;
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+
+	spin_lock_irqsave(&port->tx_lock, flags);
+	port->in = NULL;
+	port->n_tx_req_queued = 0;
+	clear_bit(RX_THROTTLED, &port->flags);
+	spin_unlock_irqrestore(&port->tx_lock, flags);
+
+	spin_lock_irqsave(&port->rx_lock, flags);
+	port->out = NULL;
+	clear_bit(TX_THROTTLED, &port->flags);
+	spin_unlock_irqrestore(&port->rx_lock, flags);
+
+	queue_work(port->wq, &port->disconnect_w);
+}
+
+int ghsuart_data_connect(void *gptr, int port_num)
+{
+	struct ghsuart_data_port		*port;
+	struct gserial			*gser;
+	struct grmnet			*gr;
+	unsigned long			flags;
+	int				ret = 0;
+
+	pr_debug("%s: port#%d\n", __func__, port_num);
+
+	if (port_num >= num_data_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return -ENODEV;
+	}
+
+	port = ghsuart_data_ports[port_num].port;
+
+	if (!gptr || !port) {
+		pr_err("%s: port is null\n", __func__);
+		return -ENODEV;
+	}
+
+	if (port->gtype == USB_GADGET_SERIAL) {
+		gser = gptr;
+
+		spin_lock_irqsave(&port->tx_lock, flags);
+		port->in = gser->in;
+		spin_unlock_irqrestore(&port->tx_lock, flags);
+
+		spin_lock_irqsave(&port->rx_lock, flags);
+		port->out = gser->out;
+		spin_unlock_irqrestore(&port->rx_lock, flags);
+
+		port->tx_q_size = ghsuart_data_serial_tx_q_size;
+		port->rx_q_size = ghsuart_data_serial_rx_q_size;
+		gser->in->driver_data = port;
+		gser->out->driver_data = port;
+
+		spin_lock_irqsave(&port->port_lock, flags);
+		gser->notify_modem = ghsuart_send_controlbits_tomodem;
+		port->port_usb = gptr;
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	} else {
+		gr = gptr;
+
+		spin_lock_irqsave(&port->tx_lock, flags);
+		port->in = gr->in;
+		spin_unlock_irqrestore(&port->tx_lock, flags);
+
+		spin_lock_irqsave(&port->rx_lock, flags);
+		port->out = gr->out;
+		spin_unlock_irqrestore(&port->rx_lock, flags);
+
+		port->tx_q_size = ghsuart_data_rmnet_tx_q_size;
+		port->rx_q_size = ghsuart_data_rmnet_rx_q_size;
+		gr->in->driver_data = port;
+		gr->out->driver_data = port;
+	}
+
+	ret = usb_ep_enable(port->in);
+	if (ret) {
+		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
+				__func__, port->in);
+		goto fail;
+	}
+
+	ret = usb_ep_enable(port->out);
+	if (ret) {
+		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
+				__func__, port->out);
+		usb_ep_disable(port->in);
+		goto fail;
+	}
+
+	atomic_set(&port->connected, 1);
+
+	spin_lock_irqsave(&port->tx_lock, flags);
+	port->to_host = 0;
+	spin_unlock_irqrestore(&port->tx_lock, flags);
+
+	spin_lock_irqsave(&port->rx_lock, flags);
+	port->to_modem = 0;
+	port->tomodem_drp_cnt = 0;
+	spin_unlock_irqrestore(&port->rx_lock, flags);
+
+	queue_work(port->wq, &port->connect_w);
+fail:
+	return ret;
+}
+
+#define DEBUG_BUF_SIZE 1024
+static ssize_t ghsuart_data_read_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct ghsuart_data_port	*port;
+	struct platform_driver	*pdrv;
+	char			*buf;
+	unsigned long		flags;
+	int			ret;
+	int			i;
+	int			temp = 0;
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (i = 0; i < num_data_ports; i++) {
+		port = ghsuart_data_ports[i].port;
+		if (!port)
+			continue;
+		pdrv = &ghsuart_data_ports[i].pdrv;
+
+		spin_lock_irqsave(&port->rx_lock, flags);
+		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
+				"\nName:           %s\n"
+				"#PORT:%d port#:   %p\n"
+				"data_ch_open:	   %d\n"
+				"data_ch_ready:    %d\n"
+				"\n******UL INFO*****\n\n"
+				"dpkts_to_modem:   %lu\n"
+				"tomodem_drp_cnt:  %u\n"
+				"rx_buf_len:       %u\n"
+				"TX_THROTTLED      %d\n",
+				pdrv->driver.name,
+				i, port,
+				test_bit(CH_OPENED, &port->channel_sts),
+				test_bit(CH_READY, &port->channel_sts),
+				port->to_modem,
+				port->tomodem_drp_cnt,
+				port->rx_skb_q.qlen,
+				test_bit(TX_THROTTLED, &port->flags));
+		spin_unlock_irqrestore(&port->rx_lock, flags);
+
+		spin_lock_irqsave(&port->tx_lock, flags);
+		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
+				"\n******DL INFO******\n\n"
+				"dpkts_to_usbhost: %lu\n"
+				"tx_buf_len:	   %u\n"
+				"RX_THROTTLED	   %d\n",
+				port->to_host,
+				port->tx_skb_q.qlen,
+				test_bit(RX_THROTTLED, &port->flags));
+		spin_unlock_irqrestore(&port->tx_lock, flags);
+
+	}
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static ssize_t ghsuart_data_reset_stats(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct ghsuart_data_port	*port;
+	int			i;
+	unsigned long		flags;
+
+	for (i = 0; i < num_data_ports; i++) {
+		port = ghsuart_data_ports[i].port;
+		if (!port)
+			continue;
+
+		spin_lock_irqsave(&port->rx_lock, flags);
+		port->to_modem = 0;
+		port->tomodem_drp_cnt = 0;
+		spin_unlock_irqrestore(&port->rx_lock, flags);
+
+		spin_lock_irqsave(&port->tx_lock, flags);
+		port->to_host = 0;
+		spin_unlock_irqrestore(&port->tx_lock, flags);
+	}
+	return count;
+}
+
+static const struct file_operations ghsuart_data_stats_ops = {
+	.read = ghsuart_data_read_stats,
+	.write = ghsuart_data_reset_stats,
+};
+
+static struct dentry	*ghsuart_data_dent;
+static int ghsuart_data_debugfs_init(void)
+{
+	struct dentry	 *ghsuart_data_dfile;
+
+	ghsuart_data_dent = debugfs_create_dir("ghsuart_data_xport", 0);
+	if (!ghsuart_data_dent || IS_ERR(ghsuart_data_dent))
+		return -ENODEV;
+
+	ghsuart_data_dfile = debugfs_create_file("status", S_IRUGO | S_IWUSR,
+				 ghsuart_data_dent, 0, &ghsuart_data_stats_ops);
+	if (!ghsuart_data_dfile || IS_ERR(ghsuart_data_dfile)) {
+		debugfs_remove(ghsuart_data_dent);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void ghsuart_data_debugfs_exit(void)
+{
+	debugfs_remove_recursive(ghsuart_data_dent);
+}
+
+int ghsuart_data_setup(unsigned num_ports, enum gadget_type gtype)
+{
+	int		first_port_id = num_data_ports;
+	int		total_num_ports = num_ports + num_data_ports;
+	int		ret = 0;
+	int		i;
+
+	if (!num_ports || total_num_ports > NUM_HSUART_PORTS) {
+		pr_err("%s: Invalid num of ports count:%d\n",
+				__func__, num_ports);
+		return -EINVAL;
+	}
+	pr_debug("%s: count: %d\n", __func__, num_ports);
+
+	for (i = first_port_id; i < total_num_ports; i++) {
+
+		/* probe can be called while port_alloc, so update num_data_ports */
+		num_data_ports++;
+		ret = ghsuart_data_port_alloc(i, gtype);
+		if (ret) {
+			num_data_ports--;
+			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+			goto free_ports;
+		}
+	}
+
+	/* return the starting index */
+	return first_port_id;
+
+free_ports:
+	for (i = first_port_id; i < num_data_ports; i++)
+		ghsuart_data_port_free(i);
+	num_data_ports = first_port_id;
+
+	return ret;
+}
+
+static int __init ghsuart_data_init(void)
+{
+	int ret;
+
+	ret = ghsuart_data_debugfs_init();
+	if (ret) {
+		pr_debug("mode debugfs file is not available");
+		return ret;
+	}
+
+	return 0;
+}
+module_init(ghsuart_data_init);
+
+static void __exit ghsuart_data_exit(void)
+{
+	ghsuart_data_debugfs_exit();
+}
+module_exit(ghsuart_data_exit);
+
+MODULE_DESCRIPTION("hsuart data xport driver for DUN and RMNET");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index af638f4..4e7c35f 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -261,6 +261,11 @@
 		if (t1 & PORT_OWNER)
 			set_bit(port, &ehci->owned_ports);
 		else if ((t1 & PORT_PE) && !(t1 & PORT_SUSPEND)) {
+			/* clear RS bit before setting SUSP bit
+			 * and wait for HCH to get set */
+			if (ehci->susp_sof_bug)
+				ehci_halt(ehci);
+
 			t2 |= PORT_SUSPEND;
 			set_bit(port, &ehci->bus_suspended);
 		}
@@ -311,8 +316,10 @@
 	if (ehci->bus_suspended)
 		udelay(150);
 
-	/* turn off now-idle HC */
-	ehci_halt (ehci);
+	/* if this bit is set, the controller is already halted */
+	if (!ehci->susp_sof_bug)
+		ehci_halt(ehci); /* turn off now-idle HC */
+
 	hcd->state = HC_STATE_SUSPENDED;
 
 	if (ehci->reclaim)
@@ -1199,8 +1206,10 @@
 			if ((temp & PORT_PE) == 0
 					|| (temp & PORT_RESET) != 0)
 				goto error;
-
-			ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+			/* port gets suspended as part of bus suspend routine */
+			if (!ehci->susp_sof_bug)
+				ehci_writel(ehci, temp | PORT_SUSPEND,
+						status_reg);
 #ifdef	CONFIG_USB_OTG
 			if (hcd->self.otg_port == (wIndex + 1) &&
 					hcd->self.b_hnp_enable) {
@@ -1215,7 +1224,11 @@
 			 */
 			temp &= ~PORT_WKCONN_E;
 			temp |= PORT_WKDISC_E | PORT_WKOC_E;
-			ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+			if (ehci->susp_sof_bug)
+				ehci_writel(ehci, temp, status_reg);
+			else
+				ehci_writel(ehci, temp | PORT_SUSPEND,
+						status_reg);
 			if (hostpc_reg) {
 				spin_unlock_irqrestore(&ehci->lock, flags);
 				msleep(5);/* 5ms for HCD enter low pwr mode */
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index 82373e2..fd3d1ca 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -56,8 +56,11 @@
 	struct wake_lock	wlock;
 	int			peripheral_status_irq;
 	int			wakeup_irq;
+	int			wakeup_gpio;
 	bool			wakeup_irq_enabled;
+	atomic_t		pm_usage_cnt;
 	uint32_t		bus_perf_client;
+	uint32_t		wakeup_int_cnt;
 };
 
 static bool debug_bus_voting_enabled = true;
@@ -186,9 +189,20 @@
 		goto free_strobe;
 		}
 
+	if (mehci->wakeup_gpio) {
+		rc = gpio_request(mehci->wakeup_gpio, "HSIC_WAKEUP_GPIO");
+		if (rc < 0) {
+			dev_err(mehci->dev, "gpio request failed for HSIC WAKEUP\n");
+			goto free_data;
+		}
+	}
+
 	return 0;
 
 free_gpio:
+	if (mehci->wakeup_gpio)
+		gpio_free(mehci->wakeup_gpio);
+free_data:
 	gpio_free(pdata->data);
 free_strobe:
 	gpio_free(pdata->strobe);
@@ -321,6 +335,9 @@
 		ulpi_write(mehci, 0xA9, 0x30);
 	}
 
+	/* disable auto resume */
+	ulpi_write(mehci, ULPI_IFC_CTRL_AUTORESUME, ULPI_CLR(ULPI_IFC_CTRL));
+
 	return 0;
 }
 
@@ -339,6 +356,12 @@
 		return 0;
 	}
 
+	if (!(readl_relaxed(USB_PORTSC) & PORT_PE)) {
+		dev_dbg(mehci->dev, "%s:port is not enabled skip suspend\n",
+				__func__);
+		return -EAGAIN;
+	}
+
 	disable_irq(hcd->irq);
 
 	/* make sure we don't race against a remote wakeup */
@@ -408,6 +431,11 @@
 
 	atomic_set(&mehci->in_lpm, 1);
 	enable_irq(hcd->irq);
+
+	mehci->wakeup_irq_enabled = 1;
+	enable_irq_wake(mehci->wakeup_irq);
+	enable_irq(mehci->wakeup_irq);
+
 	wake_unlock(&mehci->wlock);
 
 	dev_info(mehci->dev, "HSIC-USB in low power mode\n");
@@ -426,6 +454,12 @@
 		return 0;
 	}
 
+	if (mehci->wakeup_irq_enabled) {
+		disable_irq_wake(mehci->wakeup_irq);
+		disable_irq_nosync(mehci->wakeup_irq);
+		mehci->wakeup_irq_enabled = 0;
+	}
+
 	wake_lock(&mehci->wlock);
 
 	if (mehci->bus_perf_client && debug_bus_voting_enabled) {
@@ -478,6 +512,8 @@
 
 skip_phy_resume:
 
+	usb_hcd_resume_root_hub(hcd);
+
 	atomic_set(&mehci->in_lpm, 0);
 
 	if (mehci->async_int) {
@@ -486,6 +522,11 @@
 		enable_irq(hcd->irq);
 	}
 
+	if (atomic_read(&mehci->pm_usage_cnt)) {
+		atomic_set(&mehci->pm_usage_cnt, 0);
+		pm_runtime_put_noidle(mehci->dev);
+	}
+
 	dev_info(mehci->dev, "HSIC-USB exited from low power mode\n");
 
 	return 0;
@@ -687,7 +728,11 @@
 {
 	struct msm_hsic_hcd *mehci = data;
 
-	dev_dbg(mehci->dev, "%s: hsic remote wakeup interrupt\n", __func__);
+	mehci->wakeup_int_cnt++;
+	dev_dbg(mehci->dev, "%s: hsic remote wakeup interrupt cnt: %u\n",
+			__func__, mehci->wakeup_int_cnt);
+
+	wake_lock(&mehci->wlock);
 
 	if (mehci->wakeup_irq_enabled) {
 		mehci->wakeup_irq_enabled = 0;
@@ -695,6 +740,11 @@
 		disable_irq_nosync(irq);
 	}
 
+	if (!atomic_read(&mehci->pm_usage_cnt)) {
+		atomic_set(&mehci->pm_usage_cnt, 1);
+		pm_runtime_get(mehci->dev);
+	}
+
 	return IRQ_HANDLED;
 }
 
@@ -751,6 +801,27 @@
 	.release = single_release,
 };
 
+static int ehci_hsic_msm_wakeup_cnt_show(struct seq_file *s, void *unused)
+{
+	struct msm_hsic_hcd *mehci = s->private;
+
+	seq_printf(s, "%u\n", mehci->wakeup_int_cnt);
+
+	return 0;
+}
+
+static int ehci_hsic_msm_wakeup_cnt_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, ehci_hsic_msm_wakeup_cnt_show, inode->i_private);
+}
+
+const struct file_operations ehci_hsic_msm_wakeup_cnt_fops = {
+	.open = ehci_hsic_msm_wakeup_cnt_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 static struct dentry *ehci_hsic_msm_dbg_root;
 static int ehci_hsic_msm_debugfs_init(struct msm_hsic_hcd *mehci)
 {
@@ -771,6 +842,16 @@
 		return -ENODEV;
 	}
 
+	ehci_hsic_msm_dentry = debugfs_create_file("wakeup_cnt",
+		S_IRUGO,
+		ehci_hsic_msm_dbg_root, mehci,
+		&ehci_hsic_msm_wakeup_cnt_fops);
+
+	if (!ehci_hsic_msm_dentry) {
+		debugfs_remove_recursive(ehci_hsic_msm_dbg_root);
+		return -ENODEV;
+	}
+
 	return 0;
 }
 
@@ -830,12 +911,21 @@
 	mehci = hcd_to_hsic(hcd);
 	mehci->dev = &pdev->dev;
 
+	mehci->ehci.susp_sof_bug = 1;
+
 	res = platform_get_resource_byname(pdev,
 			IORESOURCE_IRQ,
 			"peripheral_status_irq");
 	if (res)
 		mehci->peripheral_status_irq = res->start;
 
+	res = platform_get_resource_byname(pdev, IORESOURCE_IO, "wakeup");
+	if (res) {
+		mehci->wakeup_gpio = res->start;
+		mehci->wakeup_irq = MSM_GPIO_TO_INT(res->start);
+		dev_dbg(mehci->dev, "wakeup_irq: %d\n", mehci->wakeup_irq);
+	}
+
 	ret = msm_hsic_init_clocks(mehci, 1);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to initialize clocks\n");
@@ -878,12 +968,9 @@
 	}
 
 	/* configure wakeup irq */
-	ret = platform_get_irq(pdev, 2);
-	if (ret > 0) {
-		mehci->wakeup_irq = ret;
-		dev_dbg(&pdev->dev, "wakeup_irq: %d\n", mehci->wakeup_irq);
+	if (mehci->wakeup_irq) {
 		ret = request_irq(mehci->wakeup_irq, msm_hsic_wakeup_irq,
-				IRQF_TRIGGER_LOW,
+				IRQF_TRIGGER_HIGH,
 				"msm_hsic_wakeup", mehci);
 		if (!ret) {
 			disable_irq_nosync(mehci->wakeup_irq);
@@ -999,39 +1086,15 @@
 	return ret;
 }
 
-static int msm_hsic_pm_suspend_noirq(struct device *dev)
-{
-	struct usb_hcd *hcd = dev_get_drvdata(dev);
-	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
-
-	dev_dbg(dev, "ehci-msm-hsic PM suspend_noirq\n");
-
-	if (device_may_wakeup(dev) && !mehci->wakeup_irq_enabled) {
-		enable_irq(mehci->wakeup_irq);
-		enable_irq_wake(mehci->wakeup_irq);
-		mehci->wakeup_irq_enabled = 1;
-	}
-
-	return 0;
-}
-
 static int msm_hsic_pm_resume(struct device *dev)
 {
 	int ret;
 	struct usb_hcd *hcd = dev_get_drvdata(dev);
 	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
 
-	dev_dbg(dev, "ehci-msm-hsic PM resume\n");
-
 	if (device_may_wakeup(dev))
 		disable_irq_wake(hcd->irq);
 
-	if (mehci->wakeup_irq_enabled) {
-		mehci->wakeup_irq_enabled = 0;
-		disable_irq_wake(mehci->wakeup_irq);
-		disable_irq_nosync(mehci->wakeup_irq);
-	}
-
 	ret = msm_hsic_resume(mehci);
 	if (ret)
 		return ret;
@@ -1074,7 +1137,6 @@
 #ifdef CONFIG_PM
 static const struct dev_pm_ops msm_hsic_dev_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(msm_hsic_pm_suspend, msm_hsic_pm_resume)
-	.suspend_noirq = msm_hsic_pm_suspend_noirq,
 	SET_RUNTIME_PM_OPS(msm_hsic_runtime_suspend, msm_hsic_runtime_resume,
 				msm_hsic_runtime_idle)
 };
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 3a5a5cc..af8ea8b 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -138,6 +138,7 @@
 	unsigned		use_dummy_qh:1;	/* AMD Frame List table quirk*/
 	unsigned		has_synopsys_hc_bug:1; /* Synopsys HC */
 	unsigned		frame_index_bug:1; /* MosChip (AKA NetMos) */
+	unsigned		susp_sof_bug:1; /*Chip Idea HC*/
 
 	/* required for usb32 quirk */
 	#define OHCI_CTRL_HCFS          (3 << 6)
diff --git a/drivers/usb/misc/mdm_ctrl_bridge.c b/drivers/usb/misc/mdm_ctrl_bridge.c
index c23c7b1..49591cd 100644
--- a/drivers/usb/misc/mdm_ctrl_bridge.c
+++ b/drivers/usb/misc/mdm_ctrl_bridge.c
@@ -255,7 +255,7 @@
 	}
 }
 
-int ctrl_bridge_start_read(struct ctrl_bridge *dev)
+static int ctrl_bridge_start_read(struct ctrl_bridge *dev)
 {
 	int	retval = 0;
 
@@ -281,7 +281,6 @@
 int ctrl_bridge_open(struct bridge *brdg)
 {
 	struct ctrl_bridge	*dev;
-	int			ret;
 
 	if (!brdg) {
 		err("bridge is null\n");
@@ -304,16 +303,10 @@
 	dev->set_ctrl_line_sts = 0;
 	dev->notify_ser_state = 0;
 
-	ret = usb_autopm_get_interface(dev->intf);
-	if (ret < 0) {
-		dev_err(&dev->udev->dev, "%s autopm_get fail: %d\n",
-			__func__, ret);
-		return ret;
-	}
+	if (brdg->ops.send_cbits)
+		brdg->ops.send_cbits(brdg->ctx, dev->cbits_tohost);
 
-	ret = ctrl_bridge_start_read(dev);
-	usb_autopm_put_interface(dev->intf);
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(ctrl_bridge_open);
 
@@ -504,11 +497,7 @@
 		}
 	}
 
-	/* if the bridge is open, resume reading */
-	if (dev->brdg)
-		return ctrl_bridge_start_read(dev);
-
-	return 0;
+	return ctrl_bridge_start_read(dev);
 }
 
 #if defined(CONFIG_DEBUG_FS)
@@ -711,7 +700,7 @@
 
 	ch_id++;
 
-	return retval;
+	return ctrl_bridge_start_read(dev);
 
 free_rbuf:
 	kfree(dev->readbuf);
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index 5b05c5b..2d3a01d 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -75,6 +75,7 @@
 static struct regulator *hsusb_vddcx;
 static struct regulator *vbus_otg;
 static struct regulator *mhl_analog_switch;
+static struct power_supply *psy;
 
 static bool aca_id_turned_on;
 static inline bool aca_enabled(void)
@@ -994,9 +995,7 @@
 
 static int msm_otg_notify_power_supply(struct msm_otg *motg, unsigned mA)
 {
-	struct power_supply *psy;
 
-	psy = power_supply_get_by_name("usb");
 	if (!psy)
 		goto psy_not_supported;
 
@@ -2001,6 +2000,9 @@
 	case OTG_STATE_UNDEFINED:
 		msm_otg_reset(otg);
 		msm_otg_init_sm(motg);
+		psy = power_supply_get_by_name("usb");
+		if (!psy)
+			pr_err("couldn't get usb power supply\n");
 		otg->state = OTG_STATE_B_IDLE;
 		if (!test_bit(B_SESS_VLD, &motg->inputs) &&
 				test_bit(ID, &motg->inputs)) {
diff --git a/drivers/video/msm/lvds.c b/drivers/video/msm/lvds.c
index 18225fb..88c24b9 100644
--- a/drivers/video/msm/lvds.c
+++ b/drivers/video/msm/lvds.c
@@ -65,28 +65,14 @@
 	usleep(1000);
 
 	/* LVDS PHY PLL configuration */
-	MDP_OUTP(MDP_BASE + 0xc3000, 0x08);
-	MDP_OUTP(MDP_BASE + 0xc3004, 0x87);
+	MDP_OUTP(MDP_BASE + 0xc3004, 0x62);
 	MDP_OUTP(MDP_BASE + 0xc3008, 0x30);
-	MDP_OUTP(MDP_BASE + 0xc300c, 0x06);
-	MDP_OUTP(MDP_BASE + 0xc3014, 0x20);
-	MDP_OUTP(MDP_BASE + 0xc3018, 0x0F);
-	MDP_OUTP(MDP_BASE + 0xc301c, 0x01);
+	MDP_OUTP(MDP_BASE + 0xc300c, 0xc4);
+	MDP_OUTP(MDP_BASE + 0xc3014, 0x10);
+	MDP_OUTP(MDP_BASE + 0xc3018, 0x05);
+	MDP_OUTP(MDP_BASE + 0xc301c, 0x62);
 	MDP_OUTP(MDP_BASE + 0xc3020, 0x41);
 	MDP_OUTP(MDP_BASE + 0xc3024, 0x0d);
-	MDP_OUTP(MDP_BASE + 0xc3028, 0x07);
-	MDP_OUTP(MDP_BASE + 0xc302c, 0x00);
-	MDP_OUTP(MDP_BASE + 0xc3030, 0x1c);
-	MDP_OUTP(MDP_BASE + 0xc3034, 0x01);
-	MDP_OUTP(MDP_BASE + 0xc3038, 0x00);
-	MDP_OUTP(MDP_BASE + 0xc3040, 0xC0);
-	MDP_OUTP(MDP_BASE + 0xc3044, 0x00);
-	MDP_OUTP(MDP_BASE + 0xc3048, 0x30);
-	MDP_OUTP(MDP_BASE + 0xc304c, 0x00);
-
-	MDP_OUTP(MDP_BASE + 0xc3000, 0x11);
-	MDP_OUTP(MDP_BASE + 0xc3064, 0x05);
-	MDP_OUTP(MDP_BASE + 0xc3050, 0x20);
 
 	MDP_OUTP(MDP_BASE + 0xc3000, 0x01);
 	/* Wait until LVDS PLL is locked and ready */
@@ -210,6 +196,10 @@
 	if (lvds_clk)
 		clk_disable_unprepare(lvds_clk);
 
+	MDP_OUTP(MDP_BASE + 0xc3100, 0x0);
+	MDP_OUTP(MDP_BASE + 0xc3000, 0x0);
+	usleep(10);
+
 	if (lvds_pdata && lvds_pdata->lcdc_power_save)
 		lvds_pdata->lcdc_power_save(0);
 
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 3d35dd5..7fd2d91 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -93,7 +93,7 @@
 static struct delayed_work mdp_pipe_ctrl_worker;
 
 static boolean mdp_suspended = FALSE;
-DEFINE_MUTEX(mdp_suspend_mutex);
+static DEFINE_MUTEX(mdp_suspend_mutex);
 
 #ifdef CONFIG_FB_MSM_MDP40
 struct mdp_dma_data dma2_data;
@@ -1573,7 +1573,8 @@
 		__mdp_histogram_kickoff(mgmt);
 
 	if (isr & INTR_HIST_DONE) {
-		if (waitqueue_active(&mgmt->mdp_hist_comp.wait)) {
+		if ((waitqueue_active(&mgmt->mdp_hist_comp.wait))
+			 && (mgmt->hist != NULL)) {
 			if (!queue_work(mdp_hist_wq,
 						&mgmt->mdp_histogram_worker)) {
 				pr_err("%s %d- can't queue hist_read\n",
@@ -2574,6 +2575,17 @@
 	return rc;
 }
 
+unsigned int mdp_check_suspended(void)
+{
+	unsigned int ret;
+
+	mutex_lock(&mdp_suspend_mutex);
+	ret = mdp_suspended;
+	mutex_unlock(&mdp_suspend_mutex);
+
+	return ret;
+}
+
 void mdp_footswitch_ctrl(boolean on)
 {
 	mutex_lock(&mdp_suspend_mutex);
@@ -2640,6 +2652,7 @@
 #ifdef CONFIG_FB_MSM_DTV
 	mdp4_dtv_set_black_screen();
 #endif
+	mdp4_iommu_detach();
 	mdp_footswitch_ctrl(FALSE);
 }
 
diff --git a/drivers/video/msm/mdp.h b/drivers/video/msm/mdp.h
index 40f62b9..b104b33 100644
--- a/drivers/video/msm/mdp.h
+++ b/drivers/video/msm/mdp.h
@@ -801,6 +801,7 @@
 void mdp_histogram_handle_isr(struct mdp_hist_mgmt *mgmt);
 void __mdp_histogram_kickoff(struct mdp_hist_mgmt *mgmt);
 void __mdp_histogram_reset(struct mdp_hist_mgmt *mgmt);
+unsigned int mdp_check_suspended(void);
 void mdp_footswitch_ctrl(boolean on);
 
 #ifdef CONFIG_FB_MSM_MDP303
@@ -828,6 +829,10 @@
 {
 	/* empty */
 }
+static inline void mdp4_iommu_detach(void)
+{
+    /* empty */
+}
 #endif
 
 int mdp_ppp_v4l2_overlay_set(struct fb_info *info, struct mdp_overlay *req);
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index de254d0..a7161fe 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -48,12 +48,6 @@
 	OVERLAY_PERF_LEVEL4
 };
 
-enum mdp4_overlay_status {
-	MDP4_OVERLAY_TYPE_UNSET,
-	MDP4_OVERLAY_TYPE_SET,
-	MDP4_OVERLAY_TYPE_MAX
-};
-
 typedef int (*cmd_fxn_t)(struct platform_device *pdev);
 
 enum {		/* display */
@@ -516,6 +510,7 @@
 struct mdp4_overlay_pipe *mdp4_overlay_stage_pipe(int mixer, int stage);
 void mdp4_mixer_stage_up(struct mdp4_overlay_pipe *pipe);
 void mdp4_mixer_stage_down(struct mdp4_overlay_pipe *pipe);
+void mdp4_mixer_pipe_cleanup(int mixer);
 int mdp4_mixer_stage_can_run(struct mdp4_overlay_pipe *pipe);
 void mdp4_overlayproc_cfg(struct mdp4_overlay_pipe *pipe);
 void mdp4_mddi_overlay(struct msm_fb_data_type *mfd);
@@ -726,8 +721,6 @@
 void mdp4_overlay_dsi_video_wait4vsync(struct msm_fb_data_type *mfd);
 void mdp4_primary_vsync_dsi_video(void);
 uint32_t mdp4_ss_table_value(int8_t param, int8_t index);
-void mdp4_overlay_status_write(enum mdp4_overlay_status type, bool val);
-bool mdp4_overlay_status_read(enum mdp4_overlay_status type);
 void mdp4_overlay_ctrl_db_reset(void);
 
 int mdp4_overlay_writeback_on(struct platform_device *pdev);
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 034d6b6..6d4e44b 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -23,6 +23,7 @@
 #include <mach/hardware.h>
 #include <mach/iommu_domains.h>
 #include <mach/iommu.h>
+#include <linux/iommu.h>
 #include <linux/io.h>
 #include <linux/debugfs.h>
 #include <linux/fb.h>
@@ -197,19 +198,6 @@
 	}
 }
 
-/* static array with index 0 for unset status and 1 for set status */
-static bool overlay_status[MDP4_OVERLAY_TYPE_MAX];
-
-void mdp4_overlay_status_write(enum mdp4_overlay_status type, bool val)
-{
-	overlay_status[type] = val;
-}
-
-bool mdp4_overlay_status_read(enum mdp4_overlay_status type)
-{
-	return overlay_status[type];
-}
-
 void mdp4_overlay_ctrl_db_reset(void)
 {
 	int i;
@@ -1497,6 +1485,21 @@
 	}
 }
 
+void mdp4_mixer_pipe_cleanup(int mixer)
+{
+	struct mdp4_overlay_pipe *pipe;
+	int j;
+
+	for (j = MDP4_MIXER_STAGE_MAX - 1; j > MDP4_MIXER_STAGE_BASE; j--) {
+		pipe = ctrl->stage[mixer][j];
+		if (pipe == NULL)
+			continue;
+		pr_debug("%s(): pipe %u\n", __func__, pipe->pipe_ndx);
+		mdp4_mixer_stage_down(pipe);
+		mdp4_overlay_pipe_free(pipe);
+	}
+}
+
 static void mdp4_mixer_stage_commit(int mixer)
 {
 	struct mdp4_overlay_pipe *pipe;
@@ -2291,7 +2294,7 @@
 	if (mdp4_extn_disp)
 		return OVERLAY_PERF_LEVEL1;
 
-	if (req->flags & MDP_DEINTERLACE)
+	if (req->flags & (MDP_DEINTERLACE | MDP_BACKEND_COMPOSITION))
 		return OVERLAY_PERF_LEVEL1;
 
 	for (i = 0, cnt = 0; i < OVERLAY_PIPE_MAX; i++) {
@@ -2501,12 +2504,6 @@
 
 	mdp4_stat.overlay_set[pipe->mixer_num]++;
 
-	if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
-		if (mdp_hw_revision == MDP4_REVISION_V2_1 &&
-			pipe->mixer_num == MDP4_MIXER0)
-			mdp4_overlay_status_write(MDP4_OVERLAY_TYPE_SET, true);
-	}
-
 	if (ctrl->panel_mode & MDP4_PANEL_DTV &&
 	    pipe->mixer_num == MDP4_MIXER1) {
 		u32 use_blt = mdp4_overlay_blt_enable(req, mfd, perf_level);
@@ -2624,9 +2621,6 @@
 			}
 #else
 			if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
-				if (mdp_hw_revision == MDP4_REVISION_V2_1)
-					mdp4_overlay_status_write(
-						MDP4_OVERLAY_TYPE_UNSET, true);
 				if (mfd->panel_power_on)
 					mdp4_mddi_overlay_restore();
 			}
@@ -2733,7 +2727,7 @@
 	if (mfd->use_ov1_blt)
 		mdp4_overlay1_update_blt_mode(mfd);
 
-	mdp4_overlay_dtv_wait_for_ov(mfd, pipe);
+	mdp4_overlay_dtv_wait4vsync();
 	mdp4_iommu_unmap(pipe);
 
 	mutex_unlock(&mfd->dma->ov_mutex);
@@ -2921,17 +2915,21 @@
 		/* primary interface */
 		ctrl->mixer0_played++;
 		if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
-			if (!mfd->use_ov0_blt)
-				mdp4_overlay_update_blt_mode(mfd);
+			mdp4_overlay_reg_flush(pipe, 0);
 			mdp4_overlay_lcdc_start();
 			mdp4_overlay_lcdc_vsync_push(mfd, pipe);
+			if (!mfd->use_ov0_blt &&
+					!(pipe->flags & MDP_OV_PLAY_NOWAIT))
+				mdp4_overlay_update_blt_mode(mfd);
 		}
 #ifdef CONFIG_FB_MSM_MIPI_DSI
 		else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
-			if (!mfd->use_ov0_blt)
-				mdp4_overlay_update_blt_mode(mfd);
+			mdp4_overlay_reg_flush(pipe, 0);
 			mdp4_overlay_dsi_video_start();
 			mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
+			if (!mfd->use_ov0_blt &&
+					!(pipe->flags & MDP_OV_PLAY_NOWAIT))
+				mdp4_overlay_update_blt_mode(mfd);
 		}
 #endif
 		else {
@@ -3005,6 +3003,12 @@
 };
 
 static int iommu_enabled;
+static int mdp_iommu_fault_handler(struct iommu_domain *domain,
+	struct device *dev, unsigned long iova, int flags)
+{
+	pr_err("MDP IOMMU page fault: iova 0x%lx", iova);
+	return 0;
+}
 
 void mdp4_iommu_attach(void)
 {
@@ -3026,6 +3030,8 @@
 			if (!domain)
 				continue;
 
+			iommu_set_fault_handler(domain,
+				mdp_iommu_fault_handler);
 			if (iommu_attach_device(domain,	ctx)) {
 				WARN(1, "%s: could not attach domain %d to context %s."
 					" iommu programming will not occur.\n",
@@ -3044,6 +3050,9 @@
 	struct iommu_domain *domain;
 	int i;
 
+	if (!mdp_check_suspended() || mdp4_extn_disp)
+		return;
+
 	if (iommu_enabled) {
 		for (i = 0; i < ARRAY_SIZE(msm_iommu_ctx_names); i++) {
 			int domain_idx;
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index 8bed42d..574a657 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -24,6 +24,9 @@
 #include <linux/spinlock.h>
 #include <linux/fb.h>
 #include <linux/msm_mdp.h>
+#include <linux/ktime.h>
+#include <linux/wakelock.h>
+#include <linux/time.h>
 #include <asm/system.h>
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -32,6 +35,8 @@
 #include "mdp4.h"
 #include "mipi_dsi.h"
 
+#include <mach/iommu_domains.h>
+
 #define DSI_VIDEO_BASE	0xE0000
 
 static int first_pixel_start_x;
@@ -180,14 +185,10 @@
 	pipe->srcp0_ystride = fbi->fix.line_length;
 	pipe->bpp = bpp;
 
-	if (mfd->map_buffer) {
-		pipe->srcp0_addr = (unsigned int)mfd->map_buffer->iova[0] + \
-			buf_offset;
-		pr_debug("start 0x%lx srcp0_addr 0x%x\n", mfd->
-			map_buffer->iova[0], pipe->srcp0_addr);
-	} else {
+	if (mfd->display_iova)
+		pipe->srcp0_addr = mfd->display_iova + buf_offset;
+	else
 		pipe->srcp0_addr = (uint32)(buf + buf_offset);
-	}
 
 	pipe->dst_h = fbi->var.yres;
 	pipe->dst_w = fbi->var.xres;
@@ -296,6 +297,7 @@
 
 	/* MDP cmd block enable */
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	mdp4_mixer_pipe_cleanup(dsi_pipe->mixer_num);
 	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
 	dsi_video_enabled = 0;
 	/* MDP cmd block disable */
@@ -304,13 +306,15 @@
 	mdp_histogram_ctrl_all(FALSE);
 	ret = panel_next_off(pdev);
 
+	/* delay to make sure the last frame finishes */
+	msleep(20);
+
 	/* dis-engage rgb0 from mixer0 */
 	if (dsi_pipe) {
 		mdp4_mixer_stage_down(dsi_pipe);
 		mdp4_iommu_unmap(dsi_pipe);
 	}
 
-	mdp4_iommu_detach();
 	return ret;
 }
 
@@ -369,14 +373,10 @@
 	pipe->dst_y = 0;
 	pipe->dst_x = 0;
 
-	if (mfd->map_buffer) {
-		pipe->srcp0_addr = (unsigned int)mfd->map_buffer->iova[0] + \
-			buf_offset;
-		pr_debug("start 0x%lx srcp0_addr 0x%x\n", mfd->
-			map_buffer->iova[0], pipe->srcp0_addr);
-	} else {
+	if (mfd->display_iova)
+		pipe->srcp0_addr = mfd->display_iova + buf_offset;
+	else
 		pipe->srcp0_addr = (uint32)(buf + buf_offset);
-	}
 
 	mdp4_overlay_rgb_setup(pipe);
 
@@ -447,6 +447,94 @@
 }
 
 /*
+ * MDP Timer/Functions
+ * Set up an HR timer to wake up the CPUs just before the next vsync.
+ * This reduces any latencies that may arise if the CPUs were power collapsed
+ * in between.
+ */
+#define VSYNC_INTERVAL	16
+#define WAKE_DELAY	3 /* 3 ioctls * 600 us = 2ms + 1ms buffer */
+#define MAX_VSYNC_GAP   4 /* Marker to detect whether to skip timer */
+
+/* Move the globals into context data structure for 3.4 upgrade */
+static int first_time = 1;
+static ktime_t last_vsync_time_ns;
+struct hrtimer hr_mdp_timer_pc;
+
+static unsigned long compute_vsync_interval(void)
+{
+	ktime_t currtime_us;
+	unsigned long diff_from_vsync, vsync_interval;
+	/*
+	 * Get the interval between the last vsync and the current time
+	 * Current time = CPU programming MDP for next Vsync
+	 */
+	currtime_us = ktime_get();
+	diff_from_vsync =
+		(ktime_to_us(ktime_sub(currtime_us, last_vsync_time_ns)));
+	diff_from_vsync /= USEC_PER_MSEC;
+	/*
+	 * If the last Vsync occurred more than 64 ms ago, skip programming
+	 * the timer
+	 */
+	if (diff_from_vsync < (VSYNC_INTERVAL*MAX_VSYNC_GAP)) {
+		vsync_interval =
+			(VSYNC_INTERVAL-diff_from_vsync)%VSYNC_INTERVAL;
+	} else
+		vsync_interval = VSYNC_INTERVAL+1;
+
+	return vsync_interval;
+}
+
+enum hrtimer_restart mdp_pc_hrtimer_callback(struct hrtimer *timer)
+{
+	if (!wake_lock_active(&mdp_idle_wakelock)) {
+		/* Grab the wakelock if it is not already held */
+		wake_lock(&mdp_idle_wakelock);
+	}
+	return HRTIMER_NORESTART;
+}
+
+void init_pc_timer(void)
+{
+	/*
+	 * Initialize the HR timer which fires a few ms before VSync; this
+	 * hides the wake-up latency when the CPUs come out of power
+	 * collapse.
+	 */
+	hrtimer_init(&hr_mdp_timer_pc, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hr_mdp_timer_pc.function = &mdp_pc_hrtimer_callback;
+}
+
+void program_pc_timer(unsigned long diff_interval)
+{
+	ktime_t ktime_pc;
+	unsigned long delay_in_ns = 0;
+
+	/* Skip programming timer due to invalid delay */
+	if (diff_interval > VSYNC_INTERVAL)
+		return;
+
+	if (diff_interval < WAKE_DELAY) {
+		/*
+		 * Difference from the last vsync was a multiple of the refresh
+		 * period ((16*x) % 16 == 0). Reset it to the actual time to
+		 * the next vsync.
+		 */
+		if (diff_interval == 0)
+			delay_in_ns = VSYNC_INTERVAL-WAKE_DELAY;
+		else
+			return;	/* too close to vsync to fire timer - skip */
+	} else if (diff_interval == WAKE_DELAY) {
+		delay_in_ns = 1;  /* diff_interval == WAKE_DELAY, fire in 1 ms */
+	} else {
+		delay_in_ns = diff_interval-WAKE_DELAY;
+	}
+	delay_in_ns *= NSEC_PER_MSEC;
+	ktime_pc = ktime_set(0, delay_in_ns);
+	hrtimer_start(&hr_mdp_timer_pc, ktime_pc, HRTIMER_MODE_REL);
+}
+
+/*
  * mdp4_overlay_dsi_video_wait4event:
  * INTR_DMA_P_DONE and INTR_PRIMARY_VSYNC event only
  * no INTR_OVERLAY0_DONE event allowed.
@@ -456,12 +544,21 @@
 {
 	unsigned long flag;
 	unsigned int data;
+	unsigned long vsync_interval;
 
 	data = inpdw(MDP_BASE + DSI_VIDEO_BASE);
 	data &= 0x01;
 	if (data == 0)	/* timing generator disabled */
 		return;
-
+	if ((intr_done == INTR_PRIMARY_VSYNC) ||
+			(intr_done == INTR_DMA_P_DONE)) {
+		if (first_time) {
+			init_pc_timer();
+			first_time = 0;
+		}
+		vsync_interval = compute_vsync_interval();
+		program_pc_timer(vsync_interval);
+	}
 	spin_lock_irqsave(&mdp_spin_lock, flag);
 	INIT_COMPLETION(dsi_video_comp);
 	mfd->dma->waiting = TRUE;
@@ -548,6 +645,10 @@
 void mdp4_primary_vsync_dsi_video(void)
 {
 	complete_all(&dsi_video_comp);
+	last_vsync_time_ns = ktime_get();
+	/* Release Wakelock */
+	if (wake_lock_active(&mdp_idle_wakelock))
+		wake_unlock(&mdp_idle_wakelock);
 }
 
  /*
@@ -574,6 +675,10 @@
 		blt_cfg_changed = 0;
 	}
 	complete_all(&dsi_video_comp);
+	last_vsync_time_ns = ktime_get();
+	/*  Release Wakelock */
+	if (wake_lock_active(&mdp_idle_wakelock))
+		wake_unlock(&mdp_idle_wakelock);
 }
 
 /*
@@ -697,14 +802,10 @@
 
 	pipe = dsi_pipe;
 
-	if (mfd->map_buffer) {
-		pipe->srcp0_addr = (unsigned int)mfd->map_buffer->iova[0] + \
-			buf_offset;
-		pr_debug("start 0x%lx srcp0_addr 0x%x\n", mfd->
-			map_buffer->iova[0], pipe->srcp0_addr);
-	} else {
+	if (mfd->display_iova)
+		pipe->srcp0_addr = mfd->display_iova + buf_offset;
+	else
 		pipe->srcp0_addr = (uint32)(buf + buf_offset);
-	}
 
 	mdp4_overlay_rgb_setup(pipe);
 	mdp4_overlay_reg_flush(pipe, 0);
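
The power-collapse timer added above is driven by two small pieces of arithmetic: compute_vsync_interval() measures how far into the current 16 ms frame we are, and program_pc_timer() arms the hrtimer WAKE_DELAY ms before the next expected vsync. The standalone sketch below restates that arithmetic in userspace C so the resulting delays can be tabulated; the constants are copied from the hunk, the function names are hypothetical, and this is an illustration rather than driver code.

#include <stdio.h>

#define VSYNC_INTERVAL	16	/* ms, ~60 Hz frame period */
#define WAKE_DELAY	3	/* ms before the vsync at which the timer fires */
#define MAX_VSYNC_GAP	4	/* give up if no vsync for this many periods */

/* Same policy as compute_vsync_interval(): ms until the next expected vsync
 * (0 means we are on a vsync boundary), or VSYNC_INTERVAL + 1 as the
 * "skip the timer" marker. Uses the same unsigned expression as the driver. */
static unsigned long vsync_interval(unsigned long ms_since_vsync)
{
	if (ms_since_vsync < VSYNC_INTERVAL * MAX_VSYNC_GAP)
		return (VSYNC_INTERVAL - ms_since_vsync) % VSYNC_INTERVAL;
	return VSYNC_INTERVAL + 1;
}

/* Same decision tree as program_pc_timer(): how long to wait before taking
 * the idle wakelock, with 0 meaning the timer is not armed at all. */
static unsigned long pc_timer_delay_ms(unsigned long diff_interval)
{
	if (diff_interval > VSYNC_INTERVAL)
		return 0;			/* stale vsync: skip */
	if (diff_interval == 0)
		return VSYNC_INTERVAL - WAKE_DELAY;	/* on a vsync boundary */
	if (diff_interval < WAKE_DELAY)
		return 0;			/* too close to vsync: skip */
	if (diff_interval == WAKE_DELAY)
		return 1;
	return diff_interval - WAKE_DELAY;
}

int main(void)
{
	unsigned long ms;

	for (ms = 0; ms <= 70; ms += 7) {
		unsigned long vi = vsync_interval(ms);

		printf("%2lu ms since vsync -> interval %2lu, timer in %2lu ms\n",
		       ms, vi, pc_timer_delay_ms(vi));
	}
	return 0;
}
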
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
index 8692b09..dd96439 100644
--- a/drivers/video/msm/mdp4_overlay_dtv.c
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -208,6 +208,8 @@
 
 	/* MDP cmd block enable */
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	mdp4_mixer_pipe_cleanup(dtv_pipe->mixer_num);
+	msleep(20);
 	MDP_OUTP(MDP_BASE + DTV_BASE, 0);
 	dtv_enabled = 0;
 	/* MDP cmd block disable */
@@ -260,6 +262,10 @@
 
 	if (dtv_pipe != NULL) {
 		mdp4_dtv_stop(mfd);
+
+		/* delay to make sure the last frame finishes */
+		msleep(20);
+
 		mdp4_mixer_stage_down(dtv_pipe);
 		mdp4_overlay_pipe_free(dtv_pipe);
 		mdp4_iommu_unmap(dtv_pipe);
@@ -268,6 +274,7 @@
 	mdp4_overlay_panel_mode_unset(MDP4_MIXER1, MDP4_PANEL_DTV);
 
 	ret = panel_next_off(pdev);
+	mdp4_iommu_detach();
 	mdp_footswitch_ctrl(FALSE);
 
 	dev_info(&pdev->dev, "mdp4_overlay_dtv: off");
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index 72722ef..a1fecb6 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -142,14 +142,11 @@
 	pipe->src_w = fbi->var.xres;
 	pipe->src_y = 0;
 	pipe->src_x = 0;
-	if (mfd->map_buffer) {
-		pipe->srcp0_addr = (unsigned int)mfd->map_buffer->iova[0] + \
-			buf_offset;
-		pr_debug("start 0x%lx srcp0_addr 0x%x\n", mfd->
-			map_buffer->iova[0], pipe->srcp0_addr);
-	} else {
+
+	if (mfd->display_iova)
+		pipe->srcp0_addr = mfd->display_iova + buf_offset;
+	else
 		pipe->srcp0_addr = (uint32)(buf + buf_offset);
-	}
 
 	pipe->srcp0_ystride = fbi->fix.line_length;
 	pipe->bpp = bpp;
@@ -266,6 +263,7 @@
 
 	/* MDP cmd block enable */
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	mdp4_mixer_pipe_cleanup(lcdc_pipe->mixer_num);
 	MDP_OUTP(MDP_BASE + LCDC_BASE, 0);
 	lcdc_enabled = 0;
 	/* MDP cmd block disable */
@@ -278,7 +276,7 @@
 	mutex_unlock(&mfd->dma->ov_mutex);
 
 	/* delay to make sure the last frame finishes */
-	msleep(16);
+	msleep(20);
 
 	/* dis-engage rgb0 from mixer0 */
 	if (lcdc_pipe) {
@@ -290,7 +288,6 @@
 	mdp_bus_scale_update_request(0);
 #endif
 
-	mdp4_iommu_detach();
 	return ret;
 }
 
@@ -588,14 +585,12 @@
 	mutex_lock(&mfd->dma->ov_mutex);
 
 	pipe = lcdc_pipe;
-	if (mfd->map_buffer) {
-		pipe->srcp0_addr = (unsigned int)mfd->map_buffer->iova[0] + \
-			buf_offset;
-		pr_debug("start 0x%lx srcp0_addr 0x%x\n", mfd->
-			map_buffer->iova[0], pipe->srcp0_addr);
-	} else {
+
+	if (mfd->display_iova)
+		pipe->srcp0_addr = mfd->display_iova + buf_offset;
+	else
 		pipe->srcp0_addr = (uint32)(buf + buf_offset);
-	}
+
 	mdp4_overlay_rgb_setup(pipe);
 	mdp4_overlay_reg_flush(pipe, 0);
 	mdp4_mixer_stage_up(pipe);
diff --git a/drivers/video/msm/mdp4_overlay_mddi.c b/drivers/video/msm/mdp4_overlay_mddi.c
index 83959df..5aa5965 100644
--- a/drivers/video/msm/mdp4_overlay_mddi.c
+++ b/drivers/video/msm/mdp4_overlay_mddi.c
@@ -450,27 +450,6 @@
 	/* change mdp clk while mdp is idle */
 	mdp4_set_perf_level();
 
-	if (mdp_hw_revision == MDP4_REVISION_V2_1) {
-		if (mdp4_overlay_status_read(MDP4_OVERLAY_TYPE_UNSET)) {
-			uint32  data;
-			data = inpdw(MDP_BASE + 0x0028);
-			data &= ~0x0300;        /* bit 8, 9, MASTER4 */
-			if (mfd->fbi->var.xres == 540) /* qHD, 540x960 */
-				data |= 0x0200;
-			else
-				data |= 0x0100;
-			MDP_OUTP(MDP_BASE + 0x00028, data);
-			mdp4_overlay_status_write(MDP4_OVERLAY_TYPE_UNSET,
-				false);
-		}
-		if (mdp4_overlay_status_read(MDP4_OVERLAY_TYPE_SET)) {
-			uint32  data;
-			data = inpdw(MDP_BASE + 0x0028);
-			data &= ~0x0300;        /* bit 8, 9, MASTER4 */
-			MDP_OUTP(MDP_BASE + 0x00028, data);
-			mdp4_overlay_status_write(MDP4_OVERLAY_TYPE_SET, false);
-		}
-	}
 	mdp_enable_irq(MDP_OVERLAY0_TERM);
 	mfd->dma->busy = TRUE;
 	/* start OVERLAY pipe */
diff --git a/drivers/video/msm/mdp4_overlay_writeback.c b/drivers/video/msm/mdp4_overlay_writeback.c
index 342f565..f1a2ada 100644
--- a/drivers/video/msm/mdp4_overlay_writeback.c
+++ b/drivers/video/msm/mdp4_overlay_writeback.c
@@ -98,11 +98,14 @@
 		pipe = writeback_pipe;
 	}
 	ret = panel_next_on(pdev);
+
 	/* MDP_LAYERMIXER_WB_MUX_SEL to use mixer1 axi for mixer2 writeback */
-	data = inpdw(MDP_BASE + 0x100F4);
-	data &= ~0x02; /* clear the mixer1 mux bit */
-	data |= 0x02;
+	if (hdmi_prim_display)
+		data = 0x01;
+	else
+		data = 0x02;
 	outpdw(MDP_BASE + 0x100F4, data);
+
 	MDP_OUTP(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x5004,
 		((0x0 & 0xFFF) << 16) | /* 12-bit B */
 			(0x0 & 0xFFF));         /* 12-bit G */
@@ -117,7 +120,6 @@
 int mdp4_overlay_writeback_off(struct platform_device *pdev)
 {
 	int ret;
-	uint32 data;
 	struct msm_fb_data_type *mfd =
 			(struct msm_fb_data_type *)platform_get_drvdata(pdev);
 	if (mfd && writeback_pipe) {
@@ -129,11 +131,8 @@
 	}
 	ret = panel_next_off(pdev);
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-	/* MDP_LAYERMIXER_WB_MUX_SEL to restore
-	 * mixer1 axi for mixer1 writeback */
-	data = inpdw(MDP_BASE + 0x100F4);
-	data &= ~0x02; /* clear the mixer1 mux bit */
-	outpdw(MDP_BASE + 0x100F4, data);
+	/* MDP_LAYERMIXER_WB_MUX_SEL to restore to default cfg*/
+	outpdw(MDP_BASE + 0x100F4, 0x0);
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 	return ret;
 }
@@ -175,14 +174,10 @@
 	pipe->dst_y = 0;
 	pipe->dst_x = 0;
 
-	if (mfd->map_buffer) {
-		pipe->srcp0_addr = (unsigned int)mfd->map_buffer->iova[0] + \
-			buf_offset;
-		pr_debug("start 0x%lx srcp0_addr 0x%x\n", mfd->
-			map_buffer->iova[0], pipe->srcp0_addr);
-	} else {
+	if (mfd->display_iova)
+		pipe->srcp0_addr = mfd->display_iova + buf_offset;
+	else
 		pipe->srcp0_addr = (uint32)(buf + buf_offset);
-	}
 
 	mdp4_mixer_stage_up(pipe);
 
diff --git a/drivers/video/msm/mdp_dma.c b/drivers/video/msm/mdp_dma.c
index 32856ef..2ba2c85 100644
--- a/drivers/video/msm/mdp_dma.c
+++ b/drivers/video/msm/mdp_dma.c
@@ -529,8 +529,9 @@
 	down(&mfd->sem);
 
 	iBuf = &mfd->ibuf;
-	if (mfd->map_buffer)
-		iBuf->buf = (uint8 *)mfd->map_buffer->iova[0];
+
+	if (mfd->display_iova)
+		iBuf->buf = (uint8 *)mfd->display_iova;
 	else
 		iBuf->buf = (uint8 *) info->fix.smem_start;
 
diff --git a/drivers/video/msm/mipi_dsi.h b/drivers/video/msm/mipi_dsi.h
index ff2910f..d54c5b5 100644
--- a/drivers/video/msm/mipi_dsi.h
+++ b/drivers/video/msm/mipi_dsi.h
@@ -259,6 +259,8 @@
 char *mipi_dsi_buf_reserve_hdr(struct dsi_buf *dp, int hlen);
 char *mipi_dsi_buf_init(struct dsi_buf *dp);
 void mipi_dsi_init(void);
+void mipi_dsi_lane_cfg(void);
+void mipi_dsi_bist_ctrl(void);
 int mipi_dsi_buf_alloc(struct dsi_buf *, int size);
 int mipi_dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm);
 int mipi_dsi_cmds_tx(struct msm_fb_data_type *mfd,
diff --git a/drivers/video/msm/mipi_toshiba_video_wsvga_pt.c b/drivers/video/msm/mipi_toshiba_video_wsvga_pt.c
index 48bdb1d..2a8610b 100644
--- a/drivers/video/msm/mipi_toshiba_video_wsvga_pt.c
+++ b/drivers/video/msm/mipi_toshiba_video_wsvga_pt.c
@@ -20,7 +20,7 @@
 static struct mipi_dsi_phy_ctrl dsi_video_mode_phy_db = {
 	/* 600*1024, RGB888, 3 Lane 55 fps video mode */
     /* regulator */
-	{0x03, 0x0a, 0x04, 0x00, 0x20},
+	{0x09, 0x08, 0x05, 0x00, 0x20},
 	/* timing */
 	{0xab, 0x8a, 0x18, 0x00, 0x92, 0x97, 0x1b, 0x8c,
 	0x0c, 0x03, 0x04, 0xa0},
diff --git a/drivers/video/msm/msm_dss_io_7x27a.c b/drivers/video/msm/msm_dss_io_7x27a.c
index 28204a5..f4f8375 100644
--- a/drivers/video/msm/msm_dss_io_7x27a.c
+++ b/drivers/video/msm/msm_dss_io_7x27a.c
@@ -256,8 +256,11 @@
 	int i, off;
 
 	MIPI_OUTP(MIPI_DSI_BASE + 0x128, 0x0001);/* start phy sw reset */
-	msleep(100);
+	wmb();
+	usleep(10);
 	MIPI_OUTP(MIPI_DSI_BASE + 0x128, 0x0000);/* end phy w reset */
+	wmb();
+	usleep(10);
 	MIPI_OUTP(MIPI_DSI_BASE + 0x2cc, 0x0003);/* regulator_ctrl_0 */
 	MIPI_OUTP(MIPI_DSI_BASE + 0x2d0, 0x0001);/* regulator_ctrl_1 */
 	MIPI_OUTP(MIPI_DSI_BASE + 0x2d4, 0x0001);/* regulator_ctrl_2 */
diff --git a/drivers/video/msm/msm_dss_io_8960.c b/drivers/video/msm/msm_dss_io_8960.c
index 30edbd1..b17c195 100644
--- a/drivers/video/msm/msm_dss_io_8960.c
+++ b/drivers/video/msm/msm_dss_io_8960.c
@@ -231,6 +231,45 @@
 		__func__, (int) ahb, MIPI_INP_SECURE(ahb));
 }
 
+void mipi_dsi_lane_cfg(void)
+{
+	int i, ln_offset;
+
+	ln_offset = 0x300;
+	for (i = 0; i < 4; i++) {
+		/* DSI1_DSIPHY_LN_CFG0 */
+		MIPI_OUTP(MIPI_DSI_BASE + ln_offset, 0x80);
+		/* DSI1_DSIPHY_LN_CFG1 */
+		MIPI_OUTP(MIPI_DSI_BASE + ln_offset + 0x04, 0x45);
+		/* DSI1_DSIPHY_LN_CFG2 */
+		MIPI_OUTP(MIPI_DSI_BASE + ln_offset + 0x08, 0x0);
+		/* DSI1_DSIPHY_LN_TEST_DATAPATH */
+		MIPI_OUTP(MIPI_DSI_BASE + ln_offset + 0x0c, 0x0);
+		/* DSI1_DSIPHY_LN_TEST_STR0 */
+		MIPI_OUTP(MIPI_DSI_BASE + ln_offset + 0x14, 0x1);
+		/* DSI1_DSIPHY_LN_TEST_STR1 */
+		MIPI_OUTP(MIPI_DSI_BASE + ln_offset + 0x18, 0x66);
+		ln_offset += 0x40;
+	}
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0400, 0x40); /* DSI1_DSIPHY_LNCK_CFG0 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0404, 0x67); /* DSI1_DSIPHY_LNCK_CFG1 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0408, 0x0); /* DSI1_DSIPHY_LNCK_CFG2 */
+	/* DSI1_DSIPHY_LNCK_TEST_DATAPATH */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x040c, 0x0);
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0414, 0x1); /* DSI1_DSIPHY_LNCK_TEST_STR0 */
+	/* DSI1_DSIPHY_LNCK_TEST_STR1 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0418, 0x88);
+}
+
+void mipi_dsi_bist_ctrl(void)
+{
+	MIPI_OUTP(MIPI_DSI_BASE + 0x049c, 0x0f); /* DSI1_DSIPHY_BIST_CTRL4 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0490, 0x03); /* DSI1_DSIPHY_BIST_CTRL1 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x048c, 0x03); /* DSI1_DSIPHY_BIST_CTRL0 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x049c, 0x0); /* DSI1_DSIPHY_BIST_CTRL4 */
+}
+
 static void mipi_dsi_calibration(void)
 {
 	int i = 0;
@@ -238,7 +277,7 @@
 	int cal_busy = MIPI_INP(MIPI_DSI_BASE + 0x550);
 
 	/* DSI1_DSIPHY_REGULATOR_CAL_PWR_CFG */
-	MIPI_OUTP(MIPI_DSI_BASE + 0x0518, 0x01);
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0518, 0x03);
 
 	/* DSI1_DSIPHY_CAL_SW_CFG2 */
 	MIPI_OUTP(MIPI_DSI_BASE + 0x0534, 0x0);
@@ -487,14 +526,19 @@
 	int i, off;
 
 	MIPI_OUTP(MIPI_DSI_BASE + 0x128, 0x0001);/* start phy sw reset */
-	msleep(100);
+	wmb();
+	usleep(1);
 	MIPI_OUTP(MIPI_DSI_BASE + 0x128, 0x0000);/* end phy w reset */
+	wmb();
+	usleep(1);
 	MIPI_OUTP(MIPI_DSI_BASE + 0x500, 0x0003);/* regulator_ctrl_0 */
 	MIPI_OUTP(MIPI_DSI_BASE + 0x504, 0x0001);/* regulator_ctrl_1 */
 	MIPI_OUTP(MIPI_DSI_BASE + 0x508, 0x0001);/* regulator_ctrl_2 */
 	MIPI_OUTP(MIPI_DSI_BASE + 0x50c, 0x0000);/* regulator_ctrl_3 */
 	MIPI_OUTP(MIPI_DSI_BASE + 0x510, 0x0100);/* regulator_ctrl_4 */
 
+	MIPI_OUTP(MIPI_DSI_BASE + 0x4b0, 0x04);/* DSIPHY_LDO_CNTRL */
+
 	pd = (panel_info->mipi).dsi_phy_db;
 
 	off = 0x0480;	/* strength 0 - 2 */
@@ -518,6 +562,8 @@
 		off += 4;
 	}
 	mipi_dsi_calibration();
+	mipi_dsi_lane_cfg(); /* lane cfgs */
+	mipi_dsi_bist_ctrl(); /* bist ctrl */
 
 	off = 0x0204;	/* pll ctrl 1 - 19, skip 0 */
 	for (i = 1; i < 20; i++) {
diff --git a/drivers/video/msm/msm_dss_io_8x60.c b/drivers/video/msm/msm_dss_io_8x60.c
index bbee726..a1897e3 100644
--- a/drivers/video/msm/msm_dss_io_8x60.c
+++ b/drivers/video/msm/msm_dss_io_8x60.c
@@ -346,8 +346,11 @@
 	int i, off;
 
 	MIPI_OUTP(MIPI_DSI_BASE + 0x128, 0x0001);/* start phy sw reset */
-	msleep(100);
+	wmb();
+	usleep(1);
 	MIPI_OUTP(MIPI_DSI_BASE + 0x128, 0x0000);/* end phy w reset */
+	wmb();
+	usleep(1);
 	MIPI_OUTP(MIPI_DSI_BASE + 0x2cc, 0x0003);/* regulator_ctrl_0 */
 	MIPI_OUTP(MIPI_DSI_BASE + 0x2d0, 0x0001);/* regulator_ctrl_1 */
 	MIPI_OUTP(MIPI_DSI_BASE + 0x2d4, 0x0001);/* regulator_ctrl_2 */
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index ff08548..450fa85 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -29,6 +29,7 @@
 #include <linux/dma-mapping.h>
 #include <mach/board.h>
 #include <linux/uaccess.h>
+#include <mach/iommu_domains.h>
 
 #include <linux/workqueue.h>
 #include <linux/string.h>
@@ -52,6 +53,9 @@
 #define MSM_FB_NUM	3
 #endif
 
+/*  Idle wakelock to prevent PC between wake up and Vsync */
+struct wake_lock mdp_idle_wakelock;
+
 static unsigned char *fbram;
 static unsigned char *fbram_phys;
 static int fbram_size;
@@ -285,6 +289,9 @@
 	case MIPI_CMD_PANEL:
 		ret = snprintf(buf, PAGE_SIZE, "mipi dsi cmd panel\n");
 		break;
+	case WRITEBACK_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "writeback panel\n");
+		break;
 	default:
 		ret = snprintf(buf, PAGE_SIZE, "unknown panel\n");
 		break;
@@ -439,6 +446,10 @@
 	complete(&mfd->msmfb_no_update_notify);
 	complete(&mfd->msmfb_update_notify);
 
+	/* Do this only for the primary panel */
+	if (mfd->fbi->node == 0)
+		wake_lock_destroy(&mdp_idle_wakelock);
+
 	/* remove /dev/fb* */
 	unregister_framebuffer(mfd->fbi);
 
@@ -1026,9 +1037,6 @@
 	int *id;
 	int fbram_offset;
 	int remainder, remainder_mode2;
-	static int subsys_id[2] = {MSM_SUBSYSTEM_DISPLAY,
-		MSM_SUBSYSTEM_ROTATOR};
-	unsigned int flags = MSM_SUBSYSTEM_MAP_IOVA;
 
 	/*
 	 * fb info initialization
@@ -1310,15 +1318,22 @@
 	fbi->screen_base = fbram;
 	fbi->fix.smem_start = (unsigned long)fbram_phys;
 
-	mfd->map_buffer = msm_subsystem_map_buffer(
-		fbi->fix.smem_start, fbi->fix.smem_len,
-		flags, subsys_id, 2);
-	if (mfd->map_buffer) {
-		pr_debug("%s(): buf 0x%lx, mfd->map_buffer->iova[0] 0x%lx\n"
-			"mfd->map_buffer->iova[1] 0x%lx", __func__,
-			fbi->fix.smem_start, mfd->map_buffer->iova[0],
-			mfd->map_buffer->iova[1]);
-	}
+	msm_iommu_map_contig_buffer(fbi->fix.smem_start,
+					DISPLAY_DOMAIN,
+					GEN_POOL,
+					fbi->fix.smem_len,
+					SZ_4K,
+					1,
+					&(mfd->display_iova));
+
+	msm_iommu_map_contig_buffer(fbi->fix.smem_start,
+					ROTATOR_DOMAIN,
+					GEN_POOL,
+					fbi->fix.smem_len,
+					SZ_4K,
+					1,
+					&(mfd->rotator_iova));
+
 	if (!bf_supported || mfd->index == 0)
 		memset(fbi->screen_base, 0x0, fix->smem_len);
 
@@ -1356,6 +1371,9 @@
 		return -EPERM;
 	}
 
+	if (fbi->node == 0)
+		wake_lock_init(&mdp_idle_wakelock, WAKE_LOCK_IDLE, "mdp");
+
 	fbram += fix->smem_len;
 	fbram_phys += fix->smem_len;
 	fbram_size -= fix->smem_len;
@@ -3660,10 +3678,19 @@
 	}
 
 	mfd = (struct msm_fb_data_type *)info->par;
-	if (mfd->map_buffer)
-		*start = mfd->map_buffer->iova[subsys_id];
-	else
-		*start = info->fix.smem_start;
+
+	if (subsys_id == DISPLAY_SUBSYSTEM_ID) {
+		if (mfd->display_iova)
+			*start = mfd->display_iova;
+		else
+			*start = info->fix.smem_start;
+	} else {
+		if (mfd->rotator_iova)
+			*start = mfd->rotator_iova;
+		else
+			*start = info->fix.smem_start;
+	}
+
 	*len = info->fix.smem_len;
 
 	return 0;
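
The probe path above ignores the return value of msm_iommu_map_contig_buffer(), and the DMA paths later fall back to fix.smem_start whenever display_iova is zero. A minimal sketch of how that could be made explicit is shown below; it would live in msm_fb.c, it assumes the call returns 0 on success (as it does elsewhere in this tree), and the helper name is hypothetical rather than part of the patch.

/* Hypothetical helper: map the framebuffer into the display IOMMU domain
 * and keep the physical-address fallback intact on failure. */
static void msm_fb_map_display_iova(struct msm_fb_data_type *mfd,
				    struct fb_info *fbi)
{
	int rc;

	rc = msm_iommu_map_contig_buffer(fbi->fix.smem_start,
					 DISPLAY_DOMAIN, GEN_POOL,
					 fbi->fix.smem_len, SZ_4K, 1,
					 &mfd->display_iova);
	if (rc) {
		pr_warn("%s: display iommu mapping failed (%d), using phys\n",
			__func__, rc);
		mfd->display_iova = 0;	/* later code then uses smem_start */
	}
}
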
diff --git a/drivers/video/msm/msm_fb.h b/drivers/video/msm/msm_fb.h
index 87753b2..8d45f07 100644
--- a/drivers/video/msm/msm_fb.h
+++ b/drivers/video/msm/msm_fb.h
@@ -23,7 +23,6 @@
 #include "linux/proc_fs.h"
 
 #include <mach/hardware.h>
-#include <mach/msm_subsystem_map.h>
 #include <linux/io.h>
 #include <mach/board.h>
 
@@ -34,6 +33,7 @@
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 #include <linux/hrtimer.h>
+#include <linux/wakelock.h>
 
 #include <linux/fb.h>
 #include <linux/list.h>
@@ -44,6 +44,9 @@
 #include <linux/earlysuspend.h>
 #endif
 
+/*  Idle wakelock to prevent PC between wake up and Vsync */
+extern struct wake_lock mdp_idle_wakelock;
+
 #include "msm_fb_panel.h"
 #include "mdp.h"
 
@@ -175,7 +178,8 @@
 	struct list_head writeback_register_queue;
 	wait_queue_head_t wait_q;
 	struct ion_client *iclient;
-	struct msm_mapped_buffer *map_buffer;
+	unsigned long display_iova;
+	unsigned long rotator_iova;
 	struct mdp_buf_type *ov0_wb_buf;
 	struct mdp_buf_type *ov1_wb_buf;
 	u32 ov_start;
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
index 8da0995..81b1436 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
@@ -471,6 +471,10 @@
 	struct ddl_encoder_data *encoder =
 		&ddl->codec_data.encoder;
 	u32 vcd_status = VCD_S_SUCCESS;
+	struct vcd_transc *transc;
+	transc = (struct vcd_transc *)(ddl->client_data);
+	DDL_MSG_LOW("%s: transc = 0x%x, in_use = %u",
+		__func__, (u32)ddl->client_data, transc->in_use);
 	if (encoder->slice_delivery_info.enable) {
 		return ddl_encode_frame_batch(ddl_handle,
 					input_frame,
@@ -480,7 +484,6 @@
 					client_data);
 	}
 
-	DDL_MSG_HIGH("ddl_encode_frame");
 	ddl_set_core_start_time(__func__, ENC_OP_TIME);
 	ddl_context = ddl_get_context();
 	if (!DDL_IS_INITIALIZED(ddl_context)) {
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
index b480b42..64d2976 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
@@ -1064,20 +1064,10 @@
 
 void ddl_set_vidc_timeout(struct ddl_client_context *ddl)
 {
-	unsigned long core_clk_rate;
 	u32 vidc_time_out = 0;
-	if (ddl->codec_data.decoder.idr_only_decoding) {
+	if (ddl->codec_data.decoder.idr_only_decoding)
 		vidc_time_out = 2 * DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE;
-	} else {
-		res_trk_get_clk_rate(&core_clk_rate);
-		if (core_clk_rate == DDL_VIDC_1080P_48MHZ)
-			vidc_time_out = DDL_VIDC_1080P_48MHZ_TIMEOUT_VALUE;
-		else if (core_clk_rate == DDL_VIDC_1080P_133MHZ)
-			vidc_time_out = DDL_VIDC_1080P_133MHZ_TIMEOUT_VALUE;
-		else
-			vidc_time_out = DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE;
-	}
-	DDL_MSG_HIGH("%s Video core time out value = 0x%x"
+	DDL_MSG_HIGH("%s Video core time out value = 0x%x",
 		 __func__, vidc_time_out);
 	vidc_sm_set_video_core_timeout_value(
 		&ddl->shared_mem[ddl->command_channel], vidc_time_out);
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
index 5b9aea8..e2c0a2a 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
@@ -34,6 +34,8 @@
 static void ddl_handle_slice_done_slice_batch(struct ddl_client_context *ddl);
 static void ddl_handle_enc_frame_done_slice_mode(
 		struct ddl_client_context *ddl, u32 eos_present);
+static void ddl_handle_enc_skipframe_slice_mode(
+		struct ddl_client_context *ddl, u32 eos_present);
 
 static void ddl_fw_status_done_callback(struct ddl_context *ddl_context)
 {
@@ -511,11 +513,15 @@
 		if (encoder->enc_frame_info.enc_frame_size ||
 			(encoder->enc_frame_info.enc_frame ==
 			VIDC_1080P_ENCODE_FRAMETYPE_SKIPPED) ||
-			DDLCLIENT_STATE_IS(ddl,
-			DDL_CLIENT_WAIT_FOR_EOS_DONE)) {
+			DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_EOS_DONE)) {
 			if (encoder->slice_delivery_info.enable) {
-				ddl_handle_enc_frame_done_slice_mode(ddl,
-								eos_present);
+				if (encoder->enc_frame_info.enc_frame ==
+					VIDC_1080P_ENCODE_FRAMETYPE_SKIPPED)
+					ddl_handle_enc_skipframe_slice_mode(
+							ddl, eos_present);
+				else
+					ddl_handle_enc_frame_done_slice_mode(
+							ddl, eos_present);
 			} else {
 				ddl_handle_enc_frame_done(ddl, eos_present);
 			}
@@ -1892,3 +1898,78 @@
 				(u32 *) ddl, ddl->client_data);
 	}
 }
+
+static void ddl_handle_enc_skipframe_slice_mode(
+		struct ddl_client_context *ddl, u32 eos_present)
+{
+	struct ddl_context       *ddl_context = ddl->ddl_context;
+	struct ddl_encoder_data  *encoder = &(ddl->codec_data.encoder);
+	struct vcd_frame_data    *output_frame = NULL;
+	u32 bottom_frame_tag;
+	u8 *input_buffer_address = NULL;
+	u32 index = 0;
+	DDL_MSG_HIGH("ddl_handle_enc_skipframe_slice_mode: frame skipped");
+	vidc_sm_set_encoder_slice_batch_int_ctrl(
+			&ddl->shared_mem[ddl->command_channel],
+			1);
+	for (index = 0;
+		index < encoder->batch_frame.num_output_frames;
+		index++) {
+		output_frame =
+			&(encoder->batch_frame.output_frame[index].vcd_frm);
+		DDL_MSG_MED("output buffer: vcd_frm = 0x%x "
+				"frmbfr(virtual) = 0x%x frmbfr(physical) = 0x%x",
+				(u32)output_frame, (u32)output_frame->virtual,
+				(u32)output_frame->physical);
+		vidc_sm_get_frame_tags(
+				&ddl->shared_mem[ddl->command_channel],
+				&output_frame->ip_frm_tag,
+				&bottom_frame_tag);
+		ddl_get_encoded_frame(
+				output_frame,
+				encoder->codec.codec,
+				encoder->enc_frame_info.enc_frame);
+		output_frame->data_len = 0;
+		ddl->output_frame.frm_trans_end = false;
+		if (encoder->batch_frame.num_output_frames ==
+			(index + 1)) {
+			DDL_MSG_MED("last output bfr for skip frame, set EOF");
+			output_frame->flags |= VCD_FRAME_FLAG_ENDOFFRAME;
+			ddl_vidc_encode_dynamic_property(ddl, false);
+			if (eos_present)
+				ddl->output_frame.frm_trans_end = false;
+			else
+				ddl->output_frame.frm_trans_end = true;
+		}
+		ddl->output_frame.vcd_frm = *output_frame;
+		ddl_context->ddl_callback(
+				VCD_EVT_RESP_OUTPUT_DONE,
+				VCD_S_SUCCESS,
+				&(ddl->output_frame),
+				sizeof(struct ddl_frame_data_tag),
+				(u32 *)ddl,
+				ddl->client_data);
+
+		if (encoder->batch_frame.num_output_frames ==
+			(index + 1)) {
+			ddl->input_frame.frm_trans_end = false;
+			input_buffer_address =
+				ddl_context->dram_base_a.physical_base_addr +
+				(encoder->enc_frame_info.enc_luma_address);
+			ddl_get_input_frame_from_pool(ddl,
+					input_buffer_address);
+			DDL_MSG_MED("InpBfr: vcd_frm 0x%x frmbfr(virtual) 0x%x"
+					" frmbfr(physical) 0x%x",
+					(u32)&(ddl->input_frame.vcd_frm),
+					(u32)ddl->input_frame.vcd_frm.virtual,
+					(u32)ddl->input_frame.vcd_frm.physical);
+			ddl_context->ddl_callback(
+					VCD_EVT_RESP_INPUT_DONE,
+					VCD_S_SUCCESS,
+					&(ddl->input_frame),
+					sizeof(struct ddl_frame_data_tag),
+					(u32 *)ddl,
+					ddl->client_data);
+		}
+	}
+}
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
index 363fe53..3827bc1 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
@@ -983,8 +983,7 @@
 			DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) {
 			encoder->slice_delivery_info.enable
 				= *(u32 *)property_value;
-			DDL_MSG_HIGH("set encoder->slice_delivery_mode"
-				"  = %u\n",
+			DDL_MSG_HIGH("set encoder->slice_delivery_mode = %u\n",
 				encoder->slice_delivery_info.enable);
 			output_buf_size =
 				encoder->client_output_buf_req.sz;
@@ -998,8 +997,26 @@
 			encoder->slice_delivery_info.num_slices =
 				num_slices;
 			if (num_slices <= DDL_MAX_NUM_BFRS_FOR_SLICE_BATCH) {
-				encoder->client_output_buf_req.min_count
-				= ((DDL_ENC_SLICE_BATCH_FACTOR * num_slices + 2)
+				DDL_MSG_HIGH("%s: currently slice info "
+					"metadata is not supported when slice "
+					"delivery mode is enabled. hence "
+					"disabling slice info metadata.\n",
+					__func__);
+				slice_property_hdr.prop_id =
+					VCD_I_METADATA_ENABLE;
+				slice_property_hdr.sz =
+					sizeof(struct \
+					vcd_property_meta_data_enable);
+				ddl_get_metadata_params(ddl,
+						&slice_property_hdr,
+						&slice_meta_data);
+				slice_meta_data.meta_data_enable_flag
+					&= ~VCD_METADATA_ENC_SLICE;
+				ddl_set_metadata_params(ddl,
+						&slice_property_hdr,
+						&slice_meta_data);
+				encoder->client_output_buf_req.min_count =
+				((DDL_ENC_SLICE_BATCH_FACTOR * num_slices + 2)
 				> DDL_MAX_BUFFER_COUNT)
 				? DDL_MAX_BUFFER_COUNT :
 				(DDL_ENC_SLICE_BATCH_FACTOR * num_slices + 2);
@@ -1016,18 +1033,6 @@
 				encoder->client_output_buf_req.min_count,
 				output_buf_size,
 				encoder->client_output_buf_req.sz);
-				slice_property_hdr.prop_id =
-					VCD_I_METADATA_ENABLE;
-				slice_property_hdr.sz =
-				sizeof(struct vcd_property_meta_data_enable);
-				ddl_get_metadata_params(ddl,
-						&slice_property_hdr,
-						&slice_meta_data);
-				slice_meta_data.meta_data_enable_flag
-					&= ~VCD_METADATA_ENC_SLICE;
-				ddl_set_metadata_params(ddl,
-						&slice_property_hdr,
-						&slice_meta_data);
 				vcd_status = VCD_S_SUCCESS;
 			}
 		}
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
index 49d885c..d517028 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
@@ -129,10 +129,16 @@
 		{
 			transc = (struct vcd_transc *)client_data;
 
-			if (!transc || !transc->in_use
-				|| !transc->cctxt) {
+			if (!transc || !transc->in_use || !transc->cctxt) {
 				VCD_MSG_ERROR("Invalid clientdata "
-							  "received from DDL ");
+					"received from DDL, transc = 0x%x\n",
+					(u32)transc);
+				if (transc) {
+					VCD_MSG_ERROR("transc->in_use = %u, "
+						"transc->cctxt = 0x%x\n",
+						transc->in_use,
+						(u32)transc->cctxt);
+				}
 			} else {
 				cctxt = transc->cctxt;
 
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index 1218794..6ca4dbe 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -1900,9 +1900,10 @@
 	} else {
 		memset(&dev_ctxt->trans_tbl[i], 0,
 			   sizeof(struct vcd_transc));
-
 		dev_ctxt->trans_tbl[i].in_use = true;
-
+		VCD_MSG_LOW("%s: Get transc = 0x%x, in_use = %u\n",
+			__func__, (u32)(&dev_ctxt->trans_tbl[i]),
+			dev_ctxt->trans_tbl[i].in_use);
 		return &dev_ctxt->trans_tbl[i];
 	}
 }
@@ -1911,7 +1912,8 @@
 {
 	if (trans_entry) {
 		trans_entry->in_use = false;
-		VCD_MSG_LOW("%s in_use set to false\n", __func__);
+		VCD_MSG_LOW("%s: Free transc = 0x%x, in_use = %u\n",
+			__func__, (u32)trans_entry, trans_entry->in_use);
 	}
 }
 
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 957c5b4..2e0e32e 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -56,6 +56,10 @@
 #define CPUFREQ_POLICY_POWERSAVE	(1)
 #define CPUFREQ_POLICY_PERFORMANCE	(2)
 
+/* Minimum CPU utilization cutoff for notifying userspace about cpu
+ * utilization changes */
+#define MIN_CPU_UTIL_NOTIFY   40
+
 /* Frequency values here are CPU kHz so that hardware which doesn't run
  * with some frequencies can complain without having to guess what per
  * cent / per mille means.
@@ -96,6 +100,7 @@
 	unsigned int		max;    /* in kHz */
 	unsigned int		cur;    /* in kHz, only needed if cpufreq
 					 * governors are used */
+	unsigned int            util;  /* CPU utilization at max frequency */
 	unsigned int		policy; /* see above */
 	struct cpufreq_governor	*governor; /* see below */
 
@@ -255,7 +260,8 @@
 
 
 void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state);
-
+void cpufreq_notify_utilization(struct cpufreq_policy *policy,
+		unsigned int load);
 
 static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max)
 {
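
The new util field and cpufreq_notify_utilization() give governors a channel for reporting CPU load, and MIN_CPU_UTIL_NOTIFY suggests that readings below 40 need not be propagated. A hypothetical governor-side call is sketched below; whether the cutoff is applied by the caller or inside cpufreq_notify_utilization() is not visible from this header, so treat the placement of the check as illustrative.

#include <linux/cpufreq.h>

/* Hypothetical governor snippet: publish the load measured at the maximum
 * frequency, skipping values under the notification cutoff. */
static void example_report_load(struct cpufreq_policy *policy,
				unsigned int load_at_max_freq)
{
	if (load_at_max_freq < MIN_CPU_UTIL_NOTIFY)
		return;
	cpufreq_notify_utilization(policy, load_at_max_freq);
}
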
diff --git a/include/linux/dvb/dmx.h b/include/linux/dvb/dmx.h
index 5abadb6..e0058d3 100644
--- a/include/linux/dvb/dmx.h
+++ b/include/linux/dvb/dmx.h
@@ -172,7 +172,60 @@
 
 typedef struct dmx_caps {
 	__u32 caps;
+
+/* Indicates whether demux supports playback from memory in pull mode */
+#define DMX_CAP_PULL_MODE				0x01
+
+/* Indicates whether demux supports indexing of a recorded video stream */
+#define DMX_CAP_VIDEO_INDEXING			0x02
+
+/* Indicates whether demux supports sending data directly to video decoder */
+#define DMX_CAP_VIDEO_DECODER_DATA		0x04
+
+/* Indicates whether demux supports sending data directly to audio decoder */
+#define DMX_CAP_AUDIO_DECODER_DATA		0x08
+
+/* Indicates whether demux supports sending data directly to subtitle decoder */
+#define DMX_CAP_SUBTITLE_DECODER_DATA	0x10
+
+	/* Number of decoders demux can output data to */
 	int num_decoders;
+
+	/* Number of demux devices */
+	int num_demux_devices;
+
+	/* Max number of PID filters */
+	int num_pid_filters;
+
+	/* Max number of section filters */
+	int num_section_filters;
+
+	/*
+	 * Max number of section filters using same PID,
+	 * 0 if not supported
+	 */
+	int num_section_filters_per_pid;
+
+	/*
+	 * Length of section filter, not including section
+	 * length field (2 bytes).
+	 */
+	int section_filter_length;
+
+	/* Max number of demod based input */
+	int num_demod_inputs;
+
+	/* Max number of memory based input */
+	int num_memory_inputs;
+
+	/* Overall bitrate from all inputs concurrently. Mbit/sec */
+	int max_bitrate;
+
+	/* Max bitrate from single demod input. Mbit/sec */
+	int demod_input_max_bitrate;
+
+	/* Max bitrate from single memory input. Mbit/sec */
+	int memory_input_max_bitrate;
 } dmx_caps_t;
 
 typedef enum {
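
The fields added to dmx_caps are reported through the existing DMX_GET_CAPS ioctl. A small userspace query is sketched below; the device node path is only an example, and the new fields are of course visible only with these updated headers.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dvb/dmx.h>

int main(void)
{
	struct dmx_caps caps;
	int fd = open("/dev/dvb/adapter0/demux0", O_RDONLY);

	if (fd < 0) {
		perror("open demux");
		return 1;
	}
	memset(&caps, 0, sizeof(caps));
	if (ioctl(fd, DMX_GET_CAPS, &caps) < 0) {
		perror("DMX_GET_CAPS");
		close(fd);
		return 1;
	}
	printf("pull mode playback : %s\n",
	       (caps.caps & DMX_CAP_PULL_MODE) ? "yes" : "no");
	printf("decoders/demuxes   : %d/%d\n",
	       caps.num_decoders, caps.num_demux_devices);
	printf("PID filters        : %d (max bitrate %d Mbit/s)\n",
	       caps.num_pid_filters, caps.max_bitrate);
	close(fd);
	return 0;
}
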
diff --git a/include/linux/ion.h b/include/linux/ion.h
index b5495a0..d9443ff 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -148,6 +148,7 @@
  * @base:	base address of heap in physical memory if applicable
  * @size:	size of the heap in bytes if applicable
  * @memory_type:Memory type used for the heap
+ * @has_outer_cache:    set to 1 if outer cache is used, 0 otherwise.
  * @extra_data:	Extra data specific to each heap type
  */
 struct ion_platform_heap {
@@ -157,6 +158,7 @@
 	ion_phys_addr_t base;
 	size_t size;
 	enum ion_memory_types memory_type;
+	unsigned int has_outer_cache;
 	void *extra_data;
 };
 
@@ -228,6 +230,7 @@
 
 /**
  * struct ion_platform_data - array of platform heaps passed from board file
+ * @has_outer_cache:    set to 1 if outer cache is used, 0 otherwise.
  * @nr:    number of structures in the array
  * @request_region: function to be called when the number of allocations goes
  *						from 0 -> 1
@@ -239,6 +242,7 @@
  * Provided by the board file in the form of platform data to a platform device.
  */
 struct ion_platform_data {
+	unsigned int has_outer_cache;
 	int nr;
 	int (*request_region)(void *);
 	int (*release_region)(void *);
diff --git a/include/linux/mfd/wcd9xxx/wcd9304_registers.h b/include/linux/mfd/wcd9xxx/wcd9304_registers.h
index 70902bc..53ae67b 100644
--- a/include/linux/mfd/wcd9xxx/wcd9304_registers.h
+++ b/include/linux/mfd/wcd9xxx/wcd9304_registers.h
@@ -432,8 +432,14 @@
 
 #define SITAR_A_CDC_TX1_MUX_CTL			(0x223)
 #define SITAR_A_CDC_TX1_MUX_CTL__POR			(0x00000008)
-#define SITAR_A_CDC_TX1_CLK_FS_CTL			(0x224)
-#define SITAR_A_CDC_TX1_CLK_FS_CTL__POR			(0x00000003)
+#define SITAR_A_CDC_TX1_CLK_FS_CTL                      (0x00000224)
+#define SITAR_A_CDC_TX1_CLK_FS_CTL__POR                 (0x00000003)
+#define SITAR_A_CDC_TX2_CLK_FS_CTL                      (0x0000022C)
+#define SITAR_A_CDC_TX2_CLK_FS_CTL__POR                 (0x00000003)
+#define SITAR_A_CDC_TX3_CLK_FS_CTL                      (0x00000234)
+#define SITAR_A_CDC_TX3_CLK_FS_CTL__POR                 (0x00000003)
+#define SITAR_A_CDC_TX4_CLK_FS_CTL                      (0x0000023C)
+#define SITAR_A_CDC_TX4_CLK_FS_CTL__POR                 (0x00000003)
 #define SITAR_A_CDC_TX1_DMIC_CTL			(0x225)
 #define SITAR_A_CDC_TX1_DMIC_CTL__POR			(0x00000000)
 #define SITAR_A_CDC_TX2_MUX_CTL                 (0x22B)
@@ -479,18 +485,18 @@
 #define SITAR_A_CDC_RX3_B4_CTL__POR			 (0x00000000)
 
 #define SITAR_A_CDC_RX1_B5_CTL                  (0x000002B4)
-#define SITAR_A_CDC_RX1_B5_CTL__POR			 (0x00000060)
+#define SITAR_A_CDC_RX1_B5_CTL__POR			 (0x00000078)
 #define SITAR_A_CDC_RX2_B5_CTL                  (0x000002BC)
-#define SITAR_A_CDC_RX2_B5_CTL__POR			 (0x00000060)
+#define SITAR_A_CDC_RX2_B5_CTL__POR			 (0x00000078)
 #define SITAR_A_CDC_RX3_B5_CTL                  (0x000002C4)
-#define SITAR_A_CDC_RX3_B5_CTL__POR			 (0x00000060)
+#define SITAR_A_CDC_RX3_B5_CTL__POR			 (0x00000078)
 
 #define SITAR_A_CDC_RX1_B6_CTL                  (0x000002B5)
-#define SITAR_A_CDC_RX1_B6_CTL__POR			 (0x00000000)
+#define SITAR_A_CDC_RX1_B6_CTL__POR			 (0x00000080)
 #define SITAR_A_CDC_RX2_B6_CTL                  (0x000002BD)
-#define SITAR_A_CDC_RX2_B6_CTL__POR			 (0x00000000)
+#define SITAR_A_CDC_RX2_B6_CTL__POR			 (0x00000080)
 #define SITAR_A_CDC_RX3_B6_CTL                  (0x000002C5)
-#define SITAR_A_CDC_RX3_B6_CTL__POR			 (0x00000000)
+#define SITAR_A_CDC_RX3_B6_CTL__POR			 (0x00000080)
 
 
 #define SITAR_A_CDC_RX1_VOL_CTL_B1_CTL			(0x2B6)
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 14f2d43..beb5470 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -75,6 +75,9 @@
 	bool			hpi_en;			/* HPI enablebit */
 	bool			hpi;			/* HPI support bit */
 	unsigned int		hpi_cmd;		/* cmd used as HPI */
+	bool			bkops;		/* background support bit */
+	bool			bkops_en;	/* background enable bit */
+	u8			raw_exception_status;	/* 53 */
 	u8			raw_partition_support;	/* 160 */
 	u8			raw_erased_mem_count;	/* 181 */
 	u8			raw_ext_csd_structure;	/* 194 */
@@ -88,6 +91,7 @@
 	u8			raw_sec_erase_mult;	/* 230 */
 	u8			raw_sec_feature_support;/* 231 */
 	u8			raw_trim_mult;		/* 232 */
+	u8			raw_bkops_status;	/* 246 */
 	u8			raw_sectors[4];		/* 212 - 4 bytes */
 
 	unsigned int            feature_support;
@@ -172,6 +176,25 @@
 
 #define SDIO_MAX_FUNCS		7
 
+enum mmc_packed_stop_reasons {
+	EXCEEDS_SEGMENTS = 0,
+	EXCEEDS_SECTORS,
+	WRONG_DATA_DIR,
+	FLUSH_OR_DISCARD,
+	EMPTY_QUEUE,
+	REL_WRITE,
+	THRESHOLD,
+	MAX_REASONS,
+};
+
+struct mmc_wr_pack_stats {
+	u32 *packing_events;
+	u32 pack_stop_reason[MAX_REASONS];
+	spinlock_t lock;
+	bool enabled;
+	bool print_in_read;
+};
+
 /*
  * MMC device
  */
@@ -194,6 +217,9 @@
 #define MMC_CARD_SDXC		(1<<6)		/* card is SDXC */
 #define MMC_CARD_REMOVED	(1<<7)		/* card has been removed */
 #define MMC_STATE_HIGHSPEED_200	(1<<8)		/* card is in HS200 mode */
+#define MMC_STATE_NEED_BKOPS	(1<<9)		/* card need to do BKOPS */
+#define MMC_STATE_DOING_BKOPS	(1<<10)		/* card is doing BKOPS */
+#define MMC_STATE_CHECK_BKOPS	(1<<11)		/* card need to check BKOPS */
 	unsigned int		quirks; 	/* card quirks */
 #define MMC_QUIRK_LENIENT_FN0	(1<<0)		/* allow SDIO FN0 writes outside of the VS CCCR range */
 #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1)	/* use func->cur_blksize */
@@ -239,6 +265,8 @@
 	unsigned int		sd_bus_speed;	/* Bus Speed Mode set for the card */
 
 	struct dentry		*debugfs_root;
+
+	struct mmc_wr_pack_stats wr_pack_stats; /* packed commands stats */
 };
 
 /*
@@ -339,6 +367,9 @@
 #define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED)
 #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
 #define mmc_card_removed(c)	((c) && ((c)->state & MMC_CARD_REMOVED))
+#define mmc_card_need_bkops(c)	((c)->state & MMC_STATE_NEED_BKOPS)
+#define mmc_card_doing_bkops(c)	((c)->state & MMC_STATE_DOING_BKOPS)
+#define mmc_card_check_bkops(c) ((c)->state & MMC_STATE_CHECK_BKOPS)
 
 #define mmc_card_set_present(c)	((c)->state |= MMC_STATE_PRESENT)
 #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
@@ -349,6 +380,14 @@
 #define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
 #define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
 #define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
+#define mmc_card_set_need_bkops(c)	((c)->state |= MMC_STATE_NEED_BKOPS)
+#define mmc_card_set_doing_bkops(c)	((c)->state |= MMC_STATE_DOING_BKOPS)
+#define mmc_card_set_check_bkops(c) ((c)->state |= MMC_STATE_CHECK_BKOPS)
+
+#define mmc_card_clr_need_bkops(c)	((c)->state &= ~MMC_STATE_NEED_BKOPS)
+#define mmc_card_clr_doing_bkops(c)	((c)->state &= ~MMC_STATE_DOING_BKOPS)
+#define mmc_card_clr_check_bkops(c) ((c)->state &= ~MMC_STATE_CHECK_BKOPS)
+
 
 /*
  * Quirk add/remove for MMC products.
@@ -430,4 +469,8 @@
 extern void mmc_fixup_device(struct mmc_card *card,
 			     const struct mmc_fixup *table);
 
+extern struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(
+			struct mmc_card *card);
+extern void mmc_blk_init_packed_statistics(struct mmc_card *card);
+
 #endif
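
mmc_wr_pack_stats pairs a per-reason counter array with its own spinlock, while the new card->state flags track the BKOPS phases. The snippet below is a hypothetical usage pattern only; the real accounting lives in the mmc block driver.

#include <linux/mmc/card.h>
#include <linux/spinlock.h>

/* Hypothetical helper: count why write packing stopped for this card. */
static void note_packing_stopped(struct mmc_card *card,
				 enum mmc_packed_stop_reasons reason)
{
	struct mmc_wr_pack_stats *stats = &card->wr_pack_stats;
	unsigned long flags;

	spin_lock_irqsave(&stats->lock, flags);
	if (stats->enabled && reason < MAX_REASONS)
		stats->pack_stop_reason[reason]++;
	spin_unlock_irqrestore(&stats->lock, flags);
}

/* BKOPS bookkeeping then uses the new state helpers, e.g.
 *	mmc_card_set_need_bkops(card);
 *	if (mmc_card_need_bkops(card) && !mmc_card_doing_bkops(card))
 *		mmc_start_bkops(card);	// declared in <linux/mmc/core.h>
 */
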
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 7f30e24..8b2ff74 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -18,6 +18,9 @@
 struct mmc_command {
 	u32			opcode;
 	u32			arg;
+#define MMC_CMD23_ARG_REL_WR	(1 << 31)
+#define MMC_CMD23_ARG_PACKED	((0 << 31) | (1 << 30))
+#define MMC_CMD23_ARG_TAG_REQ	(1 << 29)
 	u32			resp[4];
 	unsigned int		flags;		/* expected response type */
 #define MMC_RSP_PRESENT	(1 << 0)
@@ -134,6 +137,9 @@
 struct mmc_card;
 struct mmc_async_req;
 
+extern int mmc_interrupt_bkops(struct mmc_card *);
+extern int mmc_read_bkops_status(struct mmc_card *);
+extern int mmc_is_exception_event(struct mmc_card *, unsigned int);
 extern struct mmc_async_req *mmc_start_req(struct mmc_host *,
 					   struct mmc_async_req *, int *);
 extern int mmc_interrupt_hpi(struct mmc_card *);
@@ -143,6 +149,7 @@
 extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
 	struct mmc_command *, int);
 extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
+extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
 
 #define MMC_ERASE_ARG		0x00000000
 #define MMC_SECURE_ERASE_ARG	0x80000000
@@ -163,6 +170,7 @@
 extern int mmc_can_secure_erase_trim(struct mmc_card *card);
 extern int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
 				   unsigned int nr);
+extern void mmc_start_bkops(struct mmc_card *card);
 extern unsigned int mmc_calc_max_discard(struct mmc_card *card);
 
 extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 2489bb5..ae06ccf 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -245,6 +245,7 @@
 #define MMC_CAP2_CACHE_CTRL	(1 << 1)	/* Allow cache control */
 #define MMC_CAP2_POWEROFF_NOTIFY (1 << 2)	/* Notify poweroff supported */
 #define MMC_CAP2_NO_MULTI_READ	(1 << 3)	/* Multiblock reads don't work */
+#define MMC_CAP2_SANITIZE	(1<<4)		/* Support Sanitize */
 #define MMC_CAP2_HS200_1_8V_SDR	(1 << 5)        /* can support */
 #define MMC_CAP2_HS200_1_2V_SDR	(1 << 6)        /* can support */
 #define MMC_CAP2_HS200		(MMC_CAP2_HS200_1_8V_SDR | \
@@ -254,6 +255,11 @@
 #define MMC_CAP2_PACKED_WR	(1 << 11)	/* Allow packed write */
 #define MMC_CAP2_PACKED_CMD	(MMC_CAP2_PACKED_RD | \
 				 MMC_CAP2_PACKED_WR) /* Allow packed commands */
+#define MMC_CAP2_PACKED_WR_CONTROL (1 << 12) /* Allow write packing control */
+
+#define MMC_CAP2_BKOPS		    (1 << 14)	/* BKOPS supported */
+#define MMC_CAP2_INIT_BKOPS	    (1 << 15)	/* Need to set BKOPS_EN */
+
 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
 	unsigned int        power_notify_type;
 #define MMC_HOST_PW_NOTIFY_NONE		0
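
Host drivers opt in to the new behaviour through caps2. A hypothetical probe-time fragment is shown below; which flags to set is board and controller specific.

#include <linux/mmc/host.h>

/* Hypothetical: advertise BKOPS and write-packing control on this host. */
static void example_enable_emmc45_caps(struct mmc_host *mmc)
{
	mmc->caps2 |= MMC_CAP2_BKOPS | MMC_CAP2_INIT_BKOPS |
		      MMC_CAP2_PACKED_WR_CONTROL;
}
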
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 06539dff..37b5344 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -139,6 +139,7 @@
 #define R1_CURRENT_STATE(x)	((x & 0x00001E00) >> 9)	/* sx, b (4 bits) */
 #define R1_READY_FOR_DATA	(1 << 8)	/* sx, a */
 #define R1_SWITCH_ERROR		(1 << 7)	/* sx, c */
+#define R1_EXCEPTION_EVENT	(1 << 6)	/* sx, a */
 #define R1_APP_CMD		(1 << 5)	/* sr, c */
 #define R1_EXP_EVENT		(1 << 6)	/* sr, a */
 
@@ -284,6 +285,8 @@
 #define EXT_CSD_PARTITION_SUPPORT	160	/* RO */
 #define EXT_CSD_HPI_MGMT		161	/* R/W */
 #define EXT_CSD_RST_N_FUNCTION		162	/* R/W */
+#define EXT_CSD_BKOPS_EN		163	/* R/W */
+#define EXT_CSD_BKOPS_START		164	/* W */
 #define EXT_CSD_SANITIZE_START		165     /* W */
 #define EXT_CSD_WR_REL_PARAM		166	/* RO */
 #define EXT_CSD_ERASE_GROUP_DEF		175	/* R/W */
@@ -316,11 +319,13 @@
 #define EXT_CSD_PWR_CL_200_360		237	/* RO */
 #define EXT_CSD_PWR_CL_DDR_52_195	238	/* RO */
 #define EXT_CSD_PWR_CL_DDR_52_360	239	/* RO */
+#define EXT_CSD_BKOPS_STATUS		246	/* RO */
 #define EXT_CSD_POWER_OFF_LONG_TIME	247	/* RO */
 #define EXT_CSD_GENERIC_CMD6_TIME	248	/* RO */
 #define EXT_CSD_CACHE_SIZE		249	/* RO, 4 bytes */
 #define EXT_CSD_MAX_PACKED_WRITES	500	/* RO */
 #define EXT_CSD_MAX_PACKED_READS	501	/* RO */
+#define EXT_CSD_BKOPS_SUPPORT		502	/* RO */
 #define EXT_CSD_HPI_FEATURES		503	/* RO */
 
 /*
@@ -450,5 +455,16 @@
 #define MMC_SWITCH_MODE_CLEAR_BITS	0x02	/* Clear bits which are 1 in value */
 #define MMC_SWITCH_MODE_WRITE_BYTE	0x03	/* Set target to value */
 
+/*
+ * BKOPS status level
+ */
+#define EXT_CSD_BKOPS_LEVEL_2		0x2
+
+/*
+ * EXCEPTION_EVENT_STATUS field (eMMC4.5)
+ */
+#define EXT_CSD_URGENT_BKOPS		BIT(0)
+#define EXT_CSD_DYNCAP_NEEDED		BIT(1)
+#define EXT_CSD_SYSPOOL_EXHAUSTED	BIT(2)
 #endif  /* MMC_MMC_PROTOCOL_H */
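
Tying the EXT_CSD additions together: BKOPS_STATUS (byte 246) reports how much background work the device wants, and the exception event bits can flag it as urgent. The sketch below shows one plausible decision using only the fields and helpers introduced by this patch; it is illustrative, not the core implementation.

#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>

/* Illustrative policy: kick off BKOPS at level 2+ or on an urgent event. */
static void example_maybe_start_bkops(struct mmc_card *card)
{
	bool urgent = card->ext_csd.raw_exception_status & EXT_CSD_URGENT_BKOPS;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 && !urgent)
		return;				/* the device can wait */

	if (!mmc_card_doing_bkops(card)) {
		mmc_card_set_need_bkops(card);
		mmc_start_bkops(card);
	}
}
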
 
diff --git a/include/linux/msm_charm.h b/include/linux/msm_charm.h
index 779fd38..c31e493 100644
--- a/include/linux/msm_charm.h
+++ b/include/linux/msm_charm.h
@@ -10,6 +10,7 @@
 #define NORMAL_BOOT_DONE	_IOW(CHARM_CODE, 5, int)
 #define RAM_DUMP_DONE		_IOW(CHARM_CODE, 6, int)
 #define WAIT_FOR_RESTART	_IOR(CHARM_CODE, 7, int)
+#define GET_DLOAD_STATUS	_IOR(CHARM_CODE, 8, int)
 
 enum charm_boot_type {
 	CHARM_NORMAL_BOOT = 0,
diff --git a/include/linux/smux.h b/include/linux/smux.h
index 64d0ed6..308f969 100644
--- a/include/linux/smux.h
+++ b/include/linux/smux.h
@@ -247,7 +247,7 @@
 int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear);
 
 #else
-int msm_smux_open(uint8_t lcid, void *priv,
+static inline int msm_smux_open(uint8_t lcid, void *priv,
 	void (*notify)(void *priv, int event_type, const void *metadata),
 	int (*get_rx_buffer)(void *priv, void **pkt_priv,
 					void **buffer, int size))
@@ -255,37 +255,39 @@
 	return -ENODEV;
 }
 
-int msm_smux_close(uint8_t lcid)
+static inline int msm_smux_close(uint8_t lcid)
 {
 	return -ENODEV;
 }
 
-int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
+static inline int msm_smux_write(uint8_t lcid, void *pkt_priv,
+				const void *data, int len)
 {
 	return -ENODEV;
 }
 
-int msm_smux_is_ch_full(uint8_t lcid);
+static inline int msm_smux_is_ch_full(uint8_t lcid)
 {
 	return -ENODEV;
 }
 
-int msm_smux_is_ch_low(uint8_t lcid);
+static inline int msm_smux_is_ch_low(uint8_t lcid)
 {
 	return -ENODEV;
 }
 
-long msm_smux_tiocm_get(uint8_t lcid)
+static inline long msm_smux_tiocm_get(uint8_t lcid)
 {
 	return 0;
 }
 
-int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
+static inline int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
 {
 	return -ENODEV;
 }
 
-int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
+static inline int msm_smux_set_ch_option(uint8_t lcid, uint32_t set,
+					uint32_t clear)
 {
 	return -ENODEV;
 }
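
The stubs above are only correct as static inline: a plain definition in a header is emitted once per including translation unit and causes multiple-definition link errors, and the original form with a stray semicolon before the body (e.g. the old msm_smux_is_ch_full stub) did not even parse. The generic pattern, with obviously made-up names, is sketched below.

#include <linux/errno.h>

#ifdef CONFIG_EXAMPLE_FEATURE		/* hypothetical Kconfig symbol */
int example_feature_do(int arg);	/* real symbol lives in a .c file */
#else
static inline int example_feature_do(int arg)
{
	return -ENODEV;			/* feature compiled out */
}
#endif
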
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index eda60c0..fe0f1a8 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -386,6 +386,7 @@
 #define V4L2_PIX_FMT_XVID     v4l2_fourcc('X', 'V', 'I', 'D') /* Xvid           */
 #define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */
 #define V4L2_PIX_FMT_VC1_ANNEX_L v4l2_fourcc('V', 'C', '1', 'L') /* SMPTE 421M Annex L compliant stream */
+#define V4L2_PIX_FMT_DIVX_311  v4l2_fourcc('D', 'I', 'V', '3') /* DIVX        */
 
 /*  Vendor-specific formats   */
 #define V4L2_PIX_FMT_CPIA1    v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
diff --git a/include/media/radio-iris.h b/include/media/radio-iris.h
index dfad18d..b5e8f2e 100644
--- a/include/media/radio-iris.h
+++ b/include/media/radio-iris.h
@@ -516,6 +516,7 @@
 	FM_RECV,
 	FM_TRANS,
 	FM_RESET,
+	FM_CALIB
 };
 
 enum v4l2_cid_private_iris_t {
diff --git a/include/media/videobuf2-msm-mem.h b/include/media/videobuf2-msm-mem.h
index 822dd69..84e2bea 100644
--- a/include/media/videobuf2-msm-mem.h
+++ b/include/media/videobuf2-msm-mem.h
@@ -16,7 +16,7 @@
 #define _VIDEOBUF2_PMEM_CONTIG_H
 
 #include <media/videobuf2-core.h>
-#include <mach/msm_subsystem_map.h>
+#include <mach/iommu_domains.h>
 #include <linux/ion.h>
 
 struct videobuf2_mapping {
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
new file mode 100644
index 0000000..695fea9
--- /dev/null
+++ b/include/sound/apr_audio-v2.h
@@ -0,0 +1,6172 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 and
+* only version 2 as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*/
+
+
+#ifndef _APR_AUDIO_V2_H_
+#define _APR_AUDIO_V2_H_
+
+#include <mach/qdsp6v2/apr.h>
+
+#define ADSP_ADM_VERSION    0x00070000
+
+#define ADM_CMD_SHARED_MEM_MAP_REGIONS    0x00010322
+#define ADM_CMDRSP_SHARED_MEM_MAP_REGIONS 0x00010323
+#define ADM_CMD_SHARED_MEM_UNMAP_REGIONS 0x00010324
+
+#define ADM_CMD_MATRIX_MAP_ROUTINGS_V5 0x00010325
+
+/* Enumeration for an audio Rx matrix ID.*/
+#define ADM_MATRIX_ID_AUDIO_RX              0
+
+#define ADM_MATRIX_ID_AUDIO_TX              1
+
+/* Enumeration for an audio Tx matrix ID.*/
+#define ADM_MATRIX_ID_AUDIOX              1
+
+#define ADM_MAX_COPPS 5
+
+
+/*  Session map node structure.
+*	Immediately following this structure are num_copps
+*	entries of COPP IDs. The COPP IDs are 16 bits, so
+*	there might be a padding 16-bit field if num_copps
+*	is odd.
+*/
+struct adm_session_map_node_v5 {
+	u16                  session_id;
+/* Handle of the ASM session to be routed. Supported values: 1
+* to 8.
+*/
+
+
+	u16                  num_copps;
+	/* Number of COPPs to which this session is to be routed.
+			Supported values: 0 < num_copps <= ADM_MAX_COPPS.
+	*/
+} __packed;
+
+/*  Payload of the #ADM_CMD_MATRIX_MAP_ROUTINGS_V5 command.
+*	Immediately following this structure are num_sessions session map
+*	node payloads (adm_session_map_node_v5).
+*/
+
+struct adm_cmd_matrix_map_routings_v5 {
+	struct apr_hdr	hdr;
+
+	u32                  matrix_id;
+/* Specifies whether the matrix ID is Audio Rx (0) or Audio Tx
+* (1). Use the ADM_MATRIX_ID_AUDIO_RX or ADM_MATRIX_ID_AUDIOX
+* macros to set this field.
+*/
+	u32                  num_sessions;
+	/* Number of sessions being updated by this command (optional).*/
+} __packed;
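
As the comments above note, ADM_CMD_MATRIX_MAP_ROUTINGS_V5 is a variable-length command: each session map node is followed by num_copps 16-bit COPP IDs, padded to a 32-bit boundary when the count is odd. A sizing helper is sketched below; the function names are hypothetical and only the structures come from this header.

#include <linux/kernel.h>
#include <sound/apr_audio-v2.h>

/* Bytes taken by one session map node plus its trailing COPP ID list. */
static u32 example_map_node_bytes(u32 num_copps)
{
	return sizeof(struct adm_session_map_node_v5) +
		ALIGN(num_copps * sizeof(u16), 4);	/* pad when odd */
}

/* Total ADM_CMD_MATRIX_MAP_ROUTINGS_V5 size (APR header included). */
static u32 example_map_routings_bytes(const u32 *copps_per_session,
				      u32 num_sessions)
{
	u32 i, sz = sizeof(struct adm_cmd_matrix_map_routings_v5);

	for (i = 0; i < num_sessions; i++)
		sz += example_map_node_bytes(copps_per_session[i]);
	return sz;
}
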
+
+/* This command allows a client to open a COPP/Voice Proc. TX module
+*	and sets up the device session: Matrix -> COPP -> AFE on the RX
+*	and AFE -> COPP -> Matrix on the TX. This enables PCM data to
+*	be transferred to/from the endpoint (AFEPortID).
+*
+*	@return
+*	#ADM_CMDRSP_DEVICE_OPEN_V5 with the resulting status and
+*	COPP ID.
+*/
+#define ADM_CMD_DEVICE_OPEN_V5                          0x00010326
+
+/* Indicates that endpoint_id_2 is to be ignored.*/
+#define ADM_CMD_COPP_OPEN_END_POINT_ID_2_IGNORE				0xFFFF
+
+#define ADM_CMD_COPP_OPEN_MODE_OF_OPERATION_RX_PATH_COPP		 1
+
+#define ADM_CMD_COPP_OPEN_MODE_OF_OPERATIONX_PATH_LIVE_COPP		 2
+
+#define ADM_CMD_COPP_OPEN_MODE_OF_OPERATIONX_PATH_NON_LIVE_COPP	 3
+
+/* Indicates that an audio COPP is to send/receive a mono PCM
+ * stream to/from
+ *	END_POINT_ID_1.
+ */
+#define ADM_CMD_COPP_OPEN_CHANNEL_CONFIG_MONO		1
+
+/* Indicates that an audio COPP is to send/receive a
+ *	stereo PCM stream to/from END_POINT_ID_1.
+ */
+#define ADM_CMD_COPP_OPEN_CHANNEL_CONFIG_STEREO		2
+
+/* Sample rate is 8000 Hz.*/
+#define ADM_CMD_COPP_OPEN_SAMPLE_RATE_8K 8000
+
+/* Sample rate is 16000 Hz.*/
+#define ADM_CMD_COPP_OPEN_SAMPLE_RATE_16K 16000
+
+/* Sample rate is 48000 Hz.*/
+#define ADM_CMD_COPP_OPEN_SAMPLE_RATE_48K 48000
+
+/* Definition for a COPP live input flag bitmask.*/
+#define ADM_BIT_MASK_COPP_LIVE_INPUT_FLAG (0x0001U)
+
+/* Definition for the COPP live input flag shift value.*/
+#define ADM_SHIFT_COPP_LIVE_INPUT_FLAG	 0
+
+/* Definition for the COPP ID bitmask.*/
+#define ADM_BIT_MASK_COPP_ID  (0x0000FFFFUL)
+
+/* Definition for the COPP ID shift value.*/
+#define ADM_SHIFT_COPP_ID	0
+
+/* Definition for the service ID bitmask.*/
+#define ADM_BIT_MASK_SERVICE_ID  (0x00FF0000UL)
+
+/* Definition for the service ID shift value.*/
+#define ADM_SHIFT_SERVICE_ID	16
+
+/* Definition for the domain ID bitmask.*/
+#define ADM_BIT_MASK_DOMAIN_ID    (0xFF000000UL)
+
+/* Definition for the domain ID shift value.*/
+#define ADM_SHIFT_DOMAIN_ID	24
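
The COPP/service/domain masks and shifts above describe how those three fields share one 32-bit word; which word they are packed into (for example an APR token) is not shown here, so the helpers below only illustrate the bit layout and their names are hypothetical.

#include <linux/types.h>
#include <sound/apr_audio-v2.h>

static inline u16 example_copp_id(u32 packed)
{
	return (packed & ADM_BIT_MASK_COPP_ID) >> ADM_SHIFT_COPP_ID;
}

static inline u8 example_service_id(u32 packed)
{
	return (packed & ADM_BIT_MASK_SERVICE_ID) >> ADM_SHIFT_SERVICE_ID;
}

static inline u8 example_domain_id(u32 packed)
{
	return (packed & ADM_BIT_MASK_DOMAIN_ID) >> ADM_SHIFT_DOMAIN_ID;
}
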
+
+/*  ADM device open command payload of the
+	#ADM_CMD_DEVICE_OPEN_V5 command.
+*/
+struct adm_cmd_device_open_v5 {
+	struct apr_hdr		hdr;
+	u16                  flags;
+/* Reserved for future use. Clients must set this field
+ * to zero.
+ */
+
+	u16                  mode_of_operation;
+/* Specifies whether the COPP must be opened on the Tx or Rx
+ * path. Use the ADM_CMD_COPP_OPEN_MODE_OF_OPERATION_* macros for
+ * supported values and interpretation.
+ * Supported values:
+ * - 0x1 -- Rx path COPP
+ * - 0x2 -- Tx path live COPP
+ * - 0x3 -- Tx path nonlive COPP
+ * Live connections cause sample discarding in the Tx device
+ * matrix if the destination output ports do not pull them
+ * fast enough. Nonlive connections queue the samples
+ * indefinitely.
+ */
+
+	u16                  endpoint_id_1;
+/* Logical and physical endpoint ID of the audio path.
+ * If the ID is a voice processor Tx block, it receives near
+ * samples.	Supported values: Any pseudoport, AFE Rx port,
+ * or AFE Tx port For a list of valid IDs, refer to
+ * @xhyperref{Q4,[Q4]}.
+ * Q4 = Hexagon Multimedia: AFE Interface Specification
+ */
+
+	u16                  endpoint_id_2;
+/* Logical and physical endpoint ID 2 for a voice processor
+ * Tx block.
+ * This is not applicable to audio COPP.
+ * Supported values:
+ * - AFE Rx port
+ * - 0xFFFF -- Endpoint 2 is unavailable and the voice
+ * processor Tx
+ * block ignores this endpoint
+ * When the voice processor Tx block is created on the audio
+ * record path,
+ * it can receive far-end samples from an AFE Rx port if the
+ * voice call
+ * is active. The ID of the AFE port is provided in this
+ * field.
+ * For a list of valid IDs, refer @xhyperref{Q4,[Q4]}.
+ */
+
+	u32                  topology_id;
+	/* Audio COPP topology ID; 32-bit GUID. */
+
+	u16                  dev_num_channel;
+/* Number of channels the audio COPP sends to/receives from
+ * the endpoint.
+ * Supported values: 1 to 8.
+ * The value is ignored for the voice processor Tx block,
+ * where channel
+ * configuration is derived from the topology ID.
+ */
+
+	u16                  bit_width;
+/* Bit width (in bits) that the audio COPP sends to/receives
+ * from the
+ * endpoint. The value is ignored for the voice processing
+ * Tx block,
+ * where the PCM width is 16 bits.
+ */
+
+	u32                  sample_rate;
+/* Sampling rate at which the audio COPP/voice processor
+ * Tx block
+ * interfaces with the endpoint.
+ * Supported values for voice processor Tx: 8000, 16000,
+ * 48000 Hz
+ * Supported values for audio COPP: >0 and <=192 kHz
+ */
+
+	u8                   dev_channel_mapping[8];
+/* Array of channel mapping of buffers that the audio COPP
+ * sends to the endpoint. Channel[i] mapping describes channel
+ * i inside the buffer, where 0 < i < dev_num_channel.
+ * This value is relevant only for an audio Rx COPP.
+ * For the voice processor block and Tx audio block, this field
+ * is set to zero and is ignored.
+ */
+} __packed;
+
+/*
+ *	This command allows the client to close a COPP and disconnect
+ *	the device session.
+ */
+#define ADM_CMD_DEVICE_CLOSE_V5                         0x00010327
+
+/* Sets one or more parameters to a COPP.
+*/
+#define ADM_CMD_SET_PP_PARAMS_V5                        0x00010328
+
+/*  Payload of the #ADM_CMD_SET_PP_PARAMS_V5 command.
+ *	If the data_payload_addr_lsw and data_payload_addr_msw elements
+ *	are zero, a series of adm_param_data_v5 structures immediately
+ *	follows, whose total size is data_payload_size bytes.
+ */
+struct adm_cmd_set_pp_params_v5 {
+	struct apr_hdr hdr;
+		u32                  data_payload_addr_lsw;
+	/* LSW of parameter data payload address.*/
+	u32                  data_payload_addr_msw;
+	/* MSW of parameter data payload address.*/
+
+		u32                  mem_map_handle;
+/* Memory map handle returned by ADM_CMD_SHARED_MEM_MAP_REGIONS
+ * command */
+/* If mem_map_handle is zero, the parameter data is sent in-band,
+ * i.e. in the message payload */
+
+	u32                  data_payload_size;
+/* Size in bytes of the variable payload accompanying this
+ * message or
+ * in shared memory. This is used for parsing the parameter
+ * payload.
+ */
+} __packed;
+
+/*  Payload format for COPP parameter data.
+ * Immediately following this structure are param_size bytes
+ * of parameter
+ * data.
+ */
+struct adm_param_data_v5 {
+	u32                  module_id;
+	/* Unique ID of the module. */
+	u32                  param_id;
+	/* Unique ID of the parameter. */
+	u16                  param_size;
+	/* Data size of the param_id/module_id combination.
+	 * This value is a multiple of 4 bytes.
+	 */
+	u16                  reserved;
+	/* Reserved for future enhancements.
+	 * This field must be set to zero.
+	 */
+} __packed;
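
Per the comments above, leaving the payload address words and mem_map_handle at zero makes ADM_CMD_SET_PP_PARAMS_V5 carry its parameter data in-band, as adm_param_data_v5 headers followed by their values. Below is a hedged sketch of that layout for a single 32-bit parameter; the IDs and value are placeholders, and apr_hdr setup and the actual APR send are omitted.

#include <linux/string.h>
#include <sound/apr_audio-v2.h>

/* Illustrative in-band packet: command, one parameter header, one value. */
struct example_inband_set_param {
	struct adm_cmd_set_pp_params_v5 cmd;
	struct adm_param_data_v5 param;
	u32 value;
} __packed;

static void example_fill_set_param(struct example_inband_set_param *pkt,
				   u32 module_id, u32 param_id, u32 value)
{
	memset(pkt, 0, sizeof(*pkt));
	/* in-band: payload address words and mem_map_handle stay zero */
	pkt->cmd.data_payload_size = sizeof(pkt->param) + sizeof(pkt->value);
	pkt->param.module_id  = module_id;
	pkt->param.param_id   = param_id;
	pkt->param.param_size = sizeof(pkt->value);	/* multiple of 4 */
	pkt->value = value;
}
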
+
+/* Returns the status and COPP ID to an #ADM_CMD_DEVICE_OPEN_V5 command.
+ */
+#define ADM_CMDRSP_DEVICE_OPEN_V5                      0x00010329
+
+/*  Payload of the #ADM_CMDRSP_DEVICE_OPEN_V5 message,
+ *	which returns the
+ *	status and COPP ID to an #ADM_CMD_DEVICE_OPEN_V5 command.
+ */
+struct adm_cmd_rsp_device_open_v5 {
+	u32                  status;
+	/* Status message (error code).*/
+
+	u16                  copp_id;
+	/* COPP ID:  Supported values: 0 <= copp_id < ADM_MAX_COPPS*/
+
+	u16                  reserved;
+	/* Reserved. This field must be set to zero.*/
+} __packed;
+
+/* This command allows a query of one COPP parameter.
+*/
+#define ADM_CMD_GET_PP_PARAMS_V5                                0x0001032A
+
+/*  Payload of an #ADM_CMD_GET_PP_PARAMS_V5 command.
+*/
+struct adm_cmd_get_pp_params_v5 {
+	u32                  data_payload_addr_lsw;
+	/* LSW of parameter data payload address.*/
+
+	u32                  data_payload_addr_msw;
+	/* MSW of parameter data payload address.*/
+
+	u32                  mem_map_handle;
+	/* Memory map handle returned by the
+	 * ADM_CMD_SHARED_MEM_MAP_REGIONS command.
+	 * If mem_map_handle is zero, the ACK's payload contains
+	 * the ParamData (in-band). If it is non-zero, the ParamData
+	 * payloads begin at the address specified (out-of-band).
+	 */
+
+	u32                  module_id;
+	/* Unique ID of the module. */
+
+	u32                  param_id;
+	/* Unique ID of the parameter. */
+
+	u16                  param_max_size;
+	/* Maximum data size of the parameter ID/module ID
+	 * combination. This field is a multiple of 4 bytes.
+	 */
+	u16                  reserved;
+	/* Reserved for future enhancements.
+	 * This field must be set to zero.
+	 */
+} __packed;
+
+/* Returns parameter values
+ *	in response to an #ADM_CMD_GET_PP_PARAMS_V5 command.
+ */
+#define ADM_CMDRSP_GET_PP_PARAMS_V5		0x0001032B
+
+/* Payload of the #ADM_CMDRSP_GET_PP_PARAMS_V5 message,
+ * which returns parameter values in response
+ * to an #ADM_CMD_GET_PP_PARAMS_V5 command.
+ * Immediately following this
+ * structure is the adm_param_data_v5
+ * structure containing the pre/postprocessing
+ * parameter data. For an in-band
+ * scenario, the variable payload depends
+ * on the size of the parameter.
+*/
+struct adm_cmd_rsp_get_pp_params_v5 {
+	u32                  status;
+	/* Status message (error code).*/
+} __packed;
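+
+/*
+ * Example (illustrative sketch only): parsing an in-band
+ * ADM_CMDRSP_GET_PP_PARAMS_V5 payload, assuming the usual convention that a
+ * status of 0 means success.  handle_param() is a hypothetical consumer.
+ *
+ *	static void parse_get_pp_rsp(void *payload)
+ *	{
+ *		struct adm_cmd_rsp_get_pp_params_v5 *rsp = payload;
+ *		struct adm_param_data_v5 *pd = (void *)(rsp + 1);
+ *
+ *		if (rsp->status == 0)
+ *			handle_param(pd->module_id, pd->param_id,
+ *				     (void *)(pd + 1), pd->param_size);
+ *	}
+ */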
+
+/* Allows a client to control the gains on various session-to-COPP paths.
+ */
+#define ADM_CMD_MATRIX_RAMP_GAINS_V5                                 0x0001032C
+
+/* Indicates that the target gain in the
+ *	current adm_session_copp_gain_v5
+ *	structure is to be applied to all
+ *	the session-to-COPP paths that exist for
+ *	the specified session.
+ */
+#define ADM_CMD_MATRIX_RAMP_GAINS_COPP_ID_ALL_CONNECTED_COPPS     0xFFFF
+
+/* Indicates that the target gain is
+ * to be applied immediately to the
+ * specified session-to-COPP path,
+ * without ramping.
+ */
+#define ADM_CMD_MATRIX_RAMP_GAINS_RAMP_DURATION_IMMEDIATE         0x0000
+
+/* Enumeration for a linear ramping curve.*/
+#define ADM_CMD_MATRIX_RAMP_GAINS_RAMP_CURVE_LINEAR               0x0000
+
+/*  Payload of the #ADM_CMD_MATRIX_RAMP_GAINS_V5 command.
+ * Immediately following this structure are num_gains instances of
+ * the adm_session_copp_gain_v5 structure.
+ */
+struct adm_cmd_matrix_ramp_gains_v5 {
+	u32                  matrix_id;
+/* Specifies whether the matrix ID is Audio Rx (0) or Audio Tx (1).
+ * Use the ADM_MATRIX_ID_AUDIO_RX or ADM_MATRIX_ID_AUDIO_TX
+ * macros to set this field.
+ */
+
+	u16                  num_gains;
+	/* Number of gains being applied. */
+
+	u16                  reserved_for_align;
+	/* Reserved. This field must be set to zero.*/
+} __packed;
+
+/*  Session-to-COPP path gain structure, used by the
+ *	#ADM_CMD_MATRIX_RAMP_GAINS_V5 command.
+ *	This structure specifies the target
+ *	gain (per channel) that must be applied
+ *	to a particular session-to-COPP path in
+ *	the audio matrix. The structure can
+ *	also be used to apply the gain globally
+ *	to all session-to-COPP paths that
+ *	exist for the given session.
+ *	The aDSP uses device channel mapping to
+ *	determine which channel gains to
+ *	use from this command. For example,
+ *	if the device is configured as stereo,
+ *	the aDSP uses only target_gain_ch_1 and
+ *	target_gain_ch_2, and it ignores
+ *	the others.
+ */
+struct adm_session_copp_gain_v5 {
+	u16                  session_id;
+/* Handle of the ASM session.
+ *	Supported values: 1 to 8.
+ */
+
+	u16                  copp_id;
+/* Handle of the COPP. Gain will be applied on the Session ID
+ * COPP ID path.
+ */
+
+	u16                  ramp_duration;
+/* Duration (in milliseconds) of the ramp over
+ * which target gains are
+ * to be applied. Use
+ * #ADM_CMD_MATRIX_RAMP_GAINS_RAMP_DURATION_IMMEDIATE
+ * to indicate that gain must be applied immediately.
+ */
+
+	u16                  step_duration;
+/* Duration (in milliseconds) of each step in the ramp.
+ * This parameter is ignored if ramp_duration is equal to
+ * #ADM_CMD_MATRIX_RAMP_GAINS_RAMP_DURATION_IMMEDIATE.
+ * Supported value: 1
+ */
+
+	u16                  ramp_curve;
+/* Type of ramping curve.
+ * Supported value: #ADM_CMD_MATRIX_RAMP_GAINS_RAMP_CURVE_LINEAR
+ */
+
+	u16                  reserved_for_align;
+	/* Reserved. This field must be set to zero. */
+
+	u16                  target_gain_ch_1;
+	/* Target linear gain for channel 1 in Q13 format; */
+
+	u16                  target_gain_ch_2;
+	/* Target linear gain for channel 2 in Q13 format; */
+
+	u16                  target_gain_ch_3;
+	/* Target linear gain for channel 3 in Q13 format; */
+
+	u16                  target_gain_ch_4;
+	/* Target linear gain for channel 4 in Q13 format; */
+
+	u16                  target_gain_ch_5;
+	/* Target linear gain for channel 5 in Q13 format; */
+
+	u16                  target_gain_ch_6;
+	/* Target linear gain for channel 6 in Q13 format; */
+
+	u16                  target_gain_ch_7;
+	/* Target linear gain for channel 7 in Q13 format; */
+
+	u16                  target_gain_ch_8;
+	/* Target linear gain for channel 8 in Q13 format; */
+} __packed;
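+
+/*
+ * Example (illustrative sketch only): applying unity gain (1.0 in Q13 is
+ * 1 << 13 = 0x2000) to both channels of a stereo path, for every COPP
+ * connected to ASM session 1.  matrix_id 0 selects the Audio Rx matrix; the
+ * APR header and command dispatch are assumed to be handled elsewhere.
+ *
+ *	struct {
+ *		struct adm_cmd_matrix_ramp_gains_v5 cmd;
+ *		struct adm_session_copp_gain_v5     gain[1];
+ *	} __packed ramp;
+ *
+ *	ramp.cmd.matrix_id            = 0;
+ *	ramp.cmd.num_gains            = 1;
+ *	ramp.gain[0].session_id       = 1;
+ *	ramp.gain[0].copp_id          =
+ *		ADM_CMD_MATRIX_RAMP_GAINS_COPP_ID_ALL_CONNECTED_COPPS;
+ *	ramp.gain[0].ramp_duration    =
+ *		ADM_CMD_MATRIX_RAMP_GAINS_RAMP_DURATION_IMMEDIATE;
+ *	ramp.gain[0].step_duration    = 1;
+ *	ramp.gain[0].ramp_curve       =
+ *		ADM_CMD_MATRIX_RAMP_GAINS_RAMP_CURVE_LINEAR;
+ *	ramp.gain[0].target_gain_ch_1 = 0x2000;
+ *	ramp.gain[0].target_gain_ch_2 = 0x2000;
+ */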
+
+/* Allows to set mute/unmute on various session-to-COPP paths.
+ *	For every session-to-COPP path (stream-device interconnection),
+ *	mute/unmute can be set individually on the output channels.
+ */
+#define ADM_CMD_MATRIX_MUTE_V5                                0x0001032D
+
+/* Indicates that mute/unmute in the
+ *	current adm_session_copp_mute_v5 structure
+ *	is to be applied to all the session-to-COPP
+ *	paths that exist for the specified session.
+ */
+#define ADM_CMD_MATRIX_MUTE_COPP_ID_ALL_CONNECTED_COPPS     0xFFFF
+
+/*  Payload of the #ADM_CMD_MATRIX_MUTE_V5 command*/
+struct adm_cmd_matrix_mute_v5 {
+	u32                  matrix_id;
+/* Specifies whether the matrix ID is Audio Rx (0) or Audio Tx (1).
+ * Use the ADM_MATRIX_ID_AUDIO_RX or ADM_MATRIX_ID_AUDIO_TX
+ * macros to set this field.
+ */
+
+	u16                  session_id;
+/* Handle of the ASM session.
+ * Supported values: 1 to 8.
+ */
+
+	u16                  copp_id;
+/* Handle of the COPP.
+ * Use ADM_CMD_MATRIX_MUTE_COPP_ID_ALL_CONNECTED_COPPS
+ * to indicate that mute/unmute must be applied to
+ * all the COPPs connected to session_id.
+ * Supported values:
+ * - 0xFFFF -- Apply mute/unmute to all connected COPPs
+ * - Other values -- Valid COPP ID
+ */
+
+	u8                  mute_flag_ch_1;
+	/* Mute flag for channel 1 is set to unmute (0) or mute (1). */
+
+	u8                  mute_flag_ch_2;
+	/* Mute flag for channel 2 is set to unmute (0) or mute (1). */
+
+	u8                  mute_flag_ch_3;
+	/* Mute flag for channel 3 is set to unmute (0) or mute (1). */
+
+	u8                  mute_flag_ch_4;
+	/* Mute flag for channel 4 is set to unmute (0) or mute (1). */
+
+	u8                  mute_flag_ch_5;
+	/* Mute flag for channel 5 is set to unmute (0) or mute (1). */
+
+	u8                  mute_flag_ch_6;
+	/* Mute flag for channel 6 is set to unmute (0) or mute (1). */
+
+	u8                  mute_flag_ch_7;
+	/* Mute flag for channel 7 is set to unmute (0) or mute (1). */
+
+	u8                  mute_flag_ch_8;
+	/* Mute flag for channel 8 is set to unmute (0) or mute (1). */
+
+	u16                 ramp_duration;
+/* Period (in milliseconds) over which the soft mute/unmute will be
+ * applied.
+ * Supported values: 0 (Default) to 0xFFFF
+ * The default of 0 means mute/unmute will be applied immediately.
+ */
+
+	u16                 reserved_for_align;
+	/* Clients must set this field to zero.*/
+} __packed;
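+
+/*
+ * Example (illustrative sketch only): soft-muting the first two channels of
+ * every COPP connected to ASM session 1 over a 20 ms ramp.  matrix_id 0
+ * selects the Audio Rx matrix; channel flags left at 0 remain unmuted, and
+ * the APR header is assumed to be filled in elsewhere.
+ *
+ *	struct adm_cmd_matrix_mute_v5 mute = {
+ *		.matrix_id      = 0,
+ *		.session_id     = 1,
+ *		.copp_id        = ADM_CMD_MATRIX_MUTE_COPP_ID_ALL_CONNECTED_COPPS,
+ *		.mute_flag_ch_1 = 1,
+ *		.mute_flag_ch_2 = 1,
+ *		.ramp_duration  = 20,
+ *	};
+ */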
+
+/* Allows a client to connect the desired stream to
+ * the desired AFE port through the stream router
+ *
+ * This command allows the client to connect a specified session
+ * to a specified AFE port. It is used only for compressed streams
+ * opened using the #ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED or
+ * #ASM_STREAM_CMD_OPEN_READ_COMPRESSED command.
+ *
+ * @prerequisites
+ * Session ID and AFE Port ID must be valid.
+ * #ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED or
+ * #ASM_STREAM_CMD_OPEN_READ_COMPRESSED
+ * must have been called on this session.
+ */
+
+#define ADM_CMD_CONNECT_AFE_PORT_V5	0x0001032E
+#define ADM_CMD_DISCONNECT_AFE_PORT_V5	0x0001032F
+/* Enumeration for the Rx stream router ID.*/
+#define ADM_STRTR_ID_RX                    0
+/* Enumeration for the Tx stream router ID.*/
+#define ADM_STRTR_ID_TX                  1
+
+/*  Payload of the #ADM_CMD_CONNECT_AFE_PORT_V5 command.*/
+struct adm_cmd_connect_afe_port_v5 {
+	u8                  mode;
+/* ID of the stream router (Rx/Tx). Use the
+ * ADM_STRTR_ID_RX or ADM_STRTR_ID_TX macros
+ * to set this field.
+ */
+
+	u8                  session_id;
+	/* Session ID of the stream to connect */
+
+	u16                 afe_port_id;
+	/* Port ID of the AFE port to connect to.*/
+	u32                 num_channels;
+/* Number of device channels
+ * Supported values: 2(Audio Sample Packet),
+ * 8 (HBR Audio Stream Sample Packet)
+ */
+
+	u32                 sampling_rate;
+/* Device sampling rate
+* Supported values: Any
+*/
+} __packed;
+
+
+/* adsp_adm_api.h */
+
+
+/* Port ID. Update afe_get_port_index
+ *	when a new port is added here. */
+#define PRIMARY_I2S_RX 0		/* index = 0 */
+#define PRIMARY_I2S_TX 1		/* index = 1 */
+#define PCM_RX 2			/* index = 2 */
+#define PCM_TX 3			/* index = 3 */
+#define SECONDARY_I2S_RX 4		/* index = 4 */
+#define SECONDARY_I2S_TX 5		/* index = 5 */
+#define MI2S_RX 6			/* index = 6 */
+#define MI2S_TX 7			/* index = 7 */
+#define HDMI_RX 8			/* index = 8 */
+#define RSVD_2 9			/* index = 9 */
+#define RSVD_3 10			/* index = 10 */
+#define DIGI_MIC_TX 11			/* index = 11 */
+#define VOICE_RECORD_RX 0x8003		/* index = 12 */
+#define VOICE_RECORD_TX 0x8004		/* index = 13 */
+#define VOICE_PLAYBACK_TX 0x8005	/* index = 14 */
+
+/* Slimbus Multi channel port id pool  */
+#define SLIMBUS_0_RX		0x4000		/* index = 15 */
+#define SLIMBUS_0_TX		0x4001		/* index = 16 */
+#define SLIMBUS_1_RX		0x4002		/* index = 17 */
+#define SLIMBUS_1_TX		0x4003		/* index = 18 */
+#define SLIMBUS_2_RX		0x4004
+#define SLIMBUS_2_TX		0x4005
+#define SLIMBUS_3_RX		0x4006
+#define SLIMBUS_3_TX		0x4007
+#define SLIMBUS_4_RX		0x4008
+#define SLIMBUS_4_TX		0x4009		/* index = 24 */
+#define INT_BT_SCO_RX 0x3000		/* index = 25 */
+#define INT_BT_SCO_TX 0x3001		/* index = 26 */
+#define INT_BT_A2DP_RX 0x3002		/* index = 27 */
+#define INT_FM_RX 0x3004		/* index = 28 */
+#define INT_FM_TX 0x3005		/* index = 29 */
+#define RT_PROXY_PORT_001_RX	0x2000    /* index = 30 */
+#define RT_PROXY_PORT_001_TX	0x2001    /* index = 31 */
+
+#define AFE_PORT_INVALID 0xFFFF
+#define SLIMBUS_INVALID AFE_PORT_INVALID
+
+#define AFE_PORT_CMD_START 0x000100ca
+
+#define AFE_EVENT_RTPORT_START 0
+#define AFE_EVENT_RTPORT_STOP 1
+#define AFE_EVENT_RTPORT_LOW_WM 2
+#define AFE_EVENT_RTPORT_HI_WM 3
+
+#define ADSP_AFE_VERSION    0x00200000
+
+/* Size of the range of port IDs for the audio interface. */
+#define  AFE_PORT_ID_AUDIO_IF_PORT_RANGE_SIZE	0xF
+
+/* Size of the range of port IDs for internal BT-FM ports. */
+#define AFE_PORT_ID_INTERNAL_BT_FM_RANGE_SIZE	0x6
+
+/* Size of the range of port IDs for SLIMbus multichannel
+ * ports.
+ */
+#define AFE_PORT_ID_SLIMBUS_RANGE_SIZE	0xA
+
+/* Size of the range of port IDs for real-time proxy ports. */
+#define  AFE_PORT_ID_RT_PROXY_PORT_RANGE_SIZE	0x2
+
+/* Size of the range of port IDs for pseudoports. */
+#define AFE_PORT_ID_PSEUDOPORT_RANGE_SIZE	0x5
+
+/* Start of the range of port IDs for the audio interface. */
+#define  AFE_PORT_ID_AUDIO_IF_PORT_RANGE_START	0x1000
+
+/* End of the range of port IDs for the audio interface. */
+#define  AFE_PORT_ID_AUDIO_IF_PORT_RANGE_END \
+	(AFE_PORT_ID_AUDIO_IF_PORT_RANGE_START +\
+	AFE_PORT_ID_AUDIO_IF_PORT_RANGE_SIZE - 1)
+
+/* Start of the range of port IDs for real-time proxy ports. */
+#define  AFE_PORT_ID_RT_PROXY_PORT_RANGE_START	0x2000
+
+/* End of the range of port IDs for real-time proxy ports. */
+#define  AFE_PORT_ID_RT_PROXY_PORT_RANGE_END \
+	(AFE_PORT_ID_RT_PROXY_PORT_RANGE_START +\
+	AFE_PORT_ID_RT_PROXY_PORT_RANGE_SIZE-1)
+
+/* Start of the range of port IDs for internal BT-FM devices. */
+#define AFE_PORT_ID_INTERNAL_BT_FM_RANGE_START	0x3000
+
+/* End of the range of port IDs for internal BT-FM devices. */
+#define AFE_PORT_ID_INTERNAL_BT_FM_RANGE_END \
+	(AFE_PORT_ID_INTERNAL_BT_FM_RANGE_START +\
+	AFE_PORT_ID_INTERNAL_BT_FM_RANGE_SIZE-1)
+
+/*	Start of the range of port IDs for SLIMbus devices. */
+#define AFE_PORT_ID_SLIMBUS_RANGE_START	0x4000
+
+/*	End of the range of port IDs for SLIMbus devices. */
+#define AFE_PORT_ID_SLIMBUS_RANGE_END \
+	(AFE_PORT_ID_SLIMBUS_RANGE_START +\
+	AFE_PORT_ID_SLIMBUS_RANGE_SIZE-1)
+
+/* Start of the range of port IDs for pseudoports. */
+#define AFE_PORT_ID_PSEUDOPORT_RANGE_START	0x8001
+
+/* End of the range of port IDs for pseudoports.  */
+#define AFE_PORT_ID_PSEUDOPORT_RANGE_END \
+	(AFE_PORT_ID_PSEUDOPORT_RANGE_START +\
+	AFE_PORT_ID_PSEUDOPORT_RANGE_SIZE-1)
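+
+/*
+ * Example (illustrative sketch only): the range macros above can be used to
+ * classify a port ID, as in this hypothetical helper.  The real per-port
+ * index lookup is afe_get_port_index(), mentioned earlier; this is only an
+ * illustration of the SLIMbus range check.
+ *
+ *	static int afe_port_is_slimbus(u16 port_id)
+ *	{
+ *		return port_id >= AFE_PORT_ID_SLIMBUS_RANGE_START &&
+ *		       port_id <= AFE_PORT_ID_SLIMBUS_RANGE_END;
+ *	}
+ */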
+
+#define AFE_PORT_ID_PRIMARY_MI2S_RX         0x1000
+#define AFE_PORT_ID_PRIMARY_MI2S_TX         0x1001
+#define AFE_PORT_ID_SECONDARY_MI2S_RX       0x1002
+#define AFE_PORT_ID_SECONDARY_MI2S_TX       0x1003
+#define AFE_PORT_ID_TERTIARY_MI2S_RX        0x1004
+#define AFE_PORT_ID_TERTIARY_MI2S_TX        0x1005
+#define AFE_PORT_ID_QUATERNARY_MI2S_RX      0x1006
+#define AFE_PORT_ID_QUATERNARY_MI2S_TX      0x1007
+#define AUDIO_PORT_ID_I2S_RX				0x1008
+#define AFE_PORT_ID_DIGITAL_MIC_TX          0x1009
+#define AFE_PORT_ID_PRIMARY_PCM_RX          0x100A
+#define AFE_PORT_ID_PRIMARY_PCM_TX          0x100B
+#define AFE_PORT_ID_SECONDARY_PCM_RX        0x100C
+#define AFE_PORT_ID_SECONDARY_PCM_TX        0x100D
+#define AFE_PORT_ID_MULTICHAN_HDMI_RX       0x100E
+#define  AFE_PORT_ID_RT_PROXY_PORT_001_RX   0x2000
+#define  AFE_PORT_ID_RT_PROXY_PORT_001_TX   0x2001
+#define AFE_PORT_ID_INTERNAL_BT_SCO_RX      0x3000
+#define AFE_PORT_ID_INTERNAL_BT_SCO_TX      0x3001
+#define AFE_PORT_ID_INTERNAL_BT_A2DP_RX     0x3002
+#define AFE_PORT_ID_INTERNAL_FM_RX          0x3004
+#define AFE_PORT_ID_INTERNAL_FM_TX          0x3005
+/* SLIMbus Rx port on channel 0. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_RX      0x4000
+/* SLIMbus Tx port on channel 0. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_TX      0x4001
+/* SLIMbus Rx port on channel 1. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX      0x4002
+/* SLIMbus Tx port on channel 1. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_TX      0x4003
+/* SLIMbus Rx port on channel 2. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_RX      0x4004
+/* SLIMbus Tx port on channel 2. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_TX      0x4005
+/* SLIMbus Rx port on channel 3. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_RX      0x4006
+/* SLIMbus Tx port on channel 3. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_TX      0x4007
+/* SLIMbus Rx port on channel 4. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_RX      0x4008
+/* SLIMbus Tx port on channel 4. */
+#define AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_TX      0x4009
+/* Generic pseudoport 1. */
+#define AFE_PORT_ID_PSEUDOPORT_01      0x8001
+/* Generic pseudoport 2. */
+#define AFE_PORT_ID_PSEUDOPORT_02      0x8002
+
+/* Primary AUX PCM Tx port ID. */
+#define AFE_PORT_ID_PRIMARY_PCM_TX      0x100B
+/* Pseudoport that corresponds to the voice Rx path.
+ * For recording, the voice Rx path samples are written to this
+ * port and consumed by the audio path.
+ */
+
+#define AFE_PORT_ID_VOICE_RECORD_RX	0x8003
+
+/* Pseudoport that corresponds to the voice Tx path.
+ * For recording, the voice Tx path samples are written to this
+ * port and consumed by the audio path.
+ */
+
+#define AFE_PORT_ID_VOICE_RECORD_TX	0x8004
+/* Pseudoport that corresponds to in-call voice delivery samples.
+ * During in-call audio delivery, the audio path delivers samples
+ * to this port from where the voice path delivers them on the
+ * Rx path.
+ */
+#define AFE_PORT_ID_VOICE_PLAYBACK_TX   0x8005
+#define AFE_PORT_ID_INVALID             0xFFFF
+
+#define AAC_ENC_MODE_AAC_LC 0x02
+#define AAC_ENC_MODE_AAC_P 0x05
+#define AAC_ENC_MODE_EAAC_P 0x1D
+
+#define AFE_PSEUDOPORT_CMD_START 0x000100cf
+struct afe_pseudoport_start_command {
+	struct apr_hdr hdr;
+	u16 port_id;		/* Pseudo Port 1 = 0x8000 */
+				/* Pseudo Port 2 = 0x8001 */
+				/* Pseudo Port 3 = 0x8002 */
+	u16 timing;		/* FTRT = 0 , AVTimer = 1, */
+} __packed;
+
+#define AFE_PSEUDOPORT_CMD_STOP 0x000100d0
+struct afe_pseudoport_stop_command {
+	struct apr_hdr hdr;
+	u16 port_id;		/* Pseudo Port 1 = 0x8000 */
+				/* Pseudo Port 2 = 0x8001 */
+				/* Pseudo Port 3 = 0x8002 */
+	u16 reserved;
+} __packed;
+
+
+#define AFE_MODULE_SIDETONE_IIR_FILTER	0x00010202
+#define AFE_PARAM_ID_ENABLE	0x00010203
+
+/*  Payload of the #AFE_PARAM_ID_ENABLE
+ * parameter, which enables or
+ * disables any module.
+ * The fixed size of this structure is four bytes.
+ */
+
+struct afe_mod_enable_param {
+	u16                  enable;
+	/* Enables (1) or disables (0) the module. */
+
+	u16                  reserved;
+	/* This field must be set to zero. */
+} __packed;
+
+/* ID of the configuration parameter used by the
+ * #AFE_MODULE_SIDETONE_IIR_FILTER module.
+ */
+#define AFE_PARAM_ID_SIDETONE_IIR_FILTER_CONFIG	0x00010204
+
+struct afe_sidetone_iir_filter_config_params {
+	u16                  num_biquad_stages;
+/* Number of stages.
+ * Supported values: Minimum of 5 and maximum of 10
+ */
+
+	u16                  pregain;
+/* Pregain for the compensating filter response.
+ * Supported values: Any number in Q13 format
+ */
+} __packed;
+
+#define AFE_MODULE_LOOPBACK	0x00010205
+#define AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH	0x00010206
+
+/* Payload of the #AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH parameter,
+ * which gets/sets loopback gain of a port to an Rx port.
+ * The Tx port ID of the loopback is part of the set_param command.
+ */
+
+/*  Payload of the #AFE_PORT_CMD_SET_PARAM_V2 command, which sets
+ * configuration/calibration settings for an AFE port.
+ */
+struct afe_port_cmd_set_param_v2 {
+	u16 port_id;
+/* Port interface and direction (Rx or Tx) to start.
+ */
+
+	u16 payload_size;
+/* Actual size of the payload in bytes.
+ * This is used for parsing the parameter payload.
+ * Supported values: > 0
+ */
+
+u32 payload_address_lsw;
+/* LSW of 64 bit Payload address.
+ * Address should be 32-byte,
+ * 4kbyte aligned and must be contiguous memory.
+ */
+
+u32 payload_address_msw;
+/* MSW of 64 bit Payload address.
+ * In case of 32-bit shared memory address,
+ * this field must be set to zero.
+ * In case of 36-bit shared memory address,
+ * bit-4 to bit-31 must be set to zero.
+ * Address should be 32-byte, 4kbyte aligned
+ * and must be contiguous memory.
+ */
+
+u32                  mem_map_handle;
+/* Memory map handle returned by the
+ * AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS command.
+ * Supported values:
+ * - NULL -- The parameter data is in-band.
+ * - Non-NULL -- The parameter data is out-of-band; this is a
+ *   pointer to the physical address of the payload data in
+ *   shared memory.
+ * If the parameter data is in-band, an optional field
+ * afe_port_param_data_v2 param_data[...] follows this structure.
+ * For detailed payload content, see the
+ * afe_port_param_data_v2 structure.
+ */
+} __packed;
+
+#define AFE_PORT_CMD_SET_PARAM_V2	0x000100EF
+
+struct afe_port_param_data_v2 {
+	u32 module_id;
+/* ID of the module to be configured.
+ * Supported values: Valid module ID
+ */
+
+u32 param_id;
+/* ID of the parameter corresponding to the supported parameters
+ * for the module ID.
+ * Supported values: Valid parameter ID
+ */
+
+u16 param_size;
+/* Actual size of the data for the
+ * module_id/param_id pair. The size is a
+ * multiple of four bytes.
+ * Supported values: > 0
+ */
+
+u16 reserved;
+/* This field must be set to zero.
+ */
+} __packed;
+
+struct afe_loopback_gain_per_path_param {
+	struct apr_hdr	hdr;
+	struct afe_port_cmd_set_param_v2 param;
+	struct afe_port_param_data_v2    pdata;
+	u16                  rx_port_id;
+/* Rx port of the loopback. */
+
+u16                  gain;
+/* Loopback gain per path of the port.
+ * Supported values: Any number in Q13 format
+ */
+} __packed;
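+
+/*
+ * Example (illustrative sketch only): filling afe_loopback_gain_per_path_param
+ * for unity gain.  In Q13 format, 1.0 is 1 << 13 = 0x2000 and 0x1000 is 0.5
+ * (roughly -6 dB).  SLIMBUS_0_TX and SLIMBUS_0_RX are just example ports, the
+ * zeroed address/handle fields select in-band delivery, and the APR header is
+ * assumed to be filled in elsewhere.
+ *
+ *	struct afe_loopback_gain_per_path_param lb;
+ *
+ *	lb.param.port_id             = SLIMBUS_0_TX;
+ *	lb.param.payload_size        = sizeof(lb.pdata) +
+ *				       sizeof(lb.rx_port_id) + sizeof(lb.gain);
+ *	lb.param.payload_address_lsw = 0;
+ *	lb.param.payload_address_msw = 0;
+ *	lb.param.mem_map_handle      = 0;
+ *	lb.pdata.module_id           = AFE_MODULE_LOOPBACK;
+ *	lb.pdata.param_id            = AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH;
+ *	lb.pdata.param_size          = sizeof(lb.rx_port_id) + sizeof(lb.gain);
+ *	lb.rx_port_id                = SLIMBUS_0_RX;
+ *	lb.gain                      = 0x2000;
+ */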
+
+/* Parameter ID used to configure and enable/disable the
+ * loopback path. The difference with respect to the existing
+ * API, AFE_PORT_CMD_LOOPBACK, is that it allows an Rx port to be
+ * configured as the source port in the loopback path. The port_id
+ * in the AFE_PORT_CMD_SET_PARAM command is the source port, which
+ * can be a Tx or Rx port. In addition, the type of routing mode
+ * can be configured to handle different use cases.
+ */
+#define AFE_PARAM_ID_LOOPBACK_CONFIG	0x0001020B
+#define AFE_API_VERSION_LOOPBACK_CONFIG	0x1
+
+enum afe_loopback_routing_mode {
+	LB_MODE_DEFAULT = 1,
+	/* Regular loopback from source to destination port */
+	LB_MODE_SIDETONE,
+	/* Sidetone feed from Tx source to Rx destination port */
+	LB_MODE_EC_REF_VOICE_AUDIO,
+	/* Echo canceller reference, voice + audio + DTMF */
+	LB_MODE_EC_REF_VOICE
+	/* Echo canceller reference, voice alone */
+} __packed;
+
+/*  Payload of the #AFE_PARAM_ID_LOOPBACK_CONFIG ,
+ * which enables/disables one AFE loopback.
+ */
+struct afe_loopback_cfg_v1 {
+	struct apr_hdr	hdr;
+	struct afe_port_cmd_set_param_v2 param;
+	struct afe_port_param_data_v2    pdata;
+	u32		loopback_cfg_minor_version;
+/* Minor version used for tracking the version of the RMC module
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_LOOPBACK_CONFIG
+ */
+	u16                  dst_port_id;
+	/* Destination Port Id. */
+	u16                  routing_mode;
+/* Specifies the data path type from the source to the
+ * destination port.
+ * Supported values:
+ * #LB_MODE_DEFAULT
+ * #LB_MODE_SIDETONE
+ * #LB_MODE_EC_REF_VOICE_AUDIO
+ * #LB_MODE_EC_REF_VOICE
+ */
+
+	u16                  enable;
+/* Specifies whether to enable (1) or
+ * disable (0) an AFE loopback.
+ */
+	u16                  reserved;
+/* Reserved for 32-bit alignment. This field must be set to 0.
+ */
+
+} __packed;
+
+#define AFE_MODULE_SPEAKER_PROTECTION	0x00010209
+#define AFE_PARAM_ID_SPKR_PROT_CONFIG	0x0001020a
+#define AFE_API_VERSION_SPKR_PROT_CONFIG	0x1
+#define AFE_SPKR_PROT_EXCURSIONF_LEN	512
+struct afe_spkr_prot_cfg_param_v1 {
+	u32       spkr_prot_minor_version;
+/*
+ * Minor version used for tracking the version of the
+ * speaker protection module configuration interface.
+ * Supported values: #AFE_API_VERSION_SPKR_PROT_CONFIG
+ */
+
+int16_t        win_size;
+/* Analysis and synthesis window size (nWinSize).
+ * Supported values: 1024, 512, 256 samples
+ */
+
+int16_t        margin;
+/* Allowable margin for excursion prediction,
+ * in L16Q15 format. This is a
+ * control parameter to allow
+ * for overestimation of peak excursion.
+ */
+
+int16_t        spkr_exc_limit;
+/* Speaker excursion limit, in L16Q15 format.*/
+
+int16_t        spkr_resonance_freq;
+/* Resonance frequency of the speaker; used
+ * to define a frequency range
+ * for signal modification.
+ *
+ * Supported values: 0 to 2000 Hz */
+
+int16_t        lim_thresh;
+/* Threshold of the hard limiter; used to
+ * prevent overshooting beyond a
+ * signal level that was set by the limiter
+ * prior to speaker protection.
+ * Supported values: 0 to 32767
+ */
+
+int16_t        hpf_cut_off_freq;
+/* High pass filter cutoff frequency.
+ * Supported values: 100, 200, 300 Hz
+ */
+
+int16_t        hpf_enable;
+/* Specifies whether the high pass filter
+ * is enabled (0) or disabled (1).
+ */
+
+int16_t        reserved;
+/* This field must be set to zero. */
+
+int32_t        amp_gain;
+/* Amplifier gain in L32Q15 format.
+ * This is the RMS voltage at the
+ * loudspeaker when a 0dBFS tone
+ * is played in the digital domain.
+ */
+
+int16_t        excursionf[AFE_SPKR_PROT_EXCURSIONF_LEN];
+/* Array of the excursion transfer function.
+ * The peak excursion of the
+ * loudspeaker diaphragm is
+ * measured in millimeters for 1 Vrms Sine
+ * tone at all FFT bin frequencies.
+ * Supported values: Q15 format
+ */
+} __packed;
+
+
+#define AFE_SERVICE_CMD_REGISTER_RT_PORT_DRIVER	0x000100E0
+
+/*  Payload of the #AFE_SERVICE_CMD_REGISTER_RT_PORT_DRIVER
+ * command, which registers a real-time port driver
+ * with the AFE service.
+ */
+struct afe_service_cmd_register_rt_port_driver {
+	struct apr_hdr hdr;
+	u16                  port_id;
+/* Port ID with which the real-time driver exchanges data
+ * (registers for events).
+ * Supported values: #AFE_PORT_ID_RT_PROXY_PORT_RANGE_START to
+ * #AFE_PORT_ID_RT_PROXY_PORT_RANGE_END
+ */
+
+	u16                  reserved;
+	/* This field must be set to zero. */
+} __packed;
+
+#define AFE_SERVICE_CMD_UNREGISTER_RT_PORT_DRIVER	0x000100E1
+
+/*  Payload of the #AFE_SERVICE_CMD_UNREGISTER_RT_PORT_DRIVER
+ * command, which unregisters a real-time port driver from
+ * the AFE service.
+ */
+struct afe_service_cmd_unregister_rt_port_driver {
+	struct apr_hdr hdr;
+	u16                  port_id;
+/* Port ID from which the real-time
+ * driver unregisters for events.
+ * Supported values: #AFE_PORT_ID_RT_PROXY_PORT_RANGE_START to
+ * #AFE_PORT_ID_RT_PROXY_PORT_RANGE_END
+ */
+
+	u16                  reserved;
+	/* This field must be set to zero.	*/
+} __packed;
+
+#define AFE_EVENT_RT_PROXY_PORT_STATUS	0x00010105
+#define AFE_EVENT_TYPE_RT_PROXY_PORT_START	0
+#define AFE_EVENT_TYPE_RT_PROXY_PORT_STOP	1
+#define AFE_EVENT_TYPE_RT_PROXY_PORT_LOW_WATER_MARK	2
+#define AFE_EVENT_TYPE_RT_PROXY_PORT_HIGH_WATER_MARK	3
+#define AFE_EVENT_TYPE_RT_PROXY_PORT_INVALID	0xFFFF
+
+/*  Payload of the #AFE_EVENT_RT_PROXY_PORT_STATUS
+ * message, which sends an event from the AFE service
+ * to a registered client.
+ */
+struct afe_event_rt_proxy_port_status {
+	u16                  port_id;
+/* Port ID to which the event is sent.
+ * Supported values: #AFE_PORT_ID_RT_PROXY_PORT_RANGE_START to
+ * #AFE_PORT_ID_RT_PROXY_PORT_RANGE_END
+ */
+
+	u16                  event_type;
+/* Type of event.
+ * Supported values:
+ * - #AFE_EVENT_TYPE_RT_PROXY_PORT_START
+ * - #AFE_EVENT_TYPE_RT_PROXY_PORT_STOP
+ * - #AFE_EVENT_TYPE_RT_PROXY_PORT_LOW_WATER_MARK
+ * - #AFE_EVENT_TYPE_RT_PROXY_PORT_HIGH_WATER_MARK
+ */
+} __packed;
+
+#define AFE_PORT_DATA_CMD_RT_PROXY_PORT_WRITE_V2 0x000100ED
+
+struct afe_port_data_cmd_rt_proxy_port_write_v2 {
+	struct apr_hdr hdr;
+	u16                  port_id;
+/* Tx (mic) proxy port ID with which the real-time
+ * driver exchanges data.
+ * Supported values: #AFE_PORT_ID_RT_PROXY_PORT_RANGE_START to
+ * #AFE_PORT_ID_RT_PROXY_PORT_RANGE_END
+ */
+
+	u16                  reserved;
+	/* This field must be set to zero. */
+
+	u32                  buffer_address_lsw;
+/* LSW Address of the buffer containing the
+ * data from the real-time source
+ * device on a client.
+ */
+
+	u32                  buffer_address_msw;
+/* MSW Address of the buffer containing the
+ * data from the real-time source
+ * device on a client.
+ */
+
+	u32					mem_map_handle;
+/* A memory map handle encapsulating shared memory
+ * attributes is returned if
+ * AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS
+ * command is successful.
+ * Supported Values:
+ * - Any 32 bit value
+ */
+
+	u32                  available_bytes;
+/* Number of valid bytes available
+ * in the buffer (including all
+ * channels: number of bytes per
+ * channel = availableBytes/numChannels).
+ * Supported values: > 0
+ *
+ * This field must be equal to the frame
+ * size specified in the #AFE_PORT_AUDIO_IF_CONFIG
+ * command that was sent to configure this
+ * port.
+ */
+} __packed;
+
+#define AFE_PORT_DATA_CMD_RT_PROXY_PORT_READ_V2	0x000100EE
+
+/*  Payload of the
+ * #AFE_PORT_DATA_CMD_RT_PROXY_PORT_READ_V2 command, which
+ * delivers an empty buffer to the AFE service. On
+ * acknowledgment, data is filled in the buffer.
+ */
+struct afe_port_data_cmd_rt_proxy_port_read_v2 {
+	struct apr_hdr hdr;
+	u16                  port_id;
+/* Rx proxy port ID with which the real-time
+ * driver exchanges data.
+ * Supported values: #AFE_PORT_ID_RT_PROXY_PORT_RANGE_START to
+ * #AFE_PORT_ID_RT_PROXY_PORT_RANGE_END
+ * (This must be an Rx (speaker) port.)
+ */
+
+	u16                  reserved;
+	/* This field must be set to zero. */
+
+	u32                  buffer_address_lsw;
+/* LSW Address of the buffer containing the data sent from the AFE
+ * service to a real-time sink device on the client.
+ */
+
+
+	u32                  buffer_address_msw;
+/* MSW Address of the buffer containing the data sent from the AFE
+ * service to a real-time sink device on the client.
+ */
+
+		u32				mem_map_handle;
+/* A memory map handle encapsulating shared memory attributes is
+ * returned if AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS command is
+ * successful.
+ * Supported Values:
+ * - Any 32 bit value
+ */
+
+	u32                  available_bytes;
+/* Number of valid bytes available in the buffer (including all
+ * channels).
+ * Supported values: > 0
+ * This field must be equal to the frame size specified in the
+ * #AFE_PORT_AUDIO_IF_CONFIG command that was sent to configure
+ * this port.
+ */
+} __packed;
+
+/* This module ID is used for device configuration of interfaces
+ * such as I2S, PCM, HDMI, SLIMbus, etc. The module supports the
+ * following parameter IDs:
+ * - #AFE_PARAM_ID_I2S_CONFIG
+ * - #AFE_PARAM_ID_PCM_CONFIG
+ * - #AFE_PARAM_ID_DIGI_MIC_CONFIG
+ * - #AFE_PARAM_ID_HDMI_CONFIG
+ * - #AFE_PARAM_ID_INTERNAL_BT_FM_CONFIG
+ * - #AFE_PARAM_ID_SLIMBUS_CONFIG
+ * - #AFE_PARAM_ID_RT_PROXY_CONFIG
+ */
+
+#define AFE_MODULE_AUDIO_DEV_INTERFACE    0x0001020C
+#define AFE_PORT_SAMPLE_RATE_8K           8000
+#define AFE_PORT_SAMPLE_RATE_16K          16000
+#define AFE_PORT_SAMPLE_RATE_48K          48000
+#define AFE_PORT_SAMPLE_RATE_96K          96000
+#define AFE_PORT_SAMPLE_RATE_192K         192000
+#define AFE_LINEAR_PCM_DATA				0x0
+#define AFE_NON_LINEAR_DATA				0x1
+#define AFE_LINEAR_PCM_DATA_PACKED_60958 0x2
+#define AFE_NON_LINEAR_DATA_PACKED_60958 0x3
+
+/* This param id is used to configure I2S interface */
+#define AFE_PARAM_ID_I2S_CONFIG	0x0001020D
+#define AFE_API_VERSION_I2S_CONFIG	0x1
+/*	Enumerations for setting the I2S configuration
+ * channel_mode parameter (serial data wires SD0 to SD3,
+ * quad, 6-channel, and 8-channel modes).
+ */
+#define AFE_PORT_I2S_SD0                     0x1
+#define AFE_PORT_I2S_SD1                     0x2
+#define AFE_PORT_I2S_SD2                     0x3
+#define AFE_PORT_I2S_SD3                     0x4
+#define AFE_PORT_I2S_QUAD01                  0x5
+#define AFE_PORT_I2S_QUAD23                  0x6
+#define AFE_PORT_I2S_6CHS                    0x7
+#define AFE_PORT_I2S_8CHS                    0x8
+#define AFE_PORT_I2S_MONO                    0x0
+#define AFE_PORT_I2S_STEREO                  0x1
+#define AFE_PORT_CONFIG_I2S_WS_SRC_EXTERNAL  0x0
+#define AFE_PORT_CONFIG_I2S_WS_SRC_INTERNAL  0x1
+
+/*  Payload of the #AFE_PARAM_ID_I2S_CONFIG parameter
+ * (I2S configuration).
+ */
+struct afe_param_id_i2s_cfg {
+	u32                  i2s_cfg_minor_version;
+/* Minor version used for tracking the version of the I2S
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_I2S_CONFIG
+ */
+
+	u16                  bit_width;
+/* Bit width of the sample.
+ * Supported values: 16, 24
+ */
+
+	u16                  channel_mode;
+/* I2S lines and multichannel operation.
+ * Supported values:
+ * - #AFE_PORT_I2S_SD0
+ * - #AFE_PORT_I2S_SD1
+ * - #AFE_PORT_I2S_SD2
+ * - #AFE_PORT_I2S_SD3
+ * - #AFE_PORT_I2S_QUAD01
+ * - #AFE_PORT_I2S_QUAD23
+ * - #AFE_PORT_I2S_6CHS
+ * - #AFE_PORT_I2S_8CHS
+ */
+
+	u16                  mono_stereo;
+/* Specifies mono or stereo. This applies only when
+ * a single I2S line is used.
+ * Supported values:
+ * - #AFE_PORT_I2S_MONO
+ * - #AFE_PORT_I2S_STEREO
+ */
+
+	u16                  ws_src;
+/* Word select source: internal or external.
+ * Supported values:
+ * - #AFE_PORT_CONFIG_I2S_WS_SRC_EXTERNAL
+ * - #AFE_PORT_CONFIG_I2S_WS_SRC_INTERNAL
+ */
+
+	u32                  sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K
+ * - #AFE_PORT_SAMPLE_RATE_16K
+ * - #AFE_PORT_SAMPLE_RATE_48K
+ * - #AFE_PORT_SAMPLE_RATE_96K
+ * - #AFE_PORT_SAMPLE_RATE_192K
+ */
+
+	u16					data_format;
+/* Data format.
+ * Supported values:
+ * - #AFE_LINEAR_PCM_DATA
+ * - #AFE_NON_LINEAR_DATA
+ * - #AFE_LINEAR_PCM_DATA_PACKED_60958
+ * - #AFE_NON_LINEAR_DATA_PACKED_60958
+ */
+		u16                  reserved;
+	/* This field must be set to zero. */
+} __packed;
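+
+/*
+ * Example (illustrative sketch only): a 16-bit, 48 kHz stereo I2S
+ * configuration on serial data wire SD0 with an internally generated word
+ * select.  The values are illustrative, not a recommended configuration.
+ *
+ *	struct afe_param_id_i2s_cfg i2s = {
+ *		.i2s_cfg_minor_version = AFE_API_VERSION_I2S_CONFIG,
+ *		.bit_width    = 16,
+ *		.channel_mode = AFE_PORT_I2S_SD0,
+ *		.mono_stereo  = AFE_PORT_I2S_STEREO,
+ *		.ws_src       = AFE_PORT_CONFIG_I2S_WS_SRC_INTERNAL,
+ *		.sample_rate  = AFE_PORT_SAMPLE_RATE_48K,
+ *		.data_format  = AFE_LINEAR_PCM_DATA,
+ *	};
+ */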
+
+/*
+ * This param id is used to configure PCM interface
+ */
+#define AFE_PARAM_ID_PCM_CONFIG        0x0001020E
+#define AFE_API_VERSION_PCM_CONFIG	0x1
+/* Enumeration for the auxiliary PCM synchronization signal
+ * provided by an external source.
+ */
+
+#define AFE_PORT_PCM_SYNC_SRC_EXTERNAL 0x0
+/*	Enumeration for the auxiliary PCM synchronization signal
+ * provided by an internal source.
+ */
+#define AFE_PORT_PCM_SYNC_SRC_INTERNAL  0x1
+/*	Enumeration for the PCM configuration aux_mode parameter,
+ * which configures the auxiliary PCM interface to use
+ * short synchronization.
+ */
+#define AFE_PORT_PCM_AUX_MODE_PCM  0x0
+/*
+ * Enumeration for the PCM configuration aux_mode parameter,
+ * which configures the auxiliary PCM interface to use long
+ * synchronization.
+ */
+#define AFE_PORT_PCM_AUX_MODE_AUX    0x1
+/*
+ * Enumeration for setting the PCM configuration frame to 8.
+ */
+#define AFE_PORT_PCM_BITS_PER_FRAME_8  0x0
+/*
+ * Enumeration for setting the PCM configuration frame to 16.
+ */
+#define AFE_PORT_PCM_BITS_PER_FRAME_16   0x1
+
+/*	Enumeration for setting the PCM configuration frame to 32.*/
+#define AFE_PORT_PCM_BITS_PER_FRAME_32 0x2
+
+/*	Enumeration for setting the PCM configuration frame to 64.*/
+#define AFE_PORT_PCM_BITS_PER_FRAME_64   0x3
+
+/*	Enumeration for setting the PCM configuration frame to 128.*/
+#define AFE_PORT_PCM_BITS_PER_FRAME_128 0x4
+
+/*	Enumeration for setting the PCM configuration frame to 256.*/
+#define AFE_PORT_PCM_BITS_PER_FRAME_256 0x5
+
+/*	Enumeration for setting the PCM configuration
+ * quantype parameter to A-law with no padding.
+ */
+#define AFE_PORT_PCM_ALAW_NOPADDING 0x0
+
+/* Enumeration for setting the PCM configuration quantype
+ * parameter to mu-law with no padding.
+ */
+#define AFE_PORT_PCM_MULAW_NOPADDING 0x1
+/*	Enumeration for setting the PCM configuration quantype
+ * parameter to linear with no padding.
+ */
+#define AFE_PORT_PCM_LINEAR_NOPADDING 0x2
+/*	Enumeration for setting the PCM configuration quantype
+ * parameter to A-law with padding.
+ */
+#define AFE_PORT_PCM_ALAW_PADDING  0x3
+/*	Enumeration for setting the PCM configuration quantype
+ * parameter to mu-law with padding.
+ */
+#define AFE_PORT_PCM_MULAW_PADDING 0x4
+/*	Enumeration for setting the PCM configuration quantype
+ * parameter to linear with padding.
+ */
+#define AFE_PORT_PCM_LINEAR_PADDING 0x5
+/*	Enumeration for disabling the PCM configuration
+ * ctrl_data_out_enable parameter.
+ * The PCM block is the only master.
+ */
+#define AFE_PORT_PCM_CTRL_DATA_OE_DISABLE 0x0
+/*
+ * Enumeration for enabling the PCM configuration
+ * ctrl_data_out_enable parameter. The PCM block shares
+ * the signal with other masters.
+ */
+#define AFE_PORT_PCM_CTRL_DATA_OE_ENABLE  0x1
+
+/*  Payload of the #AFE_PARAM_ID_PCM_CONFIG parameter
+ * (PCM configuration).
+ */
+
+struct afe_param_id_pcm_cfg {
+	u32                  pcm_cfg_minor_version;
+/* Minor version used for tracking the version of the AUX PCM
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_PCM_CONFIG
+ */
+
+	u16                  aux_mode;
+/* PCM synchronization setting.
+ * Supported values:
+ * - #AFE_PORT_PCM_AUX_MODE_PCM
+ * - #AFE_PORT_PCM_AUX_MODE_AUX
+ */
+
+	u16                  sync_src;
+/* Synchronization source.
+ * Supported values:
+ * - #AFE_PORT_PCM_SYNC_SRC_EXTERNAL
+ * - #AFE_PORT_PCM_SYNC_SRC_INTERNAL
+ */
+
+	u16                  frame_setting;
+/* Number of bits per frame.
+ * Supported values:
+ * - #AFE_PORT_PCM_BITS_PER_FRAME_8
+ * - #AFE_PORT_PCM_BITS_PER_FRAME_16
+ * - #AFE_PORT_PCM_BITS_PER_FRAME_32
+ * - #AFE_PORT_PCM_BITS_PER_FRAME_64
+ * - #AFE_PORT_PCM_BITS_PER_FRAME_128
+ * - #AFE_PORT_PCM_BITS_PER_FRAME_256
+ */
+
+	u16                  quant_type;
+/* PCM quantization type.
+ * Supported values:
+ * - #AFE_PORT_PCM_ALAW_NOPADDING
+ * - #AFE_PORT_PCM_MULAW_NOPADDING
+ * - #AFE_PORT_PCM_LINEAR_NOPADDING
+ * - #AFE_PORT_PCM_ALAW_PADDING
+ * - #AFE_PORT_PCM_MULAW_PADDING
+ * - #AFE_PORT_PCM_LINEAR_PADDING
+ */
+
+	u16                  ctrl_data_out_enable;
+/* Specifies whether the PCM block shares the data-out signal
+ * with other masters.
+ * Supported values:
+ * - #AFE_PORT_PCM_CTRL_DATA_OE_DISABLE
+ * - #AFE_PORT_PCM_CTRL_DATA_OE_ENABLE
+ */
+		u16                  reserved;
+	/* This field must be set to zero. */
+
+	u32                  sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K
+ * - #AFE_PORT_SAMPLE_RATE_16K
+ */
+
+	u16                  bit_width;
+/* Bit width of the sample.
+ * Supported values: 16
+ */
+
+	u16                  num_channels;
+/* Number of channels.
+ * Supported values: 1 to 4
+ */
+
+	u16                  slot_number_mapping[4];
+/* Specifies the slot number for each channel in a
+ * multichannel scenario.
+ * Supported values: 1 to 32
+ */
+} __packed;
+
+/*
+ * This param id is used to configure DIGI MIC interface
+ */
+#define AFE_PARAM_ID_DIGI_MIC_CONFIG	0x0001020F
+/*  This version information is used to handle new additions to
+ *  the configuration interface in a backward-compatible manner.
+ */
+#define AFE_API_VERSION_DIGI_MIC_CONFIG 0x1
+
+/* Enumeration for setting the digital mic configuration
+ * channel_mode parameter to left 0.
+ */
+
+#define AFE_PORT_DIGI_MIC_MODE_LEFT0  0x1
+
+/*Enumeration for setting the digital mic configuration
+ * channel_mode parameter to right 0.
+ */
+
+
+#define AFE_PORT_DIGI_MIC_MODE_RIGHT0  0x2
+
+/* Enumeration for setting the digital mic configuration
+ * channel_mode parameter to left 1.
+ */
+
+#define AFE_PORT_DIGI_MIC_MODE_LEFT1  0x3
+
+/* Enumeration for setting the digital mic configuration
+ * channel_mode parameter to right 1.
+ */
+
+#define AFE_PORT_DIGI_MIC_MODE_RIGHT1 0x4
+
+/* Enumeration for setting the digital mic configuration
+ * channel_mode parameter to stereo 0.
+ */
+#define AFE_PORT_DIGI_MIC_MODE_STEREO0  0x5
+
+/* Enumeration for setting the digital mic configuration
+ * channel_mode parameter to stereo 1.
+ */
+
+
+#define AFE_PORT_DIGI_MIC_MODE_STEREO1    0x6
+
+/* Enumeration for setting the digital mic configuration
+ * channel_mode parameter to quad.
+ */
+
+#define AFE_PORT_DIGI_MIC_MODE_QUAD     0x7
+
+/*  Payload of the #AFE_PARAM_ID_DIGI_MIC_CONFIG parameter
+ * (DIGI MIC configuration).
+ */
+struct afe_param_id_digi_mic_cfg {
+	u32                  digi_mic_cfg_minor_version;
+/* Minor version used for tracking the version of the DIGI Mic
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_DIGI_MIC_CONFIG
+ */
+
+	u16                  bit_width;
+/* Bit width of the sample.
+ * Supported values: 16
+ */
+
+	u16                  channel_mode;
+/* Digital mic and multichannel operation.
+ * Supported values:
+ * - #AFE_PORT_DIGI_MIC_MODE_LEFT0
+ * - #AFE_PORT_DIGI_MIC_MODE_RIGHT0
+ * - #AFE_PORT_DIGI_MIC_MODE_LEFT1
+ * - #AFE_PORT_DIGI_MIC_MODE_RIGHT1
+ * - #AFE_PORT_DIGI_MIC_MODE_STEREO0
+ * - #AFE_PORT_DIGI_MIC_MODE_STEREO1
+ * - #AFE_PORT_DIGI_MIC_MODE_QUAD
+ */
+
+	u32                  sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K
+ * - #AFE_PORT_SAMPLE_RATE_16K
+ * - #AFE_PORT_SAMPLE_RATE_48K
+ */
+} __packed;
+
+/*
+* This param id is used to configure HDMI interface
+*/
+#define AFE_PARAM_ID_HDMI_CONFIG     0x00010210
+
+/*  This version information is used to handle new additions to
+ *  the configuration interface in a backward-compatible manner.
+ */
+#define AFE_API_VERSION_HDMI_CONFIG 0x1
+
+/* Payload of the #AFE_PARAM_ID_HDMI_CONFIG command,
+ * which configures a multichannel HDMI audio interface.
+ */
+struct afe_param_id_hdmi_multi_chan_audio_cfg {
+	u32                  hdmi_cfg_minor_version;
+/* Minor version used for tracking the version of the HDMI
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_HDMI_CONFIG
+ */
+
+u16                  data_type;
+/* Data type.
+ * Supported values:
+ * - #AFE_LINEAR_PCM_DATA
+ * - #AFE_NON_LINEAR_DATA
+ * - #AFE_LINEAR_PCM_DATA_PACKED_60958
+ * - #AFE_NON_LINEAR_DATA_PACKED_60958
+ */
+
+u16                  channel_allocation;
+/* HDMI channel allocation information for programming an HDMI
+ * frame. The default is 0 (Stereo).
+ *
+ * This information is defined in the HDMI standard, CEA 861-D.
+ * The number of channels is also inferred from this parameter.
+*/
+
+
+u32                  sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K
+ * - #AFE_PORT_SAMPLE_RATE_16K
+ * - #AFE_PORT_SAMPLE_RATE_48K
+ * - #AFE_PORT_SAMPLE_RATE_96K
+ * - 22050, 44100, 176400 for compressed streams
+ */
+
+	u16                  bit_width;
+/* Bit width of the sample.
+ * Supported values: 16, 24
+ */
+		u16                  reserved;
+	/* This field must be set to zero. */
+} __packed;
+
+/*
+* This param id is used to configure BT or FM(RIVA) interface
+*/
+#define AFE_PARAM_ID_INTERNAL_BT_FM_CONFIG  0x00010211
+
+/*  This version information is used to handle new additions to
+ *  the configuration interface in a backward-compatible manner.
+ */
+#define AFE_API_VERSION_INTERNAL_BT_FM_CONFIG	0x1
+
+/*  Payload of the #AFE_PARAM_ID_INTERNAL_BT_FM_CONFIG
+ * command's BT voice/BT audio/FM configuration parameter.
+ */
+struct afe_param_id_internal_bt_fm_cfg {
+	u32                  bt_fm_cfg_minor_version;
+/* Minor version used for tracking the version of the BT and FM
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_INTERNAL_BT_FM_CONFIG
+ */
+
+	u16                  num_channels;
+/* Number of channels.
+ * Supported values: 1 to 2
+ */
+
+	u16                  bit_width;
+/* Bit width of the sample.
+ * Supported values: 16
+ */
+
+	u32                  sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K (only for BTSCO)
+ * - #AFE_PORT_SAMPLE_RATE_16K (only for BTSCO)
+ * - #AFE_PORT_SAMPLE_RATE_48K (FM and A2DP)
+ */
+} __packed;
+
+/* This param id is used to configure the SLIMbus interface using
+ * a shared-channel approach.
+ */
+
+
+#define AFE_PARAM_ID_SLIMBUS_CONFIG    0x00010212
+
+/*  This version information is used to handle new additions to
+ *  the configuration interface in a backward-compatible manner.
+ */
+#define AFE_API_VERSION_SLIMBUS_CONFIG 0x1
+
+/*	Enumeration for setting SLIMbus device ID 1.
+*/
+#define AFE_SLIMBUS_DEVICE_1           0x0
+
+/*	Enumeration for setting SLIMbus device ID 2.
+*/
+#define AFE_SLIMBUS_DEVICE_2          0x1
+
+/*	Enumeration for setting the SLIMbus data formats.
+*/
+#define AFE_SB_DATA_FORMAT_NOT_INDICATED 0x0
+
+/* Enumeration for setting the maximum number of streams per
+ * device.
+ */
+
+#define AFE_PORT_MAX_AUDIO_CHAN_CNT	0x8
+
+/* Payload of the #AFE_PARAM_ID_SLIMBUS_CONFIG parameter, which
+ * configures a SLIMbus port.
+ */
+
+struct afe_param_id_slimbus_cfg {
+	u32                  sb_cfg_minor_version;
+/* Minor version used for tracking the version of the SLIMBUS
+ * configuration interface.
+ * Supported values: #AFE_API_VERSION_SLIMBUS_CONFIG
+ */
+
+	u16                  slimbus_dev_id;
+/* SLIMbus hardware device ID, which is required to handle
+ * multiple SLIMbus hardware blocks.
+ * Supported values: - #AFE_SLIMBUS_DEVICE_1 - #AFE_SLIMBUS_DEVICE_2
+ */
+
+
+	u16                  bit_width;
+/* Bit width of the sample.
+ * Supported values: 16, 24
+ */
+
+	u16                  data_format;
+/* Data format supported by the SLIMbus hardware. The default is
+ * 0 (#AFE_SB_DATA_FORMAT_NOT_INDICATED), which indicates the
+ * hardware does not perform any format conversions before the data
+ * transfer.
+ */
+
+
+	u16                  num_channels;
+/* Number of channels.
+ * Supported values: 1 to #AFE_PORT_MAX_AUDIO_CHAN_CNT
+ */
+
+	u8  shared_ch_mapping[AFE_PORT_MAX_AUDIO_CHAN_CNT];
+/* Mapping of shared channel IDs (128 to 255) to which the
+ * master port is to be connected.
+ * Shared_channel_mapping[i] represents the shared channel assigned
+ * for audio channel i in multichannel audio data.
+ */
+
+	u32              sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K
+ * - #AFE_PORT_SAMPLE_RATE_16K
+ * - #AFE_PORT_SAMPLE_RATE_48K
+ * - #AFE_PORT_SAMPLE_RATE_96K
+ * - #AFE_PORT_SAMPLE_RATE_192K
+ */
+} __packed;
+
+/*
+* This param id is used to configure Real Time Proxy interface.
+*/
+#define AFE_PARAM_ID_RT_PROXY_CONFIG 0x00010213
+
+/*  This version information is used to handle new additions to
+ *  the configuration interface in a backward-compatible manner.
+ */
+#define AFE_API_VERSION_RT_PROXY_CONFIG 0x1
+
+/*  Payload of the #AFE_PARAM_ID_RT_PROXY_CONFIG
+ * command (real-time proxy port configuration parameter).
+ */
+struct afe_param_id_rt_proxy_port_cfg {
+	u32                  rt_proxy_cfg_minor_version;
+/* Minor version used for tracking the version of rt-proxy
+ * config interface.
+ */
+
+	u16                  bit_width;
+/* Bit width of the sample.
+ * Supported values: 16
+ */
+
+	u16                  interleaved;
+/* Specifies whether the data exchanged between the AFE
+ * interface and real-time port is interleaved.
+ * Supported values: - 0 -- Non-interleaved (samples from each
+ * channel are contiguous in the buffer) - 1 -- Interleaved
+ * (corresponding samples from each input channel are interleaved
+ * within the buffer)
+ */
+
+
+	u16                  frame_size;
+/* Size of the frames that are used for PCM exchanges with this
+ * port.
+ * Supported values: > 0, in bytes
+ * For example, 5 ms buffers of 16-bit, 16 kHz stereo samples
+ * require 5 ms * 16 samples/ms * 2 bytes/sample * 2 channels = 320
+ * bytes.
+ */
+	u16                  jitter_allowance;
+/* Configures the amount of jitter that the port will allow.
+ * Supported values: > 0
+ * For example, if +/-10 ms of jitter is anticipated in the timing
+ * of sending frames to the port, and the configuration is 16 kHz
+ * mono with 16-bit samples, this field is 10 ms * 16 samples/ms * 2
+ * bytes/sample = 320.
+ */
+
+	u16                  low_water_mark;
+/* Low watermark in bytes (including all channels).
+ * Supported values:
+ * - 0 -- Do not send any low watermark events
+ * - > 0 -- Low watermark for triggering an event
+ * If the number of bytes in an internal circular buffer is lower
+ * than this low_water_mark parameter, a LOW_WATER_MARK event is
+ * sent to applications (via the #AFE_EVENT_RT_PROXY_PORT_STATUS
+ * event).
+ * Use of watermark events is optional for debugging purposes.
+ */
+
+	u16                  high_water_mark;
+/* High watermark in bytes (including all channels).
+ * Supported values:
+ * - 0 -- Do not send any high watermark events
+ * - > 0 -- High watermark for triggering an event
+ * If the number of bytes in an internal circular buffer exceeds
+ * TOTAL_CIRC_BUF_SIZE minus high_water_mark, a high watermark event
+ * is sent to applications (via the #AFE_EVENT_RT_PROXY_PORT_STATUS
+ * event).
+ * The use of watermark events is optional and for debugging
+ * purposes.
+ */
+
+
+	u32					sample_rate;
+/* Sampling rate of the port.
+ * Supported values:
+ * - #AFE_PORT_SAMPLE_RATE_8K
+ * - #AFE_PORT_SAMPLE_RATE_16K
+ * - #AFE_PORT_SAMPLE_RATE_48K
+ */
+
+	u16                  num_channels;
+/* Number of channels.
+ * Supported values: 1 to #AFE_PORT_MAX_AUDIO_CHAN_CNT
+ */
+
+	u16                  reserved;
+	/* For 32 bit alignment. */
+} __packed;
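+
+/*
+ * Example (illustrative sketch only): the sizing arithmetic from the comments
+ * above for a 16 kHz, 16-bit stereo proxy port.  frame_size of
+ * 5 * 16 * 2 * 2 = 320 bytes corresponds to a 5 ms frame, and
+ * jitter_allowance of 10 * 16 * 2 * 2 = 640 bytes to +/-10 ms of the same
+ * format.  Only the size-related fields are shown; the remaining fields and
+ * the surrounding set-param command are assumed to be filled elsewhere.
+ *
+ *	struct afe_param_id_rt_proxy_port_cfg cfg;
+ *
+ *	cfg.rt_proxy_cfg_minor_version = AFE_API_VERSION_RT_PROXY_CONFIG;
+ *	cfg.bit_width        = 16;
+ *	cfg.interleaved      = 1;
+ *	cfg.sample_rate      = AFE_PORT_SAMPLE_RATE_16K;
+ *	cfg.num_channels     = 2;
+ *	cfg.frame_size       = 5 * 16 * 2 * 2;
+ *	cfg.jitter_allowance = 10 * 16 * 2 * 2;
+ */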
+
+union afe_port_config {
+	struct afe_param_id_pcm_cfg               pcm;
+	struct afe_param_id_i2s_cfg               i2s;
+	struct afe_param_id_hdmi_multi_chan_audio_cfg hdmi_multi_ch;
+	struct afe_param_id_slimbus_cfg           slim_sch;
+	struct afe_param_id_rt_proxy_port_cfg     rtproxy;
+} __packed;
+
+struct afe_audioif_config_command {
+	struct apr_hdr			hdr;
+	struct afe_port_cmd_set_param_v2 param;
+	struct afe_port_param_data_v2    pdata;
+	union afe_port_config            port;
+} __packed;
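+
+/*
+ * Example (illustrative sketch only): how this wrapper nests the set-param
+ * header, the parameter data header and one union member for an in-band I2S
+ * configuration of the primary MI2S Rx port.  Zeroed address words and
+ * mem_map_handle select in-band delivery, `i2s` refers to a filled
+ * afe_param_id_i2s_cfg such as the sketch shown earlier, and the APR header
+ * is assumed to be filled in by the caller.
+ *
+ *	struct afe_audioif_config_command cmd;
+ *
+ *	cmd.param.port_id             = AFE_PORT_ID_PRIMARY_MI2S_RX;
+ *	cmd.param.payload_size        = sizeof(cmd.pdata) +
+ *					sizeof(cmd.port.i2s);
+ *	cmd.param.payload_address_lsw = 0;
+ *	cmd.param.payload_address_msw = 0;
+ *	cmd.param.mem_map_handle      = 0;
+ *	cmd.pdata.module_id           = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ *	cmd.pdata.param_id            = AFE_PARAM_ID_I2S_CONFIG;
+ *	cmd.pdata.param_size          = sizeof(cmd.port.i2s);
+ *	cmd.port.i2s                  = i2s;
+ */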
+
+#define AFE_PORT_CMD_DEVICE_START 0x000100E5
+
+/*  Payload of the #AFE_PORT_CMD_DEVICE_START.*/
+struct afe_port_cmd_device_start {
+	struct apr_hdr hdr;
+	u16                  port_id;
+/* Port interface and direction (Rx or Tx) to start. An even
+ * number represents the Rx direction, and an odd number represents
+ * the Tx direction.
+ */
+
+
+	u16                  reserved;
+/* Reserved for 32-bit alignment. This field must be set to 0.*/
+
+} __packed;
+
+#define AFE_PORT_CMD_DEVICE_STOP  0x000100E6
+
+/*  Payload of the #AFE_PORT_CMD_DEVICE_STOP.
+*/
+struct afe_port_cmd_device_stop {
+	struct apr_hdr hdr;
+	u16                  port_id;
+/* Port interface and direction (Rx or Tx) to start. An even
+ * number represents the Rx direction, and an odd number represents
+ * the Tx direction.
+ */
+
+	u16                  reserved;
+/* Reserved for 32-bit alignment. This field must be set to 0.*/
+} __packed;
+
+#define AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS 0x000100EA
+
+/*  Memory map regions command payload used by the
+ * #AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS command.
+ * This structure allows clients to map multiple shared memory
+ * regions in a single command. Following this structure are
+ * num_regions instances of afe_service_shared_map_region_payload.
+ */
+struct afe_service_cmd_shared_mem_map_regions {
+	struct apr_hdr hdr;
+u16                  mem_pool_id;
+/* Type of memory on which this memory region is mapped.
+ * Supported values:
+ * - #ADSP_MEMORY_MAP_EBI_POOL
+ * - #ADSP_MEMORY_MAP_SMI_POOL
+ * - #ADSP_MEMORY_MAP_SHMEM8_4K_POOL
+ * - Other values are reserved
+ *
+ * The memory pool ID implicitly defines the characteristics of the
+ * memory. Characteristics may include alignment type, permissions,
+ * etc.
+ *
+ * ADSP_MEMORY_MAP_EBI_POOL is External Buffer Interface type memory
+ * ADSP_MEMORY_MAP_SMI_POOL is Shared Memory Interface type memory
+ * ADSP_MEMORY_MAP_SHMEM8_4K_POOL is shared memory, byte
+ * addressable, and 4 KB aligned.
+ */
+
+
+	u16                  num_regions;
+/* Number of regions to map.
+ * Supported values:
+ * - Any value greater than zero
+ */
+
+	u32                  property_flag;
+/* Configures one common property for all the regions in the
+ * payload.
+ *
+ * Supported values: - 0x00000000 to 0x00000001
+ *
+ * b0 - Bit 0 indicates physical or virtual mapping:
+ * 0 -- The shared memory address provided in
+ * afe_service_shared_map_region_payload is a physical address.
+ * The shared memory needs to be mapped (a hardware TLB entry),
+ * and a software entry needs to be added for internal
+ * bookkeeping.
+ *
+ * 1 -- The shared memory address provided in
+ * afe_service_shared_map_region_payload is a virtual address.
+ * The shared memory must not be mapped (since a hardware TLB
+ * entry is already available), but a software entry needs to be
+ * added for internal bookkeeping. This can be useful if two
+ * services within the aDSP are communicating via APR; they can
+ * then communicate directly via the virtual address instead of
+ * the physical address. The virtual regions must be contiguous.
+ * num_regions must be 1 in this case.
+ *
+ * b31-b1 - Reserved bits; must be set to zero.
+ */
+
+
+} __packed;
+/*  Map region payload used by the
+ * afe_service_cmd_shared_mem_map_regions structure.
+ */
+struct afe_service_shared_map_region_payload {
+	u32                  shm_addr_lsw;
+/* least significant word of starting address in the memory
+ * region to map. It must be contiguous memory, and it must be 4 KB
+ * aligned.
+ * Supported values: - Any 32 bit value
+ */
+
+
+	u32                  shm_addr_msw;
+/* most significant word of starting address in the memory region
+ * to map. For 32 bit shared memory address, this field must be set
+ * to zero. For 36 bit shared memory address, bit31 to bit 4 must be
+ * set to zero
+ *
+ * Supported values: - For 32 bit shared memory address, this field
+ * must be set to zero. - For 36 bit shared memory address, bit31 to
+ * bit 4 must be set to zero - For 64 bit shared memory address, any
+ * 32 bit value
+ */
+
+
+	u32                  mem_size_bytes;
+/* Number of bytes in the region. The aDSP will always map the
+ * regions as virtual contiguous memory, but the memory size must be
+ * in multiples of 4 KB to avoid gaps in the virtually contiguous
+ * mapped memory.
+ *
+ * Supported values: - multiples of 4KB
+ */
+
+} __packed;
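+
+/*
+ * Example (illustrative sketch only): mapping one physically contiguous,
+ * 4 KB-aligned region.  `phys` is a hypothetical physical address obtained
+ * elsewhere, property_flag 0 marks it as a physical mapping, and
+ * lower_32_bits()/upper_32_bits() are the usual kernel helpers.  The pool
+ * constant is defined later in this header.
+ *
+ *	struct {
+ *		struct afe_service_cmd_shared_mem_map_regions  cmd;
+ *		struct afe_service_shared_map_region_payload   region[1];
+ *	} __packed map;
+ *
+ *	map.cmd.mem_pool_id          = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+ *	map.cmd.num_regions          = 1;
+ *	map.cmd.property_flag        = 0x00000000;
+ *	map.region[0].shm_addr_lsw   = lower_32_bits(phys);
+ *	map.region[0].shm_addr_msw   = upper_32_bits(phys);
+ *	map.region[0].mem_size_bytes = 4096;
+ */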
+
+#define AFE_SERVICE_CMDRSP_SHARED_MEM_MAP_REGIONS 0x000100EB
+struct afe_service_cmdrsp_shared_mem_map_regions {
+	u32                  mem_map_handle;
+/* A memory map handle encapsulating shared memory attributes is
+ * returned if the AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS command is
+ * successful. In the case of failure, a generic APR error response
+ * is returned to the client.
+ *
+ * Supported Values: - Any 32 bit value
+ */
+
+} __packed;
+#define AFE_SERVICE_CMD_SHARED_MEM_UNMAP_REGIONS 0x000100EC
+/* Memory unmap regions command payload used by the
+ * #AFE_SERVICE_CMD_SHARED_MEM_UNMAP_REGIONS
+ *
+ * This structure allows clients to unmap multiple shared memory
+ * regions in a single command.
+ */
+
+
+struct afe_service_cmd_shared_mem_unmap_regions {
+	struct apr_hdr hdr;
+u32                  mem_map_handle;
+/* memory map handle returned by
+ * AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS commands
+ *
+ * Supported Values:
+ * - Any 32 bit value
+ */
+} __packed;
+
+#define  AFE_PORT_CMD_GET_PARAM_V2 0x000100F0
+
+/*  Payload of the #AFE_PORT_CMD_GET_PARAM_V2 command,
+ * which queries for one post/preprocessing parameter of a
+ * stream.
+ */
+struct afe_port_cmd_get_param_v2 {
+
+	struct apr_hdr hdr;
+u16                  port_id;
+/* Port interface and direction (Rx or Tx) to start. */
+
+	u16                  payload_size;
+/* Maximum data size of the parameter ID/module ID combination.
+ * This is a multiple of four bytes
+ * Supported values: > 0
+ */
+
+	u32 payload_address_lsw;
+/* LSW of 64 bit Payload address. Address should be 32-byte,
+ * 4kbyte aligned and must be contiguous memory.
+ */
+
+
+	u32 payload_address_msw;
+/* MSW of 64 bit Payload address. In case of 32-bit shared
+ * memory address, this field must be set to zero. In case of 36-bit
+ * shared memory address, bit-4 to bit-31 must be set to zero.
+ * Address should be 32-byte, 4kbyte aligned and must be contiguous
+ * memory.
+ */
+
+	u32 mem_map_handle;
+/* Memory map handle returned by the
+ * AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS command.
+ * Supported values:
+ * - NULL -- The parameter data is in-band.
+ * - Non-NULL -- The parameter data is out-of-band; this is a
+ *   pointer to the physical address of the payload data in
+ *   shared memory.
+ * For detailed payload content, see the afe_port_param_data_v2
+ * structure.
+ */
+
+
+	u32                  module_id;
+/* ID of the module to be queried.
+ * Supported values: Valid module ID
+ */
+
+	u32                  param_id;
+/* ID of the parameter to be queried.
+ * Supported values: Valid parameter ID
+ */
+} __packed;
+
+#define AFE_PORT_CMDRSP_GET_PARAM_V2 0x00010106
+
+/* Payload of the #AFE_PORT_CMDRSP_GET_PARAM_V2 message, which
+ * responds to an #AFE_PORT_CMD_GET_PARAM_V2 command.
+ *
+ * Immediately following this structure is the parameters structure
+ * (afe_port_param_data) containing the response (acknowledgment)
+ * parameter payload. This payload is included for an in-band
+ * scenario. For an address/shared memory-based set parameter, this
+ * payload is not needed.
+ */
+
+
+struct afe_port_cmdrsp_get_param_v2 {
+	u32                  status;
+} __packed;
+
+/* adsp_afe_service_commands.h */
+
+#define ADSP_MEMORY_MAP_EBI_POOL      0
+
+#define ADSP_MEMORY_MAP_SMI_POOL      1
+#define ADSP_MEMORY_MAP_IMEM_POOL      2
+#define ADSP_MEMORY_MAP_SHMEM8_4K_POOL      3
+/* Definition of the virtual memory flag. */
+#define ADSP_MEMORY_MAP_VIRTUAL_MEMORY 1
+
+/* Definition of the physical memory flag. */
+#define ADSP_MEMORY_MAP_PHYSICAL_MEMORY 0
+
+
+#define DEFAULT_COPP_TOPOLOGY				0x00010be3
+#define DEFAULT_POPP_TOPOLOGY				0x00010be4
+#define VPM_TX_SM_ECNS_COPP_TOPOLOGY			0x00010F71
+#define VPM_TX_DM_FLUENCE_COPP_TOPOLOGY			0x00010F72
+#define VPM_TX_QMIC_FLUENCE_COPP_TOPOLOGY		0x00010F75
+
+/* Memory map regions command payload used by the
+ * #ASM_CMD_SHARED_MEM_MAP_REGIONS ,#ADM_CMD_SHARED_MEM_MAP_REGIONS
+ * commands.
+ *
+ * This structure allows clients to map multiple shared memory
+ * regions in a single command. Following this structure are
+ * num_regions of avs_shared_map_region_payload.
+ */
+
+
+struct avs_cmd_shared_mem_map_regions {
+	struct apr_hdr hdr;
+	u16                  mem_pool_id;
+/* Type of memory on which this memory region is mapped.
+ *
+ * Supported values: - #ADSP_MEMORY_MAP_EBI_POOL -
+ * #ADSP_MEMORY_MAP_SMI_POOL - #ADSP_MEMORY_MAP_IMEM_POOL
+ * (unsupported) - #ADSP_MEMORY_MAP_SHMEM8_4K_POOL - Other values
+ * are reserved
+ *
+ * The memory ID implicitly defines the characteristics of the
+ * memory. Characteristics may include alignment type, permissions,
+ * etc.
+ *
+ * SHMEM8_4K is shared memory, byte addressable, and 4 KB aligned.
+ */
+
+
+	u16                  num_regions;
+	/* Number of regions to map.*/
+
+	u32                  property_flag;
+/* Configures one common property for all the regions in the
+ * payload. No two regions in the same memory map regions command
+ * can have different properties.
+ * Supported values: 0x00000000 to 0x00000001
+ *
+ * Bit 0 indicates physical or virtual mapping:
+ * - 0 -- The shared memory address provided in
+ *   avs_shared_map_region_payload is a physical address. The shared
+ *   memory needs to be mapped (a hardware TLB entry), and a
+ *   software entry needs to be added for internal bookkeeping.
+ * - 1 -- The shared memory address provided in
+ *   avs_shared_map_region_payload is a virtual address. The shared
+ *   memory must not be mapped (since the hardware TLB entry is
+ *   already available), but a software entry needs to be added for
+ *   internal bookkeeping. This can be useful if two services within
+ *   the aDSP are communicating via APR; they can then communicate
+ *   directly via the virtual address instead of the physical
+ *   address. The virtual regions must be contiguous.
+ *
+ * Bits 31 to 1 are reserved and must be set to zero.
+ */
+
+} __packed;
+
+struct avs_shared_map_region_payload {
+	u32                  shm_addr_lsw;
+/* least significant word of shared memory address of the memory
+ * region to map. It must be contiguous memory, and it must be 4 KB
+ * aligned.
+ */
+
+	u32                  shm_addr_msw;
+/* Most significant word of the shared memory address of the memory
+ * region to map. For a 32-bit shared memory address, this field
+ * must be set to zero. For a 36-bit shared memory address, bit 31
+ * to bit 4 must be set to zero.
+ */
+
+	u32                  mem_size_bytes;
+/* Number of bytes in the region.
+ *
+ * The aDSP will always map the regions as virtual contiguous
+ * memory, but the memory size must be in multiples of 4 KB to avoid
+ * gaps in the virtually contiguous mapped memory.
+ */
+
+} __packed;
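+
+/* A minimal usage sketch (hypothetical helper, compiled out): building
+ * a single-region physical mapping request. The command structure is
+ * followed immediately by num_regions avs_shared_map_region_payload
+ * entries; the physical address and 4 KB-multiple size are assumptions
+ * supplied by the caller.
+ */
+#if 0	/* documentation example only */
+static inline void avs_map_one_phys_region_example(
+		struct avs_cmd_shared_mem_map_regions *cmd,
+		struct avs_shared_map_region_payload *region,
+		u32 phys_lsw, u32 phys_msw, u32 size_bytes)
+{
+	cmd->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+	cmd->num_regions = 1;
+	cmd->property_flag = ADSP_MEMORY_MAP_PHYSICAL_MEMORY;	/* bit 0 = 0 */
+
+	region->shm_addr_lsw = phys_lsw;	/* 4 KB aligned */
+	region->shm_addr_msw = phys_msw;	/* zero for 32-bit addresses */
+	region->mem_size_bytes = size_bytes;	/* multiple of 4 KB */
+}
+#endif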
+
+struct avs_cmd_shared_mem_unmap_regions {
+	struct apr_hdr       hdr;
+	u32                  mem_map_handle;
+/* Memory map handle returned by the ASM_CMD_SHARED_MEM_MAP_REGIONS
+ * or ADM_CMD_SHARED_MEM_MAP_REGIONS commands.
+ */
+
+} __packed;
+
+/* Memory map command response payload used by the
+ * #ASM_CMDRSP_SHARED_MEM_MAP_REGIONS and
+ * #ADM_CMDRSP_SHARED_MEM_MAP_REGIONS commands.
+ */
+
+
+struct avs_cmdrsp_shared_mem_map_regions {
+	u32                  mem_map_handle;
+/* A memory map handle encapsulating shared memory attributes is
+ * returned
+ */
+
+} __packed;
+
+/*adsp_audio_memmap_api.h*/
+
+/* ASM related data structures */
+struct asm_wma_cfg {
+	u16 format_tag;
+	u16 ch_cfg;
+	u32 sample_rate;
+	u32 avg_bytes_per_sec;
+	u16 block_align;
+	u16 valid_bits_per_sample;
+	u32 ch_mask;
+	u16 encode_opt;
+	u16 adv_encode_opt;
+	u32 adv_encode_opt2;
+	u32 drc_peak_ref;
+	u32 drc_peak_target;
+	u32 drc_ave_ref;
+	u32 drc_ave_target;
+} __packed;
+
+struct asm_wmapro_cfg {
+	u16 format_tag;
+	u16 ch_cfg;
+	u32 sample_rate;
+	u32 avg_bytes_per_sec;
+	u16 block_align;
+	u16 valid_bits_per_sample;
+	u32 ch_mask;
+	u16 encode_opt;
+	u16 adv_encode_opt;
+	u32 adv_encode_opt2;
+	u32 drc_peak_ref;
+	u32 drc_peak_target;
+	u32 drc_ave_ref;
+	u32 drc_ave_target;
+} __packed;
+
+struct asm_aac_cfg {
+	u16 format;
+	u16 aot;
+	u16 ep_config;
+	u16 section_data_resilience;
+	u16 scalefactor_data_resilience;
+	u16 spectral_data_resilience;
+	u16 ch_cfg;
+	u16 reserved;
+	u32 sample_rate;
+} __packed;
+
+struct asm_softpause_params {
+	u32 enable;
+	u32 period;
+	u32 step;
+	u32 rampingcurve;
+} __packed;
+
+struct asm_softvolume_params {
+	u32 period;
+	u32 step;
+	u32 rampingcurve;
+} __packed;
+
+#define ASM_END_POINT_DEVICE_MATRIX     0
+/* Front left channel. */
+#define PCM_CHANNEL_FL    1
+
+/* Front right channel. */
+#define PCM_CHANNEL_FR    2
+
+/* Front center channel. */
+#define PCM_CHANNEL_FC    3
+
+/* Left surround channel.*/
+#define PCM_CHANNEL_LS   4
+
+/* Right surround channel.*/
+#define PCM_CHANNEL_RS   5
+
+/* Low frequency effect channel. */
+#define PCM_CHANNEL_LFE  6
+
+/* Center surround channel; Rear center channel. */
+#define PCM_CHANNEL_CS   7
+
+/* Left back channel; Rear left channel. */
+#define PCM_CHANNEL_LB   8
+
+/* Right back channel; Rear right channel. */
+#define PCM_CHANNEL_RB   9
+
+/* Top surround channel. */
+#define PCM_CHANNELS   10
+
+/* Center vertical height channel.*/
+#define PCM_CHANNEL_CVH  11
+
+/* Mono surround channel.*/
+#define PCM_CHANNEL_MS   12
+
+/* Front left of center. */
+#define PCM_CHANNEL_FLC  13
+
+/* Front right of center. */
+#define PCM_CHANNEL_FRC  14
+
+/* Rear left of center. */
+#define PCM_CHANNEL_RLC  15
+
+/* Rear right of center. */
+#define PCM_CHANNEL_RRC  16
+
+#define PCM_FORMAT_MAX_NUM_CHANNEL  8
+
+#define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2 0x00010DA5
+
+#define ASM_STREAM_POSTPROC_TOPO_ID_DEFAULT 0x00010BE4
+
+#define ASM_MEDIA_FMT_EVRCB_FS 0x00010BEF
+
+#define ASM_MEDIA_FMT_EVRCWB_FS 0x00010BF0
+
+#define ASM_MAX_EQ_BANDS 12
+
+#define ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2 0x00010D98
+
+struct asm_data_cmd_media_fmt_update_v2 {
+u32                    fmt_blk_size;
+	/* Media format block size in bytes.*/
+}  __packed;
+
+struct asm_multi_channel_pcm_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+
+	u16  num_channels;
+	/* Number of channels. Supported values: 1 to 8 */
+	u16  bits_per_sample;
+/* Number of bits per sample per channel.
+ * Supported values: 16, 24
+ *
+ * When used for playback, the client must send 24-bit samples
+ * packed in 32-bit words. The 24-bit samples must be placed in the
+ * most significant 24 bits of the 32-bit word. When used for
+ * recording, the aDSP sends 24-bit samples packed in 32-bit words.
+ * The 24-bit samples are placed in the most significant 24 bits of
+ * the 32-bit word.
+ */
+
+
+	u32  sample_rate;
+/* Number of samples per second (in Hertz).
+ * Supported values: 2000 to 48000
+ */
+
+	u16  is_signed;
+	/* Flag that indicates the samples are signed (1). */
+
+	u16  reserved;
+	/* reserved field for 32 bit alignment. must be set to zero. */
+
+	u8   channel_mapping[8];
+/* Channel array of size 8.
+ * Supported values:
+ * - #PCM_CHANNEL_L
+ * - #PCM_CHANNEL_R
+ * - #PCM_CHANNEL_C
+ * - #PCM_CHANNEL_LS
+ * - #PCM_CHANNEL_RS
+ * - #PCM_CHANNEL_LFE
+ * - #PCM_CHANNEL_CS
+ * - #PCM_CHANNEL_LB
+ * - #PCM_CHANNEL_RB
+ * - #PCM_CHANNELS
+ * - #PCM_CHANNEL_CVH
+ * - #PCM_CHANNEL_MS
+ * - #PCM_CHANNEL_FLC
+ * - #PCM_CHANNEL_FRC
+ * - #PCM_CHANNEL_RLC
+ * - #PCM_CHANNEL_RRC
+ *
+ * Each element i of the array describes channel i inside the
+ * buffer, where 0 <= i < num_channels. An unused channel is set to
+ * zero.
+ */
+} __packed;
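+
+/* A minimal usage sketch (hypothetical helper, compiled out): a stereo,
+ * 48 kHz, 16-bit configuration of the media format block above. The
+ * fmt_blk_size value and the APR header are assumed to be filled in by
+ * the caller's command-building code.
+ */
+#if 0	/* documentation example only */
+static inline void asm_pcm_fmt_stereo_48k_example(
+		struct asm_multi_channel_pcm_fmt_blk_v2 *fmt)
+{
+	int i;
+
+	fmt->num_channels = 2;
+	fmt->bits_per_sample = 16;
+	fmt->sample_rate = 48000;
+	fmt->is_signed = 1;
+	fmt->reserved = 0;
+	fmt->channel_mapping[0] = PCM_CHANNEL_FL;
+	fmt->channel_mapping[1] = PCM_CHANNEL_FR;
+	for (i = 2; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+		fmt->channel_mapping[i] = 0;	/* unused channels */
+}
+#endif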
+
+struct asm_stream_cmd_set_encdec_param {
+		u32                  param_id;
+	/* ID of the parameter. */
+
+	u32                  param_size;
+/* Data size of this parameter, in bytes. The size is a multiple
+ * of 4 bytes.
+ */
+
+} __packed;
+
+struct asm_enc_cfg_blk_param_v2 {
+	u32                  frames_per_buf;
+/* Number of encoded frames to pack into each buffer.
+ *
+ * @note1hang This is only guidance information for the aDSP. The
+ * number of encoded frames put into each buffer (specified by the
+ * client) is less than or equal to this number.
+ */
+
+	u32                  enc_cfg_blk_size;
+/* Size in bytes of the encoder configuration block that follows
+ * this member.
+ */
+
+} __packed;
+
+/* @brief Multichannel PCM encoder configuration structure used
+ * in the #ASM_STREAM_CMD_OPEN_READ_V2 command.
+ */
+
+struct asm_multi_channel_pcm_enc_cfg_v2 {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+	uint16_t  num_channels;
+/*< Number of PCM channels.
+ *
+ * Supported values:
+ * - 0 -- Native mode
+ * - 1 to 8
+ * Native mode indicates that encoding must be performed with the
+ * number of channels at the input.
+ */
+
+	uint16_t  bits_per_sample;
+/*< Number of bits per sample per channel.
+ * Supported values: 16, 24
+ */
+
+	uint32_t  sample_rate;
+/*< Number of samples per second (in Hertz).
+ *
+ * Supported values: 0, 8000 to 48000
+ * A value of 0 indicates the native sampling rate. Encoding is
+ * performed at the input sampling rate.
+ */
+
+	uint16_t  is_signed;
+/*< Specifies whether the samples are signed (1). Currently,
+ * only signed samples are supported.
+ */
+
+	uint16_t  reserved;
+/*< reserved field for 32 bit alignment. must be set to zero.*/
+
+
+	uint8_t   channel_mapping[8];
+} __packed;
+
+#define ASM_MEDIA_FMT_MP3 0x00010BE9
+#define ASM_MEDIA_FMT_AAC_V2 0x00010DA6
+
+/* @xreflabel
+ * {hdr:AsmMediaFmtDolbyAac} Media format ID for the
+ * Dolby AAC decoder. This format ID is used if the client wants
+ * to use the Dolby AAC decoder to decode MPEG2 and MPEG4 AAC
+ * content.
+ */
+
+#define ASM_MEDIA_FMT_DOLBY_AAC 0x00010D86
+
+/* Enumeration for the audio data transport stream AAC format. */
+#define ASM_MEDIA_FMT_AAC_FORMAT_FLAG_ADTS 0
+
+/* Enumeration for low overhead audio stream AAC format. */
+#define ASM_MEDIA_FMT_AAC_FORMAT_FLAG_LOAS                      1
+
+/* Enumeration for the audio data interchange format
+ * AAC format.
+ */
+#define ASM_MEDIA_FMT_AAC_FORMAT_FLAG_ADIF   2
+
+/* Enumeration for the raw AAC format. */
+#define ASM_MEDIA_FMT_AAC_FORMAT_FLAG_RAW    3
+
+#define ASM_MEDIA_FMT_AAC_AOT_LC             2
+#define ASM_MEDIA_FMT_AAC_AOT_SBR            5
+#define ASM_MEDIA_FMT_AAC_AOT_PS             29
+#define ASM_MEDIA_FMT_AAC_AOT_BSAC           22
+
+struct asm_aac_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+
+		u16          aac_fmt_flag;
+/* Bitstream format option.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_AAC_FORMAT_FLAG_ADTS
+ * - #ASM_MEDIA_FMT_AAC_FORMAT_FLAG_LOAS
+ * - #ASM_MEDIA_FMT_AAC_FORMAT_FLAG_ADIF
+ * - #ASM_MEDIA_FMT_AAC_FORMAT_FLAG_RAW
+ */
+
+	u16          audio_objype;
+/* Audio Object Type (AOT) present in the AAC stream.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_AAC_AOT_LC
+ * - #ASM_MEDIA_FMT_AAC_AOT_SBR
+ * - #ASM_MEDIA_FMT_AAC_AOT_BSAC
+ * - #ASM_MEDIA_FMT_AAC_AOT_PS
+ * - Otherwise -- Not supported
+ */
+
+	u16          channel_config;
+/* Number of channels present in the AAC stream.
+ * Supported values:
+ * - 1 -- Mono
+ * - 2 -- Stereo
+ * - 6 -- 5.1 content
+ */
+
+	u16          reserved;
+	/* Reserved. Clients must set this field to zero. */
+
+	u16          total_size_of_PCE_bits;
+/* Greater than or equal to zero.
+ * - In case of RAW formats and channel config = 0 (PCE), the client
+ *   can send the bitstream containing the PCE immediately following
+ *   this structure (in-band).
+ * - This number does not include bits added for 32-bit alignment.
+ * - If zero, the PCE information is assumed to be available in the
+ *   audio bitstream and not in-band.
+ */
+
+	u32          sample_rate;
+/* Number of samples per second (in Hertz).
+ *
+ * Supported values: 8000, 11025, 12000, 16000, 22050, 24000, 32000,
+ * 44100, 48000
+ *
+ * This field must be equal to the sample rate of the AAC-LC
+ * decoder's output. - For MP4 or 3GP containers, this is indicated
+ * by the samplingFrequencyIndex field in the AudioSpecificConfig
+ * element. - For ADTS format, this is indicated by the
+ * samplingFrequencyIndex in the ADTS fixed header. - For ADIF
+ * format, this is indicated by the samplingFrequencyIndex in the
+ * program_config_element present in the ADIF header.
+ */
+
+} __packed;
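+
+/* A minimal usage sketch (hypothetical helper, compiled out): an
+ * AAC-LC, ADTS, stereo, 44.1 kHz format block. PCE bits are zero here,
+ * so any PCE information is taken from the bitstream rather than
+ * in-band.
+ */
+#if 0	/* documentation example only */
+static inline void asm_aac_fmt_lc_adts_example(struct asm_aac_fmt_blk_v2 *fmt)
+{
+	fmt->aac_fmt_flag = ASM_MEDIA_FMT_AAC_FORMAT_FLAG_ADTS;
+	fmt->audio_objype = ASM_MEDIA_FMT_AAC_AOT_LC;
+	fmt->channel_config = 2;		/* stereo */
+	fmt->reserved = 0;
+	fmt->total_size_of_PCE_bits = 0;	/* PCE not sent in-band */
+	fmt->sample_rate = 44100;
+}
+#endif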
+
+struct asm_aac_enc_cfg_v2 {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+
+	u32          bit_rate;
+	/* Encoding rate in bits per second. */
+	u32          enc_mode;
+/* Encoding mode.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_AAC_AOT_LC
+ * - #ASM_MEDIA_FMT_AAC_AOT_SBR
+ * - #ASM_MEDIA_FMT_AAC_AOT_PS
+ */
+	u16          aac_fmt_flag;
+/* AAC format flag.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_AAC_FORMAT_FLAG_ADTS
+ * - #ASM_MEDIA_FMT_AAC_FORMAT_FLAG_RAW
+ */
+	u16          channel_cfg;
+/* Number of channels to encode.
+ * Supported values:
+ * - 0 -- Native mode
+ * - 1 -- Mono
+ * - 2 -- Stereo
+ * - Other values are not supported.
+ * @note1hang The eAAC+ encoder mode supports only stereo.
+ * Native mode indicates that encoding must be performed with the
+ * number of channels at the input.
+ * The number of channels must not change during encoding.
+ */
+
+	u32          sample_rate;
+/* Number of samples per second.
+ * Supported values:
+ * - 0 -- Native mode
+ * - Other values supported by the encoder
+ * Native mode indicates that encoding must be performed with the
+ * sampling rate at the input.
+ * The sampling rate must not change during encoding.
+ */
+
+} __packed;
+
+#define ASM_MEDIA_FMT_AMRNB_FS                  0x00010BEB
+
+/* Enumeration for 4.75 kbps AMR-NB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MR475                0
+
+/* Enumeration for 5.15 kbps AMR-NB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MR515                1
+
+/* Enumeration for 5.90 kbps AMR-NB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MMR59                2
+
+/* Enumeration for 6.70 kbps AMR-NB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MMR67                3
+
+/* Enumeration for 7.40 kbps AMR-NB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MMR74                4
+
+/* Enumeration for 7.95 kbps AMR-NB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MMR795               5
+
+/* Enumeration for 10.20 kbps AMR-NB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MMR102               6
+
+/* Enumeration for 12.20 kbps AMR-NB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MMR122               7
+
+/* Enumeration for AMR-NB Discontinuous Transmission mode off. */
+#define ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_OFF                     0
+
+/* Enumeration for AMR-NB DTX mode VAD1. */
+#define ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_VAD1                    1
+
+/* Enumeration for AMR-NB DTX mode VAD2. */
+#define ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_VAD2                    2
+
+/* Enumeration for AMR-NB DTX mode auto. */
+#define ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_AUTO                    3
+
+struct asm_amrnb_enc_cfg {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+
+	u16          enc_mode;
+/* AMR-NB encoding rate.
+ * Supported values:
+ * Use the ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_*
+ * macros
+ */
+
+	u16          dtx_mode;
+/* Specifies whether DTX mode is disabled or enabled.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_OFF
+ * - #ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_VAD1
+ */
+} __packed;
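+
+/* A minimal usage sketch (hypothetical helper, compiled out): a
+ * 12.2 kbps AMR-NB encoder configuration with DTX disabled. The APR
+ * header and encdec/encblk members are assumed to be filled in by the
+ * caller's command-building code.
+ */
+#if 0	/* documentation example only */
+static inline void asm_amrnb_enc_cfg_example(struct asm_amrnb_enc_cfg *cfg)
+{
+	cfg->enc_mode = ASM_MEDIA_FMT_AMRNB_FS_ENCODE_MODE_MMR122;
+	cfg->dtx_mode = ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_OFF;
+}
+#endif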
+
+#define ASM_MEDIA_FMT_AMRWB_FS                  0x00010BEC
+
+/* Enumeration for 6.6 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR66                 0
+
+/* Enumeration for 8.85 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR885                1
+
+/* Enumeration for 12.65 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR1265               2
+
+/* Enumeration for 14.25 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR1425               3
+
+/* Enumeration for 15.85 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR1585               4
+
+/* Enumeration for 18.25 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR1825               5
+
+/* Enumeration for 19.85 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR1985               6
+
+/* Enumeration for 23.05 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR2305               7
+
+/* Enumeration for 23.85 kbps AMR-WB Encoding mode. */
+#define ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_MR2385               8
+
+struct asm_amrwb_enc_cfg {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+
+	u16          enc_mode;
+/* AMR-WB encoding rate.
+ * Supported values:
+ * Use the ASM_MEDIA_FMT_AMRWB_FS_ENCODE_MODE_*
+ * macros
+ */
+
+	u16          dtx_mode;
+/* Specifies whether DTX mode is disabled or enabled.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_OFF
+ * - #ASM_MEDIA_FMT_AMRNB_FS_DTX_MODE_VAD1
+ */
+} __packed;
+
+#define ASM_MEDIA_FMT_V13K_FS                      0x00010BED
+
+/* Enumeration for 14.4 kbps V13K Encoding mode. */
+#define ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR1440                0
+
+/* Enumeration for 12.2 kbps V13K Encoding mode. */
+#define ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR1220                1
+
+/* Enumeration for 11.2 kbps V13K Encoding mode. */
+#define ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR1120                2
+
+/* Enumeration for 9.0 kbps V13K Encoding mode. */
+#define ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR90                  3
+
+/* Enumeration for 7.2 kbps V13K Encoding mode. */
+#define ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR720                 4
+
+/* Enumeration for 1/8 vocoder rate.*/
+#define ASM_MEDIA_FMT_VOC_ONE_EIGHTH_RATE          1
+
+/* Enumeration for 1/4 vocoder rate. */
+#define ASM_MEDIA_FMT_VOC_ONE_FOURTH_RATE       2
+
+/* Enumeration for 1/2 vocoder rate. */
+#define ASM_MEDIA_FMT_VOC_HALF_RATE             3
+
+/* Enumeration for full vocoder rate. */
+#define ASM_MEDIA_FMT_VOC_FULL_RATE             4
+
+struct asm_v13k_enc_cfg {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+		u16          max_rate;
+/* Maximum allowed encoder frame rate.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_VOC_ONE_EIGHTH_RATE
+ * - #ASM_MEDIA_FMT_VOC_ONE_FOURTH_RATE
+ * - #ASM_MEDIA_FMT_VOC_HALF_RATE
+ * - #ASM_MEDIA_FMT_VOC_FULL_RATE
+ */
+
+	u16          min_rate;
+/* Minimum allowed encoder frame rate.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_VOC_ONE_EIGHTH_RATE
+ * - #ASM_MEDIA_FMT_VOC_ONE_FOURTH_RATE
+ * - #ASM_MEDIA_FMT_VOC_HALF_RATE
+ * - #ASM_MEDIA_FMT_VOC_FULL_RATE
+ */
+
+	u16          reduced_rate_cmd;
+/* Reduced rate command, used to change
+ * the average bitrate of the V13K
+ * vocoder.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR1440 (Default)
+ * - #ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR1220
+ * - #ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR1120
+ * - #ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR90
+ * - #ASM_MEDIA_FMT_V13K_FS_ENCODE_MODE_MR720
+ */
+
+	u16          rate_mod_cmd;
+/* Rate modulation command. Default = 0.
+ *- If bit 0=1, rate control is enabled.
+ *- If bit 1=1, the maximum number of consecutive full rate
+ *			frames is limited with numbers supplied in
+ *			bits 2 to 10.
+ *- If bit 1=0, the minimum number of non-full rate frames
+ *			in between two full rate frames is forced to
+ * the number supplied in bits 2 to 10. In both cases, if necessary,
+ * half rate is used to substitute full rate. - Bits 15 to 10 are
+ * reserved and must all be set to zero.
+ */
+
+} __packed;
+
+#define ASM_MEDIA_FMT_EVRC_FS                   0x00010BEE
+
+/*  EVRC encoder configuration structure used in the
+ * #ASM_STREAM_CMD_OPEN_READ_V2 command.
+ */
+struct asm_evrc_enc_cfg {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+	u16          max_rate;
+/* Maximum allowed encoder frame rate.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_VOC_ONE_EIGHTH_RATE
+ * - #ASM_MEDIA_FMT_VOC_ONE_FOURTH_RATE
+ * - #ASM_MEDIA_FMT_VOC_HALF_RATE
+ * - #ASM_MEDIA_FMT_VOC_FULL_RATE
+ */
+
+	u16          min_rate;
+/* Minimum allowed encoder frame rate.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_VOC_ONE_EIGHTH_RATE
+ * - #ASM_MEDIA_FMT_VOC_ONE_FOURTH_RATE
+ * - #ASM_MEDIA_FMT_VOC_HALF_RATE
+ * - #ASM_MEDIA_FMT_VOC_FULL_RATE
+ */
+
+	u16          rate_mod_cmd;
+/* Rate modulation command. Default: 0.
+ * - If bit 0=1, rate control is enabled.
+ * - If bit 1=1, the maximum number of consecutive full rate frames
+ * is limited with numbers supplied in bits 2 to 10.
+ *
+ * - If bit 1=0, the minimum number of non-full rate frames in
+ * between two full rate frames is forced to the number supplied in
+ * bits 2 to 10. In both cases, if necessary, half rate is used to
+ * substitute full rate.
+ *
+ * - Bits 15 to 10 are reserved and must all be set to zero.
+ */
+
+	u16          reserved;
+	/* Reserved. Clients must set this field to zero. */
+} __packed;
+
+#define ASM_MEDIA_FMT_WMA_V10PRO_V2                0x00010DA7
+
+struct asm_wmaprov10_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmtblk;
+
+	u16          fmtag;
+/* WMA format type.
+ * Supported values:
+ * - 0x162 -- WMA 9 Pro
+ * - 0x163 -- WMA 9 Pro Lossless
+ * - 0x166 -- WMA 10 Pro
+ * - 0x167 -- WMA 10 Pro Lossless
+ */
+
+	u16          num_channels;
+/* Number of channels encoded in the input stream.
+ * Supported values: 1 to 8
+ */
+
+	u32          sample_rate;
+/* Number of samples per second (in Hertz).
+ * Supported values: 11025, 16000, 22050, 32000, 44100, 48000,
+ * 88200, 96000
+ */
+
+	u32          avg_bytes_per_sec;
+/* Bitrate expressed as the average bytes per second.
+ * Supported values: 2000 to 96000
+ */
+
+	u16          blk_align;
+/* Size of the bitstream packet size in bytes. WMA Pro files
+ * have a payload of one block per bitstream packet.
+ * Supported values: <= 13376
+ */
+
+	u16          bits_per_sample;
+/* Number of bits per sample in the encoded WMA stream.
+ * Supported values: 16, 24
+ */
+
+	u32          channel_mask;
+/* Bit-packed double word (32-bits) that indicates the
+ * recommended speaker positions for each source channel.
+ */
+
+	u16          enc_options;
+/* Bit-packed word with values that indicate whether certain
+ * features of the bitstream are used.
+ * Supported values:
+ * - 0x0001 -- ENCOPT3_PURE_LOSSLESS
+ * - 0x0006 -- ENCOPT3_FRM_SIZE_MOD
+ * - 0x0038 -- ENCOPT3_SUBFRM_DIV
+ * - 0x0040 -- ENCOPT3_WRITE_FRAMESIZE_IN_HDR
+ * - 0x0080 -- ENCOPT3_GENERATE_DRC_PARAMS
+ * - 0x0100 -- ENCOPT3_RTMBITS
+ */
+
+
+	u16          usAdvancedEncodeOpt;
+	/* Advanced encoding option.  */
+
+	u32          advanced_enc_options2;
+	/* Advanced encoding option 2. */
+
+} __packed;
+
+#define ASM_MEDIA_FMT_WMA_V9_V2                    0x00010DA8
+struct asm_wmastdv9_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmtblk;
+	u16          fmtag;
+/* WMA format tag.
+ * Supported values: 0x161 (WMA 9 standard)
+ */
+
+	u16          num_channels;
+/* Number of channels in the stream.
+ * Supported values: 1, 2
+ */
+
+	u32          sample_rate;
+/* Number of samples per second (in Hertz).
+ * Supported values: 48000
+ */
+
+	u32          avg_bytes_per_sec;
+	/* Bitrate expressed as the average bytes per second. */
+
+	u16          blk_align;
+/* Block align. All WMA files with a maximum packet size of
+ * 13376 are supported.
+ */
+
+
+	u16          bits_per_sample;
+/* Number of bits per sample in the output.
+ * Supported values: 16
+ */
+
+	u32          channel_mask;
+/* Channel mask.
+ * Supported values:
+ * - 3 -- Stereo (front left/front right)
+ * - 4 -- Mono (center)
+ */
+
+	u16          enc_options;
+	/* Options used during encoding. */
+
+} __packed;
+
+#define ASM_MEDIA_FMT_WMA_V8                    0x00010D91
+
+struct asm_wmastdv8_enc_cfg {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+	u32          bit_rate;
+	/* Encoding rate in bits per second. */
+
+	u32          sample_rate;
+/* Number of samples per second.
+ *
+ * Supported values:
+ * - 0 -- Native mode
+ * - Other Supported values are 22050, 32000, 44100, and 48000.
+ *
+ * Native mode indicates that encoding must be performed with the
+ * sampling rate at the input.
+ * The sampling rate must not change during encoding.
+ */
+
+	u16          channel_cfg;
+/* Number of channels to encode.
+ * Supported values:
+ * - 0 -- Native mode
+ * - 1 -- Mono
+ * - 2 -- Stereo
+ * - Other values are not supported.
+ *
+ * Native mode indicates that encoding must be performed with the
+ * number of channels at the input.
+ * The number of channels must not change during encoding.
+ */
+
+	u16          reserved;
+	/* Reserved. Clients must set this field to zero.*/
+	} __packed;
+
+#define ASM_MEDIA_FMT_AMR_WB_PLUS_V2               0x00010DA9
+
+struct asm_amrwbplus_fmt_blk_v2 {
+	struct apr_hdr hdr;
+	struct asm_data_cmd_media_fmt_update_v2 fmtblk;
+	u32          amr_frame_fmt;
+/* AMR frame format.
+ * Supported values:
+ * - 6 -- Transport Interface Format (TIF)
+ * - Any other value -- File storage format (FSF)
+ *
+ * TIF stream contains 2-byte header for each frame within the
+ * superframe. FSF stream contains one 2-byte header per superframe.
+ */
+
+} __packed;
+
+#define ASM_MEDIA_FMT_AC3_DEC                   0x00010BF6
+#define ASM_MEDIA_FMT_EAC3_DEC                   0x00010C3C
+#define ASM_MEDIA_FMT_DTS                    0x00010D88
+
+/* Media format ID for adaptive transform acoustic coding. This
+ * ID is used by the #ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED command
+ * only.
+ */
+
+#define ASM_MEDIA_FMT_ATRAC                  0x00010D89
+
+/* Media format ID for metadata-enhanced audio transmission.
+ * This ID is used by the #ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED
+ * command only.
+ */
+
+#define ASM_MEDIA_FMT_MAT                    0x00010D8A
+
+/*  adsp_media_fmt.h */
+
+#define ASM_DATA_CMD_WRITE_V2 0x00010DAB
+
+struct asm_data_cmd_write_v2 {
+	struct apr_hdr hdr;
+	u32                  buf_addr_lsw;
+/* The 64 bit address msw-lsw should be a valid, mapped address.
+ * 64 bit address should be a multiple of 32 bytes
+ */
+
+	u32                  buf_addr_msw;
+/* The 64 bit address msw-lsw should be a valid, mapped address.
+ * The 64 bit address should be a multiple of 32 bytes.
+ * - Address of the buffer containing the data to be decoded. The
+ *   buffer should be aligned to a 32-byte boundary.
+ * - In the case of a 32-bit shared memory address, the msw field
+ *   must be set to zero.
+ * - In the case of a 36-bit shared memory address, bit 31 to bit 4
+ *   of the msw must be set to zero.
+ */
+	u32                  mem_map_handle;
+/* memory map handle returned by DSP through
+ * ASM_CMD_SHARED_MEM_MAP_REGIONS command
+ */
+	u32                  buf_size;
+/* Number of valid bytes available in the buffer for decoding. The
+ * first byte starts at buf_addr.
+ */
+
+	u32                  seq_id;
+	/* Optional buffer sequence ID. */
+
+	u32                  timestamp_lsw;
+/* Lower 32 bits of the 64-bit session time in microseconds of the
+ * first buffer sample.
+ */
+
+	u32                  timestamp_msw;
+/* Upper 32 bits of the 64-bit session time in microseconds of the
+ * first buffer sample.
+ */
+
+	u32                  flags;
+/* Bitfield of flags.
+ * Supported values for bit 31:
+ * - 1 -- Valid timestamp.
+ * - 0 -- Invalid timestamp.
+ * - Use #ASM_BIT_MASKIMESTAMP_VALID_FLAG as the bitmask and
+ * #ASM_SHIFTIMESTAMP_VALID_FLAG as the shift value to set this bit.
+ * Supported values for bit 30:
+ * - 1 -- Last buffer.
+ * - 0 -- Not the last buffer.
+ *
+ * Supported values for bit 29:
+ * - 1 -- Continue the timestamp from the previous buffer.
+ * - 0 -- Timestamp of the current buffer is not related
+ * to the timestamp of the previous buffer.
+ * - Use #ASM_BIT_MASKS_CONTINUE_FLAG and #ASM_SHIFTS_CONTINUE_FLAG
+ * to set this bit.
+ *
+ * Supported values for bit 4:
+ * - 1 -- End of the frame.
+ * - 0 -- Not the end of frame, or this information is not known.
+ * - Use #ASM_BIT_MASK_EOF_FLAG as the bitmask and #ASM_SHIFT_EOF_FLAG
+ * as the shift value to set this bit.
+ *
+ * All other bits are reserved and must be set to 0.
+ *
+ * If bit 31=0 and bit 29=1: The timestamp of the first sample in
+ * this buffer continues from the timestamp of the last sample in
+ * the previous buffer. If there is no previous buffer (i.e., this
+ * is the first buffer sent after opening the stream or after a
+ * flush operation), or if the previous buffer does not have a valid
+ * timestamp, the samples in the current buffer also do not have a
+ * valid timestamp. They are played out as soon as possible.
+ *
+ *
+ * If bit 31=0 and bit 29=0: No timestamp is associated with the
+ * first sample in this buffer. The samples are played out as soon
+ * as possible.
+ *
+ *
+ * If bit 31=1 and bit 29 is ignored: The timestamp specified in
+ * this payload is honored.
+ *
+ *
+ * If bit 30=0: Not the last buffer in the stream. This is useful
+ * in removing trailing samples.
+ *
+ *
+ * For bit 4: The client can set this flag for every buffer sent in
+ * which the last byte is the end of a frame. If this flag is set,
+ * the buffer can contain data from multiple frames, but it should
+ * always end at a frame boundary. Restrictions allow the aDSP to
+ * detect an end of frame without requiring additional processing.
+ */
+
+} __packed;
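+
+/* A minimal usage sketch (hypothetical helper, compiled out): marking a
+ * write buffer with a valid timestamp and an end-of-frame indication.
+ * Only the timestamp/flag handling is shown; the buffer address, size
+ * and memory map handle are filled elsewhere. The flag bitmask and
+ * shift macros referenced in the comments above are assumed to be
+ * defined elsewhere in this header.
+ */
+#if 0	/* documentation example only */
+static inline void asm_write_flags_example(struct asm_data_cmd_write_v2 *w,
+					   u64 ts_us)
+{
+	w->timestamp_lsw = (u32)ts_us;
+	w->timestamp_msw = (u32)(ts_us >> 32);
+	w->flags = 0;
+	w->flags |= (1U << ASM_SHIFTIMESTAMP_VALID_FLAG) &
+		    ASM_BIT_MASKIMESTAMP_VALID_FLAG;	/* bit 31: valid ts */
+	w->flags |= (1U << ASM_SHIFT_EOF_FLAG) &
+		    ASM_BIT_MASK_EOF_FLAG;		/* bit 4: end of frame */
+}
+#endif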
+
+#define ASM_DATA_CMD_READ_V2 0x00010DAC
+
+struct asm_data_cmd_read_v2 {
+	struct apr_hdr       hdr;
+	u32                  buf_addr_lsw;
+/* the 64 bit address msw-lsw should be a valid mapped address
+ * and should be a multiple of 32 bytes
+ */
+
+
+	u32                  buf_addr_msw;
+/* The 64 bit address msw-lsw should be a valid mapped address
+ * and should be a multiple of 32 bytes.
+ * - Address of the buffer where the DSP puts the encoded data,
+ *   potentially at an offset specified by the uOffset field in the
+ *   ASM_DATA_EVENT_READ_DONE structure. The buffer should be
+ *   aligned to a 32-byte boundary.
+ * - In the case of a 32-bit shared memory address, the msw field
+ *   must be set to zero.
+ * - In the case of a 36-bit shared memory address, bit 31 to bit 4
+ *   of the msw must be set to zero.
+ */
+	u32                  mem_map_handle;
+/* memory map handle returned by DSP through
+ * ASM_CMD_SHARED_MEM_MAP_REGIONS command.
+ */
+
+	u32                  buf_size;
+/* Number of bytes available for the aDSP to write. The aDSP
+ * starts writing from buf_addr.
+ */
+
+	u32                  seq_id;
+	/* Optional buffer sequence ID. */
+} __packed;
+
+#define ASM_DATA_CMD_EOS               0x00010BDB
+#define ASM_DATA_EVENT_RENDERED_EOS    0x00010C1C
+#define ASM_DATA_EVENT_EOS             0x00010BDD
+
+#define ASM_DATA_EVENT_WRITE_DONE_V2 0x00010D99
+struct asm_data_event_write_done_v2 {
+	u32                  buf_addr_lsw;
+	/* lsw of the 64 bit address */
+	u32                  buf_addr_msw;
+	/* msw of the 64 bit address. Address given by the client in
+	 * the ASM_DATA_CMD_WRITE_V2 command.
+	 */
+	u32                  mem_map_handle;
+	/* memory map handle in the ASM_DATA_CMD_WRITE_V2  */
+
+	u32                  status;
+/* Status message (error code) that indicates whether the
+ * referenced buffer has been successfully consumed.
+ * Supported values: Refer to @xhyperref{Q3,[Q3]}
+ */
+} __packed;
+
+#define ASM_DATA_EVENT_READ_DONE_V2 0x00010D9A
+
+/* Definition of the frame metadata flag bitmask.*/
+#define ASM_BIT_MASK_FRAME_METADATA_FLAG (0x40000000UL)
+
+/* Definition of the frame metadata flag shift value. */
+#define ASM_SHIFT_FRAME_METADATA_FLAG 30
+
+struct asm_data_event_read_done_v2 {
+	u32                  status;
+/* Status message (error code).
+ * Supported values: Refer to @xhyperref{Q3,[Q3]}
+ */
+
+u32                  buf_addr_lsw;
+/* 64 bit address msw-lsw is a valid, mapped address. 64 bit
+ * address is a multiple of 32 bytes.
+ */
+
+u32                  buf_addr_msw;
+/* 64 bit address msw-lsw is a valid, mapped address. 64 bit
+* address is a multiple of 32 bytes.
+*
+* -Same address provided by the client in ASM_DATA_CMD_READ_V2
+* -In the case of 32 bit Shared memory address, msw field is set to
+* zero.
+* -In the case of 36 bit shared memory address, bit 31 to bit 4
+* -of msw is set to zero.
+*/
+
+u32                  mem_map_handle;
+/* memory map handle in the ASM_DATA_CMD_READ_V2  */
+
+u32                  enc_framesotal_size;
+/* Total size of the encoded frames in bytes.
+ * Supported values: >0
+ */
+
+u32                  offset;
+/* Offset (from buf_addr) to the first byte of the first encoded
+ * frame. All encoded frames are consecutive, starting from this
+ * offset.
+ * Supported values: > 0
+ */
+
+u32                  timestamp_lsw;
+/* Lower 32 bits of the 64-bit session time in microseconds of
+ * the first sample in the buffer. If bit 5 of the mode_flags field
+ * of ASM_STREAM_CMD_OPEN_READ_V2 is 1, the 64-bit timestamp is
+ * absolute capture time; otherwise it is relative session time. The
+ * absolute timestamp doesn't reset unless the system is reset.
+ */
+
+
+u32                  timestamp_msw;
+/* Upper 32 bits of the 64-bit session time in microseconds of
+ * the first sample in the buffer.
+ */
+
+
+u32                  flags;
+/* Bitfield of flags. Bit 30 indicates whether frame metadata is
+ * present. If frame metadata is present, num_frames consecutive
+ * instances of @xhyperref{hdr:FrameMetaData,Frame metadata} start
+ * at the buffer address.
+ * Supported values for bit 31:
+ * - 1 -- Timestamp is valid.
+ * - 0 -- Timestamp is invalid.
+ * - Use #ASM_BIT_MASKIMESTAMP_VALID_FLAG and
+ * #ASM_SHIFTIMESTAMP_VALID_FLAG to set this bit.
+ *
+ * Supported values for bit 30:
+ * - 1 -- Frame metadata is present.
+ * - 0 -- Frame metadata is absent.
+ * - Use #ASM_BIT_MASK_FRAME_METADATA_FLAG and
+ * #ASM_SHIFT_FRAME_METADATA_FLAG to set this bit.
+ *
+ * All other bits are reserved; the aDSP sets them to 0.
+ */
+
+u32                  num_frames;
+/* Number of encoded frames in the buffer. */
+
+u32                  seq_id;
+/* Optional buffer sequence ID.	*/
+} __packed;
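+
+/* A minimal usage sketch (hypothetical helper, compiled out): testing
+ * whether a READ_DONE event carries per-frame metadata, using the
+ * frame-metadata bitmask and shift values defined above.
+ */
+#if 0	/* documentation example only */
+static inline int asm_read_done_has_metadata_example(
+		const struct asm_data_event_read_done_v2 *ev)
+{
+	return (ev->flags & ASM_BIT_MASK_FRAME_METADATA_FLAG) >>
+		ASM_SHIFT_FRAME_METADATA_FLAG;
+}
+#endif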
+
+struct asm_data_read_buf_metadata_v2 {
+	u32          offset;
+/* Offset from buf_addr in #ASM_DATA_EVENT_READ_DONE_PAYLOAD to
+ * the frame associated with this metadata.
+ * Supported values: > 0
+ */
+
+u32          frm_size;
+/* Size of the encoded frame in bytes.
+ * Supported values: > 0
+ */
+
+u32          num_encoded_pcm_samples;
+/* Number of encoded PCM samples (per channel) in the frame
+ * associated with this metadata.
+ * Supported values: > 0
+ */
+
+u32          timestamp_lsw;
+/* Lower 32 bits of the 64-bit session time in microseconds of the
+ * first sample for this frame.
+ * If bit 5 of the mode_flags field of ASM_STREAM_CMD_OPEN_READ_V2
+ * is 1, the 64-bit timestamp is absolute capture time; otherwise it
+ * is relative session time. The absolute timestamp doesn't reset
+ * unless the system is reset.
+ */
+
+
+u32          timestamp_msw;
+/* Upper 32 bits of the 64-bit session time in microseconds of the
+ * first sample for this frame.
+ */
+
+u32          flags;
+/* Frame flags.
+ * Supported values for bit 31:
+ * - 1 -- Time stamp is valid
+ * - 0 -- Time stamp is not valid
+ * - All other bits are reserved; the aDSP sets them to 0.
+*/
+} __packed;
+
+/* Notifies the client of a change in the data sampling rate or
+ * channel mode. This event is raised by the decoder service. The
+ * event is enabled through the mode flags of
+ * #ASM_STREAM_CMD_OPEN_WRITE_V2 or
+ * #ASM_STREAM_CMD_OPEN_READWRITE_V2. It is raised when the decoder
+ * detects a change in the output sampling frequency or the
+ * number/positioning of output channels, or when it is the first
+ * frame decoded. The new sampling frequency or the new channel
+ * configuration is communicated back to the client asynchronously.
+ */
+
+#define ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY 0x00010C65
+
+/* Payload of the #ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY event.
+ * This event is raised when both of the following conditions are
+ * true:
+ * - The event is enabled through the mode_flags of
+ *   #ASM_STREAM_CMD_OPEN_WRITE_V2 or
+ *   #ASM_STREAM_CMD_OPEN_READWRITE_V2.
+ * - The decoder detects a change in either the output sampling
+ *   frequency or the number/positioning of output channels, or it
+ *   is the first frame decoded.
+ * This event is not raised (even if enabled) if the decoder is MIDI.
+ */
+
+
+struct asm_data_event_sr_cm_change_notify {
+	u32                  sample_rate;
+/* New sampling rate (in Hertz) after detecting a change in the
+ * bitstream.
+ * Supported values: 2000 to 48000
+ */
+
+	u16                  num_channels;
+/* New number of channels after detecting a change in the
+ * bitstream.
+ * Supported values: 1 to 8
+ */
+
+
+	u16                  reserved;
+	/* Reserved for future use. This field must be set to 0.*/
+
+	u8                   channel_mapping[8];
+
+} __packed;
+
+/* Notifies the client of a data sampling rate or channel mode
+ * change. This event is raised by the encoder service.
+ * This event is raised when:
+ * - Native mode encoding was requested in the encoder configuration
+ *   (i.e., the channel number was 0), the sample rate was 0, or
+ *   both were 0.
+ * - The input data frame at the encoder is the first one, or the
+ *   sampling rate/channel mode is different from the previous input
+ *   data frame.
+ */
+#define ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY 0x00010BDE
+
+struct asm_data_event_enc_sr_cm_change_notify {
+	u32                  sample_rate;
+/* New sampling rate (in Hertz) after detecting a change in the
+ * input data.
+ * Supported values: 2000 to 48000
+ */
+
+
+	u16                  num_channels;
+/* New number of channels after detecting a change in the input
+ * data. Supported values: 1 to 8
+ */
+
+
+	u16                  bits_per_sample;
+/* New bits per sample after detecting a change in the input
+ * data.
+ * Supported values: 16, 24
+ */
+
+
+	u8                   channel_mapping[8];
+
+} __packed;
+#define ASM_DATA_CMD_IEC_60958_FRAME_RATE 0x00010D87
+
+
+/* Payload of the #ASM_DATA_CMD_IEC_60958_FRAME_RATE command,
+ * which is used to indicate the IEC 60958 frame rate of a given
+ * packetized audio stream.
+ */
+
+struct asm_data_cmd_iec_60958_frame_rate {
+	u32                  frame_rate;
+/* IEC 60958 frame rate of the incoming IEC 61937 packetized stream.
+ * Supported values: Any valid frame rate
+ */
+} __packed;
+
+/* adsp_asm_data_commands.h*/
+#define ASM_SVC_CMD_GET_STREAM_HANDLES         0x00010C0B
+
+#define ASM_SVC_CMDRSP_GET_STREAM_HANDLES      0x00010C1B
+
+/* Definition of the stream ID bitmask.*/
+#define ASM_BIT_MASK_STREAM_ID                 (0x000000FFUL)
+
+/* Definition of the stream ID shift value.*/
+#define ASM_SHIFT_STREAM_ID                    0
+
+/* Definition of the session ID bitmask.*/
+#define ASM_BIT_MASK_SESSION_ID                (0x0000FF00UL)
+
+/* Definition of the session ID shift value.*/
+#define ASM_SHIFT_SESSION_ID                   8
+
+/* Definition of the service ID bitmask.*/
+#define ASM_BIT_MASK_SERVICE_ID                (0x00FF0000UL)
+
+/* Definition of the service ID shift value.*/
+#define ASM_SHIFT_SERVICE_ID                   16
+
+/* Definition of the domain ID bitmask.*/
+#define ASM_BIT_MASK_DOMAIN_ID                (0xFF000000UL)
+
+/* Definition of the domain ID shift value.*/
+#define ASM_SHIFT_DOMAIN_ID                    24
+
+/* Payload of the #ASM_SVC_CMDRSP_GET_STREAM_HANDLES message,
+ * which returns a list of currently active stream handles.
+ * Immediately following this structure are num_handles of uint32
+ * stream handles.
+ */
+
+
+struct asm_svc_cmdrsp_get_stream_handles {
+	u32                  num_handles;
+	/* Number of active stream handles.	*/
+} __packed;
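+
+/* A minimal usage sketch (hypothetical helper, compiled out): unpacking
+ * the stream and session IDs from one of the uint32 handles that follow
+ * the response structure, using the bitmasks and shifts defined above.
+ */
+#if 0	/* documentation example only */
+static inline void asm_split_stream_handle_example(u32 handle,
+						   u8 *stream_id,
+						   u8 *session_id)
+{
+	*stream_id = (handle & ASM_BIT_MASK_STREAM_ID) >> ASM_SHIFT_STREAM_ID;
+	*session_id = (handle & ASM_BIT_MASK_SESSION_ID) >>
+			ASM_SHIFT_SESSION_ID;
+}
+#endif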
+
+#define ASM_CMD_SHARED_MEM_MAP_REGIONS               0x00010D92
+#define ASM_CMDRSP_SHARED_MEM_MAP_REGIONS     0x00010D93
+#define ASM_CMD_SHARED_MEM_UNMAP_REGIONS              0x00010D94
+
+/* adsp_asm_service_commands.h */
+
+#define ASM_MAX_SESSION_ID  (8)
+
+/* Maximum number of sessions.*/
+#define ASM_MAX_NUM_SESSIONS                ASM_MAX_SESSION_ID
+
+/* Maximum number of streams per session.*/
+#define ASM_MAX_STREAMS_PER_SESSION (8)
+#define ASM_SESSION_CMD_RUN_V2                   0x00010DAA
+#define ASM_SESSION_CMD_RUN_STARTIME_RUN_IMMEDIATE  0
+#define ASM_SESSION_CMD_RUN_STARTIME_RUN_AT_ABSOLUTEIME 1
+#define ASM_SESSION_CMD_RUN_STARTIME_RUN_AT_RELATIVEIME 2
+#define ASM_SESSION_CMD_RUN_STARTIME_RUN_WITH_DELAY     3
+
+#define ASM_BIT_MASK_RUN_STARTIME                 (0x00000003UL)
+
+/* Bit shift value used to specify the start time for the
+ * ASM_SESSION_CMD_RUN_V2 command.
+ */
+#define ASM_SHIFT_RUN_STARTIME 0
+struct asm_session_cmd_run_v2 {
+	struct apr_hdr hdr;
+	u32                  flags;
+/* Specifies whether to run immediately or at a specific
+ * rendering time or with a specified delay. Run with delay is
+ * useful for delaying in case of ASM loopback opened through
+ * ASM_STREAM_CMD_OPEN_LOOPBACK_V2. Use #ASM_BIT_MASK_RUN_STARTIME
+ * and #ASM_SHIFT_RUN_STARTIME to set this 2-bit flag.
+ *
+ *
+ *Bits 0 and 1 can take one of four possible values:
+ *
+ *- #ASM_SESSION_CMD_RUN_STARTIME_RUN_IMMEDIATE
+ *- #ASM_SESSION_CMD_RUN_STARTIME_RUN_AT_ABSOLUTEIME
+ *- #ASM_SESSION_CMD_RUN_STARTIME_RUN_AT_RELATIVEIME
+ *- #ASM_SESSION_CMD_RUN_STARTIME_RUN_WITH_DELAY
+ *
+ *All other bits are reserved; clients must set them to zero.
+ */
+
+	u32                  time_lsw;
+/* Lower 32 bits of the time in microseconds used to align the
+ * session origin time. When bits 0-1 of flags is
+ * ASM_SESSION_CMD_RUN_STARTIME_RUN_WITH_DELAY, time_lsw is the LSW
+ * of the delay in us. For ASM_SESSION_CMD_RUN_STARTIME_RUN_WITH_DELAY,
+ * the maximum value of the 64 bit delay is 150 ms.
+ */
+
+	u32                  time_msw;
+/* Upper 32 bits of the time in microseconds used to align the
+ * session origin time. When bits 0-1 of flags is
+ * ASM_SESSION_CMD_RUN_STARTIME_RUN_WITH_DELAY, time_msw is the MSW
+ * of the delay in us. For ASM_SESSION_CMD_RUN_STARTIME_RUN_WITH_DELAY,
+ * the maximum value of the 64 bit delay is 150 ms.
+ */
+
+} __packed;
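+
+/* A minimal usage sketch (hypothetical helper, compiled out):
+ * requesting an immediate run, encoding the start-time selection into
+ * bits 0-1 of the flags field with the mask and shift defined above.
+ */
+#if 0	/* documentation example only */
+static inline void asm_run_immediate_example(struct asm_session_cmd_run_v2 *run)
+{
+	run->flags = (ASM_SESSION_CMD_RUN_STARTIME_RUN_IMMEDIATE <<
+		      ASM_SHIFT_RUN_STARTIME) & ASM_BIT_MASK_RUN_STARTIME;
+	run->time_lsw = 0;	/* unused for an immediate run */
+	run->time_msw = 0;
+}
+#endif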
+
+#define ASM_SESSION_CMD_PAUSE 0x00010BD3
+#define ASM_SESSION_CMD_GET_SESSIONTIME_V3 0x00010D9D
+#define ASM_SESSION_CMD_REGISTER_FOR_RX_UNDERFLOW_EVENTS 0x00010BD5
+
+struct asm_session_cmd_rgstr_rx_underflow {
+	struct apr_hdr hdr;
+	u16                  enable_flag;
+/* Specifies whether a client is to receive events when an Rx
+ * session underflows.
+ * Supported values:
+ * - 0 -- Do not send underflow events
+ * - 1 -- Send underflow events
+ */
+	u16                  reserved;
+	/* Reserved. This field must be set to zero.*/
+} __packed;
+
+#define ASM_SESSION_CMD_REGISTER_FORX_OVERFLOW_EVENTS 0x00010BD6
+
+struct asm_session_cmd_regx_overflow {
+	struct apr_hdr hdr;
+	u16                  enable_flag;
+/* Specifies whether a client is to receive events when a Tx
+* session overflows.
+ * Supported values:
+ * - 0 -- Do not send overflow events
+ * - 1 -- Send overflow events
+ */
+
+	u16                  reserved;
+	/* Reserved. This field must be set to zero.*/
+} __packed;
+
+#define ASM_SESSION_EVENT_RX_UNDERFLOW        0x00010C17
+#define ASM_SESSION_EVENTX_OVERFLOW           0x00010C18
+#define ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3 0x00010D9E
+
+struct asm_session_cmdrsp_get_sessiontime_v3 {
+	u32                  status;
+	/* Status message (error code).
+	* Supported values: Refer to @xhyperref{Q3,[Q3]}
+	*/
+
+	u32                  sessiontime_lsw;
+	/* Lower 32 bits of the current session time in microseconds.*/
+
+	u32                  sessiontime_msw;
+	/* Upper 32 bits of the current session time in microseconds.*/
+
+	u32                  absolutetime_lsw;
+/* Lower 32 bits, in microseconds, of the absolute time at which
+ * the sample corresponding to the above session time gets rendered
+ * to hardware. This absolute time may be slightly in the future or
+ * past.
+ */
+
+
+	u32                  absolutetime_msw;
+/* Upper 32 bits, in microseconds, of the absolute time at which
+ * the sample corresponding to the above session time gets rendered
+ * to hardware. This absolute time may be slightly in the future or
+ * past.
+ */
+
+} __packed;
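+
+/* A minimal usage sketch (hypothetical helper, compiled out):
+ * recombining the two 32-bit halves of the reported session time into
+ * a 64-bit value in microseconds.
+ */
+#if 0	/* documentation example only */
+static inline u64 asm_sessiontime_us_example(
+		const struct asm_session_cmdrsp_get_sessiontime_v3 *rsp)
+{
+	return ((u64)rsp->sessiontime_msw << 32) | rsp->sessiontime_lsw;
+}
+#endif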
+
+#define ASM_SESSION_CMD_ADJUST_SESSION_CLOCK_V2     0x00010D9F
+
+struct asm_session_cmd_adjust_session_clock_v2 {
+	struct apr_hdr hdr;
+u32                  adjustime_lsw;
+/* Lower 32 bits of the signed 64-bit quantity that specifies the
+ * adjustment time in microseconds to the session clock.
+ *
+ * Positive values indicate advancement of the session clock.
+ * Negative values indicate delay of the session clock.
+ */
+
+
+	u32                  adjustime_msw;
+/* Upper 32 bits of the signed 64-bit quantity that specifies
+ * the adjustment time in microseconds to the session clock.
+ * Positive values indicate advancement of the session clock.
+ * Negative values indicate delay of the session clock.
+ */
+
+} __packed;
+
+#define ASM_SESSION_CMDRSP_ADJUST_SESSION_CLOCK_V2    0x00010DA0
+
+struct asm_session_cmdrsp_adjust_session_clock_v2 {
+	u32                  status;
+/* Status message (error code).
+ * Supported values: Refer to @xhyperref{Q3,[Q3]}
+ * An error means the session clock is not adjusted. In this case,
+ * the next two fields are irrelevant.
+ */
+
+
+	u32                  actual_adjustime_lsw;
+/* Lower 32 bits of the signed 64-bit quantity that specifies
+ * the actual adjustment in microseconds performed by the aDSP.
+ * A positive value indicates advancement of the session clock. A
+ * negative value indicates delay of the session clock.
+ */
+
+
+	u32                  actual_adjustime_msw;
+/* Upper 32 bits of the signed 64-bit quantity that specifies
+ * the actual adjustment in microseconds performed by the aDSP.
+ * A positive value indicates advancement of the session clock. A
+ * negative value indicates delay of the session clock.
+ */
+
+
+	u32                  cmd_latency_lsw;
+/* Lower 32 bits of the unsigned 64-bit quantity that specifies
+ * the amount of time in microseconds taken to perform the session
+ * clock adjustment.
+ */
+
+
+	u32                  cmd_latency_msw;
+/* Upper 32 bits of the unsigned 64-bit quantity that specifies
+ * the amount of time in microseconds taken to perform the session
+ * clock adjustment.
+ */
+
+} __packed;
+
+#define ASM_SESSION_CMD_GET_PATH_DELAY_V2	 0x00010DAF
+#define ASM_SESSION_CMDRSP_GET_PATH_DELAY_V2 0x00010DB0
+
+struct asm_session_cmdrsp_get_path_delay_v2 {
+	u32                  status;
+/* Status message (error code). Whether this get delay operation
+ * is successful or not. Delay value is valid only if status is
+ * success.
+ * Supported values: Refer to @xhyperref{Q5,[Q5]}
+ */
+
+	u32                  audio_delay_lsw;
+	/* Lower 32 bits of the aDSP delay in microseconds. */
+
+	u32                  audio_delay_msw;
+	/* Upper 32 bits of the aDSP delay in microseconds. */
+
+} __packed;
+
+/* adsp_asm_session_command.h*/
+#define ASM_STREAM_CMD_OPEN_WRITE_V2       0x00010D8F
+
+struct asm_stream_cmd_open_write_v2 {
+	struct apr_hdr			hdr;
+	uint32_t                    mode_flags;
+/* Mode flags that configure the stream to notify the client
+ * whenever it detects an SR/CM change at the input to its POPP.
+ * Supported values for bits 0 to 1:
+ * - Reserved; clients must set them to zero.
+ * Supported values for bit 2:
+ * - 0 -- SR/CM change notification event is disabled.
+ * - 1 -- SR/CM change notification event is enabled.
+ * - Use #ASM_BIT_MASK_SR_CM_CHANGE_NOTIFY_FLAG and
+ * #ASM_SHIFT_SR_CM_CHANGE_NOTIFY_FLAG to set or get this bit.
+ *
+ * Supported values for bit 31:
+ * - 0 -- Stream to be opened in non-Gapless mode.
+ * - 1 -- Stream to be opened in Gapless mode. In Gapless mode,
+ * successive streams must be opened with same session ID but
+ * different stream IDs.
+ *
+ * - Use #ASM_BIT_MASK_GAPLESS_MODE_FLAG and
+ * #ASM_SHIFT_GAPLESS_MODE_FLAG to set or get this bit.
+ *
+ *
+ * @note1hang MIDI and DTMF streams cannot be opened in Gapless mode.
+ */
+
+	uint16_t                    sink_endpointype;
+/*< Sink endpoint type.
+ * Supported values:
+ * - 0 -- Device matrix
+ * - Other values are reserved.
+ *
+ * The device matrix is the gateway to the hardware ports.
+ */
+
+	uint16_t                    bits_per_sample;
+/*< Number of bits per sample processed by ASM modules.
+ * Supported values: 16 and 24 bits per sample
+ */
+
+	uint32_t                    postprocopo_id;
+/*< Specifies the topology (order of processing) of
+ * postprocessing algorithms. <i>None</i> means no postprocessing.
+ * Supported values:
+ * - #ASM_STREAM_POSTPROCOPO_ID_DEFAULT
+ * - #ASM_STREAM_POSTPROCOPO_ID_MCH_PEAK_VOL
+ * - #ASM_STREAM_POSTPROCOPO_ID_NONE
+ *
+ * This field can also be enabled through SetParams flags.
+ */
+
+	uint32_t                    dec_fmt_id;
+/*< Configuration ID of the decoder media format.
+ *
+ * Supported values:
+ * - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2
+ * - #ASM_MEDIA_FMT_ADPCM
+ * - #ASM_MEDIA_FMT_MP3
+ * - #ASM_MEDIA_FMT_AAC_V2
+ * - #ASM_MEDIA_FMT_DOLBY_AAC
+ * - #ASM_MEDIA_FMT_AMRNB_FS
+ * - #ASM_MEDIA_FMT_AMRWB_FS
+ * - #ASM_MEDIA_FMT_AMR_WB_PLUS_V2
+ * - #ASM_MEDIA_FMT_V13K_FS
+ * - #ASM_MEDIA_FMT_EVRC_FS
+ * - #ASM_MEDIA_FMT_EVRCB_FS
+ * - #ASM_MEDIA_FMT_EVRCWB_FS
+ * - #ASM_MEDIA_FMT_SBC
+ * - #ASM_MEDIA_FMT_WMA_V10PRO_V2
+ * - #ASM_MEDIA_FMT_WMA_V9_V2
+ * - #ASM_MEDIA_FMT_AC3_DEC
+ * - #ASM_MEDIA_FMT_EAC3_DEC
+ * - #ASM_MEDIA_FMT_G711_ALAW_FS
+ * - #ASM_MEDIA_FMT_G711_MLAW_FS
+ * - #ASM_MEDIA_FMT_G729A_FS
+ * - #ASM_MEDIA_FMT_FR_FS
+ * - #ASM_MEDIA_FMT_VORBIS
+ * - #ASM_MEDIA_FMT_FLAC
+ * - #ASM_MEDIA_FMT_EXAMPLE
+ */
+} __packed;
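+
+/* A minimal usage sketch (hypothetical helper, compiled out): opening a
+ * 16-bit write stream with the SR/CM change notification enabled. The
+ * SR/CM-change bitmask and shift macros referenced above are assumed to
+ * be defined elsewhere in this header; the MP3 decoder format and the
+ * default postprocessing topology are example choices.
+ */
+#if 0	/* documentation example only */
+static inline void asm_open_write_mp3_example(
+		struct asm_stream_cmd_open_write_v2 *open)
+{
+	open->mode_flags = (1U << ASM_SHIFT_SR_CM_CHANGE_NOTIFY_FLAG) &
+			   ASM_BIT_MASK_SR_CM_CHANGE_NOTIFY_FLAG;	/* bit 2 */
+	open->sink_endpointype = ASM_END_POINT_DEVICE_MATRIX;
+	open->bits_per_sample = 16;
+	open->postprocopo_id = ASM_STREAM_POSTPROC_TOPO_ID_DEFAULT;
+	open->dec_fmt_id = ASM_MEDIA_FMT_MP3;
+}
+#endif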
+
+#define ASM_STREAM_CMD_OPEN_READ_V2                 0x00010D8C
+/* Definition of the timestamp type flag bitmask */
+#define ASM_BIT_MASKIMESTAMPYPE_FLAG        (0x00000020UL)
+
+/* Definition of the timestamp type flag shift value. */
+#define ASM_SHIFTIMESTAMPYPE_FLAG 5
+
+/* Relative timestamp is identified by this value.*/
+#define ASM_RELATIVEIMESTAMP      0
+
+/* Absolute timestamp is identified by this value.*/
+#define ASM_ABSOLUTEIMESTAMP      1
+
+
+struct asm_stream_cmd_open_read_v2 {
+	struct apr_hdr hdr;
+	u32                    mode_flags;
+/* Mode flags that indicate whether meta information per encoded
+ * frame is to be provided.
+ * Supported values for bit 4:
+ *
+ * - 0 -- Return data buffer contains all encoded frames only; it
+ * does not contain frame metadata.
+ *
+ * - 1 -- Return data buffer contains an array of metadata and
+ * encoded frames.
+ *
+ * - Use #ASM_BIT_MASK_META_INFO_FLAG as the bitmask and
+ * #ASM_SHIFT_META_INFO_FLAG as the shift value for this bit.
+ *
+ *
+ * Supported values for bit 5:
+ *
+ * - ASM_RELATIVEIMESTAMP -- ASM_DATA_EVENT_READ_DONE_V2 will have
+ * - relative time-stamp.
+ * - ASM_ABSOLUTEIMESTAMP -- ASM_DATA_EVENT_READ_DONE_V2 will
+ * - have absolute time-stamp.
+ *
+ * - Use #ASM_BIT_MASKIMESTAMPYPE_FLAG as the bitmask and
+ * #ASM_SHIFTIMESTAMPYPE_FLAG as the shift value for this bit.
+ *
+ * All other bits are reserved; clients must set them to zero.
+ */
+
+	u32                    src_endpointype;
+/* Specifies the endpoint providing the input samples.
+ * Supported values:
+ * - 0 -- Device matrix
+ * - All other values are reserved; clients must set them to zero.
+ * Otherwise, an error is returned.
+ * The device matrix is the gateway from the tunneled Tx ports.
+ */
+
+	u32                    preprocopo_id;
+/* Specifies the topology (order of processing) of preprocessing
+ * algorithms. <i>None</i> means no preprocessing.
+ * Supported values:
+ * - #ASM_STREAM_PREPROCOPO_ID_DEFAULT
+ * - #ASM_STREAM_PREPROCOPO_ID_NONE
+ *
+ * This field can also be enabled through SetParams flags.
+ */
+
+	u32                    enc_cfg_id;
+/* Media configuration ID for encoded output.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2
+ * - #ASM_MEDIA_FMT_AAC_V2
+ * - #ASM_MEDIA_FMT_AMRNB_FS
+ * - #ASM_MEDIA_FMT_AMRWB_FS
+ * - #ASM_MEDIA_FMT_V13K_FS
+ * - #ASM_MEDIA_FMT_EVRC_FS
+ * - #ASM_MEDIA_FMT_EVRCB_FS
+ * - #ASM_MEDIA_FMT_EVRCWB_FS
+ * - #ASM_MEDIA_FMT_SBC
+ * - #ASM_MEDIA_FMT_G711_ALAW_FS
+ * - #ASM_MEDIA_FMT_G711_MLAW_FS
+ * - #ASM_MEDIA_FMT_G729A_FS
+ * - #ASM_MEDIA_FMT_EXAMPLE
+ * - #ASM_MEDIA_FMT_WMA_V8
+ */
+
+	u16                    bits_per_sample;
+/* Number of bits per sample processed by ASM modules.
+ * Supported values: 16 and 24 bits per sample
+ */
+
+	u16                    reserved;
+/* Reserved for future use. This field must be set to zero.*/
+} __packed;
+
+#define ASM_POPP_OUTPUT_SR_NATIVE_RATE                                  0
+
+/* Enumeration for the maximum sampling rate at the POPP output.*/
+#define ASM_POPP_OUTPUT_SR_MAX_RATE             48000
+
+#define ASM_STREAM_CMD_OPEN_READWRITE_V2        0x00010D8D
+
+struct asm_stream_cmd_open_readwrite_v2 {
+	struct apr_hdr         hdr;
+	u32                    mode_flags;
+/* Mode flags.
+ * Supported values for bit 2:
+ * - 0 -- SR/CM change notification event is disabled.
+ * - 1 -- SR/CM change notification event is enabled. Use
+ * #ASM_BIT_MASK_SR_CM_CHANGE_NOTIFY_FLAG and
+ * #ASM_SHIFT_SR_CM_CHANGE_NOTIFY_FLAG to set or get
+ * this flag.
+ *
+ * Supported values for bit 4:
+ * - 0 -- Return read data buffer contains all encoded frames only; it
+ * does not contain frame metadata.
+ * - 1 -- Return read data buffer contains an array of metadata and
+ * encoded frames.
+ *
+ * All other bits are reserved; clients must set them to zero.
+ */
+
+	u32                    postprocopo_id;
+/* Specifies the topology (order of processing) of postprocessing
+ * algorithms. <i>None</i> means no postprocessing.
+ *
+ * Supported values:
+ * - #ASM_STREAM_POSTPROCOPO_ID_DEFAULT
+ * - #ASM_STREAM_POSTPROCOPO_ID_MCH_PEAK_VOL
+ * - #ASM_STREAM_POSTPROCOPO_ID_NONE
+ */
+
+	u32                    dec_fmt_id;
+/* Specifies the media type of the input data. PCM indicates that
+ * no decoding must be performed, e.g., this is an NT encoder
+ * session.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2
+ * - #ASM_MEDIA_FMT_ADPCM
+ * - #ASM_MEDIA_FMT_MP3
+ * - #ASM_MEDIA_FMT_AAC_V2
+ * - #ASM_MEDIA_FMT_DOLBY_AAC
+ * - #ASM_MEDIA_FMT_AMRNB_FS
+ * - #ASM_MEDIA_FMT_AMRWB_FS
+ * - #ASM_MEDIA_FMT_V13K_FS
+ * - #ASM_MEDIA_FMT_EVRC_FS
+ * - #ASM_MEDIA_FMT_EVRCB_FS
+ * - #ASM_MEDIA_FMT_EVRCWB_FS
+ * - #ASM_MEDIA_FMT_SBC
+ * - #ASM_MEDIA_FMT_WMA_V10PRO_V2
+ * - #ASM_MEDIA_FMT_WMA_V9_V2
+ * - #ASM_MEDIA_FMT_AMR_WB_PLUS_V2
+ * - #ASM_MEDIA_FMT_AC3_DEC
+ * - #ASM_MEDIA_FMT_G711_ALAW_FS
+ * - #ASM_MEDIA_FMT_G711_MLAW_FS
+ * - #ASM_MEDIA_FMT_G729A_FS
+ * - #ASM_MEDIA_FMT_EXAMPLE
+ */
+
+	u32                    enc_cfg_id;
+/* Specifies the media type for the output of the stream. PCM
+ * indicates that no encoding must be performed, e.g., this is an NT
+ * decoder session.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2
+ * - #ASM_MEDIA_FMT_AAC_V2
+ * - #ASM_MEDIA_FMT_AMRNB_FS
+ * - #ASM_MEDIA_FMT_AMRWB_FS
+ * - #ASM_MEDIA_FMT_V13K_FS
+ * - #ASM_MEDIA_FMT_EVRC_FS
+ * - #ASM_MEDIA_FMT_EVRCB_FS
+ * - #ASM_MEDIA_FMT_EVRCWB_FS
+ * - #ASM_MEDIA_FMT_SBC
+ * - #ASM_MEDIA_FMT_G711_ALAW_FS
+ * - #ASM_MEDIA_FMT_G711_MLAW_FS
+ * - #ASM_MEDIA_FMT_G729A_FS
+ * - #ASM_MEDIA_FMT_EXAMPLE
+ * - #ASM_MEDIA_FMT_WMA_V8
+ */
+
+	u16                    bits_per_sample;
+/* Number of bits per sample processed by ASM modules.
+ * Supported values: 16 and 24 bits per sample
+ */
+
+	u16                    reserved;
+/* Reserved for future use. This field must be set to zero.*/
+
+} __packed;
+
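+/* Illustrative sketch only (not part of the ASM ABI): builds the mode_flags
+ * word for asm_stream_cmd_open_readwrite_v2 from the two documented bits.
+ * The helper name and the hard-coded bit positions (2 and 4, as described
+ * above) are assumptions made for this example.
+ */
+static inline u32 example_asm_rw_mode_flags(int sr_cm_notify, int frame_meta)
+{
+	u32 flags = 0;
+
+	if (sr_cm_notify)
+		flags |= 1 << 2;	/* bit 2: SR/CM change notification */
+	if (frame_meta)
+		flags |= 1 << 4;	/* bit 4: metadata + encoded frames */
+
+	return flags;
+}
+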
+#define ASM_STREAM_CMD_OPEN_LOOPBACK_V2 0x00010D8E
+struct asm_stream_cmd_open_loopback_v2 {
+	struct apr_hdr         hdr;
+	u32                    mode_flags;
+/* Mode flags.
+ * Bits 0 to 31: reserved; clients must set these bits to zero
+ */
+	u16                    src_endpointype;
+	/* Endpoint type. 0 = Tx Matrix */
+	u16                    sink_endpointype;
+	/* Endpoint type. 0 = Rx Matrix */
+	u32                    postprocopo_id;
+/* Postprocessor topology ID. Specifies the topology of
+ * postprocessing algorithms.
+ */
+
+	u16                    bits_per_sample;
+/* The number of bits per sample processed by ASM modules
+ * Supported values: 16 and 24 bits per sample
+ */
+	u16                    reserved;
+/* Reserved for future use. This field must be set to zero. */
+} __packed;
+
+#define ASM_STREAM_CMD_CLOSE             0x00010BCD
+#define ASM_STREAM_CMD_FLUSH             0x00010BCE
+
+
+#define ASM_STREAM_CMD_FLUSH_READBUFS   0x00010C09
+#define ASM_STREAM_CMD_SET_PP_PARAMS_V2 0x00010DA1
+
+struct asm_stream_cmd_set_pp_params_v2 {
+	u32                  data_payload_addr_lsw;
+/* LSW of parameter data payload address. Supported values: any. */
+	u32                  data_payload_addr_msw;
+/* MSW of parameter data payload address. Supported values: any.
+ * - Must be set to zero for in-band data.
+ * - In the case of a 32-bit shared memory address, the msw field must
+ *   be set to zero.
+ * - In the case of a 36-bit shared memory address, bits 31 to 4 of the
+ *   msw field must be set to zero.
+ */
+	u32                  mem_map_handle;
+/* Supported values: any.
+ * Memory map handle returned by the DSP through the
+ * ASM_CMD_SHARED_MEM_MAP_REGIONS command.
+ * If mmhandle is NULL, the ParamData payloads are within the
+ * message payload (in-band).
+ * If mmhandle is non-NULL, the ParamData payloads begin at the
+ * address specified in the address msw and lsw (out-of-band).
+ */
+
+	u32                  data_payload_size;
+/* Size in bytes of the variable payload accompanying the message, or
+ * in shared memory. This field is used for parsing the parameter
+ * payload.
+ */
+
+} __packed;
+
+
+struct asm_stream_param_data_v2 {
+	u32                  module_id;
+	/* Unique module ID. */
+
+	u32                  param_id;
+	/* Unique parameter ID. */
+
+	u16                  param_size;
+/* Data size of the param_id/module_id combination. This is
+ * a multiple of 4 bytes.
+ */
+
+	u16                  reserved;
+/* Reserved for future enhancements. This field must be set to
+ * zero.
+ */
+
+} __packed;
+
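+/* Illustrative sketch only: shows how a shared-memory physical address is
+ * split into the lsw/msw pair of asm_stream_cmd_set_pp_params_v2, and how
+ * an in-band request leaves the address and mem_map_handle at zero.  The
+ * helper names and the u64 address argument are assumptions made for this
+ * example; the real drivers may package the command differently.
+ */
+static inline void example_asm_pp_params_out_of_band(
+		struct asm_stream_cmd_set_pp_params_v2 *cmd,
+		u64 param_phys, u32 mem_map_handle, u32 payload_size)
+{
+	cmd->data_payload_addr_lsw = (u32)param_phys;
+	cmd->data_payload_addr_msw = (u32)(param_phys >> 32);
+	cmd->mem_map_handle        = mem_map_handle;
+	cmd->data_payload_size     = payload_size;
+}
+
+static inline void example_asm_pp_params_in_band(
+		struct asm_stream_cmd_set_pp_params_v2 *cmd, u32 payload_size)
+{
+	/* In-band: the ParamData follows this header in the same packet. */
+	cmd->data_payload_addr_lsw = 0;
+	cmd->data_payload_addr_msw = 0;
+	cmd->mem_map_handle        = 0;
+	cmd->data_payload_size     = payload_size;
+}
+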
+#define ASM_STREAM_CMD_GET_PP_PARAMS_V2		0x00010DA2
+
+struct asm_stream_cmd_get_pp_params_v2 {
+	u32                  data_payload_addr_lsw;
+	/* LSW of the parameter data payload address. */
+	u32                  data_payload_addr_msw;
+/* MSW of the parameter data payload address.
+ * - Size of the shared memory, if specified, shall be large enough
+ * to contain the whole ParamData payload, including Module ID,
+ * Param ID, Param Size, and Param Values
+ * - Must be set to zero for in-band data
+ * - In the case of 32 bit Shared memory address, msw field must be
+ * set to zero.
+ * - In the case of 36 bit shared memory address, bit 31 to bit 4 of
+ * msw must be set to zero.
+ */
+
+	u32                  mem_map_handle;
+/* Supported values: any.
+ * Memory map handle returned by the DSP through the
+ * ASM_CMD_SHARED_MEM_MAP_REGIONS command.
+ * If mmhandle is NULL, the ParamData payloads in the ACK are within the
+ * message payload (in-band).
+ * If mmhandle is non-NULL, the ParamData payloads in the ACK begin at
+ * the address specified in the address msw and lsw (out-of-band).
+ */
+
+	u32                  module_id;
+	/* Unique module ID. */
+
+	u32                  param_id;
+	/* Unique parameter ID. */
+
+	u16                  param_max_size;
+/* Maximum data size of the module_id/param_id combination. This
+ * is a multiple of 4 bytes.
+ */
+
+
+	u16                  reserved;
+/* Reserved for backward compatibility. Clients must set this
+* field to zero.
+*/
+
+} __packed;
+
+#define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10
+
+#define ASM_PARAM_ID_ENCDEC_BITRATE     0x00010C13
+
+struct asm_bitrate_param {
+	u32                  bitrate;
+/* Maximum supported bitrate. Only the AAC encoder is supported.*/
+
+} __packed;
+
+#define ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2 0x00010DA3
+#define ASM_PARAM_ID_AAC_SBR_PS_FLAG		 0x00010C63
+
+/* Flag to turn off both SBR and PS processing, if they are
+ * present in the bitstream.
+ */
+
+#define ASM_AAC_SBR_OFF_PS_OFF (2)
+
+/* Flag to turn on SBR but turn off PS processing, if they are
+ * present in the bitstream.
+ */
+
+#define ASM_AAC_SBR_ON_PS_OFF  (1)
+
+/* Flag to turn on both SBR and PS processing, if they are
+ * present in the bitstream (default behavior).
+ */
+
+
+#define ASM_AAC_SBR_ON_PS_ON   (0)
+
+/* Structure for an AAC SBR PS processing flag. */
+
+/*  Payload of the #ASM_PARAM_ID_AAC_SBR_PS_FLAG parameter in the
+ * #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+ */
+struct asm_aac_sbr_ps_flag_param {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+
+	u32                  sbr_ps_flag;
+/* Control parameter to enable or disable SBR/PS processing in
+ * the AAC bitstream. Use the following macros to set this field:
+ * - #ASM_AAC_SBR_OFF_PS_OFF -- Turn off both SBR and PS
+ * processing, if they are present in the bitstream.
+ * - #ASM_AAC_SBR_ON_PS_OFF -- Turn on SBR processing, but not PS
+ * processing, if they are present in the bitstream.
+ * - #ASM_AAC_SBR_ON_PS_ON -- Turn on both SBR and PS processing,
+ * if they are present in the bitstream (default behavior).
+ * - All other values are invalid.
+ * Changes are applied to the next decoded frame.
+ */
+} __packed;
+
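+/* Illustrative sketch only: selects the default "SBR on, PS on" behaviour
+ * in the payload above.  Population of hdr, encdec and encblk is elided
+ * and left to the caller; the helper name is an assumption made for this
+ * example.
+ */
+static inline void example_asm_aac_sbr_ps_default(
+		struct asm_aac_sbr_ps_flag_param *p)
+{
+	p->sbr_ps_flag = ASM_AAC_SBR_ON_PS_ON;
+}
+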
+#define ASM_PARAM_ID_AAC_DUAL_MONO_MAPPING                      0x00010C64
+
+/*	First single channel element in a dual mono bitstream.*/
+#define ASM_AAC_DUAL_MONO_MAP_SCE_1                                 (1)
+
+/*	Second single channel element in a dual mono bitstream.*/
+#define ASM_AAC_DUAL_MONO_MAP_SCE_2                                 (2)
+
+/* Structure for AAC decoder dual mono channel mapping. */
+
+
+struct asm_aac_dual_mono_mapping_param {
+	struct apr_hdr							hdr;
+	struct asm_stream_cmd_set_encdec_param	encdec;
+	struct asm_enc_cfg_blk_param_v2			encblk;
+	u16    left_channel_sce;
+	u16    right_channel_sce;
+
+} __packed;
+
+#define ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2 0x00010DA4
+
+struct asm_stream_cmdrsp_get_pp_params_v2 {
+	u32                  status;
+} __packed;
+
+#define ASM_PARAM_ID_AC3_KARAOKE_MODE 0x00010D73
+
+/* Enumeration for no vocals in a karaoke stream.*/
+#define AC3_KARAOKE_MODE_NO_VOCAL     (0)
+
+/* Enumeration for only the left vocal in a karaoke stream.*/
+#define AC3_KARAOKE_MODE_LEFT_VOCAL   (1)
+
+/* Enumeration for only the right vocal in a karaoke stream.*/
+#define AC3_KARAOKE_MODE_RIGHT_VOCAL (2)
+
+/* Enumeration for both vocal channels in a karaoke stream.*/
+#define AC3_KARAOKE_MODE_BOTH_VOCAL             (3)
+#define ASM_PARAM_ID_AC3_DRC_MODE               0x00010D74
+/* Enumeration for the Custom Analog mode.*/
+#define AC3_DRC_MODE_CUSTOM_ANALOG              (0)
+
+/* Enumeration for the Custom Digital mode.*/
+#define AC3_DRC_MODE_CUSTOM_DIGITAL             (1)
+/* Enumeration for the Line Out mode (light compression).*/
+#define AC3_DRC_MODE_LINE_OUT  (2)
+
+/* Enumeration for the RF remodulation mode (heavy compression).*/
+#define AC3_DRC_MODE_RF_REMOD                         (3)
+#define ASM_PARAM_ID_AC3_DUAL_MONO_MODE               0x00010D75
+
+/* Enumeration for playing dual mono in stereo mode.*/
+#define AC3_DUAL_MONO_MODE_STEREO                     (0)
+
+/* Enumeration for playing left mono.*/
+#define AC3_DUAL_MONO_MODE_LEFT_MONO                  (1)
+
+/* Enumeration for playing right mono.*/
+#define AC3_DUAL_MONO_MODE_RIGHT_MONO                 (2)
+
+/* Enumeration for mixing both dual mono channels and playing them.*/
+#define AC3_DUAL_MONO_MODE_MIXED_MONO        (3)
+#define ASM_PARAM_ID_AC3_STEREO_DOWNMIX_MODE 0x00010D76
+
+/* Enumeration for using the Downmix mode indicated in the bitstream. */
+
+#define AC3_STEREO_DOWNMIX_MODE_AUTO_DETECT  (0)
+
+/* Enumeration for Surround Compatible mode (preserves the
+ * surround information).
+ */
+
+#define AC3_STEREO_DOWNMIX_MODE_LT_RT        (1)
+/* Enumeration for Mono Compatible mode (if the output is to be
+ * further downmixed to mono).
+ */
+
+#define AC3_STEREO_DOWNMIX_MODE_LO_RO (2)
+
+/* ID of the AC3 PCM scale factor parameter in the
+ * #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+ */
+#define ASM_PARAM_ID_AC3_PCM_SCALEFACTOR 0x00010D78
+
+/* ID of the AC3 DRC boost scale factor parameter in the
+ * #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+ */
+#define ASM_PARAM_ID_AC3_DRC_BOOST_SCALEFACTOR 0x00010D79
+
+/* ID of the AC3 DRC cut scale factor parameter in the
+ * #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+ */
+#define ASM_PARAM_ID_AC3_DRC_CUT_SCALEFACTOR 0x00010D7A
+
+/* Structure for AC3 Generic Parameter. */
+
+/*  Payload of the AC3 parameters in the
+ * #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+ */
+struct asm_ac3_generic_param {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+	u32                  generic_parameter;
+/* AC3 generic parameter. Select from one of the following
+ * possible values.
+ *
+ * For #ASM_PARAM_ID_AC3_KARAOKE_MODE, supported values are:
+ * - AC3_KARAOKE_MODE_NO_VOCAL
+ * - AC3_KARAOKE_MODE_LEFT_VOCAL
+ * - AC3_KARAOKE_MODE_RIGHT_VOCAL
+ * - AC3_KARAOKE_MODE_BOTH_VOCAL
+ *
+ * For #ASM_PARAM_ID_AC3_DRC_MODE, supported values are:
+ * - AC3_DRC_MODE_CUSTOM_ANALOG
+ * - AC3_DRC_MODE_CUSTOM_DIGITAL
+ * - AC3_DRC_MODE_LINE_OUT
+ * - AC3_DRC_MODE_RF_REMOD
+ *
+ * For #ASM_PARAM_ID_AC3_DUAL_MONO_MODE, supported values are:
+ * - AC3_DUAL_MONO_MODE_STEREO
+ * - AC3_DUAL_MONO_MODE_LEFT_MONO
+ * - AC3_DUAL_MONO_MODE_RIGHT_MONO
+ * - AC3_DUAL_MONO_MODE_MIXED_MONO
+ *
+ * For #ASM_PARAM_ID_AC3_STEREO_DOWNMIX_MODE, supported values are:
+ * - AC3_STEREO_DOWNMIX_MODE_AUTO_DETECT
+ * - AC3_STEREO_DOWNMIX_MODE_LT_RT
+ * - AC3_STEREO_DOWNMIX_MODE_LO_RO
+ *
+ * For #ASM_PARAM_ID_AC3_PCM_SCALEFACTOR, supported values are
+ * 0 to 1 in Q31 format.
+ *
+ * For #ASM_PARAM_ID_AC3_DRC_BOOST_SCALEFACTOR, supported values are
+ * 0 to 1 in Q31 format.
+ *
+ * For #ASM_PARAM_ID_AC3_DRC_CUT_SCALEFACTOR, supported values are
+ * 0 to 1 in Q31 format.
+ */
+} __packed;
+
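+/* Illustrative sketch only: the three AC3 scale factors above take values
+ * between 0 and 1 expressed in Q31 format.  This helper converts a
+ * percentage (0..100) to that representation using integer math; the
+ * helper name is an assumption made for this example.
+ */
+static inline u32 example_ac3_scalefactor_q31(u32 percent)
+{
+	if (percent > 100)
+		percent = 100;
+
+	return (u32)(((u64)percent << 31) / 100);	/* 100% -> 0x80000000 */
+}
+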
+/* Enumeration for Raw mode (no downmixing), which specifies
+ * that all channels in the bitstream are to be played out as is
+ * without any downmixing. (Default)
+ */
+
+#define WMAPRO_CHANNEL_MASK_RAW (-1)
+
+/* Enumeration for setting the channel mask to 0. The 7.1 mode
+ * (Home Theater) is assigned.
+ */
+
+
+#define WMAPRO_CHANNEL_MASK_ZERO 0x0000
+
+/* Speaker layout mask for one channel (Home Theater, mono).
+ * - Speaker front center
+ */
+#define WMAPRO_CHANNEL_MASK_1_C 0x0004
+
+/* Speaker layout mask for two channels (Home Theater, stereo).
+ * - Speaker front left
+ * - Speaker front right
+ */
+#define WMAPRO_CHANNEL_MASK_2_L_R 0x0003
+
+/* Speaker layout mask for three channels (Home Theater).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ */
+#define WMAPRO_CHANNEL_MASK_3_L_C_R 0x0007
+
+/* Speaker layout mask for two channels (stereo).
+ * - Speaker back left
+ * - Speaker back right
+ */
+#define WMAPRO_CHANNEL_MASK_2_Bl_Br  0x0030
+
+/* Speaker layout mask for four channels.
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker back left
+ * - Speaker back right
+*/
+#define WMAPRO_CHANNEL_MASK_4_L_R_Bl_Br 0x0033
+
+/* Speaker layout mask for four channels (Home Theater).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker back center
+*/
+#define WMAPRO_CHANNEL_MASK_4_L_R_C_Bc_HT 0x0107
+/* Speaker layout mask for five channels.
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker back left
+ * - Speaker back right
+ */
+#define WMAPRO_CHANNEL_MASK_5_L_C_R_Bl_Br  0x0037
+
+/* Speaker layout mask for five channels (5 mode, Home Theater).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker side left
+ * - Speaker side right
+ */
+#define WMAPRO_CHANNEL_MASK_5_L_C_R_Sl_Sr_HT   0x0607
+/* Speaker layout mask for six channels (5.1 mode).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker low frequency
+ * - Speaker back left
+ * - Speaker back right
+ */
+#define WMAPRO_CHANNEL_MASK_5DOT1_L_C_R_Bl_Br_SLF  0x003F
+/* Speaker layout mask for six channels (5.1 mode, Home Theater).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker low frequency
+ * - Speaker side left
+ * - Speaker side right
+ */
+#define WMAPRO_CHANNEL_MASK_5DOT1_L_C_R_Sl_Sr_SLF_HT  0x060F
+/* Speaker layout mask for six channels (5.1 mode, no LFE).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker back left
+ * - Speaker back right
+ * - Speaker back center
+ */
+#define WMAPRO_CHANNEL_MASK_5DOT1_L_C_R_Bl_Br_Bc  0x0137
+/* Speaker layout mask for six channels (5.1 mode, Home Theater,
+ * no LFE).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker back center
+ * - Speaker side left
+ * - Speaker side right
+ */
+#define WMAPRO_CHANNEL_MASK_5DOT1_L_C_R_Sl_Sr_Bc_HT   0x0707
+
+/* Speaker layout mask for seven channels (6.1 mode).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker low frequency
+ * - Speaker back left
+ * - Speaker back right
+ * - Speaker back center
+ */
+#define WMAPRO_CHANNEL_MASK_6DOT1_L_C_R_Bl_Br_Bc_SLF   0x013F
+
+/* Speaker layout mask for seven channels (6.1 mode, Home
+ * Theater).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker low frequency
+ * - Speaker back center
+ * - Speaker side left
+ * - Speaker side right
+ */
+#define WMAPRO_CHANNEL_MASK_6DOT1_L_C_R_Sl_Sr_Bc_SLF_HT 0x070F
+
+/* Speaker layout mask for seven channels (6.1 mode, no LFE).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker back left
+ * - Speaker back right
+ * - Speaker front left of center
+ * - Speaker front right of center
+*/
+#define WMAPRO_CHANNEL_MASK_6DOT1_L_C_R_Bl_Br_SFLOC_SFROC   0x00F7
+
+/* Speaker layout mask for seven channels (6.1 mode, Home
+ * Theater, no LFE).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker side left
+ * - Speaker side right
+ * - Speaker front left of center
+ * - Speaker front right of center
+*/
+#define WMAPRO_CHANNEL_MASK_6DOT1_L_C_R_Sl_Sr_SFLOC_SFROC_HT 0x0637
+
+/* Speaker layout mask for eight channels (7.1 mode).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker back left
+ * - Speaker back right
+ * - Speaker low frequency
+ * - Speaker front left of center
+ * - Speaker front right of center
+ */
+#define WMAPRO_CHANNEL_MASK_7DOT1_L_C_R_Bl_Br_SLF_SFLOC_SFROC \
+					0x00FF
+
+/* Speaker layout mask for eight channels (7.1 mode, Home Theater).
+ * - Speaker front left
+ * - Speaker front right
+ * - Speaker front center
+ * - Speaker side left
+ * - Speaker side right
+ * - Speaker low frequency
+ * - Speaker front left of center
+ * - Speaker front right of center
+ *
+*/
+#define WMAPRO_CHANNEL_MASK_7DOT1_L_C_R_Sl_Sr_SLF_SFLOC_SFROC_HT \
+					0x063F
+
+#define ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP  0x00010D82
+
+/*	Maximum number of decoder output channels.*/
+#define MAX_CHAN_MAP_CHANNELS  16
+
+/* Structure for decoder output channel mapping. */
+
+/* Payload of the #ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP parameter in the
+ * #ASM_STREAM_CMD_SET_ENCDEC_PARAM command.
+ */
+struct asm_dec_out_chan_map_param {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param  encdec;
+	struct asm_enc_cfg_blk_param_v2	encblk;
+	u32                 num_channels;
+/* Number of decoder output channels.
+ * Supported values: 0 to #MAX_CHAN_MAP_CHANNELS
+ *
+ * A value of 0 indicates native channel mapping, which is valid
+ * only for NT mode. This means the output of the decoder is to be
+ * preserved as is.
+ */
+	u8                  channel_mapping[MAX_CHAN_MAP_CHANNELS];
+} __packed;
+
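+/* Illustrative sketch only: requests a plain stereo output map from the
+ * decoder.  Population of hdr, encdec and encblk is elided; PCM_CHANNEL_L
+ * and PCM_CHANNEL_R are the channel-type macros defined elsewhere in this
+ * header.  The helper name is an assumption made for this example.
+ */
+static inline void example_asm_dec_chan_map_stereo(
+		struct asm_dec_out_chan_map_param *map)
+{
+	map->num_channels       = 2;
+	map->channel_mapping[0] = PCM_CHANNEL_L;
+	map->channel_mapping[1] = PCM_CHANNEL_R;
+}
+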
+#define ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED  0x00010D84
+
+/* Bitmask for the IEC 61937 enable flag.*/
+#define ASM_BIT_MASK_IEC_61937_STREAM_FLAG   (0x00000001UL)
+
+/* Shift value for the IEC 61937 enable flag.*/
+#define ASM_SHIFT_IEC_61937_STREAM_FLAG  0
+
+/* Bitmask for the IEC 60958 enable flag.*/
+#define ASM_BIT_MASK_IEC_60958_STREAM_FLAG   (0x00000002UL)
+
+/* Shift value for the IEC 60958 enable flag.*/
+#define ASM_SHIFT_IEC_60958_STREAM_FLAG   1
+
+/* Payload format for the open write compressed command */
+
+/* Payload format for the #ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED
+ * command, which opens a stream for a given session ID and stream ID
+ * to be rendered in the compressed format.
+ */
+
+struct asm_stream_cmd_open_write_compressed {
+	struct apr_hdr hdr;
+	u32                    flags;
+/* Mode flags that configure the stream for a specific format.
+ * Supported values:
+ * - Bit 0 -- IEC 61937 compatibility
+ *   - 0 -- Stream is not in IEC 61937 format
+ *   - 1 -- Stream is in IEC 61937 format
+ * - Bit 1 -- IEC 60958 compatibility
+ *   - 0 -- Stream is not in IEC 60958 format
+ *   - 1 -- Stream is in IEC 60958 format
+ * - Bits 2 to 31 -- 0 (Reserved)
+ *
+ * For the same stream, bit 0 cannot be 0 while bit 1 is 1: a
+ * compressed stream cannot have IEC 60958 packetization applied
+ * without IEC 61937 packetization.
+ * @note1hang Currently, IEC 60958 packetized input streams are not
+ * supported.
+ */
+
+
+	u32                    fmt_id;
+/* Specifies the media type of the HDMI stream to be opened.
+ * Supported values:
+ * - #ASM_MEDIA_FMT_AC3_DEC
+ * - #ASM_MEDIA_FMT_EAC3_DEC
+ * - #ASM_MEDIA_FMT_DTS
+ * - #ASM_MEDIA_FMT_ATRAC
+ * - #ASM_MEDIA_FMT_MAT
+ *
+ * @note1hang This field must be set to a valid media type even if
+ * IEC 61937 packetization is not performed by the aDSP.
+ */
+
+} __packed;
+
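+/* Illustrative sketch only: composes the flags word for an IEC 61937
+ * packetized AC3 stream using the bit-mask/shift macros above.  Population
+ * of hdr is elided; the helper name is an assumption made for this example.
+ */
+static inline void example_asm_open_write_compressed_ac3(
+		struct asm_stream_cmd_open_write_compressed *open)
+{
+	open->flags  = (1 << ASM_SHIFT_IEC_61937_STREAM_FLAG) &
+			ASM_BIT_MASK_IEC_61937_STREAM_FLAG;
+	open->fmt_id = ASM_MEDIA_FMT_AC3_DEC;
+}
+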
+#define ASM_STREAM_CMD_OPEN_READ_COMPRESSED                        0x00010D95
+
+struct asm_stream_cmd_open_read_compressed {
+	struct apr_hdr hdr;
+	u32                    mode_flags;
+/* Mode flags that indicate whether meta information per encoded
+ * frame is to be provided.
+ * Supported values for bit 4:
+ * - 0 -- Return data buffer contains all encoded frames only; it does
+ *      not contain frame metadata.
+ * - 1 -- Return data buffer contains an array of metadata and encoded
+ *      frames.
+ * - Use #ASM_BIT_MASK_META_INFO_FLAG to set the bitmask and
+ * #ASM_SHIFT_META_INFO_FLAG to set the shift value for this bit.
+ * All other bits are reserved; clients must set them to zero.
+ */
+
+	u32                    frames_per_buf;
+/* Indicates the number of frames that need to be returned per
+ * read buffer.
+ * Supported values: greater than 0
+ */
+
+} __packed;
+
+/* adsp_asm_stream_commands.h*/
+
+
+/* adsp_asm_api.h (no changes)*/
+#define ASM_STREAM_POSTPROCOPO_ID_DEFAULT \
+								0x00010BE4
+#define ASM_STREAM_POSTPROCOPO_ID_PEAKMETER \
+								0x00010D83
+#define ASM_STREAM_POSTPROCOPO_ID_NONE \
+								0x00010C68
+#define ASM_STREAM_POSTPROCOPO_ID_MCH_PEAK_VOL \
+								0x00010D8B
+#define ASM_STREAM_PREPROCOPO_ID_DEFAULT \
+			ASM_STREAM_POSTPROCOPO_ID_DEFAULT
+#define ASM_STREAM_PREPROCOPO_ID_NONE \
+			ASM_STREAM_POSTPROCOPO_ID_NONE
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_NONE_AUDIO_COPP \
+			0x00010312
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_SPEAKER_MONO_AUDIO_COPP \
+								0x00010313
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_SPEAKER_STEREO_AUDIO_COPP \
+								0x00010314
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_SPEAKER_STEREO_IIR_AUDIO_COPP\
+								0x00010704
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_SPEAKER_MONO_AUDIO_COPP_MBDRCV2\
+								0x0001070D
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_SPEAKER_STEREO_AUDIO_COPP_MBDRCV2\
+								0x0001070E
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_SPEAKER_STEREO_IIR_AUDIO_COPP_MBDRCV2\
+								0x0001070F
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_SPEAKER_MCH_PEAK_VOL \
+								0x0001031B
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_MIC_MONO_AUDIO_COPP  0x00010315
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_MIC_STEREO_AUDIO_COPP 0x00010316
+#define AUDPROC_COPPOPOLOGY_ID_MCHAN_IIR_AUDIO           0x00010715
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_DEFAULT_AUDIO_COPP   0x00010BE3
+#define ADM_CMD_COPP_OPENOPOLOGY_ID_PEAKMETER_AUDIO_COPP 0x00010317
+#define AUDPROC_MODULE_ID_AIG   0x00010716
+#define AUDPROC_PARAM_ID_AIG_ENABLE		0x00010717
+#define AUDPROC_PARAM_ID_AIG_CONFIG		0x00010718
+
+struct Audio_AigParam {
+	uint16_t	mode;
+/*< Mode word for enabling AIG/SIG mode.
+ * Byte offset: 0
+ */
+	int16_t		staticGainL16Q12;
+/*< Static input gain when aigMode is set to 1.
+ * Byte offset: 2
+ */
+	int16_t		initialGainDBL16Q7;
+/*< Initial value, in dB (Q7 format), that the adaptive gain update
+ * starts from.
+ * Byte offset: 4
+ */
+	int16_t		idealRMSDBL16Q7;
+/*< Average RMS level that AIG attempts to achieve, in Q8.7 format.
+ * Byte offset: 6
+ */
+	int32_t		noiseGateL32;
+/*< Threshold below which the signal is treated as noise by AIG.
+ * Byte offset: 8
+ */
+	int32_t		minGainL32Q15;
+/*< Minimum gain that can be provided by AIG, in Q16.15 format.
+ * Byte offset: 12
+ */
+	int32_t		maxGainL32Q15;
+/*< Maximum gain that can be provided by AIG, in Q16.15 format.
+ * Byte offset: 16
+ */
+	uint32_t		gainAtRtUL32Q31;
+/*< Attack/release time for the AIG update, in Q1.31 format.
+ * Byte offset: 20
+ */
+	uint32_t		longGainAtRtUL32Q31;
+/*< Long attack/release time while updating gain for noise/silence,
+ * in Q1.31 format.
+ * Byte offset: 24
+ */
+
+	uint32_t		rmsTavUL32Q32;
+/* RMS smoothing time constant used for long-term RMS estimate
+ * Q0.32 Byte offset: 28
+ */
+
+	uint32_t		gainUpdateStartTimMsUL32Q0;
+/* The waiting time before which AIG starts to apply adaptive
+ * gain update Q32.0 Byte offset: 32
+ */
+
+} __packed;
+
+
+#define ADM_MODULE_ID_EANS                            0x00010C4A
+#define ADM_PARAM_ID_EANS_ENABLE                      0x00010C4B
+#define ADM_PARAM_ID_EANS_PARAMS                      0x00010C4C
+
+struct adm_eans_enable {
+
+	uint32_t                  enable_flag;
+/*< Specifies whether EANS is disabled (0) or enabled
+ * (nonzero).
+ * This is supported only for sampling rates of 8, 12, 16, 24, 32,
+ * and 48 kHz. It is not supported for sampling rates of 11.025,
+ * 22.05, or 44.1 kHz.
+ */
+
+} __packed;
+
+
+struct adm_eans_params {
+	int16_t                         eans_mode;
+/*< Mode word for enabling/disabling submodules.
+ * Byte offset: 0
+ */
+
+	int16_t                         eans_input_gain;
+/*< Q2.13 input gain to the EANS module.
+ * Byte offset: 2
+ */
+
+	int16_t                         eans_output_gain;
+/*< Q2.13 output gain to the EANS module.
+ * Byte offset: 4
+ */
+
+	int16_t                         eansarget_ns;
+/*< Target noise suppression level in dB.
+ * Byte offset: 6
+ */
+
+	int16_t                         eans_s_alpha;
+/*< Q3.12 over-subtraction factor for stationary noise
+ * suppression.
+ * Byte offset: 8
+ */
+
+	int16_t                         eans_n_alpha;
+/* < Q3.12 over-subtraction factor for nonstationary noise
+ * suppression.
+ * Byte offset: 10
+ */
+
+	int16_t                         eans_n_alphamax;
+/*< Q3.12 maximum over-subtraction factor for nonstationary
+ * noise suppression.
+ * Byte offset: 12
+ */
+	int16_t                         eans_e_alpha;
+/*< Q15 scaling factor for excess noise suppression.
+ * Byte offset: 14
+ */
+
+	int16_t                         eans_ns_snrmax;
+/*< Upper boundary in dB for SNR estimation.
+ * Byte offset: 16
+ */
+
+	int16_t                         eans_sns_block;
+/*< Quarter block size for stationary noise suppression.
+ * Byte offset: 18
+ */
+
+	int16_t                         eans_ns_i;
+/*< Initialization block size for noise suppression.
+ * Byte offset: 20
+ */
+	int16_t                         eans_np_scale;
+/*< Power scale factor for nonstationary noise update.
+ * Byte offset: 22
+ */
+
+	int16_t                         eans_n_lambda;
+/*< Smoothing factor for higher level nonstationary noise
+ * update.
+ * Byte offset: 24
+ */
+
+	int16_t                         eans_n_lambdaf;
+/*< Medium averaging factor for noise update.
+ * Byte offset: 26
+ */
+
+	int16_t                         eans_gs_bias;
+/*< Bias factor in dB for gain calculation.
+ * Byte offset: 28
+ */
+
+	int16_t                         eans_gs_max;
+/*< SNR lower boundary in dB for aggressive gain calculation.
+ * Byte offset: 30
+ */
+
+	int16_t                         eans_s_alpha_hb;
+/*< Q3.12 over-subtraction factor for high-band stationary
+ * noise suppression.
+ * Byte offset: 32
+ */
+
+	int16_t                         eans_n_alphamax_hb;
+/*< Q3.12 maximum over-subtraction factor for high-band
+ * nonstationary noise suppression.
+ * Byte offset: 34
+ */
+
+	int16_t                         eans_e_alpha_hb;
+/*< Q15 scaling factor for high-band excess noise suppression.
+ * Byte offset: 36
+ */
+
+	int16_t                         eans_n_lambda0;
+/*< Smoothing factor for nonstationary noise update during
+ * speech activity.
+ * Byte offset: 38
+ */
+
+	int16_t                         thresh;
+/*< Threshold for generating a binary VAD decision.
+ * Byte offset: 40
+ */
+
+	int16_t                         pwr_scale;
+/*< Indirect lower boundary of the noise level estimate.
+ * Byte offset: 42
+ */
+
+	int16_t                         hangover_max;
+/*< Avoids mid-speech clipping and reliably detects weak speech
+ * bursts at the end of speech activity.
+ * Byte offset: 44
+ */
+
+	int16_t                         alpha_snr;
+/*< Controls responsiveness of the VAD.
+ * Byte offset: 46
+ */
+
+	int16_t                         snr_diff_max;
+/*< Maximum SNR difference. Decreasing this parameter value may
+ * help in making correct decisions during abrupt changes; however,
+ * decreasing too much may increase false alarms during long
+ * pauses/silences.
+ * Byte offset: 48
+ */
+
+	int16_t                         snr_diff_min;
+/*< Minimum SNR difference. Decreasing this parameter value may
+ * help in making correct decisions during abrupt changes; however,
+ * decreasing too much may increase false alarms during long
+ * pauses/silences.
+ * Byte offset: 50
+ */
+
+	int16_t                         init_length;
+/*< Defines the number of frames for which a noise level
+ * estimate is set to a fixed value.
+ * Byte offset: 52
+ */
+
+	int16_t                         max_val;
+/*< Defines the upper limit of the noise level.
+ * Byte offset: 54
+ */
+
+	int16_t                         init_bound;
+/*< Defines the initial bounding value for the noise level
+ * estimate. This is used during the initial segment defined by the
+ * init_length parameter.
+ * Byte offset: 56
+ */
+
+	int16_t                         reset_bound;
+/*< Reset boundary for noise tracking.
+ * Byte offset: 58
+ */
+
+	int16_t                         avar_scale;
+/*< Defines the bias factor in noise estimation.
+ * Byte offset: 60
+ */
+
+	int16_t                         sub_nc;
+/*< Defines the window length for noise estimation.
+ * Byte offset: 62
+ */
+
+	int16_t                         spow_min;
+/*< Defines the minimum signal power required to update the
+ * boundaries for the noise floor estimate.
+ * Byte offset: 64
+ */
+
+	int16_t                         eans_gs_fast;
+/*< Fast smoothing factor for postprocessor gain.
+ * Byte offset: 66
+ */
+
+	int16_t                         eans_gs_med;
+/*< Medium smoothing factor for postprocessor gain.
+ * Byte offset: 68
+ */
+
+	int16_t                         eans_gs_slow;
+/*< Slow smoothing factor for postprocessor gain.
+ * Byte offset: 70
+ */
+
+	int16_t                         eans_swb_salpha;
+/*< Q3.12 super wideband aggressiveness factor for stationary
+ * noise suppression.
+ * Byte offset: 72
+ */
+
+	int16_t                         eans_swb_nalpha;
+/*< Q3.12 super wideband aggressiveness factor for
+ * nonstationary noise suppression.
+ * Byte offset: 74
+ */
+} __packed;
+#define ADM_MODULE_IDX_MIC_GAIN_CTRL   0x00010C35
+
+/* @addtogroup audio_pp_param_ids
+ * ID of the Tx mic gain control parameter used by the
+ * #ADM_MODULE_IDX_MIC_GAIN_CTRL module.
+ * @messagepayload
+ * @structure{admx_mic_gain}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_IDX_MIC_GAIN.tex}
+ */
+#define ADM_PARAM_IDX_MIC_GAIN       0x00010C36
+
+/* Structure for a Tx mic gain parameter for the mic gain
+ * control module.
+ */
+
+
+/* @brief Payload of the #ADM_PARAM_IDX_MIC_GAIN parameter in the
+ * Tx Mic Gain Control module.
+ */
+struct admx_mic_gain {
+	uint16_t                  tx_mic_gain;
+	/*< Linear gain in Q13 format. */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero. */
+} __packed;
+
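+/* Illustrative sketch only: the tx_mic_gain field above, like the other
+ * "linear gain in Q13 format" fields in this file, uses 0x2000 for unity
+ * gain.  This helper scales unity gain by a percentage with integer math;
+ * the helper name is an assumption made for this example.
+ */
+static inline uint16_t example_adm_gain_q13(u32 percent)
+{
+	return (uint16_t)((0x2000 * percent) / 100);	/* 100% -> 0x2000 */
+}
+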
+/* end_addtogroup audio_pp_param_ids */
+
+/* @ingroup audio_pp_module_ids
+ * ID of the Rx Codec Gain Control module.
+ *
+ * This module supports the following parameter ID:
+ * - #ADM_PARAM_ID_RX_CODEC_GAIN
+ */
+#define ADM_MODULE_ID_RX_CODEC_GAIN_CTRL       0x00010C37
+
+/* @addtogroup audio_pp_param_ids
+ * ID of the Rx codec gain control parameter used by the
+ * #ADM_MODULE_ID_RX_CODEC_GAIN_CTRL module.
+ *
+ * @messagepayload
+ * @structure{adm_rx_codec_gain}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_ID_RX_CODEC_GAIN.tex}
+*/
+#define ADM_PARAM_ID_RX_CODEC_GAIN   0x00010C38
+
+/* Structure for the Rx common codec gain control module. */
+
+
+/* @brief Payload of the #ADM_PARAM_ID_RX_CODEC_GAIN parameter
+ * in the Rx Codec Gain Control module.
+ */
+
+
+struct adm_rx_codec_gain {
+	uint16_t                  rx_codec_gain;
+	/*< Linear gain in Q13 format. */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero.*/
+} __packed;
+
+/* end_addtogroup audio_pp_param_ids */
+
+/* @ingroup audio_pp_module_ids
+ * ID of the HPF Tuning Filter module on the Tx path.
+ * This module supports the following parameter IDs:
+ * - #ADM_PARAM_ID_HPF_IIRX_FILTER_ENABLE_CONFIG
+ * - #ADM_PARAM_ID_HPF_IIRX_FILTER_PRE_GAIN
+ * - #ADM_PARAM_ID_HPF_IIRX_FILTER_CONFIG_PARAMS
+ */
+#define ADM_MODULE_ID_HPF_IIRX_FILTER    0x00010C3D
+
+/* @addtogroup audio_pp_param_ids */
+/* ID of the Tx HPF IIR filter enable parameter used by the
+ * #ADM_MODULE_ID_HPF_IIRX_FILTER module.
+ * @parspace Message payload
+ * @structure{adm_hpfx_iir_filter_enable_cfg}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_ID_HPF_IIRX_FILTER_ENABLE_CONFIG.tex}
+ */
+#define ADM_PARAM_ID_HPF_IIRX_FILTER_ENABLE_CONFIG   0x00010C3E
+
+/* ID of the Tx HPF IIR filter pregain parameter used by the
+ * #ADM_MODULE_ID_HPF_IIRX_FILTER module.
+ * @parspace Message payload
+ * @structure{adm_hpfx_iir_filter_pre_gain}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_ID_HPF_IIRX_FILTER_PRE_GAIN.tex}
+ */
+#define ADM_PARAM_ID_HPF_IIRX_FILTER_PRE_GAIN   0x00010C3F
+
+/* ID of the Tx HPF IIR filter configuration parameters used by the
+ * #ADM_MODULE_ID_HPF_IIRX_FILTER module.
+ * @parspace Message payload
+ * @structure{adm_hpfx_iir_filter_cfg_params}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_ID_HPF_IIRX_FILTER_CONFIG_PARAMS.tex}
+ */
+#define ADM_PARAM_ID_HPF_IIRX_FILTER_CONFIG_PARAMS  0x00010C40
+
+/* Structure for enabling a configuration parameter for
+ * the HPF IIR tuning filter module on the Tx path.
+ */
+
+/* @brief Payload of the #ADM_PARAM_ID_HPF_IIRX_FILTER_ENABLE_CONFIG
+ * parameter in the Tx path HPF Tuning Filter module.
+ */
+struct adm_hpfx_iir_filter_enable_cfg {
+	uint32_t                  enable_flag;
+/*< Specifies whether the HPF tuning filter is disabled (0) or
+ * enabled (nonzero).
+ */
+} __packed;
+
+
+/* Structure for the pregain parameter for the HPF
+	IIR tuning filter module on the Tx path. */
+
+
+/* @brief Payload of the #ADM_PARAM_ID_HPF_IIRX_FILTER_PRE_GAIN parameter
+ * in the Tx path HPF Tuning Filter module.
+ */
+struct adm_hpfx_iir_filter_pre_gain {
+	uint16_t                  pre_gain;
+	/*< Linear gain in Q13 format. */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero.*/
+} __packed;
+
+
+/* Structure for the configuration parameter for the
+	HPF IIR tuning filter module on the Tx path. */
+
+
+/* @brief Payload of the #ADM_PARAM_ID_HPF_IIRX_FILTER_CONFIG_PARAMS
+ * parameters in the Tx path HPF Tuning Filter module. \n
+ * \n
+ * This structure is followed by tuning filter coefficients as follows: \n
+ * - Sequence of int32_t FilterCoeffs.
+ * Each band has five coefficients, each in int32_t format in the order of
+ * b0, b1, b2, a1, a2.
+ * - Sequence of int16_t NumShiftFactor.
+ * One int16_t per band. The numerator shift factor is related to the Q
+ * factor of the filter coefficients.
+ * - Sequence of uint16_t PanSetting.
+ * One uint16_t for each band to indicate application of the filter to
+ * left (0), right (1), or both (2) channels.
+ */
+struct adm_hpfx_iir_filter_cfg_params {
+	uint16_t                  num_biquad_stages;
+/*< Number of bands.
+ * Supported values: 0 to 20
+ */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero.*/
+} __packed;
+
+/* end_addtogroup audio_pp_module_ids */
+
+/* @addtogroup audio_pp_module_ids */
+/* ID of the Tx path IIR Tuning Filter module.
+ *	This module supports the following parameter IDs:
+ *	- #ADM_PARAM_IDX_IIR_FILTER_ENABLE_CONFIG
+ */
+#define ADM_MODULE_IDX_IIR_FILTER 0x00010C41
+
+/* ID of the Rx path IIR Tuning Filter module for the left channel.
+ *	The parameter IDs of the IIR tuning filter module
+ *	(#ASM_MODULE_ID_IIRUNING_FILTER) are used for the left IIR Rx tuning
+ *	filter.
+ *
+ * Pan parameters are not required for this per-channel IIR filter; the pan
+ * parameters are ignored by this module.
+ */
+#define ADM_MODULE_ID_LEFT_IIRUNING_FILTER      0x00010705
+
+/* ID of the Rx path IIR Tuning Filter module for the right
+ * channel.
+ * The parameter IDs of the IIR tuning filter module
+ * (#ASM_MODULE_ID_IIRUNING_FILTER) are used for the right IIR Rx
+ * tuning filter.
+ *
+ * Pan parameters are not required for this per-channel IIR filter;
+ * the pan parameters are ignored by this module.
+ */
+#define ADM_MODULE_ID_RIGHT_IIRUNING_FILTER    0x00010706
+
+/* end_addtogroup audio_pp_module_ids */
+
+/* @addtogroup audio_pp_param_ids */
+
+/* ID of the Tx IIR filter enable parameter used by the
+ * #ADM_MODULE_IDX_IIR_FILTER module.
+ * @parspace Message payload
+ * @structure{admx_iir_filter_enable_cfg}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_IDX_IIR_FILTER_ENABLE_CONFIG.tex}
+ */
+#define ADM_PARAM_IDX_IIR_FILTER_ENABLE_CONFIG   0x00010C42
+
+/* ID of the Tx IIR filter pregain parameter used by the
+ * #ADM_MODULE_IDX_IIR_FILTER module.
+ * @parspace Message payload
+ * @structure{admx_iir_filter_pre_gain}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_IDX_IIR_FILTER_PRE_GAIN.tex}
+ */
+#define ADM_PARAM_IDX_IIR_FILTER_PRE_GAIN    0x00010C43
+
+/* ID of the Tx IIR filter configuration parameters used by the
+ * #ADM_MODULE_IDX_IIR_FILTER module.
+ * @parspace Message payload
+ * @structure{admx_iir_filter_cfg_params}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_IDX_IIR_FILTER_CONFIG_PARAMS.tex}
+ */
+#define ADM_PARAM_IDX_IIR_FILTER_CONFIG_PARAMS     0x00010C44
+
+/* Structure for enabling the configuration parameter for the
+ * IIR filter module on the Tx path.
+ */
+
+/* @brief Payload of the #ADM_PARAM_IDX_IIR_FILTER_ENABLE_CONFIG
+ * parameter in the Tx Path IIR Tuning Filter module.
+ */
+
+struct admx_iir_filter_enable_cfg {
+	uint32_t                  enable_flag;
+/*< Specifies whether the IIR tuning filter is disabled (0) or
+ * enabled (nonzero).
+ */
+
+} __packed;
+
+
+/* Structure for the pregain parameter for the
+ * IIR filter module on the Tx path.
+ */
+
+
+/* @brief Payload of the #ADM_PARAM_IDX_IIR_FILTER_PRE_GAIN
+ * parameter in the Tx Path IIR Tuning Filter module.
+ */
+
+struct admx_iir_filter_pre_gain {
+	uint16_t                  pre_gain;
+	/*< Linear gain in Q13 format. */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero.*/
+} __packed;
+
+
+/* Structure for the configuration parameter for the
+ * IIR filter module on the Tx path.
+ */
+
+
+/* @brief Payload of the #ADM_PARAM_IDX_IIR_FILTER_CONFIG_PARAMS
+ * parameter in the Tx Path IIR Tuning Filter module. \n
+ *	\n
+ * This structure is followed by the HPF IIR filter coefficients on
+ * the Tx path as follows: \n
+ * - Sequence of int32_t ulFilterCoeffs. Each band has five
+ * coefficients, each in int32_t format in the order of b0, b1, b2,
+ * a1, a2.
+ * - Sequence of int16_t sNumShiftFactor. One int16_t per band. The
+ * numerator shift factor is related to the Q factor of the filter
+ * coefficients.
+ * - Sequence of uint16_t usPanSetting. One uint16_t for each band
+ * to indicate if the filter is applied to left (0), right (1), or
+ * both (2) channels.
+ */
+struct admx_iir_filter_cfg_params {
+	uint16_t                  num_biquad_stages;
+/*< Number of bands.
+ * Supported values: 0 to 20
+ */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero.*/
+} __packed;
+
+/* end_addtogroup audio_pp_module_ids */
+
+/* @ingroup audio_pp_module_ids
+ *	ID of the QEnsemble module.
+ *	This module supports the following parameter IDs:
+ *	- #ADM_PARAM_ID_QENSEMBLE_ENABLE
+ *	- #ADM_PARAM_ID_QENSEMBLE_BACKGAIN
+ *	- #ADM_PARAM_ID_QENSEMBLE_SET_NEW_ANGLE
+ */
+#define ADM_MODULE_ID_QENSEMBLE    0x00010C59
+
+/* @addtogroup audio_pp_param_ids */
+/* ID of the QEnsemble enable parameter used by the
+ * #ADM_MODULE_ID_QENSEMBLE module.
+ * @messagepayload
+ * @structure{adm_qensemble_enable}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_ID_QENSEMBLE_ENABLE.tex}
+ */
+#define ADM_PARAM_ID_QENSEMBLE_ENABLE   0x00010C60
+
+/* ID of the QEnsemble back gain parameter used by the
+ * #ADM_MODULE_ID_QENSEMBLE module.
+ * @messagepayload
+ * @structure{adm_qensemble_param_backgain}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_ID_QENSEMBLE_BACKGAIN.tex}
+ */
+#define ADM_PARAM_ID_QENSEMBLE_BACKGAIN   0x00010C61
+
+/* ID of the QEnsemble new angle parameter used by the
+ * #ADM_MODULE_ID_QENSEMBLE module.
+ * @messagepayload
+ * @structure{adm_qensemble_param_set_new_angle}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ADM_PARAM_ID_QENSEMBLE_SET_NEW_ANGLE.tex}
+ */
+#define ADM_PARAM_ID_QENSEMBLE_SET_NEW_ANGLE    0x00010C62
+
+/* Structure for enabling the configuration parameter for the
+ * QEnsemble module.
+ */
+
+
+/* @brief Payload of the #ADM_PARAM_ID_QENSEMBLE_ENABLE
+ * parameter used by the QEnsemble module.
+ */
+struct adm_qensemble_enable {
+	uint32_t                  enable_flag;
+/*< Specifies whether the QEnsemble module is disabled (0) or enabled
+ * (nonzero).
+ */
+} __packed;
+
+
+/* Structure for the background gain for the QEnsemble module. */
+
+
+/* @brief Payload of the #ADM_PARAM_ID_QENSEMBLE_BACKGAIN
+ * parameter used by
+ * the QEnsemble module.
+ */
+struct adm_qensemble_param_backgain {
+	int16_t                  back_gain;
+/*< Linear gain in Q15 format.
+ * Supported values: 0 to 32767
+ */
+
+	uint16_t                 reserved;
+	/*< Clients must set this field to zero.*/
+} __packed;
+/* Structure for setting a new angle for the QEnsemble module. */
+
+
+/* @brief Payload of the #ADM_PARAM_ID_QENSEMBLE_SET_NEW_ANGLE
+ * parameter used
+ * by the QEnsemble module.
+ */
+struct adm_qensemble_param_set_new_angle {
+	int16_t                    new_angle;
+/*< New angle in degrees.
+ * Supported values: 0 to 359
+ */
+
+	int16_t                    time_ms;
+/*< Transition time in milliseconds to set the new angle.
+ * Supported values: 0 to 32767
+ */
+} __packed;
+
+/* end_addtogroup audio_pp_module_ids */
+
+/* @ingroup audio_pp_module_ids
+ * ID of the Volume Control module pre/postprocessing block.
+ * This module supports the following parameter IDs:
+ * - #ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN
+ * - #ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN
+ * - #ASM_PARAM_ID_VOL_CTRL_MUTE_CONFIG
+ * - #ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS
+ * - #ASM_PARAM_ID_SOFT_PAUSE_PARAMETERS
+ * - #ASM_PARAM_ID_MULTICHANNEL_GAIN
+ * - #ASM_PARAM_ID_MULTICHANNEL_MUTE
+ */
+#define ASM_MODULE_ID_VOL_CTRL   0x00010BFE
+
+/* @addtogroup audio_pp_param_ids */
+/* ID of the master gain parameter used by the #ASM_MODULE_ID_VOL_CTRL
+ * module.
+ * @messagepayload
+ * @structure{asm_volume_ctrl_master_gain}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN.tex}
+ */
+#define ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN    0x00010BFF
+
+/* ID of the left/right channel gain parameter used by the
+ * #ASM_MODULE_ID_VOL_CTRL module.
+ * @messagepayload
+ * @structure{asm_volume_ctrl_lr_chan_gain}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN.tex}
+ */
+#define ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN     0x00010C00
+
+/* ID of the mute configuration parameter used by the
+ * #ASM_MODULE_ID_VOL_CTRL module.
+ * @messagepayload
+ * @structure{asm_volume_ctrl_mute_config}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ASM_PARAM_ID_VOL_CTRL_MUTE_CONFIG.tex}
+ */
+#define ASM_PARAM_ID_VOL_CTRL_MUTE_CONFIG   0x00010C01
+
+/* ID of the soft stepping volume parameters used by the
+ * #ASM_MODULE_ID_VOL_CTRL module.
+ * @messagepayload
+ * @structure{asm_soft_step_volume_params}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS.tex}
+ */
+#define ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS  0x00010C29
+
+/* ID of the soft pause parameters used by the #ASM_MODULE_ID_VOL_CTRL
+ * module.
+ */
+#define ASM_PARAM_ID_SOFT_PAUSE_PARAMETERS   0x00010D6A
+
+/* ID of the multiple-channel volume control parameters used by the
+ * #ASM_MODULE_ID_VOL_CTRL module.
+ */
+#define ASM_PARAM_ID_MULTICHANNEL_GAIN  0x00010713
+
+/* ID of the multiple-channel mute configuration parameters used by the
+ * #ASM_MODULE_ID_VOL_CTRL module.
+ */
+
+#define ASM_PARAM_ID_MULTICHANNEL_MUTE  0x00010714
+
+/* Structure for the master gain parameter for a volume control
+ * module.
+ */
+
+
+/* @brief Payload of the #ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN
+ * parameter used by the Volume Control module.
+ */
+
+
+
+struct asm_volume_ctrl_master_gain {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	uint16_t                  master_gain;
+	/*< Linear gain in Q13 format. */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero. */
+} __packed;
+
+
+/* Structure for the left/right channel gain parameter for a
+ * volume control module.
+ */
+
+
+/* @brief Payload of the #ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN
+ * parameters used by the Volume Control module.
+ */
+
+
+
+struct asm_volume_ctrl_lr_chan_gain {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+
+	uint16_t                  l_chan_gain;
+	/*< Linear gain in Q13 format for the left channel. */
+
+	uint16_t                  r_chan_gain;
+	/*< Linear gain in Q13 format for the right channel.*/
+} __packed;
+
+
+/* Structure for the mute configuration parameter for a
+	volume control module. */
+
+
+/* @brief Payload of the #ASM_PARAM_ID_VOL_CTRL_MUTE_CONFIG
+ * parameter used by the Volume Control module.
+ */
+
+
+struct asm_volume_ctrl_mute_config {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	uint32_t                  mute_flag;
+/*< Specifies whether mute is disabled (0) or enabled (nonzero).*/
+
+} __packed;
+
+/*
+ * Supported ramping curve values for soft stepping: linear ramping curve.
+ */
+#define ASM_PARAM_SVC_RAMPINGCURVE_LINEAR  0
+
+/*
+ * Exponential ramping curve.
+ */
+#define ASM_PARAM_SVC_RAMPINGCURVE_EXP    1
+
+/*
+ * Logarithmic ramping curve.
+ */
+#define ASM_PARAM_SVC_RAMPINGCURVE_LOG    2
+
+/* Structure for holding soft stepping volume parameters. */
+
+
+/*  Payload of the #ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS
+ * parameters used by the Volume Control module.
+ */
+struct asm_soft_step_volume_params {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	uint32_t                  period;
+/*< Period in milliseconds.
+ * Supported values: 0 to 15000
+ */
+
+	uint32_t                  step;
+/*< Step in microseconds.
+ * Supported values: 0 to 15000000
+ */
+
+	uint32_t                  ramping_curve;
+/*< Ramping curve type.
+ * Supported values:
+ * - #ASM_PARAM_SVC_RAMPINGCURVE_LINEAR
+ * - #ASM_PARAM_SVC_RAMPINGCURVE_EXP
+ * - #ASM_PARAM_SVC_RAMPINGCURVE_LOG
+ */
+} __packed;
+
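+/* Illustrative sketch only: a typical soft-stepping configuration with a
+ * linear ramp.  Population of hdr, param and data is elided; the numeric
+ * values and the helper name are assumptions made for this example.
+ */
+static inline void example_asm_soft_step_linear(
+		struct asm_soft_step_volume_params *v)
+{
+	v->period        = 30;		/* 30 ms ramp period */
+	v->step          = 1000;	/* 1000 us per step */
+	v->ramping_curve = ASM_PARAM_SVC_RAMPINGCURVE_LINEAR;
+}
+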
+
+/* Structure for holding soft pause parameters. */
+
+
+/* Payload of the #ASM_PARAM_ID_SOFT_PAUSE_PARAMETERS
+ * parameters used by the Volume Control module.
+ */
+
+
+struct asm_soft_pause_params {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	uint32_t                  enable_flag;
+/*< Specifies whether soft pause is disabled (0) or enabled
+ * (nonzero).
+ */
+
+
+
+	uint32_t                  period;
+/*< Period in milliseconds.
+ * Supported values: 0 to 15000
+ */
+
+	uint32_t                  step;
+/*< Step in microseconds.
+ * Supported values: 0 to 15000000
+ */
+
+	uint32_t                  ramping_curve;
+/*< Ramping curve.
+ * Supported values:
+ * - #ASM_PARAM_SVC_RAMPINGCURVE_LINEAR
+ * - #ASM_PARAM_SVC_RAMPINGCURVE_EXP
+ * - #ASM_PARAM_SVC_RAMPINGCURVE_LOG
+ */
+} __packed;
+
+
+/* Maximum number of channels.*/
+#define VOLUME_CONTROL_MAX_CHANNELS                       8
+
+/* Structure for holding one channel type - gain pair. */
+
+
+/* Payload of the #ASM_PARAM_ID_MULTICHANNEL_GAIN channel
+ * type/gain pairs used by the Volume Control module. \n \n This
+ * structure immediately follows the
+ * asm_volume_ctrl_multichannel_gain structure.
+ */
+
+
+struct asm_volume_ctrl_channelype_gain_pair {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	uint8_t                   channelype;
+/*< Channel type for which the gain setting is to be applied.
+ * Supported values:
+ * - #PCM_CHANNEL_L
+ * - #PCM_CHANNEL_R
+ * - #PCM_CHANNEL_C
+ * - #PCM_CHANNEL_LS
+ * - #PCM_CHANNEL_RS
+ * - #PCM_CHANNEL_LFE
+ * - #PCM_CHANNEL_CS
+ * - #PCM_CHANNEL_LB
+ * - #PCM_CHANNEL_RB
+ * - #PCM_CHANNELS
+ * - #PCM_CHANNEL_CVH
+ * - #PCM_CHANNEL_MS
+ * - #PCM_CHANNEL_FLC
+ * - #PCM_CHANNEL_FRC
+ * - #PCM_CHANNEL_RLC
+ * - #PCM_CHANNEL_RRC
+ */
+
+	uint8_t                   reserved1;
+	/*< Clients must set this field to zero. */
+
+	uint8_t                   reserved2;
+	/*< Clients must set this field to zero. */
+
+	uint8_t                   reserved3;
+	/*< Clients must set this field to zero. */
+
+	uint32_t                  gain;
+/*< Gain value for this channel in Q28 format.
+ * Supported values: Any
+ */
+} __packed;
+
+
+/* Structure for the multichannel gain command */
+
+
+/* Payload of the #ASM_PARAM_ID_MULTICHANNEL_GAIN
+ * parameters used by the Volume Control module.
+ */
+
+
+struct asm_volume_ctrl_multichannel_gain {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	uint32_t                  num_channels;
+/*< Number of channels for which gain values are provided. Any
+ * channels present in the data for which gain is not provided are
+ * set to unity gain.
+ * Supported values: 1 to 8
+ */
+
+
+	struct asm_volume_ctrl_channelype_gain_pair
+		gain_data[VOLUME_CONTROL_MAX_CHANNELS];
+	/*< Array of channel type/gain pairs.*/
+} __packed;
+
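+/* Illustrative sketch only: applies unity gain (1 << 28 in Q28 format) to
+ * a stereo pair through the multichannel gain payload above.  Population
+ * of hdr/param/data and of the per-pair headers is elided; the helper name
+ * is an assumption made for this example.
+ */
+static inline void example_asm_multich_unity_gain(
+		struct asm_volume_ctrl_multichannel_gain *g)
+{
+	g->num_channels            = 2;
+	g->gain_data[0].channelype = PCM_CHANNEL_L;
+	g->gain_data[0].gain       = 1 << 28;
+	g->gain_data[1].channelype = PCM_CHANNEL_R;
+	g->gain_data[1].gain       = 1 << 28;
+}
+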
+
+/* Structure for holding one channel type - mute pair. */
+
+
+/* Payload of the #ASM_PARAM_ID_MULTICHANNEL_MUTE channel
+ * type/mute setting pairs used by the Volume Control module. \n \n
+ * This structure immediately follows the
+ * asm_volume_ctrl_multichannel_mute structure.
+ */
+
+
+struct asm_volume_ctrl_channelype_mute_pair {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	uint8_t                   channelype;
+/*< Channel type for which the mute setting is to be applied.
+ * Supported values:
+ * - #PCM_CHANNEL_L
+ * - #PCM_CHANNEL_R
+ * - #PCM_CHANNEL_C
+ * - #PCM_CHANNEL_LS
+ * - #PCM_CHANNEL_RS
+ * - #PCM_CHANNEL_LFE
+ * - #PCM_CHANNEL_CS
+ * - #PCM_CHANNEL_LB
+ * - #PCM_CHANNEL_RB
+ * - #PCM_CHANNELS
+ * - #PCM_CHANNEL_CVH
+ * - #PCM_CHANNEL_MS
+ * - #PCM_CHANNEL_FLC
+ * - #PCM_CHANNEL_FRC
+ * - #PCM_CHANNEL_RLC
+ * - #PCM_CHANNEL_RRC
+ */
+
+	uint8_t                   reserved1;
+	/*< Clients must set this field to zero. */
+
+	uint8_t                   reserved2;
+	/*< Clients must set this field to zero. */
+
+	uint8_t                   reserved3;
+	/*< Clients must set this field to zero. */
+
+	uint32_t                  mute;
+/*< Mute setting for this channel.
+ * Supported values:
+ * - 0 = Unmute
+ * - Nonzero = Mute
+ */
+} __packed;
+
+
+/* Structure for the multichannel mute command */
+
+
+/* @brief Payload of the #ASM_PARAM_ID_MULTICHANNEL_MUTE
+ * parameters used by the Volume Control module.
+ */
+
+
+struct asm_volume_ctrl_multichannel_mute {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+	uint32_t                  num_channels;
+/*< Number of channels for which mute configuration is
+ * provided. Any channels present in the data for which mute
+ * configuration is not provided are set to unmute.
+ * Supported values: 1 to 8
+ */
+
+	struct asm_volume_ctrl_channelype_mute_pair
+				mute_data[VOLUME_CONTROL_MAX_CHANNELS];
+	/*< Array of channel type/mute setting pairs.*/
+} __packed;
+/* end_addtogroup audio_pp_param_ids */
+
+/* audio_pp_module_ids
+ * ID of the IIR Tuning Filter module.
+ * This module supports the following parameter IDs:
+ * - #ASM_PARAM_ID_IIRUNING_FILTER_ENABLE_CONFIG
+ * - #ASM_PARAM_ID_IIRUNING_FILTER_PRE_GAIN
+ * - #ASM_PARAM_ID_IIRUNING_FILTER_CONFIG_PARAMS
+ */
+#define ASM_MODULE_ID_IIRUNING_FILTER   0x00010C02
+
+/* @addtogroup audio_pp_param_ids */
+/* ID of the IIR tuning filter enable parameter used by the
+ * #ASM_MODULE_ID_IIRUNING_FILTER module.
+ * @messagepayload
+ * @structure{asm_iiruning_filter_enable}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ASM_PARAM_ID_IIRUNING_FILTER_ENABLE_CONFIG.tex}
+ */
+#define ASM_PARAM_ID_IIRUNING_FILTER_ENABLE_CONFIG   0x00010C03
+
+/* ID of the IIR tuning filter pregain parameter used by the
+ * #ASM_MODULE_ID_IIRUNING_FILTER module.
+ */
+#define ASM_PARAM_ID_IIRUNING_FILTER_PRE_GAIN  0x00010C04
+
+/* ID of the IIR tuning filter configuration parameters used by the
+ * #ASM_MODULE_ID_IIRUNING_FILTER module.
+ */
+#define ASM_PARAM_ID_IIRUNING_FILTER_CONFIG_PARAMS  0x00010C05
+
+/* Structure for an enable configuration parameter for an
+ * IIR tuning filter module.
+ */
+
+
+/* @brief Payload of the #ASM_PARAM_ID_IIRUNING_FILTER_ENABLE_CONFIG
+ * parameter used by the IIR Tuning Filter module.
+ */
+struct asm_iiruning_filter_enable {
+	uint32_t                  enable_flag;
+/*< Specifies whether the IIR tuning filter is disabled (0) or
+ * enabled (1).
+ */
+} __packed;
+
+/* Structure for the pregain parameter for an IIR tuning filter module. */
+
+
+/* Payload of the #ASM_PARAM_ID_IIRUNING_FILTER_PRE_GAIN
+ * parameters used by the IIR Tuning Filter module.
+ */
+struct asm_iiruning_filter_pregain {
+	uint16_t                  pregain;
+	/*< Linear gain in Q13 format. */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero.*/
+} __packed;
+
+/* Structure for the configuration parameter for an IIR tuning filter
+ * module.
+ */
+
+
+/* @brief Payload of the #ASM_PARAM_ID_IIRUNING_FILTER_CONFIG_PARAMS
+ * parameters used by the IIR Tuning Filter module. \n
+ * \n
+ * This structure is followed by the IIR filter coefficients: \n
+ * - Sequence of int32_t FilterCoeffs \n
+ * Five coefficients for each band. Each coefficient is in int32_t format, in
+ * the order of b0, b1, b2, a1, a2.
+ * - Sequence of int16_t NumShiftFactor \n
+ * One int16_t per band. The numerator shift factor is related to the Q
+ * factor of the filter coefficients.
+ * - Sequence of uint16_t PanSetting \n
+ * One uint16_t per band, indicating if the filter is applied to left (0),
+ * right (1), or both (2) channels.
+ */
+struct asm_iir_filter_config_params {
+	uint16_t                  num_biquad_stages;
+/*< Number of bands.
+ * Supported values: 0 to 20
+ */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero.*/
+} __packed;
+
+/* audio_pp_module_ids
+ * ID of the Multiband Dynamic Range Control (MBDRC) module on the Tx/Rx
+ * paths.
+ * This module supports the following parameter IDs:
+ * - #ASM_PARAM_ID_MBDRC_ENABLE
+ * - #ASM_PARAM_ID_MBDRC_CONFIG_PARAMS
+ */
+#define ASM_MODULE_ID_MBDRC   0x00010C06
+
+/* audio_pp_param_ids */
+/* ID of the MBDRC enable parameter used by the #ASM_MODULE_ID_MBDRC module.
+ * @messagepayload
+ * @structure{asm_mbdrc_enable}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ASM_PARAM_ID_MBDRC_ENABLE.tex}
+ */
+#define ASM_PARAM_ID_MBDRC_ENABLE   0x00010C07
+
+/* ID of the MBDRC configuration parameters used by the
+ * #ASM_MODULE_ID_MBDRC module.
+ * @messagepayload
+ * @structure{asm_mbdrc_config_params}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ASM_PARAM_ID_MBDRC_CONFIG_PARAMS.tex}
+ *
+ * @parspace Sub-band DRC configuration parameters
+ * @structure{asm_subband_drc_config_params}
+ * @tablespace
+ * @inputtable{Audio_Postproc_ASM_PARAM_ID_MBDRC_CONFIG_PARAMS_subband_DRC.tex}
+ *
+ * @keep{6}
+ * To obtain legacy ADRC from MBDRC, use the calibration tool to:
+ *
+ * - Enable MBDRC (EnableFlag = TRUE)
+ * - Set number of bands to 1 (uiNumBands = 1)
+ * - Enable the first MBDRC band (DrcMode[0] = DRC_ENABLED = 1)
+ * - Clear the first band mute flag (MuteFlag[0] = 0)
+ * - Set the first band makeup gain to unity (compMakeUpGain[0] = 0x2000)
+ * - Use the legacy ADRC parameters to calibrate the rest of the MBDRC
+ * parameters.
+ */
+#define ASM_PARAM_ID_MBDRC_CONFIG_PARAMS  0x00010C08
+
+/* end_addtogroup audio_pp_param_ids */
+
+/* audio_pp_module_ids
+ * ID of the MMBDRC module version 2 pre/postprocessing block.
+ * This module differs from the original MBDRC (#ASM_MODULE_ID_MBDRC) in
+ * the length of the filters used in each sub-band.
+ * This module supports the following parameter ID:
+ * - #ASM_PARAM_ID_MBDRC_CONFIG_PARAMS_IMPROVED_FILTBANK_V2
+ */
+#define ASM_MODULE_ID_MBDRCV2                                0x0001070B
+
+/* @addtogroup audio_pp_param_ids */
+/* ID of the configuration parameters used by the
+ * #ASM_MODULE_ID_MBDRCV2 module for the improved filter structure
+ * of the MBDRC v2 pre/postprocessing block.
+ * The update to this configuration structure from the original
+ * MBDRC is the number of filter coefficients in the filter
+ * structure. The sequence for is as follows:
+ * - 1 band = 0 FIR coefficient + 1 mute flag + uint16_t padding
+ * - 2 bands = 141 FIR coefficients + 2 mute flags + uint16_t padding
+ * - 3 bands = 141+81 FIR coefficients + 3 mute flags + uint16_t padding
+ * - 4 bands = 141+81+61 FIR coefficients + 4 mute flags + uint16_t
+ * padding
+ * - 5 bands = 141+81+61+61 FIR coefficients + 5 mute flags +
+ * uint16_t padding
+ *	This block uses the same parameter structure as
+ *	#ASM_PARAM_ID_MBDRC_CONFIG_PARAMS.
+ */
+#define ASM_PARAM_ID_MBDRC_CONFIG_PARAMS_IMPROVED_FILTBANK_V2 \
+								0x0001070C
+
+/* Structure for the enable parameter for an MBDRC module. */
+
+
+/* Payload of the #ASM_PARAM_ID_MBDRC_ENABLE parameter used by the
+ * MBDRC module.
+ */
+struct asm_mbdrc_enable {
+	uint32_t                  enable_flag;
+/*< Specifies whether MBDRC is disabled (0) or enabled (nonzero).*/
+} __packed;
+
+/* Structure for the configuration parameters for an MBDRC module. */
+
+
+/* Payload of the #ASM_PARAM_ID_MBDRC_CONFIG_PARAMS
+ * parameters used by the MBDRC module. \n \n Following this
+ * structure is the payload for sub-band DRC configuration
+ * parameters (asm_subband_drc_config_params). This sub-band
+ * structure must be repeated for each band.
+ */
+
+
+struct asm_mbdrc_config_params {
+	uint16_t                  num_bands;
+/*< Number of bands.
+ * Supported values: 1 to 5
+ */
+
+	int16_t                   limiterhreshold;
+/*< Threshold in decibels for the limiter output.
+ * Supported values: -72 to 18 \n
+ * Recommended value: 3994 (-0.22 dB in Q3.12 format)
+ */
+
+	int16_t                   limiter_makeup_gain;
+/*< Makeup gain in decibels for the limiter output.
+ * Supported values: -42 to 42 \n
+ * Recommended value: 256 (0 dB in Q7.8 format)
+ */
+
+	int16_t                   limiter_gc;
+/*< Limiter gain recovery coefficient.
+ * Supported values: 0.5 to 0.99 \n
+ * Recommended value: 32440 (0.99 in Q15 format)
+ */
+
+	int16_t                   limiter_delay;
+/*< Limiter delay in samples.
+ * Supported values: 0 to 10 \n
+ * Recommended value: 262 (0.008 samples in Q15 format)
+ */
+
+	int16_t                   limiter_max_wait;
+/*< Maximum limiter waiting time in samples.
+ * Supported values: 0 to 10 \n
+ * Recommended value: 262 (0.008 samples in Q15 format)
+ */
+} __packed;
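
A minimal sketch, assuming only the structure defined above, of seeding the limiter fields with the recommended values quoted in the field comments; the helper name and its caller are hypothetical:

static void fill_mbdrc_limiter_defaults(struct asm_mbdrc_config_params *cfg,
					uint16_t num_bands)
{
	/* Recommended values quoted from the field comments above */
	cfg->num_bands = num_bands;		/* 1 to 5 */
	cfg->limiterhreshold = 3994;		/* -0.22 dB in Q3.12 */
	cfg->limiter_makeup_gain = 256;		/* 0 dB in Q7.8 */
	cfg->limiter_gc = 32440;		/* 0.99 in Q15 */
	cfg->limiter_delay = 262;		/* Q15 */
	cfg->limiter_max_wait = 262;		/* Q15 */
}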
+
+/* DRC configuration structure for each sub-band of an MBDRC module. */
+
+
+/* Payload of the #ASM_PARAM_ID_MBDRC_CONFIG_PARAMS DRC
+ * configuration parameters for each sub-band in the MBDRC module.
+ * After this DRC structure is configured for valid bands, the next
+ * MBDRC setparams expects the sequence of sub-band MBDRC filter
+ * coefficients (the length depends on the number of bands) plus the
+ * mute flag for that band plus uint16_t padding.
+ *
+ * @keep{10}
+ * The filter coefficient and mute flag are of type int16_t:
+ * - FIR coefficient = int16_t firFilter
+ * - Mute flag = int16_t fMuteFlag
+ *
+ * The sequence is as follows:
+ * - 1 band = 0 FIR coefficient + 1 mute flag + uint16_t padding
+ * - 2 bands = 97 FIR coefficients + 2 mute flags + uint16_t padding
+ * - 3 bands = 97+33 FIR coefficients + 3 mute flags + uint16_t padding
+ * - 4 bands = 97+33+33 FIR coefficients + 4 mute flags + uint16_t padding
+ * - 5 bands = 97+33+33+33 FIR coefficients + 5 mute flags + uint16_t padding
+ *
+ * For improved filterbank, the sequence is as follows:
+ * - 1 band = 0 FIR coefficient + 1 mute flag + uint16_t padding
+ * - 2 bands = 141 FIR coefficients + 2 mute flags + uint16_t padding
+ * - 3 bands = 141+81 FIR coefficients + 3 mute flags + uint16_t padding
+ * - 4 bands = 141+81+61 FIR coefficients + 4 mute flags + uint16_t padding
+ * - 5 bands = 141+81+61+61 FIR coefficients + 5 mute flags + uint16_t padding
+ */
+struct asm_subband_drc_config_params {
+	int16_t                   drc_stereo_linked_flag;
+/*< Specifies whether all stereo channels have the same applied
+ * dynamics (1) or if they process their dynamics independently (0).
+ * Supported values:
+ * - 0 -- Not linked
+ * - 1 -- Linked
+ */
+
+	int16_t                   drc_mode;
+/*< Specifies whether DRC is enabled for this sub-band.
+ * Supported values:
+ * - 0 -- Disabled
+ * - 1 -- Enabled
+ */
+
+	int16_t                   drc_down_sample_level;
+/*< DRC down sample level.
+ * Supported values: @ge 1
+ */
+
+	int16_t                   drc_delay;
+/*< DRC delay in samples.
+ * Supported values: 0 to 1200
+ */
+
+	uint16_t                  drc_rmsime_avg_const;
+/*< RMS signal energy time-averaging constant.
+ * Supported values: 0 to 2^16-1
+ */
+
+	uint16_t                  drc_makeup_gain;
+/*< DRC makeup gain in decibels.
+ * Supported values: 258 to 64917
+ */
+	/* Down expander settings */
+	int16_t                   down_expdrhreshold;
+/*< Down expander threshold.
+ * Supported Q7 format values: 1320 to up_cmpsrhreshold
+ */
+
+	int16_t                   down_expdr_slope;
+/*< Down expander slope.
+ * Supported Q8 format values: -32768 to 0.
+ */
+
+	uint32_t                  down_expdr_attack;
+/*< Down expander attack constant.
+ * Supported Q31 format values: 196844 to 2^31.
+ */
+
+	uint32_t                  down_expdr_release;
+/*< Down expander release constant.
+ * Supported Q31 format values: 19685 to 2^31
+ */
+
+	uint16_t                  down_expdr_hysteresis;
+/*< Down expander hysteresis constant.
+ * Supported Q14 format values: 1 to 32690
+ */
+
+	uint16_t                  reserved;
+	/*< Clients must set this field to zero. */
+
+	int32_t                   down_expdr_min_gain_db;
+/*< Down expander minimum gain.
+ * Supported Q23 format values: -805306368 to 0.
+ */
+
+	/* Up compressor settings */
+
+	int16_t                   up_cmpsrhreshold;
+/*< Up compressor threshold.
+ * Supported Q7 format values: down_expdrhreshold to
+ * down_cmpsrhreshold.
+ */
+
+	uint16_t                  up_cmpsr_slope;
+/*< Up compressor slope.
+ * Supported Q16 format values: 0 to 64881.
+ */
+
+	uint32_t                  up_cmpsr_attack;
+/*< Up compressor attack constant.
+ * Supported Q31 format values: 196844 to 2^31.
+ */
+
+	uint32_t                  up_cmpsr_release;
+/*< Up compressor release constant.
+ * Supported Q31 format values: 19685 to 2^31.
+ */
+
+	uint16_t                  up_cmpsr_hysteresis;
+/*< Up compressor hysteresis constant.
+ * Supported Q14 format values: 1 to 32690.
+ */
+
+	/* Down compressor settings */
+
+	int16_t                   down_cmpsrhreshold;
+/*< Down compressor threshold.
+ * Supported Q7 format values: up_cmpsrhreshold to 11560.
+ */
+
+	uint16_t                  down_cmpsr_slope;
+/*< Down compressor slope.
+ * Supported Q16 format values: 0 to 64881.
+ */
+
+	uint16_t                  reserved1;
+/*< Clients must set this field to zero. */
+
+	uint32_t                  down_cmpsr_attack;
+/*< Down compressor attack constant.
+ * Supported Q31 format values: 196844 to 2^31.
+ */
+
+	uint32_t                  down_cmpsr_release;
+/*< Down compressor release constant.
+ * Supported Q31 format values: 19685 to 2^31.
+ */
+
+	uint16_t                  down_cmpsr_hysteresis;
+/*< Down compressor hysteresis constant.
+ * Supported Q14 values: 1 to 32690.
+ */
+
+	uint16_t                  reserved2;
+/*< Clients must set this field to zero.*/
+} __packed;
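
A minimal sketch, assuming the original (non-improved) filter-bank coefficient counts documented above, of sizing the trailing coefficient/mute-flag/padding block that follows the per-band DRC structures; the lookup table and helper are hypothetical:

/* FIR coefficient totals per band count (original filter bank), from the
 * documented sequence: 0, 97, 97+33, 97+33+33, 97+33+33+33.
 */
static const unsigned int mbdrc_fir_total[6] = { 0, 0, 97, 130, 163, 196 };

static size_t mbdrc_coeff_block_size(uint16_t num_bands)
{
	if (num_bands < 1 || num_bands > 5)
		return 0;
	/* FIR coefficients plus one int16_t mute flag per band,
	 * followed by a trailing uint16_t padding word.
	 */
	return (mbdrc_fir_total[num_bands] + num_bands) * sizeof(int16_t) +
	       sizeof(uint16_t);
}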
+
+#define ASM_MODULE_ID_EQUALIZER            0x00010C27
+#define ASM_PARAM_ID_EQUALIZER_PARAMETERS  0x00010C28
+
+#define ASM_MAX_EQ_BANDS 12
+
+struct asm_eq_per_band_params {
+	uint32_t                  band_idx;
+/*< Band index.
+ * Supported values: 0 to 11
+ */
+
+	uint32_t                  filterype;
+/*< Type of filter.
+ * Supported values:
+ * - #ASM_PARAM_EQYPE_NONE
+ * - #ASM_PARAM_EQ_BASS_BOOST
+ * - #ASM_PARAM_EQ_BASS_CUT
+ * - #ASM_PARAM_EQREBLE_BOOST
+ * - #ASM_PARAM_EQREBLE_CUT
+ * - #ASM_PARAM_EQ_BAND_BOOST
+ * - #ASM_PARAM_EQ_BAND_CUT
+ */
+
+	uint32_t                  center_freq_hz;
+	/*< Filter band center frequency in Hertz. */
+
+	int32_t                   filter_gain;
+/*< Filter band initial gain.
+ * Supported values: -12 to +12 dB in 1 dB increments
+ */
+
+	int32_t                   q_factor;
+/*< Filter band quality factor expressed as a Q8 number, i.e., a
+ * fixed-point number with q factor of 8. For example, 3000/(2^8).
+ */
+} __packed;
+
+struct asm_eq_params {
+	struct apr_hdr	hdr;
+	struct asm_stream_cmd_set_pp_params_v2 param;
+	struct asm_stream_param_data_v2 data;
+		uint32_t                  enable_flag;
+/*< Specifies whether the equalizer module is disabled (0) or enabled
+ * (nonzero).
+ */
+
+		uint32_t                  num_bands;
+/*< Number of bands.
+ * Supported values: 1 to 12
+ */
+	struct asm_eq_per_band_params eq_bands[ASM_MAX_EQ_BANDS];
+
+} __packed;
+
+/*	No equalizer effect.*/
+#define ASM_PARAM_EQYPE_NONE      0
+
+/*	Bass boost equalizer effect.*/
+#define ASM_PARAM_EQ_BASS_BOOST     1
+
+/*	Bass cut equalizer effect.*/
+#define ASM_PARAM_EQ_BASS_CUT       2
+
+/*	Treble boost equalizer effect.*/
+#define ASM_PARAM_EQREBLE_BOOST   3
+
+/*	Treble cut equalizer effect.*/
+#define ASM_PARAM_EQREBLE_CUT     4
+
+/*	Band boost equalizer effect.*/
+#define ASM_PARAM_EQ_BAND_BOOST     5
+
+/*	Band cut equalizer effect.*/
+#define ASM_PARAM_EQ_BAND_CUT       6
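
A minimal sketch of a single-band bass-boost setup using the structures above; the APR header and set-params fields are assumed to be filled by the caller, and the chosen values are placeholders within the documented ranges:

static void fill_eq_single_band(struct asm_eq_params *eq)
{
	/* hdr, param and data are assumed to be populated elsewhere */
	eq->enable_flag = 1;
	eq->num_bands = 1;

	eq->eq_bands[0].band_idx = 0;
	eq->eq_bands[0].filterype = ASM_PARAM_EQ_BASS_BOOST;
	eq->eq_bands[0].center_freq_hz = 100;	/* Hz, placeholder */
	eq->eq_bands[0].filter_gain = 6;	/* dB, within -12 to +12 */
	eq->eq_bands[0].q_factor = 768;		/* 3.0 in Q8 */
}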
+
+
+/* ERROR CODES */
+/* Success. The operation completed with no errors. */
+#define ADSP_EOK          0x00000000
+/* General failure. */
+#define ADSP_EFAILED      0x00000001
+/* Bad operation parameter. */
+#define ADSP_EBADPARAM    0x00000002
+/* Unsupported routine or operation. */
+#define ADSP_EUNSUPPORTED 0x00000003
+/* Unsupported version. */
+#define ADSP_EVERSION     0x00000004
+/* Unexpected problem encountered. */
+#define ADSP_EUNEXPECTED  0x00000005
+/* Unhandled problem occurred. */
+#define ADSP_EPANIC       0x00000006
+/* Unable to allocate resource. */
+#define ADSP_ENORESOURCE  0x00000007
+/* Invalid handle. */
+#define ADSP_EHANDLE      0x00000008
+/* Operation is already processed. */
+#define ADSP_EALREADY     0x00000009
+/* Operation is not ready to be processed. */
+#define ADSP_ENOTREADY    0x0000000A
+/* Operation is pending completion. */
+#define ADSP_EPENDING     0x0000000B
+/* Operation could not be accepted or processed. */
+#define ADSP_EBUSY        0x0000000C
+/* Operation aborted due to an error. */
+#define ADSP_EABORTED     0x0000000D
+/* Operation preempted by a higher priority. */
+#define ADSP_EPREEMPTED   0x0000000E
+/* Operation requests intervention to complete. */
+#define ADSP_ECONTINUE    0x0000000F
+/* Operation requests immediate intervention to complete. */
+#define ADSP_EIMMEDIATE   0x00000010
+/* Operation is not implemented. */
+#define ADSP_ENOTIMPL     0x00000011
+/* Operation needs more data or resources. */
+#define ADSP_ENEEDMORE    0x00000012
+/* Operation does not have memory. */
+#define ADSP_ENOMEMORY     0x00000014
+/* Item does not exist. */
+#define ADSP_ENOTEXIST      0x00000015
+/* Operation is finished. */
+#define ADSP_ETERMINATED    0x00011174
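
A hedged sketch of how a driver might translate these ADSP status codes into Linux errno values before returning them to callers; the grouping below is illustrative, not mandated by the header:

static int adsp_err_to_errno(uint32_t adsp_status)
{
	switch (adsp_status) {
	case ADSP_EOK:
		return 0;
	case ADSP_EBADPARAM:
	case ADSP_EUNSUPPORTED:
	case ADSP_EVERSION:
		return -EINVAL;
	case ADSP_ENORESOURCE:
	case ADSP_ENOMEMORY:
		return -ENOMEM;
	case ADSP_EBUSY:
	case ADSP_ENOTREADY:
	case ADSP_EPENDING:
		return -EBUSY;
	default:
		return -EIO;
	}
}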
+
+/*bharath, adsp_error_codes.h */
+
+#endif /*_APR_AUDIO_V2_H_ */
diff --git a/include/sound/apr_audio.h b/include/sound/apr_audio.h
index 134d044..96795a3 100644
--- a/include/sound/apr_audio.h
+++ b/include/sound/apr_audio.h
@@ -611,6 +611,19 @@
 #define VPM_TX_DM_FLUENCE_COPP_TOPOLOGY			0x00010F72
 #define VPM_TX_QMIC_FLUENCE_COPP_TOPOLOGY		0x00010F75
 
+/* SRS TRUMEDIA GUIDS */
+/* topology */
+#define SRS_TRUMEDIA_TOPOLOGY_ID			0x00010D90
+/* module */
+#define SRS_TRUMEDIA_MODULE_ID				0x10005010
+/* parameters */
+#define SRS_TRUMEDIA_PARAMS				0x10005011
+#define SRS_TRUMEDIA_PARAMS_WOWHD			0x10005012
+#define SRS_TRUMEDIA_PARAMS_CSHP			0x10005013
+#define SRS_TRUMEDIA_PARAMS_HPF				0x10005014
+#define SRS_TRUMEDIA_PARAMS_PEQ				0x10005015
+#define SRS_TRUMEDIA_PARAMS_HL				0x10005016
+
 #define ASM_MAX_EQ_BANDS 12
 
 struct asm_eq_band {
@@ -1427,4 +1440,96 @@
 #define ADSP_ENOTIMPL     0x00000011 /* Operation is not implemented. */
 #define ADSP_ENEEDMORE    0x00000012 /* Operation needs more data or resources*/
 
+/* SRS TRUMEDIA start */
+#define SRS_ID_GLOBAL	0x00000001
+#define SRS_ID_WOWHD	0x00000002
+#define SRS_ID_CSHP	0x00000003
+#define SRS_ID_HPF	0x00000004
+#define SRS_ID_PEQ	0x00000005
+#define SRS_ID_HL	0x00000006
+
+#define SRS_CMD_UPLOAD		0x7FFF0000
+#define SRS_PARAM_INDEX_MASK	0x80000000
+#define SRS_PARAM_OFFSET_MASK	0x3FFF0000
+#define SRS_PARAM_VALUE_MASK	0x0000FFFF
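
These masks pack a parameter-block index, a parameter offset and a 16-bit value into one 32-bit control word, as the routing driver's set handler later shows for the index and value fields. A minimal decode sketch follows; the 16-bit shift for the offset is an assumption inferred from the 0x3FFF0000 mask:

static void srs_unpack_control_word(u32 word, u16 *index, u16 *offset,
				    u16 *value)
{
	*index = (word & SRS_PARAM_INDEX_MASK) >> 31;	/* parameter block */
	*offset = (word & SRS_PARAM_OFFSET_MASK) >> 16;	/* assumed shift */
	*value = word & SRS_PARAM_VALUE_MASK;
}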
+
+struct srs_trumedia_params_GLOBAL {
+	uint8_t                  v1;
+	uint8_t                  v2;
+	uint8_t                  v3;
+	uint8_t                  v4;
+	uint8_t                  v5;
+	uint8_t                  v6;
+	uint8_t                  v7;
+	uint8_t                  v8;
+} __packed;
+
+struct srs_trumedia_params_WOWHD {
+	uint32_t				v1;
+	uint16_t				v2;
+	uint16_t				v3;
+	uint16_t				v4;
+	uint16_t				v5;
+	uint16_t				v6;
+	uint16_t				v7;
+	uint16_t				v8;
+	uint16_t				v____A1;
+	uint32_t				v9;
+	uint16_t				v10;
+	uint16_t				v11;
+	uint32_t				v12[16];
+} __packed;
+
+struct srs_trumedia_params_CSHP {
+	uint32_t				v1;
+	uint16_t				v2;
+	uint16_t				v3;
+	uint16_t				v4;
+	uint16_t				v5;
+	uint16_t				v6;
+	uint16_t				v____A1;
+	uint32_t				v7;
+	uint16_t				v8;
+	uint16_t				v9;
+	uint32_t				v10[16];
+} __packed;
+
+struct srs_trumedia_params_HPF {
+	uint32_t				v1;
+	uint32_t				v2[26];
+} __packed;
+
+struct srs_trumedia_params_PEQ {
+	uint32_t				v1;
+	uint16_t				v2;
+	uint16_t				v3;
+	uint16_t				v4;
+	uint16_t				v____A1;
+	uint32_t				v5[26];
+	uint32_t				v6[26];
+} __packed;
+
+struct srs_trumedia_params_HL {
+	uint16_t				v1;
+	uint16_t				v2;
+	uint16_t				v3;
+	uint16_t				v____A1;
+	int32_t					v4;
+	uint32_t				v5;
+	uint16_t				v6;
+	uint16_t				v____A2;
+	uint32_t				v7;
+} __packed;
+
+struct srs_trumedia_params {
+	struct srs_trumedia_params_GLOBAL	global;
+	struct srs_trumedia_params_WOWHD	wowhd;
+	struct srs_trumedia_params_CSHP		cshp;
+	struct srs_trumedia_params_HPF		hpf;
+	struct srs_trumedia_params_PEQ		peq;
+	struct srs_trumedia_params_HL		hl;
+} __packed;
+int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params);
+/* SRS TruMedia end */
+
 #endif /*_APR_AUDIO_H_*/
diff --git a/include/sound/msm-dai-q6-v2.h b/include/sound/msm-dai-q6-v2.h
new file mode 100644
index 0000000..3d5ffdd
--- /dev/null
+++ b/include/sound/msm-dai-q6-v2.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_DAI_Q6_PDATA_H__
+
+#define __MSM_DAI_Q6_PDATA_H__
+
+#define MSM_MI2S_SD0 (1 << 0)
+#define MSM_MI2S_SD1 (1 << 1)
+#define MSM_MI2S_SD2 (1 << 2)
+#define MSM_MI2S_SD3 (1 << 3)
+#define MSM_MI2S_CAP_RX 0
+#define MSM_MI2S_CAP_TX 1
+
+struct msm_dai_auxpcm_pdata {
+	const char *clk;
+	u16 mode;
+	u16 sync;
+	u16 frame;
+	u16 quant;
+	/*
+	 * Note: 'slot' may need to become a four-entry array to specify
+	 * the slot number for each channel in a multichannel scenario.
+	 */
+	u16 slot;
+	u16 data;
+	int pcm_clk_rate;
+};
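
A hypothetical board-file initializer for this platform data; every value below is a placeholder chosen only to show the shape of the structure:

static struct msm_dai_auxpcm_pdata example_auxpcm_pdata = {
	.clk		= "pcm_clk",	/* placeholder clock name */
	.mode		= 0,
	.sync		= 0,
	.frame		= 5,
	.quant		= 2,
	.slot		= 0,
	.data		= 0,
	.pcm_clk_rate	= 2048000,	/* placeholder rate in Hz */
};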
+
+struct msm_i2s_data {
+	u32 capability; /* RX or TX */
+	u16 sd_lines;
+};
+#endif
diff --git a/include/sound/q6adm-v2.h b/include/sound/q6adm-v2.h
new file mode 100644
index 0000000..cb2f3d7
--- /dev/null
+++ b/include/sound/q6adm-v2.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __Q6_ADM_V2_H__
+#define __Q6_ADM_V2_H__
+
+
+#define ADM_PATH_PLAYBACK 0x1
+#define ADM_PATH_LIVE_REC 0x2
+#define ADM_PATH_NONLIVE_REC 0x3
+#include <sound/q6audio-v2.h>
+
+#define Q6_AFE_MAX_PORTS 32
+
+/* multiple copp per stream. */
+struct route_payload {
+	unsigned int copp_ids[Q6_AFE_MAX_PORTS];
+	unsigned short num_copps;
+	unsigned int session_id;
+};
+
+int adm_open(int port, int path, int rate, int mode, int topology);
+
+int adm_multi_ch_copp_open(int port, int path, int rate, int mode,
+				int topology);
+
+int adm_memory_map_regions(int port_id, uint32_t *buf_add, uint32_t mempool_id,
+				uint32_t *bufsz, uint32_t bufcnt);
+
+int adm_memory_unmap_regions(int port_id, uint32_t *buf_add, uint32_t *bufsz,
+						uint32_t bufcnt);
+
+int adm_close(int port);
+
+int adm_matrix_map(int session_id, int path, int num_copps,
+				unsigned int *port_id, int copp_id);
+
+int adm_connect_afe_port(int mode, int session_id, int port_id);
+
+int adm_get_copp_id(int port_id);
+
+#endif /* __Q6_ADM_V2_H__ */
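
A hedged sketch of the call sequence these prototypes suggest for a playback path: open a COPP on a port, map it to an ASM session, and close on failure. The topology and channel-mode values are placeholders:

static int example_adm_playback(int port_id, int session_id)
{
	unsigned int copp_ports[1] = { port_id };
	int rc;

	rc = adm_open(port_id, ADM_PATH_PLAYBACK, 48000 /* rate */,
		      2 /* channel mode, placeholder */,
		      0 /* topology, placeholder */);
	if (rc < 0)
		return rc;

	rc = adm_matrix_map(session_id, ADM_PATH_PLAYBACK, 1, copp_ports,
			    adm_get_copp_id(port_id));
	if (rc < 0)
		adm_close(port_id);
	return rc;
}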
diff --git a/include/sound/q6afe-v2.h b/include/sound/q6afe-v2.h
new file mode 100644
index 0000000..1587d38
--- /dev/null
+++ b/include/sound/q6afe-v2.h
@@ -0,0 +1,107 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __Q6AFE_V2_H__
+#define __Q6AFE_V2_H__
+#include <sound/apr_audio-v2.h>
+
+#define MSM_AFE_MONO        0
+#define MSM_AFE_MONO_RIGHT  1
+#define MSM_AFE_MONO_LEFT   2
+#define MSM_AFE_STEREO      3
+#define MSM_AFE_4CHANNELS   4
+#define MSM_AFE_6CHANNELS   6
+#define MSM_AFE_8CHANNELS   8
+
+#define MSM_AFE_I2S_FORMAT_LPCM		0
+#define MSM_AFE_I2S_FORMAT_COMPR		1
+#define MSM_AFE_I2S_FORMAT_IEC60958_LPCM	2
+#define MSM_AFE_I2S_FORMAT_IEC60958_COMPR	3
+
+#define MSM_AFE_PORT_TYPE_RX 0
+#define MSM_AFE_PORT_TYPE_TX 1
+
+#define RT_PROXY_DAI_001_RX	0xE0
+#define RT_PROXY_DAI_001_TX	0xF0
+#define RT_PROXY_DAI_002_RX	0xF1
+#define RT_PROXY_DAI_002_TX	0xE1
+#define VIRTUAL_ID_TO_PORTID(val) ((val & 0xF) | 0x2000)
+
+enum {
+	IDX_PRIMARY_I2S_RX = 0,
+	IDX_PRIMARY_I2S_TX = 1,
+	IDX_PCM_RX = 2,
+	IDX_PCM_TX = 3,
+	IDX_SECONDARY_I2S_RX = 4,
+	IDX_SECONDARY_I2S_TX = 5,
+	IDX_MI2S_RX = 6,
+	IDX_MI2S_TX = 7,
+	IDX_HDMI_RX = 8,
+	IDX_RSVD_2 = 9,
+	IDX_RSVD_3 = 10,
+	IDX_DIGI_MIC_TX = 11,
+	IDX_VOICE_RECORD_RX = 12,
+	IDX_VOICE_RECORD_TX = 13,
+	IDX_VOICE_PLAYBACK_TX = 14,
+	IDX_SLIMBUS_0_RX = 15,
+	IDX_SLIMBUS_0_TX = 16,
+	IDX_SLIMBUS_1_RX = 17,
+	IDX_SLIMBUS_1_TX = 18,
+	IDX_SLIMBUS_2_RX = 19,
+	IDX_SLIMBUS_2_TX = 20,
+	IDX_SLIMBUS_3_RX = 21,
+	IDX_SLIMBUS_3_TX = 22,
+	IDX_SLIMBUS_4_RX = 23,
+	IDX_SLIMBUS_4_TX = 24,
+	IDX_INT_BT_SCO_RX = 25,
+	IDX_INT_BT_SCO_TX = 26,
+	IDX_INT_BT_A2DP_RX = 27,
+	IDX_INT_FM_RX = 28,
+	IDX_INT_FM_TX = 29,
+	IDX_RT_PROXY_PORT_001_RX = 30,
+	IDX_RT_PROXY_PORT_001_TX = 31,
+	AFE_MAX_PORTS
+};
+
+int afe_open(u16 port_id, union afe_port_config *afe_config, int rate);
+int afe_close(int port_id);
+int afe_loopback(u16 enable, u16 rx_port, u16 tx_port);
+int afe_sidetone(u16 tx_port_id, u16 rx_port_id, u16 enable, uint16_t gain);
+int afe_loopback_gain(u16 port_id, u16 volume);
+int afe_validate_port(u16 port_id);
+int afe_start_pseudo_port(u16 port_id);
+int afe_stop_pseudo_port(u16 port_id);
+int afe_cmd_memory_map(u32 dma_addr_p, u32 dma_buf_sz);
+int afe_cmd_memory_map_nowait(int port_id, u32 dma_addr_p, u32 dma_buf_sz);
+int afe_cmd_memory_unmap(u32 dma_addr_p);
+int afe_cmd_memory_unmap_nowait(u32 dma_addr_p);
+
+int afe_register_get_events(u16 port_id,
+		void (*cb) (uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv),
+		void *private_data);
+int afe_unregister_get_events(u16 port_id);
+int afe_rt_proxy_port_write(u32 buf_addr_p, u32 mem_map_handle, int bytes);
+int afe_rt_proxy_port_read(u32 buf_addr_p, u32 mem_map_handle, int bytes);
+int afe_port_start_nowait(u16 port_id, union afe_port_config *afe_config,
+	u32 rate);
+int afe_port_stop_nowait(int port_id);
+int afe_apply_gain(u16 port_id, u16 gain);
+int afe_q6_interface_prepare(void);
+int afe_get_port_type(u16 port_id);
+/* if port_id is virtual, convert to physical..
+ * if port_id is already physical, return physical
+ */
+int afe_convert_virtual_to_portid(u16 port_id);
+
+int afe_pseudo_port_start_nowait(u16 port_id);
+int afe_pseudo_port_stop_nowait(u16 port_id);
+#endif /* __Q6AFE_V2_H__ */
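
A minimal sketch of resolving an RT proxy DAI ID to a physical AFE port and checking its direction, using only the helpers declared above; the function name is hypothetical:

static int example_resolve_port(u16 dai_id)
{
	int port_id = afe_convert_virtual_to_portid(dai_id);

	if (afe_validate_port(port_id) < 0)
		return -EINVAL;

	if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_RX)
		pr_debug("port 0x%x is an RX (playback) port\n", port_id);

	return port_id;
}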
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
new file mode 100644
index 0000000..7ef15ac
--- /dev/null
+++ b/include/sound/q6asm-v2.h
@@ -0,0 +1,303 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __Q6_ASM_V2_H__
+#define __Q6_ASM_V2_H__
+
+#include <mach/qdsp6v2/apr.h>
+#include <mach/msm_subsystem_map.h>
+#include <sound/apr_audio-v2.h>
+#include <linux/list.h>
+#include <linux/ion.h>
+
+#define IN                      0x000
+#define OUT                     0x001
+#define CH_MODE_MONO            0x001
+#define CH_MODE_STEREO          0x002
+
+#define FORMAT_LINEAR_PCM   0x0000
+#define FORMAT_DTMF         0x0001
+#define FORMAT_ADPCM	    0x0002
+#define FORMAT_YADPCM       0x0003
+#define FORMAT_MP3          0x0004
+#define FORMAT_MPEG4_AAC    0x0005
+#define FORMAT_AMRNB	    0x0006
+#define FORMAT_AMRWB	    0x0007
+#define FORMAT_V13K	    0x0008
+#define FORMAT_EVRC	    0x0009
+#define FORMAT_EVRCB	    0x000a
+#define FORMAT_EVRCWB	    0x000b
+#define FORMAT_MIDI	    0x000c
+#define FORMAT_SBC	    0x000d
+#define FORMAT_WMA_V10PRO   0x000e
+#define FORMAT_WMA_V9	    0x000f
+#define FORMAT_AMR_WB_PLUS  0x0010
+#define FORMAT_MPEG4_MULTI_AAC 0x0011
+#define FORMAT_MULTI_CHANNEL_LINEAR_PCM 0x0012
+
+#define ENCDEC_SBCBITRATE   0x0001
+#define ENCDEC_IMMEDIATE_DECODE 0x0002
+#define ENCDEC_CFG_BLK          0x0003
+
+#define CMD_PAUSE          0x0001
+#define CMD_FLUSH          0x0002
+#define CMD_EOS            0x0003
+#define CMD_CLOSE          0x0004
+#define CMD_OUT_FLUSH      0x0005
+
+/* bit 0:1 represents priority of stream */
+#define STREAM_PRIORITY_NORMAL	0x0000
+#define STREAM_PRIORITY_LOW	0x0001
+#define STREAM_PRIORITY_HIGH	0x0002
+
+/* bit 4 represents META enable of encoded data buffer */
+#define BUFFER_META_ENABLE	0x0010
+
+/* Enable Sample_Rate/Channel_Mode notification event from Decoder */
+#define SR_CM_NOTIFY_ENABLE	0x0004
+
+#define ASYNC_IO_MODE	0x0002
+#define SYNC_IO_MODE	0x0001
+#define NO_TIMESTAMP    0xFF00
+#define SET_TIMESTAMP   0x0000
+
+#define SOFT_PAUSE_ENABLE	1
+#define SOFT_PAUSE_DISABLE	0
+
+#define SESSION_MAX	0x08
+
+#define SOFT_PAUSE_PERIOD       30   /* ramp up/down for 30ms    */
+#define SOFT_PAUSE_STEP         2000 /* Step value 2ms or 2000us */
+enum {
+	SOFT_PAUSE_CURVE_LINEAR = 0,
+	SOFT_PAUSE_CURVE_EXP,
+	SOFT_PAUSE_CURVE_LOG,
+};
+
+#define SOFT_VOLUME_PERIOD       30   /* ramp up/down for 30ms    */
+#define SOFT_VOLUME_STEP         2000 /* Step value 2ms or 2000us */
+enum {
+	SOFT_VOLUME_CURVE_LINEAR = 0,
+	SOFT_VOLUME_CURVE_EXP,
+	SOFT_VOLUME_CURVE_LOG,
+};
+
+typedef void (*app_cb)(uint32_t opcode, uint32_t token,
+			uint32_t *payload, void *priv);
+
+struct audio_buffer {
+	dma_addr_t phys;
+	void       *data;
+	uint32_t   used;
+	uint32_t   size;/* size of buffer */
+	uint32_t   actual_size; /* actual number of bytes read by DSP */
+	struct      ion_handle *handle;
+	struct      ion_client *client;
+};
+
+struct audio_aio_write_param {
+	unsigned long paddr;
+	uint32_t      len;
+	uint32_t      uid;
+	uint32_t      lsw_ts;
+	uint32_t      msw_ts;
+	uint32_t      flags;
+};
+
+struct audio_aio_read_param {
+	unsigned long paddr;
+	uint32_t      len;
+	uint32_t      uid;
+};
+
+struct audio_port_data {
+	struct audio_buffer *buf;
+	uint32_t	    max_buf_cnt;
+	uint32_t	    dsp_buf;
+	uint32_t	    cpu_buf;
+	struct list_head    mem_map_handle;
+	uint32_t	    tmp_hdl;
+	/* read or write locks */
+	struct mutex	    lock;
+	spinlock_t	    dsp_lock;
+};
+
+struct audio_client {
+	int                    session;
+	app_cb		       cb;
+	atomic_t	       cmd_state;
+	/* Relative or absolute TS */
+	uint32_t	       time_flag;
+	void		       *priv;
+	uint32_t               io_mode;
+	uint64_t	       time_stamp;
+	struct apr_svc         *apr;
+	struct apr_svc         *mmap_apr;
+	struct mutex	       cmd_lock;
+	/* idx:1 out port, 0: in port*/
+	struct audio_port_data port[2];
+	wait_queue_head_t      cmd_wait;
+};
+
+void q6asm_audio_client_free(struct audio_client *ac);
+
+struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv);
+
+struct audio_client *q6asm_get_audio_client(int session_id);
+
+int q6asm_audio_client_buf_alloc(unsigned int dir/* 1:Out,0:In */,
+				struct audio_client *ac,
+				unsigned int bufsz,
+				unsigned int bufcnt);
+int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir
+				/* 1:Out,0:In */,
+				struct audio_client *ac,
+				unsigned int bufsz,
+				unsigned int bufcnt);
+
+int q6asm_audio_client_buf_free_contiguous(unsigned int dir,
+			struct audio_client *ac);
+
+int q6asm_open_read(struct audio_client *ac, uint32_t format
+		/*, uint16_t bits_per_sample*/);
+
+int q6asm_open_write(struct audio_client *ac, uint32_t format
+		/*, uint16_t bits_per_sample*/);
+
+int q6asm_open_read_write(struct audio_client *ac,
+			uint32_t rd_format,
+			uint32_t wr_format);
+
+int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+				uint32_t lsw_ts, uint32_t flags);
+int q6asm_write_nolock(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+				uint32_t lsw_ts, uint32_t flags);
+
+int q6asm_async_write(struct audio_client *ac,
+					  struct audio_aio_write_param *param);
+
+int q6asm_async_read(struct audio_client *ac,
+					  struct audio_aio_read_param *param);
+
+int q6asm_read(struct audio_client *ac);
+int q6asm_read_nolock(struct audio_client *ac);
+
+int q6asm_memory_map(struct audio_client *ac, uint32_t buf_add,
+			int dir, uint32_t bufsz, uint32_t bufcnt);
+
+int q6asm_memory_unmap(struct audio_client *ac, uint32_t buf_add,
+							int dir);
+
+int q6asm_run(struct audio_client *ac, uint32_t flags,
+		uint32_t msw_ts, uint32_t lsw_ts);
+
+int q6asm_run_nowait(struct audio_client *ac, uint32_t flags,
+		uint32_t msw_ts, uint32_t lsw_ts);
+
+int q6asm_reg_tx_overflow(struct audio_client *ac, uint16_t enable);
+
+int q6asm_cmd(struct audio_client *ac, int cmd);
+
+int q6asm_cmd_nowait(struct audio_client *ac, int cmd);
+
+void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac,
+				uint32_t *size, uint32_t *idx);
+
+void *q6asm_is_cpu_buf_avail_nolock(int dir, struct audio_client *ac,
+					uint32_t *size, uint32_t *idx);
+
+int q6asm_is_dsp_buf_avail(int dir, struct audio_client *ac);
+
+/* File format specific configurations to be added below */
+
+int q6asm_enc_cfg_blk_aac(struct audio_client *ac,
+			 uint32_t frames_per_buf,
+			uint32_t sample_rate, uint32_t channels,
+			 uint32_t bit_rate,
+			 uint32_t mode, uint32_t format);
+
+int q6asm_enc_cfg_blk_pcm(struct audio_client *ac,
+			uint32_t rate, uint32_t channels);
+
+int q6asm_set_encdec_chan_map(struct audio_client *ac,
+		uint32_t num_channels);
+
+int q6asm_enable_sbrps(struct audio_client *ac,
+			uint32_t sbr_ps);
+
+int q6asm_cfg_dual_mono_aac(struct audio_client *ac,
+			uint16_t sce_left, uint16_t sce_right);
+
+int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf,
+		uint16_t min_rate, uint16_t max_rate,
+		uint16_t reduced_rate_level, uint16_t rate_modulation_cmd);
+
+int q6asm_enc_cfg_blk_evrc(struct audio_client *ac, uint32_t frames_per_buf,
+		uint16_t min_rate, uint16_t max_rate,
+		uint16_t rate_modulation_cmd);
+
+int q6asm_enc_cfg_blk_amrnb(struct audio_client *ac, uint32_t frames_per_buf,
+		uint16_t band_mode, uint16_t dtx_enable);
+
+int q6asm_enc_cfg_blk_amrwb(struct audio_client *ac, uint32_t frames_per_buf,
+		uint16_t band_mode, uint16_t dtx_enable);
+
+int q6asm_media_format_block_pcm(struct audio_client *ac,
+			uint32_t rate, uint32_t channels);
+
+int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
+				uint32_t rate, uint32_t channels);
+
+int q6asm_media_format_block_aac(struct audio_client *ac,
+			struct asm_aac_cfg *cfg);
+
+int q6asm_media_format_block_multi_aac(struct audio_client *ac,
+			struct asm_aac_cfg *cfg);
+
+int q6asm_media_format_block_wma(struct audio_client *ac,
+			void *cfg);
+
+int q6asm_media_format_block_wmapro(struct audio_client *ac,
+			void *cfg);
+
+/* PP specific */
+int q6asm_equalizer(struct audio_client *ac, void *eq);
+
+/* Send Volume Command */
+int q6asm_set_volume(struct audio_client *ac, int volume);
+
+/* Set SoftPause Params */
+int q6asm_set_softpause(struct audio_client *ac,
+			struct asm_softpause_params *param);
+
+/* Set Softvolume Params */
+int q6asm_set_softvolume(struct audio_client *ac,
+			struct asm_softvolume_params *param);
+
+/* Send left-right channel gain */
+int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain);
+
+/* Enable Mute/unmute flag */
+int q6asm_set_mute(struct audio_client *ac, int muteflag);
+
+uint64_t q6asm_get_session_time(struct audio_client *ac);
+
+/* Client can set the IO mode to either AIO/SIO mode */
+int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode);
+
+/* Get Service ID for APR communication */
+int q6asm_get_apr_service_id(int session_id);
+
+/* Common format block without any payload */
+int q6asm_media_format_block(struct audio_client *ac, uint32_t format);
+
+#endif /* __Q6_ASM_V2_H__ */
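
A hedged sketch of the client lifecycle these prototypes imply for a simple synchronous PCM playback session; the callback body, buffer geometry and error handling are placeholders, and playback buffers are assumed to use the IN direction:

static void example_asm_cb(uint32_t opcode, uint32_t token,
			   uint32_t *payload, void *priv)
{
	/* placeholder: handle ASM events (EOS, buffer done, ...) here */
}

static struct audio_client *example_pcm_playback_session(void)
{
	struct audio_client *ac;

	ac = q6asm_audio_client_alloc(example_asm_cb, NULL);
	if (!ac)
		return NULL;

	q6asm_set_io_mode(ac, SYNC_IO_MODE);

	if (q6asm_open_write(ac, FORMAT_LINEAR_PCM))
		goto err;
	if (q6asm_audio_client_buf_alloc_contiguous(IN, ac, 4096, 4))
		goto err;
	if (q6asm_media_format_block_pcm(ac, 48000, 2))
		goto err;
	if (q6asm_run(ac, 0, 0, 0))
		goto err;

	return ac;	/* feed data with q6asm_write(), stop with CMD_CLOSE */
err:
	q6asm_audio_client_free(ac);
	return NULL;
}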
diff --git a/include/sound/q6audio-v2.h b/include/sound/q6audio-v2.h
new file mode 100644
index 0000000..1a5dce1
--- /dev/null
+++ b/include/sound/q6audio-v2.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _Q6_AUDIO_H_
+#define _Q6_AUDIO_H_
+
+#include <mach/qdsp6v2/apr.h>
+
+int q6audio_get_port_index(u16 port_id);
+
+int q6audio_convert_virtual_to_portid(u16 port_id);
+
+int q6audio_validate_port(u16 port_id);
+
+int q6audio_get_port_id(u16 port_id);
+
+#endif
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a7b95d3..3ecc6d4 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -5397,9 +5397,6 @@
 
 	BT_DBG("sk %p", sk);
 
-	if (!sk)
-		return;
-
 	lock_sock(sk);
 
 	if (sk->sk_state != BT_CONNECTED && !l2cap_pi(sk)->amp_id) {
@@ -5539,8 +5536,11 @@
 		container_of(work, struct l2cap_logical_link_work, work);
 	struct sock *sk = log_link_work->chan->l2cap_sk;
 
-	l2cap_logical_link_complete(log_link_work->chan, log_link_work->status);
-	sock_put(sk);
+	if (sk) {
+		l2cap_logical_link_complete(log_link_work->chan,
+							log_link_work->status);
+		sock_put(sk);
+	}
 	hci_chan_put(log_link_work->chan);
 	kfree(log_link_work);
 }
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 80f4bd6..86ae763 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -308,7 +308,7 @@
 
 		cmd = list_entry(p, struct pending_cmd, list);
 
-		if (cmd->opcode != opcode)
+		if (opcode > 0 && cmd->opcode != opcode)
 			continue;
 
 		if (index >= 0 && cmd->index != index)
@@ -2475,6 +2475,14 @@
 	return err;
 }
 
+static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
+{
+	u8 *status = data;
+
+	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
+	mgmt_pending_remove(cmd);
+}
+
 int mgmt_index_added(u16 index)
 {
 	BT_DBG("%d", index);
@@ -2483,7 +2491,12 @@
 
 int mgmt_index_removed(u16 index)
 {
+	u8 status = ENODEV;
+
 	BT_DBG("%d", index);
+
+	mgmt_pending_foreach(0, index, cmd_status_rsp, &status);
+
 	return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL);
 }
 
@@ -2522,6 +2535,11 @@
 
 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);
 
+	if (!powered) {
+		u8 status = ENETDOWN;
+		mgmt_pending_foreach(0, index, cmd_status_rsp, &status);
+	}
+
 	ev.val = powered;
 
 	ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk);
diff --git a/scripts/gcc-wrapper.py b/scripts/gcc-wrapper.py
index d3d393d..16c623c 100755
--- a/scripts/gcc-wrapper.py
+++ b/scripts/gcc-wrapper.py
@@ -44,8 +44,8 @@
     "alignment.c:720",
     "async.c:122",
     "async.c:270",
-    "block.c:835",
-    "block.c:836",
+    "block.c:885",
+    "block.c:886",
     "dir.c:43",
     "dm.c:1053",
     "dm.c:1080",
diff --git a/sound/soc/codecs/wcd9304-tables.c b/sound/soc/codecs/wcd9304-tables.c
index 823f926..252cb0e 100644
--- a/sound/soc/codecs/wcd9304-tables.c
+++ b/sound/soc/codecs/wcd9304-tables.c
@@ -590,6 +590,8 @@
 	[SITAR_A_CDC_RX1_B3_CTL] = 1,
 	[SITAR_A_CDC_RX1_B4_CTL] = 1,
 	[SITAR_A_CDC_RX1_B5_CTL] = 1,
+	[SITAR_A_CDC_RX2_B5_CTL] = 1,
+	[SITAR_A_CDC_RX3_B5_CTL] = 1,
 	[SITAR_A_CDC_RX1_B6_CTL] = 1,
 	[SITAR_A_CDC_RX1_VOL_CTL_B1_CTL] = 1,
 	[SITAR_A_CDC_RX1_VOL_CTL_B2_CTL] = 1,
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index ff83197..f53f276 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -629,8 +629,7 @@
 };
 
 static const char *sb_tx5_mux_text[] = {
-	"ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7",
-		"DEC5"
+	"ZERO", "RMIX1", "RMIX2", "RMIX3", "DEC1", "DEC2", "DEC3", "DEC4"
 };
 
 static const char *dec1_mux_text[] = {
@@ -642,11 +641,11 @@
 };
 
 static const char *dec3_mux_text[] = {
-	"ZERO", "DMIC2", "DMIC3", "DMIC4", "ADC1", "ADC2", "ADC3", "MBADC",
+	"ZERO", "DMIC3", "ADC1", "ADC2", "ADC3", "MBADC", "DMIC2", "DMIC4"
 };
 
 static const char *dec4_mux_text[] = {
-	"ZERO", "DMIC1", "DMIC2", "DMIC3", "DMIC4", "ADC1", "ADC2", "ADC3",
+	"ZERO", "DMIC4", "ADC1", "ADC2", "ADC3", "DMIC3", "DMIC2", "DMIC1"
 };
 
 static const char *iir1_inp1_text[] = {
@@ -898,7 +897,7 @@
 	unsigned int dmic;
 	int ret;
 
-	ret = kstrtouint(strpbrk(w->name, "12"), 10, &dmic);
+	ret = kstrtouint(strpbrk(w->name, "1234"), 10, &dmic);
 	if (ret < 0) {
 		pr_err("%s: Invalid DMIC line on the codec\n", __func__);
 		return -EINVAL;
@@ -1250,7 +1249,7 @@
 		break;
 	case SND_SOC_DAPM_POST_PMU:
 		if (sitar->mbhc_polling_active &&
-		    sitar->micbias == micb_line) {
+		    sitar->mbhc_cfg.micbias == micb_line) {
 			SITAR_ACQUIRE_LOCK(sitar->codec_resource_lock);
 			sitar_codec_pause_hs_polling(codec);
 			sitar_codec_start_hs_polling(codec);
@@ -1713,6 +1712,7 @@
 	SND_SOC_DAPM_MUX("SLIM TX2 MUX", SND_SOC_NOPM, 0, 0, &sb_tx2_mux),
 	SND_SOC_DAPM_MUX("SLIM TX3 MUX", SND_SOC_NOPM, 0, 0, &sb_tx3_mux),
 	SND_SOC_DAPM_MUX("SLIM TX4 MUX", SND_SOC_NOPM, 0, 0, &sb_tx4_mux),
+	SND_SOC_DAPM_MUX("SLIM TX5 MUX", SND_SOC_NOPM, 0, 0, &sb_tx5_mux),
 
 	SND_SOC_DAPM_AIF_OUT_E("SLIM TX1", "AIF1 Capture", 0, SND_SOC_NOPM, 0,
 				0, sitar_codec_enable_slimtx,
@@ -1729,6 +1729,10 @@
 				0, sitar_codec_enable_slimtx,
 				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
+	SND_SOC_DAPM_AIF_OUT_E("SLIM TX5", "AIF1 Capture", 0, SND_SOC_NOPM, 0,
+				0, sitar_codec_enable_slimtx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
 	/* Digital Mic Inputs */
 	SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
 		sitar_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
@@ -1841,11 +1845,16 @@
 	{"SLIM TX2", NULL, "SLIM TX2 MUX"},
 	{"SLIM TX3", NULL, "SLIM TX3 MUX"},
 	{"SLIM TX4", NULL, "SLIM TX4 MUX"},
+	{"SLIM TX5", NULL, "SLIM TX5 MUX"},
 
 	{"SLIM TX1 MUX", "DEC1", "DEC1 MUX"},
 	{"SLIM TX2 MUX", "DEC2", "DEC2 MUX"},
 	{"SLIM TX3 MUX", "DEC3", "DEC3 MUX"},
 	{"SLIM TX4 MUX", "DEC4", "DEC4 MUX"},
+	{"SLIM TX5 MUX", "DEC1", "DEC1 MUX"},
+	{"SLIM TX5 MUX", "DEC2", "DEC2 MUX"},
+	{"SLIM TX5 MUX", "DEC3", "DEC3 MUX"},
+	{"SLIM TX5 MUX", "DEC4", "DEC4 MUX"},
 
 	/* Decimator Inputs */
 	{"DEC1 MUX", "DMIC1", "DMIC1"},
@@ -1865,7 +1874,7 @@
 	{"DEC3 MUX", "ADC2", "ADC2"},
 	{"DEC3 MUX", "ADC3", "ADC3"},
 	{"DEC3 MUX", "DMIC2", "DMIC2"},
-	{"DEC3 MUX", "DMIC3", "DMIC4"},
+	{"DEC3 MUX", "DMIC4", "DMIC4"},
 	{"DEC3 MUX", NULL, "CDC_CONN"},
 	{"DEC4 MUX", "DMIC4", "DMIC4"},
 	{"DEC4 MUX", "ADC1", "ADC1"},
@@ -1992,6 +2001,7 @@
 		(choice == SITAR_BANDGAP_AUDIO_MODE)) {
 		sitar_codec_enable_audio_mode_bandgap(codec);
 	} else if (choice == SITAR_BANDGAP_MBHC_MODE) {
+		snd_soc_update_bits(codec, SITAR_A_BIAS_CURR_CTL_2, 0x0C, 0x08);
 		snd_soc_update_bits(codec, SITAR_A_BIAS_CENTRAL_BG_CTL, 0x2,
 			0x2);
 		snd_soc_update_bits(codec, SITAR_A_BIAS_CENTRAL_BG_CTL, 0x80,
@@ -2329,10 +2339,10 @@
 		}
 	} else if (dai->id == AIF1_CAP) {
 		*tx_num = sitar_dai[dai->id - 1].capture.channels_max;
-		while (cnt < *tx_num) {
-			tx_slot[cnt] = tx_ch[cnt];
-			cnt++;
-		}
+		tx_slot[0] = tx_ch[cnt];
+		tx_slot[1] = tx_ch[4 + cnt];
+		tx_slot[2] = tx_ch[2 + cnt];
+		tx_slot[3] = tx_ch[3 + cnt];
 	}
 	return 0;
 }
@@ -4402,23 +4412,20 @@
 
 static const struct sitar_reg_mask_val sitar_1_1_reg_defaults[] = {
 
-	/* Sitar 1.1 MICBIAS changes */
 	SITAR_REG_VAL(SITAR_A_MICB_1_INT_RBIAS, 0x24),
 	SITAR_REG_VAL(SITAR_A_MICB_2_INT_RBIAS, 0x24),
 
-	/* Sitar 1.1 HPH changes */
 	SITAR_REG_VAL(SITAR_A_RX_HPH_BIAS_PA, 0x57),
 	SITAR_REG_VAL(SITAR_A_RX_HPH_BIAS_LDO, 0x56),
 
-	/* Sitar 1.1 EAR PA changes */
 	SITAR_REG_VAL(SITAR_A_RX_EAR_BIAS_PA, 0xA6),
 	SITAR_REG_VAL(SITAR_A_RX_EAR_GAIN, 0x02),
 	SITAR_REG_VAL(SITAR_A_RX_EAR_VCM, 0x03),
 
-	/* Sitar 1.1 RX Changes */
 	SITAR_REG_VAL(SITAR_A_CDC_RX1_B5_CTL, 0x78),
+	SITAR_REG_VAL(SITAR_A_CDC_RX2_B5_CTL, 0x78),
+	SITAR_REG_VAL(SITAR_A_CDC_RX3_B5_CTL, 0x78),
 
-	/* Sitar 1.1 RX1 and RX2 Changes */
 	SITAR_REG_VAL(SITAR_A_CDC_RX1_B6_CTL, 0x80),
 
 	SITAR_REG_VAL(SITAR_A_CDC_CLSG_FREQ_THRESH_B3_CTL, 0x1B),
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index 03640d4..88bdcad 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -68,8 +68,9 @@
 #define AIF2_PB 3
 #define AIF2_CAP 4
 #define AIF3_CAP 5
+#define AIF3_PB  6
 
-#define NUM_CODEC_DAIS 5
+#define NUM_CODEC_DAIS 6
 #define TABLA_COMP_DIGITAL_GAIN_OFFSET 3
 
 struct tabla_codec_dai_data {
@@ -244,6 +245,10 @@
 	u32 cfilt2_cnt;
 	u32 cfilt3_cnt;
 	u32 rx_bias_count;
+	s32 dmic_1_2_clk_cnt;
+	s32 dmic_3_4_clk_cnt;
+	s32 dmic_5_6_clk_cnt;
+
 	enum tabla_bandgap_type bandgap_type;
 	bool mclk_enabled;
 	bool clock_active;
@@ -1179,6 +1184,9 @@
 static const struct soc_enum rx_mix1_inp2_chain_enum =
 	SOC_ENUM_SINGLE(TABLA_A_CDC_CONN_RX1_B1_CTL, 4, 12, rx_mix1_text);
 
+static const struct soc_enum rx_mix1_inp3_chain_enum =
+	SOC_ENUM_SINGLE(TABLA_A_CDC_CONN_RX1_B2_CTL, 0, 12, rx_mix1_text);
+
 static const struct soc_enum rx2_mix1_inp1_chain_enum =
 	SOC_ENUM_SINGLE(TABLA_A_CDC_CONN_RX2_B1_CTL, 0, 12, rx_mix1_text);
 
@@ -1321,6 +1329,9 @@
 static const struct snd_kcontrol_new rx_mix1_inp2_mux =
 	SOC_DAPM_ENUM("RX1 MIX1 INP2 Mux", rx_mix1_inp2_chain_enum);
 
+static const struct snd_kcontrol_new rx_mix1_inp3_mux =
+	SOC_DAPM_ENUM("RX1 MIX1 INP3 Mux", rx_mix1_inp3_chain_enum);
+
 static const struct snd_kcontrol_new rx2_mix1_inp1_mux =
 	SOC_DAPM_ENUM("RX2 MIX1 INP1 Mux", rx2_mix1_inp1_chain_enum);
 
@@ -2027,8 +2038,9 @@
 	struct snd_kcontrol *kcontrol, int event)
 {
 	struct snd_soc_codec *codec = w->codec;
-	u16 tx_dmic_ctl_reg;
-	u8 dmic_clk_sel, dmic_clk_en;
+	struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
+	u8  dmic_clk_en;
+	s32 *dmic_clk_cnt;
 	unsigned int dmic;
 	int ret;
 
@@ -2041,20 +2053,31 @@
 	switch (dmic) {
 	case 1:
 	case 2:
-		dmic_clk_sel = 0x02;
 		dmic_clk_en = 0x01;
+		dmic_clk_cnt = &(tabla->dmic_1_2_clk_cnt);
+
+		pr_debug("%s() event %d DMIC%d dmic_1_2_clk_cnt %d\n",
+			__func__, event,  dmic, *dmic_clk_cnt);
+
 		break;
 
 	case 3:
 	case 4:
-		dmic_clk_sel = 0x08;
 		dmic_clk_en = 0x04;
+		dmic_clk_cnt = &(tabla->dmic_3_4_clk_cnt);
+
+		pr_debug("%s() event %d DMIC%d dmic_3_4_clk_cnt %d\n",
+			__func__, event,  dmic, *dmic_clk_cnt);
 		break;
 
 	case 5:
 	case 6:
-		dmic_clk_sel = 0x20;
 		dmic_clk_en = 0x10;
+		dmic_clk_cnt = &(tabla->dmic_5_6_clk_cnt);
+
+		pr_debug("%s() event %d DMIC%d dmic_5_6_clk_cnt %d\n",
+			__func__, event,  dmic, *dmic_clk_cnt);
+
 		break;
 
 	default:
@@ -2062,24 +2085,21 @@
 		return -EINVAL;
 	}
 
-	tx_dmic_ctl_reg = TABLA_A_CDC_TX1_DMIC_CTL + 8 * (dmic - 1);
-
-	pr_debug("%s %d\n", __func__, event);
-
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
 
-		snd_soc_update_bits(codec, TABLA_A_CDC_CLK_DMIC_CTL,
-				dmic_clk_sel, dmic_clk_sel);
+		(*dmic_clk_cnt)++;
+		if (*dmic_clk_cnt == 1)
+			snd_soc_update_bits(codec, TABLA_A_CDC_CLK_DMIC_CTL,
+					dmic_clk_en, dmic_clk_en);
 
-		snd_soc_update_bits(codec, tx_dmic_ctl_reg, 0x1, 0x1);
-
-		snd_soc_update_bits(codec, TABLA_A_CDC_CLK_DMIC_CTL,
-				dmic_clk_en, dmic_clk_en);
 		break;
 	case SND_SOC_DAPM_POST_PMD:
-		snd_soc_update_bits(codec, TABLA_A_CDC_CLK_DMIC_CTL,
-				dmic_clk_en, 0);
+
+		(*dmic_clk_cnt)--;
+		if (*dmic_clk_cnt  == 0)
+			snd_soc_update_bits(codec, TABLA_A_CDC_CLK_DMIC_CTL,
+					dmic_clk_en, 0);
 		break;
 	}
 	return 0;
@@ -3211,6 +3231,7 @@
 
 	{"RX1 MIX1", NULL, "RX1 MIX1 INP1"},
 	{"RX1 MIX1", NULL, "RX1 MIX1 INP2"},
+	{"RX1 MIX1", NULL, "RX1 MIX1 INP3"},
 	{"RX2 MIX1", NULL, "RX2 MIX1 INP1"},
 	{"RX2 MIX1", NULL, "RX2 MIX1 INP2"},
 	{"RX3 MIX1", NULL, "RX3 MIX1 INP1"},
@@ -3237,6 +3258,7 @@
 	{"RX1 MIX1 INP1", "RX2", "SLIM RX2"},
 	{"RX1 MIX1 INP1", "RX3", "SLIM RX3"},
 	{"RX1 MIX1 INP1", "RX4", "SLIM RX4"},
+	{"RX1 MIX1 INP1", "RX5", "SLIM RX5"},
 	{"RX1 MIX1 INP1", "RX6", "SLIM RX6"},
 	{"RX1 MIX1 INP1", "RX7", "SLIM RX7"},
 	{"RX1 MIX1 INP1", "IIR1", "IIR1"},
@@ -3244,13 +3266,22 @@
 	{"RX1 MIX1 INP2", "RX2", "SLIM RX2"},
 	{"RX1 MIX1 INP2", "RX3", "SLIM RX3"},
 	{"RX1 MIX1 INP2", "RX4", "SLIM RX4"},
+	{"RX1 MIX1 INP2", "RX5", "SLIM RX5"},
 	{"RX1 MIX1 INP2", "RX6", "SLIM RX6"},
 	{"RX1 MIX1 INP2", "RX7", "SLIM RX7"},
 	{"RX1 MIX1 INP2", "IIR1", "IIR1"},
+	{"RX1 MIX1 INP3", "RX1", "SLIM RX1"},
+	{"RX1 MIX1 INP3", "RX2", "SLIM RX2"},
+	{"RX1 MIX1 INP3", "RX3", "SLIM RX3"},
+	{"RX1 MIX1 INP3", "RX4", "SLIM RX4"},
+	{"RX1 MIX1 INP3", "RX5", "SLIM RX5"},
+	{"RX1 MIX1 INP3", "RX6", "SLIM RX6"},
+	{"RX1 MIX1 INP3", "RX7", "SLIM RX7"},
 	{"RX2 MIX1 INP1", "RX1", "SLIM RX1"},
 	{"RX2 MIX1 INP1", "RX2", "SLIM RX2"},
 	{"RX2 MIX1 INP1", "RX3", "SLIM RX3"},
 	{"RX2 MIX1 INP1", "RX4", "SLIM RX4"},
+	{"RX2 MIX1 INP1", "RX5", "SLIM RX5"},
 	{"RX2 MIX1 INP1", "RX6", "SLIM RX6"},
 	{"RX2 MIX1 INP1", "RX7", "SLIM RX7"},
 	{"RX2 MIX1 INP1", "IIR1", "IIR1"},
@@ -3258,6 +3289,7 @@
 	{"RX2 MIX1 INP2", "RX2", "SLIM RX2"},
 	{"RX2 MIX1 INP2", "RX3", "SLIM RX3"},
 	{"RX2 MIX1 INP2", "RX4", "SLIM RX4"},
+	{"RX2 MIX1 INP2", "RX5", "SLIM RX5"},
 	{"RX2 MIX1 INP2", "RX6", "SLIM RX6"},
 	{"RX2 MIX1 INP2", "RX7", "SLIM RX7"},
 	{"RX2 MIX1 INP2", "IIR1", "IIR1"},
@@ -3265,6 +3297,7 @@
 	{"RX3 MIX1 INP1", "RX2", "SLIM RX2"},
 	{"RX3 MIX1 INP1", "RX3", "SLIM RX3"},
 	{"RX3 MIX1 INP1", "RX4", "SLIM RX4"},
+	{"RX3 MIX1 INP1", "RX5", "SLIM RX5"},
 	{"RX3 MIX1 INP1", "RX6", "SLIM RX6"},
 	{"RX3 MIX1 INP1", "RX7", "SLIM RX7"},
 	{"RX3 MIX1 INP1", "IIR1", "IIR1"},
@@ -3272,6 +3305,7 @@
 	{"RX3 MIX1 INP2", "RX2", "SLIM RX2"},
 	{"RX3 MIX1 INP2", "RX3", "SLIM RX3"},
 	{"RX3 MIX1 INP2", "RX4", "SLIM RX4"},
+	{"RX3 MIX1 INP2", "RX5", "SLIM RX5"},
 	{"RX3 MIX1 INP2", "RX6", "SLIM RX6"},
 	{"RX3 MIX1 INP2", "RX7", "SLIM RX7"},
 	{"RX3 MIX1 INP2", "IIR1", "IIR1"},
@@ -3279,12 +3313,14 @@
 	{"RX4 MIX1 INP1", "RX2", "SLIM RX2"},
 	{"RX4 MIX1 INP1", "RX3", "SLIM RX3"},
 	{"RX4 MIX1 INP1", "RX4", "SLIM RX4"},
+	{"RX4 MIX1 INP1", "RX5", "SLIM RX5"},
 	{"RX4 MIX1 INP1", "RX6", "SLIM RX6"},
 	{"RX4 MIX1 INP1", "RX7", "SLIM RX7"},
 	{"RX4 MIX1 INP1", "IIR1", "IIR1"},
 	{"RX4 MIX1 INP2", "RX1", "SLIM RX1"},
 	{"RX4 MIX1 INP2", "RX2", "SLIM RX2"},
 	{"RX4 MIX1 INP2", "RX3", "SLIM RX3"},
+	{"RX4 MIX1 INP2", "RX5", "SLIM RX5"},
 	{"RX4 MIX1 INP2", "RX4", "SLIM RX4"},
 	{"RX4 MIX1 INP2", "RX6", "SLIM RX6"},
 	{"RX4 MIX1 INP2", "RX7", "SLIM RX7"},
@@ -3293,6 +3329,7 @@
 	{"RX5 MIX1 INP1", "RX2", "SLIM RX2"},
 	{"RX5 MIX1 INP1", "RX3", "SLIM RX3"},
 	{"RX5 MIX1 INP1", "RX4", "SLIM RX4"},
+	{"RX5 MIX1 INP1", "RX5", "SLIM RX5"},
 	{"RX5 MIX1 INP1", "RX6", "SLIM RX6"},
 	{"RX5 MIX1 INP1", "RX7", "SLIM RX7"},
 	{"RX5 MIX1 INP1", "IIR1", "IIR1"},
@@ -3300,6 +3337,7 @@
 	{"RX5 MIX1 INP2", "RX2", "SLIM RX2"},
 	{"RX5 MIX1 INP2", "RX3", "SLIM RX3"},
 	{"RX5 MIX1 INP2", "RX4", "SLIM RX4"},
+	{"RX5 MIX1 INP2", "RX5", "SLIM RX5"},
 	{"RX5 MIX1 INP2", "RX6", "SLIM RX6"},
 	{"RX5 MIX1 INP2", "RX7", "SLIM RX7"},
 	{"RX5 MIX1 INP2", "IIR1", "IIR1"},
@@ -3307,6 +3345,7 @@
 	{"RX6 MIX1 INP1", "RX2", "SLIM RX2"},
 	{"RX6 MIX1 INP1", "RX3", "SLIM RX3"},
 	{"RX6 MIX1 INP1", "RX4", "SLIM RX4"},
+	{"RX6 MIX1 INP1", "RX5", "SLIM RX5"},
 	{"RX6 MIX1 INP1", "RX6", "SLIM RX6"},
 	{"RX6 MIX1 INP1", "RX7", "SLIM RX7"},
 	{"RX6 MIX1 INP1", "IIR1", "IIR1"},
@@ -3314,6 +3353,7 @@
 	{"RX6 MIX1 INP2", "RX2", "SLIM RX2"},
 	{"RX6 MIX1 INP2", "RX3", "SLIM RX3"},
 	{"RX6 MIX1 INP2", "RX4", "SLIM RX4"},
+	{"RX6 MIX1 INP2", "RX5", "SLIM RX5"},
 	{"RX6 MIX1 INP2", "RX6", "SLIM RX6"},
 	{"RX6 MIX1 INP2", "RX7", "SLIM RX7"},
 	{"RX6 MIX1 INP2", "IIR1", "IIR1"},
@@ -3321,6 +3361,7 @@
 	{"RX7 MIX1 INP1", "RX2", "SLIM RX2"},
 	{"RX7 MIX1 INP1", "RX3", "SLIM RX3"},
 	{"RX7 MIX1 INP1", "RX4", "SLIM RX4"},
+	{"RX7 MIX1 INP1", "RX5", "SLIM RX5"},
 	{"RX7 MIX1 INP1", "RX6", "SLIM RX6"},
 	{"RX7 MIX1 INP1", "RX7", "SLIM RX7"},
 	{"RX7 MIX1 INP1", "IIR1", "IIR1"},
@@ -3328,6 +3369,7 @@
 	{"RX7 MIX1 INP2", "RX2", "SLIM RX2"},
 	{"RX7 MIX1 INP2", "RX3", "SLIM RX3"},
 	{"RX7 MIX1 INP2", "RX4", "SLIM RX4"},
+	{"RX7 MIX1 INP2", "RX5", "SLIM RX5"},
 	{"RX7 MIX1 INP2", "RX6", "SLIM RX6"},
 	{"RX7 MIX1 INP2", "RX7", "SLIM RX7"},
 	{"RX7 MIX1 INP2", "IIR1", "IIR1"},
@@ -3808,9 +3850,10 @@
 		pr_err("%s: Invalid\n", __func__);
 		return -EINVAL;
 	}
-	pr_debug("%s: DAI-ID %x %d %d\n", __func__, dai->id, tx_num, rx_num);
+	pr_debug("%s(): dai_name = %s DAI-ID %x tx_ch %d rx_ch %d\n",
+			__func__, dai->name, dai->id, tx_num, rx_num);
 
-	if (dai->id == AIF1_PB || dai->id == AIF2_PB) {
+	if (dai->id == AIF1_PB || dai->id == AIF2_PB || dai->id == AIF3_PB) {
 		for (i = 0; i < rx_num; i++) {
 			tabla->dai[dai->id - 1].ch_num[i]  = rx_slot[i];
 			tabla->dai[dai->id - 1].ch_act = 0;
@@ -3842,7 +3885,7 @@
 		pr_err("%s: Invalid\n", __func__);
 		return -EINVAL;
 	}
-	pr_debug("%s: DAI-ID %x\n", __func__, dai->id);
+
 	/* for virtual port, codec driver needs to do
 	 * housekeeping, for now should be ok
 	 */
@@ -3871,11 +3914,20 @@
 		tx_slot[1] = tx_ch[1 + cnt];
 		tx_slot[2] = tx_ch[5 + cnt];
 		tx_slot[3] = tx_ch[3 + cnt];
+
+	} else if (dai->id == AIF3_PB) {
+		*rx_num = tabla_dai[dai->id - 1].playback.channels_max;
+		rx_slot[0] = rx_ch[3];
+		rx_slot[1] = rx_ch[4];
+
 	} else if (dai->id == AIF3_CAP) {
 		*tx_num = tabla_dai[dai->id - 1].capture.channels_max;
 		tx_slot[cnt] = tx_ch[2 + cnt];
 		tx_slot[cnt + 1] = tx_ch[4 + cnt];
 	}
+	pr_debug("%s(): dai_name = %s DAI-ID %x tx_ch %d rx_ch %d\n",
+			__func__, dai->name, dai->id, *tx_num, *rx_num);
+
 
 	return 0;
 }
@@ -3891,8 +3943,9 @@
 	u8 tx_fs_rate, rx_fs_rate, rx_state, tx_state;
 	u32 compander_fs;
 
-	pr_debug("%s: DAI-ID %x rate %d\n", __func__, dai->id,
-						params_rate(params));
+	pr_debug("%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__,
+		 dai->name, dai->id, params_rate(params),
+		 params_channels(params));
 
 	switch (params_rate(params)) {
 	case 8000:
@@ -3989,7 +4042,7 @@
 	 * If current dai is a rx dai, set sample rate to
 	 * all the rx paths that are currently not active
 	 */
-	if (dai->id == AIF1_PB || dai->id == AIF2_PB) {
+	if (dai->id == AIF1_PB || dai->id == AIF2_PB || dai->id == AIF3_PB) {
 
 		rx_state = snd_soc_read(codec,
 			TABLA_A_CDC_CLK_RX_B1_CTL);
@@ -4114,6 +4167,20 @@
 		},
 		.ops = &tabla_dai_ops,
 	},
+	{
+		.name = "tabla_rx3",
+		.id = AIF3_PB,
+		.playback = {
+			.stream_name = "AIF3 Playback",
+			.rates = WCD9310_RATES,
+			.formats = TABLA_FORMATS,
+			.rate_min = 8000,
+			.rate_max = 192000,
+			.channels_min = 1,
+			.channels_max = 2,
+		},
+		.ops = &tabla_dai_ops,
+	},
 };
 
 static struct snd_soc_dai_driver tabla_i2s_dai[] = {
@@ -4160,6 +4227,9 @@
 	/* Execute the callback only if interface type is slimbus */
 	if (tabla_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS)
 		return 0;
+
+	pr_debug("%s: %s %d\n", __func__, w->name, event);
+
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMU:
 		for (j = 0; j < ARRAY_SIZE(tabla_dai); j++) {
@@ -4228,7 +4298,8 @@
 	case SND_SOC_DAPM_POST_PMU:
 		for (j = 0; j < ARRAY_SIZE(tabla_dai); j++) {
 			if (tabla_dai[j].id == AIF1_PB ||
-				tabla_dai[j].id == AIF2_PB)
+				tabla_dai[j].id == AIF2_PB ||
+				tabla_dai[j].id == AIF3_PB)
 				continue;
 			if (!strncmp(w->sname,
 				tabla_dai[j].capture.stream_name, 13)) {
@@ -4245,7 +4316,8 @@
 	case SND_SOC_DAPM_POST_PMD:
 		for (j = 0; j < ARRAY_SIZE(tabla_dai); j++) {
 			if (tabla_dai[j].id == AIF1_PB ||
-				tabla_dai[j].id == AIF2_PB)
+				tabla_dai[j].id == AIF2_PB ||
+				tabla_dai[j].id == AIF3_PB)
 				continue;
 			if (!strncmp(w->sname,
 				tabla_dai[j].capture.stream_name, 13)) {
@@ -4284,9 +4356,16 @@
 	SND_SOC_DAPM_AIF_IN_E("SLIM RX2", "AIF1 Playback", 0, SND_SOC_NOPM, 0,
 				0, tabla_codec_enable_slimrx,
 				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_AIF_IN_E("SLIM RX3", "AIF1 Playback", 0, SND_SOC_NOPM, 0,
+				0, tabla_codec_enable_slimrx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
-	SND_SOC_DAPM_AIF_IN("SLIM RX3", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SLIM RX4", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN_E("SLIM RX4", "AIF3 Playback", 0, SND_SOC_NOPM, 0,
+				0, tabla_codec_enable_slimrx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_AIF_IN_E("SLIM RX5", "AIF3 Playback", 0, SND_SOC_NOPM, 0,
+				0, tabla_codec_enable_slimrx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
 	SND_SOC_DAPM_AIF_IN_E("SLIM RX6", "AIF2 Playback", 0, SND_SOC_NOPM, 0,
 				0, tabla_codec_enable_slimrx,
@@ -4395,6 +4474,8 @@
 		&rx_mix1_inp1_mux),
 	SND_SOC_DAPM_MUX("RX1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
 		&rx_mix1_inp2_mux),
+	SND_SOC_DAPM_MUX("RX1 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+		&rx_mix1_inp3_mux),
 	SND_SOC_DAPM_MUX("RX2 MIX1 INP1", SND_SOC_NOPM, 0, 0,
 		&rx2_mix1_inp1_mux),
 	SND_SOC_DAPM_MUX("RX2 MIX1 INP2", SND_SOC_NOPM, 0, 0,
@@ -6732,17 +6813,70 @@
 	return r;
 }
 
+static int tabla_mbhc_init_and_calibrate(struct tabla_priv *tabla)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = tabla->codec;
+
+	tabla->mbhc_cfg.mclk_cb_fn(codec, 1, false);
+	tabla_mbhc_init(codec);
+	tabla_mbhc_cal(codec);
+	tabla_mbhc_calc_thres(codec);
+	tabla->mbhc_cfg.mclk_cb_fn(codec, 0, false);
+	tabla_codec_calibrate_hs_polling(codec);
+	if (!tabla->mbhc_cfg.gpio) {
+		ret = tabla_codec_enable_hs_detect(codec, 1,
+						   MBHC_USE_MB_TRIGGER |
+						   MBHC_USE_HPHL_TRIGGER,
+						   false);
+
+		if (IS_ERR_VALUE(ret))
+			pr_err("%s: Failed to setup MBHC detection\n",
+			       __func__);
+	} else {
+		/* Enable Mic Bias pull down and HPH Switch to GND */
+		snd_soc_update_bits(codec, tabla->mbhc_bias_regs.ctl_reg,
+				    0x01, 0x01);
+		snd_soc_update_bits(codec, TABLA_A_MBHC_HPH, 0x01, 0x01);
+		INIT_WORK(&tabla->hs_correct_plug_work,
+			  tabla_hs_correct_gpio_plug);
+	}
+
+	if (!IS_ERR_VALUE(ret)) {
+		snd_soc_update_bits(codec, TABLA_A_RX_HPH_OCP_CTL, 0x10, 0x10);
+		wcd9xxx_enable_irq(codec->control_data,
+				 TABLA_IRQ_HPH_PA_OCPL_FAULT);
+		wcd9xxx_enable_irq(codec->control_data,
+				 TABLA_IRQ_HPH_PA_OCPR_FAULT);
+
+		if (tabla->mbhc_cfg.gpio) {
+			ret = request_threaded_irq(tabla->mbhc_cfg.gpio_irq,
+					       NULL,
+					       tabla_mechanical_plug_detect_irq,
+					       (IRQF_TRIGGER_RISING |
+						IRQF_TRIGGER_FALLING),
+					       "tabla-gpio", codec);
+			if (!IS_ERR_VALUE(ret)) {
+				ret = enable_irq_wake(tabla->mbhc_cfg.gpio_irq);
+				/* Bootup time detection */
+				tabla_hs_gpio_handler(codec);
+			}
+		}
+	}
+
+	return ret;
+}
+
 static void mbhc_fw_read(struct work_struct *work)
 {
 	struct delayed_work *dwork;
 	struct tabla_priv *tabla;
 	struct snd_soc_codec *codec;
 	const struct firmware *fw;
-	int ret = -1, retry = 0, rc;
+	int ret = -1, retry = 0;
 
 	dwork = to_delayed_work(work);
-	tabla = container_of(dwork, struct tabla_priv,
-				mbhc_firmware_dwork);
+	tabla = container_of(dwork, struct tabla_priv, mbhc_firmware_dwork);
 	codec = tabla->codec;
 
 	while (retry < MBHC_FW_READ_ATTEMPTS) {
@@ -6754,7 +6888,7 @@
 
 		if (ret != 0) {
 			usleep_range(MBHC_FW_READ_TIMEOUT,
-					MBHC_FW_READ_TIMEOUT);
+				     MBHC_FW_READ_TIMEOUT);
 		} else {
 			pr_info("%s: MBHC Firmware read succesful\n", __func__);
 			break;
@@ -6773,30 +6907,7 @@
 		tabla->mbhc_fw = fw;
 	}
 
-	tabla->mbhc_cfg.mclk_cb_fn(codec, 1, false);
-	tabla_mbhc_init(codec);
-	tabla_mbhc_cal(codec);
-	tabla_mbhc_calc_thres(codec);
-	tabla->mbhc_cfg.mclk_cb_fn(codec, 0, false);
-	tabla_codec_calibrate_hs_polling(codec);
-	if (!tabla->mbhc_cfg.gpio) {
-		rc = tabla_codec_enable_hs_detect(codec, 1,
-						  MBHC_USE_MB_TRIGGER |
-						  MBHC_USE_HPHL_TRIGGER,
-						  false);
-
-		if (IS_ERR_VALUE(rc))
-			pr_err("%s: Failed to setup MBHC detection\n",
-			       __func__);
-	} else {
-		/* Enable Mic Bias pull down and HPH Switch to GND */
-		snd_soc_update_bits(codec,
-				    tabla->mbhc_bias_regs.ctl_reg, 0x01, 0x01);
-		snd_soc_update_bits(codec, TABLA_A_MBHC_HPH, 0x01, 0x01);
-		INIT_WORK(&tabla->hs_correct_plug_work,
-			  tabla_hs_correct_gpio_plug);
-	}
-
+	(void) tabla_mbhc_init_and_calibrate(tabla);
 }
 
 int tabla_hs_detect(struct snd_soc_codec *codec,
@@ -6836,53 +6947,11 @@
 	INIT_WORK(&tabla->hphrocp_work, hphrocp_off_report);
 	INIT_DELAYED_WORK(&tabla->mbhc_insert_dwork, mbhc_insert_work);
 
-	if (!tabla->mbhc_cfg.read_fw_bin) {
-		tabla->mbhc_cfg.mclk_cb_fn(codec, 1, false);
-		tabla_mbhc_init(codec);
-		tabla_mbhc_cal(codec);
-		tabla_mbhc_calc_thres(codec);
-		tabla->mbhc_cfg.mclk_cb_fn(codec, 0, false);
-		tabla_codec_calibrate_hs_polling(codec);
-		if (!tabla->mbhc_cfg.gpio) {
-			rc =  tabla_codec_enable_hs_detect(codec, 1,
-							  MBHC_USE_MB_TRIGGER |
-							  MBHC_USE_HPHL_TRIGGER,
-							  false);
-		} else {
-			/* Enable Mic Bias pull down and HPH Switch to GND */
-			snd_soc_update_bits(codec,
-					    tabla->mbhc_bias_regs.ctl_reg, 0x01,
-					    0x01);
-			snd_soc_update_bits(codec, TABLA_A_MBHC_HPH, 0x01,
-					    0x01);
-			INIT_WORK(&tabla->hs_correct_plug_work,
-				  tabla_hs_correct_gpio_plug);
-		}
-	} else {
+	if (!tabla->mbhc_cfg.read_fw_bin)
+		rc = tabla_mbhc_init_and_calibrate(tabla);
+	else
 		schedule_delayed_work(&tabla->mbhc_firmware_dwork,
 				      usecs_to_jiffies(MBHC_FW_READ_TIMEOUT));
-	}
-
-	if (!IS_ERR_VALUE(rc)) {
-		snd_soc_update_bits(codec, TABLA_A_RX_HPH_OCP_CTL, 0x10, 0x10);
-		wcd9xxx_enable_irq(codec->control_data,
-				 TABLA_IRQ_HPH_PA_OCPL_FAULT);
-		wcd9xxx_enable_irq(codec->control_data,
-				 TABLA_IRQ_HPH_PA_OCPR_FAULT);
-	}
-
-	if (!IS_ERR_VALUE(rc) && tabla->mbhc_cfg.gpio) {
-		rc = request_threaded_irq(tabla->mbhc_cfg.gpio_irq, NULL,
-					   tabla_mechanical_plug_detect_irq,
-					   (IRQF_TRIGGER_RISING |
-					    IRQF_TRIGGER_FALLING),
-					   "tabla-gpio", codec);
-		if (!IS_ERR_VALUE(rc)) {
-			rc = enable_irq_wake(tabla->mbhc_cfg.gpio_irq);
-			/* Bootup time detection */
-			tabla_hs_gpio_handler(codec);
-		}
-	}
 
 	return rc;
 }
@@ -7185,6 +7254,22 @@
 	{TABLA_A_CDC_TX8_MUX_CTL, 0x8, 0x0},
 	{TABLA_A_CDC_TX9_MUX_CTL, 0x8, 0x0},
 	{TABLA_A_CDC_TX10_MUX_CTL, 0x8, 0x0},
+
+	/* config decimators for DMIC CLK_MODE_1 (3.072 MHz @ 12.288 MHz mclk) */
+	{TABLA_A_CDC_TX1_DMIC_CTL, 0x1, 0x1},
+	{TABLA_A_CDC_TX2_DMIC_CTL, 0x1, 0x1},
+	{TABLA_A_CDC_TX3_DMIC_CTL, 0x1, 0x1},
+	{TABLA_A_CDC_TX4_DMIC_CTL, 0x1, 0x1},
+	{TABLA_A_CDC_TX5_DMIC_CTL, 0x1, 0x1},
+	{TABLA_A_CDC_TX6_DMIC_CTL, 0x1, 0x1},
+	{TABLA_A_CDC_TX7_DMIC_CTL, 0x1, 0x1},
+	{TABLA_A_CDC_TX8_DMIC_CTL, 0x1, 0x1},
+	{TABLA_A_CDC_TX9_DMIC_CTL, 0x1, 0x1},
+	{TABLA_A_CDC_TX10_DMIC_CTL, 0x1, 0x1},
+
+	/* config DMIC clk to CLK_MODE_1 (3.072 MHz @ 12.288 MHz mclk) */
+	{TABLA_A_CDC_CLK_DMIC_CTL, 0x2A, 0x2A},
+
 };
 
 static const struct tabla_reg_mask_val tabla_1_x_codec_reg_init_val[] = {
@@ -7531,6 +7616,9 @@
 		case AIF2_CAP:
 			ch_cnt = tabla_dai[i].capture.channels_max;
 			break;
+		case AIF3_PB:
+			ch_cnt = tabla_dai[i].playback.channels_max;
+			break;
 		case AIF3_CAP:
 			ch_cnt = tabla_dai[i].capture.channels_max;
 			break;
diff --git a/sound/soc/msm/Makefile b/sound/soc/msm/Makefile
index b3cffd1..0df028c 100644
--- a/sound/soc/msm/Makefile
+++ b/sound/soc/msm/Makefile
@@ -62,7 +62,7 @@
 snd-soc-qdsp6-objs += msm-pcm-lpa.o msm-pcm-afe.o
 obj-$(CONFIG_SND_SOC_QDSP6) += snd-soc-qdsp6.o
 
-snd-soc-msm8960-objs := msm8960.o apq8064.o msm8930.o mpq8064.o
+snd-soc-msm8960-objs := msm8960.o apq8064.o msm8930.o
 obj-$(CONFIG_SND_SOC_MSM8960) += snd-soc-msm8960.o
 
 # Generic MSM drivers
@@ -75,3 +75,12 @@
 # for MDM 9615 sound card driver
 snd-soc-mdm9615-objs := mdm9615.o
 obj-$(CONFIG_SND_SOC_MDM9615) += snd-soc-mdm9615.o
+
+# for MSM 8974 sound card driver
+obj-$(CONFIG_SND_SOC_MSM_QDSP6V2_INTF) += qdsp6v2/
+snd-soc-msm8974-objs := msm8974.o
+obj-$(CONFIG_SND_SOC_MSM8974) += snd-soc-msm8974.o
+
+snd-soc-qdsp6v2-objs := msm-dai-fe.o msm-dai-stub.o
+obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o
+
diff --git a/sound/soc/msm/msm-pcm-routing.c b/sound/soc/msm/msm-pcm-routing.c
index 7fbb592..9ca6569 100644
--- a/sound/soc/msm/msm-pcm-routing.c
+++ b/sound/soc/msm/msm-pcm-routing.c
@@ -110,6 +110,43 @@
 /* This array is indexed by back-end DAI ID defined in msm-pcm-routing.h
  * If new back-end is defined, add new back-end DAI ID at the end of enum
  */
+
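+/*
+ * Overlay the SRS TruMedia parameter block with an array of 16-bit words so
+ * that individual parameter words can be addressed by offset from the mixer
+ * control handlers below.
+ */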
+union srs_trumedia_params_u {
+	struct srs_trumedia_params srs_params;
+	unsigned short int raw_params[1];
+};
+static union srs_trumedia_params_u msm_srs_trumedia_params[2];
+static int srs_port_id = -1;
+
+static void srs_send_params(int port_id, unsigned int techs,
+		int param_block_idx)
+{
+	pr_debug("SRS %s: called, port_id = %d, techs flags = %u,"
+			" paramblockidx %d", __func__, port_id, techs,
+			param_block_idx);
+	/* force all if techs is set to 1 */
+	if (techs == 1)
+		techs = 0xFFFFFFFF;
+
+	if (techs & (1 << SRS_ID_WOWHD))
+		srs_trumedia_open(port_id, SRS_ID_WOWHD,
+	(void *)&msm_srs_trumedia_params[param_block_idx].srs_params.wowhd);
+	if (techs & (1 << SRS_ID_CSHP))
+		srs_trumedia_open(port_id, SRS_ID_CSHP,
+	(void *)&msm_srs_trumedia_params[param_block_idx].srs_params.cshp);
+	if (techs & (1 << SRS_ID_HPF))
+		srs_trumedia_open(port_id, SRS_ID_HPF,
+	(void *)&msm_srs_trumedia_params[param_block_idx].srs_params.hpf);
+	if (techs & (1 << SRS_ID_PEQ))
+		srs_trumedia_open(port_id, SRS_ID_PEQ,
+	(void *)&msm_srs_trumedia_params[param_block_idx].srs_params.peq);
+	if (techs & (1 << SRS_ID_HL))
+		srs_trumedia_open(port_id, SRS_ID_HL,
+	(void *)&msm_srs_trumedia_params[param_block_idx].srs_params.hl);
+	if (techs & (1 << SRS_ID_GLOBAL))
+		srs_trumedia_open(port_id, SRS_ID_GLOBAL,
+	(void *)&msm_srs_trumedia_params[param_block_idx].srs_params.global);
+}
+
 static struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = {
 	{ PRIMARY_I2S_RX, 0, 0, 0, 0, 0},
 	{ PRIMARY_I2S_TX, 0, 0, 0, 0, 0},
@@ -749,6 +786,104 @@
 	return 0;
 }
 
+static int msm_routing_get_srs_trumedia_control(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = 0;
+	return 0;
+}
+
+static int msm_routing_set_srs_trumedia_control_(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	unsigned int techs = 0;
+	unsigned short offset, value, max, index;
+
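+	/*
+	 * The 32-bit control value packs a parameter-block index in the top
+	 * bit, a word offset in the remaining upper bits and a 16-bit
+	 * parameter value in the low half; when SRS_CMD_UPLOAD is set, the
+	 * low byte instead selects which SRS technologies to send out.
+	 */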
+	max = sizeof(msm_srs_trumedia_params) >> 1;
+	index = (unsigned short)((ucontrol->value.integer.value[0] &
+			SRS_PARAM_INDEX_MASK) >> 31);
+	if (SRS_CMD_UPLOAD ==
+		(ucontrol->value.integer.value[0] & SRS_CMD_UPLOAD)) {
+		techs = ucontrol->value.integer.value[0] & 0xFF;
+		pr_debug("SRS %s: send params request, flags = %u",
+			__func__, techs);
+		if (srs_port_id >= 0 && techs)
+			srs_send_params(srs_port_id, techs, index);
+		return 0;
+	}
+	offset = (unsigned short)((ucontrol->value.integer.value[0] &
+			SRS_PARAM_OFFSET_MASK) >> 16);
+	value = (unsigned short)(ucontrol->value.integer.value[0] &
+			SRS_PARAM_VALUE_MASK);
+	if (offset < max) {
+		msm_srs_trumedia_params[index].raw_params[offset] = value;
+		pr_debug("SRS %s: index set... (max %d, requested %d,"
+			" val %d, paramblockidx %d)", __func__, max, offset,
+			value, index);
+	} else {
+		pr_err("SRS %s: index out of bounds! (max %d, requested %d)",
+				__func__, max, offset);
+	}
+	if (offset == 4) {
+		int i;
+		for (i = 0; i < max; i++) {
+			if (i == 0) {
+				pr_debug("SRS %s: global block start",
+						__func__);
+			}
+			if (i ==
+			(sizeof(struct srs_trumedia_params_GLOBAL) >> 1)) {
+				pr_debug("SRS %s: wowhd block start at"
+					" offset %d word offset %d", __func__,
+					i, i >> 1);
+				break;
+			}
+			pr_debug("SRS %s: param_index %d index %d val %d",
+				__func__, index, i,
+				msm_srs_trumedia_params[index].raw_params[i]);
+		}
+	}
+	return 0;
+}
+
+static int msm_routing_set_srs_trumedia_control(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int ret;
+
+	pr_debug("SRS control normal called");
+	mutex_lock(&routing_lock);
+	srs_port_id = SLIMBUS_0_RX;
+	ret = msm_routing_set_srs_trumedia_control_(kcontrol, ucontrol);
+	mutex_unlock(&routing_lock);
+	return ret;
+}
+
+static int msm_routing_set_srs_trumedia_control_I2S(
+		struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	int ret;
+
+	pr_debug("SRS control I2S called");
+	mutex_lock(&routing_lock);
+	srs_port_id = PRIMARY_I2S_RX;
+	ret = msm_routing_set_srs_trumedia_control_(kcontrol, ucontrol);
+	mutex_unlock(&routing_lock);
+	return ret;
+}
+
+static int msm_routing_set_srs_trumedia_control_HDMI(
+		struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	int ret;
+
+	pr_debug("SRS control HDMI called");
+	mutex_lock(&routing_lock);
+	srs_port_id = HDMI_RX;
+	ret =  msm_routing_set_srs_trumedia_control_(kcontrol, ucontrol);
+	mutex_unlock(&routing_lock);
+	return ret;
+}
+
 static void msm_send_eq_values(int eq_idx)
 {
 	int result;
@@ -1378,6 +1513,63 @@
 	msm_routing_set_compressed_vol_mixer, compressed_rx_vol_gain),
 };
 
+static const struct snd_kcontrol_new lpa_SRS_trumedia_controls[] = {
+	{.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.name = "SRS TruMedia",
+	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+			SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.info = snd_soc_info_volsw,
+	.get = msm_routing_get_srs_trumedia_control,
+	.put = msm_routing_set_srs_trumedia_control,
+	.private_value = ((unsigned long)&(struct soc_mixer_control)
+	{.reg = SND_SOC_NOPM,
+	.shift = 0,
+	.rshift = 0,
+	.max = 0xFFFFFFFF,
+	.platform_max = 0xFFFFFFFF,
+	.invert = 0
+	})
+	}
+};
+
+static const struct snd_kcontrol_new lpa_SRS_trumedia_controls_HDMI[] = {
+	{.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.name = "SRS TruMedia HDMI",
+	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+			SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.info = snd_soc_info_volsw,
+	.get = msm_routing_get_srs_trumedia_control,
+	.put = msm_routing_set_srs_trumedia_control_HDMI,
+	.private_value = ((unsigned long)&(struct soc_mixer_control)
+	{.reg = SND_SOC_NOPM,
+	.shift = 0,
+	.rshift = 0,
+	.max = 0xFFFFFFFF,
+	.platform_max = 0xFFFFFFFF,
+	.invert = 0
+	})
+	}
+};
+
+static const struct snd_kcontrol_new lpa_SRS_trumedia_controls_I2S[] = {
+	{.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.name = "SRS TruMedia I2S",
+	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+		SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.info = snd_soc_info_volsw,
+	.get = msm_routing_get_srs_trumedia_control,
+	.put = msm_routing_set_srs_trumedia_control_I2S,
+	.private_value = ((unsigned long)&(struct soc_mixer_control)
+	{.reg = SND_SOC_NOPM,
+	.shift = 0,
+	.rshift = 0,
+	.max = 0xFFFFFFFF,
+	.platform_max = 0xFFFFFFFF,
+	.invert = 0
+	})
+	}
+};
+
 static const struct snd_kcontrol_new eq_enable_mixer_controls[] = {
 	SOC_SINGLE_EXT("MultiMedia1 EQ Enable", SND_SOC_NOPM,
 	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_eq_enable_mixer,
@@ -1980,8 +2172,10 @@
 	mutex_lock(&routing_lock);
 
 	for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) {
-		if (fe_dai_map[i][session_type] != INVALID_SESSION)
+		if (fe_dai_map[i][session_type] != INVALID_SESSION) {
 			adm_close(bedai->port_id);
+			srs_port_id = -1;
+		}
 	}
 
 	bedai->active = 0;
@@ -2047,6 +2241,8 @@
 
 			msm_pcm_routing_build_matrix(i,
 				fe_dai_map[i][session_type], path_type);
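+			/* Push cached SRS TruMedia params to the new port. */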
+			srs_port_id = bedai->port_id;
+			srs_send_params(srs_port_id, 1, 0);
 		}
 	}
 
@@ -2115,6 +2311,18 @@
 				compressed_vol_mixer_controls,
 			ARRAY_SIZE(compressed_vol_mixer_controls));
 
+	snd_soc_add_platform_controls(platform,
+				lpa_SRS_trumedia_controls,
+			ARRAY_SIZE(lpa_SRS_trumedia_controls));
+
+	snd_soc_add_platform_controls(platform,
+				lpa_SRS_trumedia_controls_HDMI,
+			ARRAY_SIZE(lpa_SRS_trumedia_controls_HDMI));
+
+	snd_soc_add_platform_controls(platform,
+				lpa_SRS_trumedia_controls_I2S,
+			ARRAY_SIZE(lpa_SRS_trumedia_controls_I2S));
+
 	return 0;
 }
 
diff --git a/sound/soc/msm/msm-pcm-voice.c b/sound/soc/msm/msm-pcm-voice.c
index 59b8bbd..7bdb4f0 100644
--- a/sound/soc/msm/msm-pcm-voice.c
+++ b/sound/soc/msm/msm-pcm-voice.c
@@ -33,7 +33,9 @@
 
 static struct snd_pcm_hardware msm_pcm_hardware = {
 
-	.info =                 SNDRV_PCM_INFO_INTERLEAVED,
+	.info =                 (SNDRV_PCM_INFO_INTERLEAVED|
+				SNDRV_PCM_INFO_PAUSE |
+				SNDRV_PCM_INFO_RESUME),
 	.formats =              SNDRV_PCM_FMTBIT_S16_LE,
 	.rates =                SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
 	.rate_min =             8000,
@@ -205,6 +207,55 @@
 	return 0;
 }
 
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *prtd = runtime->private_data;
+	uint16_t session_id = 0;
+
+	pr_debug("%s: cmd = %d\n", __func__, cmd);
+	if (is_volte(prtd))
+		session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+	else
+		session_id = voc_get_session_id(VOICE_SESSION_NAME);
+
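+	/*
+	 * PAUSE/SUSPEND place the running voice session into standby, while
+	 * RESUME re-prepares the stream and resumes the call once both the
+	 * playback and capture sides have started.
+	 */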
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("Start & Stop Voice call not handled in Trigger.\n");
+	break;
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		pr_debug("%s: resume call session_id = %d\n", __func__,
+			 session_id);
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+			ret = msm_pcm_playback_prepare(substream);
+		else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			ret = msm_pcm_capture_prepare(substream);
+		if (prtd->playback_start && prtd->capture_start)
+			voc_resume_voice_call(session_id);
+	break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		pr_debug("%s: pause call session_id=%d\n",
+			 __func__, session_id);
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+			if (prtd->playback_start)
+				prtd->playback_start = 0;
+		} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+			if (prtd->capture_start)
+				prtd->capture_start = 0;
+		}
+		voc_standby_voice_call(session_id);
+	break;
+	default:
+		ret = -EINVAL;
+	break;
+	}
+	return ret;
+}
+
 static int msm_voice_volume_get(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
 {
@@ -437,6 +488,7 @@
 	.hw_params	= msm_pcm_hw_params,
 	.close          = msm_pcm_close,
 	.prepare        = msm_pcm_prepare,
+	.trigger        = msm_pcm_trigger,
 };
 
 
diff --git a/sound/soc/msm/msm8930.c b/sound/soc/msm/msm8930.c
index 2762bd6..72bb9b5 100644
--- a/sound/soc/msm/msm8930.c
+++ b/sound/soc/msm/msm8930.c
@@ -42,7 +42,7 @@
 #define DEFAULT_PMIC_SPK_GAIN 0x0D
 #define SITAR_EXT_CLK_RATE 12288000
 
-#define SITAR_MBHC_DEF_BUTTONS 3
+#define SITAR_MBHC_DEF_BUTTONS 8
 #define SITAR_MBHC_DEF_RLOADS 5
 
 static int msm8930_spk_control;
@@ -321,17 +321,17 @@
 	 */
 
 	/**
-	 * Digital Mic1. Front Bottom left Digital Mic on Fluid and MTP.
+	 * Digital Mic1. Front Bottom left Mic on Fluid and MTP.
 	 * Digital Mic GM5 on CDP mainboard.
-	 * Conncted to DMIC2 Input on Sitar codec.
+	 * Connected to DMIC1 Input on Sitar codec.
 	 */
 	{"DMIC1", NULL, "MIC BIAS1 External"},
 	{"MIC BIAS1 External", NULL, "Digital Mic1"},
 
 	/**
-	 * Digital Mic2. Front Bottom right Digital Mic on Fluid and MTP.
+	 * Digital Mic2. Back top MIC on Fluid.
 	 * Digital Mic GM6 on CDP mainboard.
-	 * Conncted to DMIC1 Input on Sitar codec.
+	 * Connected to DMIC2 Input on Sitar codec.
 	 */
 	{"DMIC2", NULL, "MIC BIAS1 External"},
 	{"MIC BIAS1 External", NULL, "Digital Mic2"},
@@ -668,7 +668,7 @@
 	}
 
 	err = snd_soc_jack_new(codec, "Button Jack",
-				SND_JACK_BTN_0, &button_jack);
+				SITAR_JACK_BUTTON_MASK, &button_jack);
 	if (err) {
 		pr_err("failed to create new jack\n");
 		return err;
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
new file mode 100644
index 0000000..d47910b
--- /dev/null
+++ b/sound/soc/msm/msm8974.c
@@ -0,0 +1,752 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/mfd/pm8xxx/pm8921.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/mfd/pm8xxx/pm8921.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/soc-dsp.h>
+#include <sound/pcm.h>
+#include <sound/jack.h>
+#include <asm/mach-types.h>
+#include <mach/socinfo.h>
+#include <qdsp6v2/msm-pcm-routing-v2.h>
+#include "../codecs/wcd9310.h"
+
+/* 8974 machine driver */
+
+#define PM8921_GPIO_BASE		NR_GPIO_IRQS
+#define PM8921_GPIO_PM_TO_SYS(pm_gpio)  (pm_gpio - 1 + PM8921_GPIO_BASE)
+
+#define MSM8974_SPK_ON 1
+#define MSM8974_SPK_OFF 0
+
+#define MSM_SLIM_0_RX_MAX_CHANNELS		2
+#define MSM_SLIM_0_TX_MAX_CHANNELS		4
+
+#define BTSCO_RATE_8KHZ 8000
+#define BTSCO_RATE_16KHZ 16000
+
+#define BOTTOM_SPK_AMP_POS	0x1
+#define BOTTOM_SPK_AMP_NEG	0x2
+#define TOP_SPK_AMP_POS		0x4
+#define TOP_SPK_AMP_NEG		0x8
+
+#define GPIO_AUX_PCM_DOUT 43
+#define GPIO_AUX_PCM_DIN 44
+#define GPIO_AUX_PCM_SYNC 45
+#define GPIO_AUX_PCM_CLK 46
+
+#define TABLA_EXT_CLK_RATE 12288000
+
+#define TABLA_MBHC_DEF_BUTTONS 8
+#define TABLA_MBHC_DEF_RLOADS 5
+
+/* Shared channel numbers for Slimbus ports that connect APQ to MDM. */
+enum {
+	SLIM_1_RX_1 = 145, /* BT-SCO and USB TX */
+	SLIM_1_TX_1 = 146, /* BT-SCO and USB RX */
+	SLIM_2_RX_1 = 147, /* HDMI RX */
+	SLIM_3_RX_1 = 148, /* In-call recording RX */
+	SLIM_3_RX_2 = 149, /* In-call recording RX */
+	SLIM_4_TX_1 = 150, /* In-call music delivery TX */
+};
+
+static u32 top_spk_pamp_gpio  = PM8921_GPIO_PM_TO_SYS(18);
+static u32 bottom_spk_pamp_gpio = PM8921_GPIO_PM_TO_SYS(19);
+static int msm_spk_control;
+static int msm_ext_bottom_spk_pamp;
+static int msm_ext_top_spk_pamp;
+static int msm_slim_0_rx_ch = 1;
+static int msm_slim_0_tx_ch = 1;
+
+static int msm_btsco_rate = BTSCO_RATE_8KHZ;
+static int msm_headset_gpios_configured;
+
+static struct snd_soc_jack hs_jack;
+static struct snd_soc_jack button_jack;
+
+static int msm_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
+				    bool dapm);
+
+static struct tabla_mbhc_config mbhc_cfg = {
+	.headset_jack = &hs_jack,
+	.button_jack = &button_jack,
+	.read_fw_bin = false,
+	.calibration = NULL,
+	.micbias = TABLA_MICBIAS2,
+	.mclk_cb_fn = msm_enable_codec_ext_clk,
+	.mclk_rate = TABLA_EXT_CLK_RATE,
+	.gpio = 0, /* MBHC GPIO is not configured */
+	.gpio_irq = 0,
+	.gpio_level_insert = 1,
+};
+
+static void msm_enable_ext_spk_amp_gpio(u32 spk_amp_gpio)
+{
+	int ret = 0;
+
+	struct pm_gpio param = {
+		.direction      = PM_GPIO_DIR_OUT,
+		.output_buffer  = PM_GPIO_OUT_BUF_CMOS,
+		.output_value   = 1,
+		.pull      = PM_GPIO_PULL_NO,
+		.vin_sel	= PM_GPIO_VIN_S4,
+		.out_strength   = PM_GPIO_STRENGTH_MED,
+		.function       = PM_GPIO_FUNC_NORMAL,
+	};
+
+	if (spk_amp_gpio == bottom_spk_pamp_gpio) {
+
+		ret = gpio_request(bottom_spk_pamp_gpio, "BOTTOM_SPK_AMP");
+		if (ret) {
+			pr_err("%s: Error requesting BOTTOM SPK AMP GPIO %u\n",
+				__func__, bottom_spk_pamp_gpio);
+			return;
+		}
+		ret = pm8xxx_gpio_config(bottom_spk_pamp_gpio, &param);
+		if (ret)
+			pr_err("%s: Failed to configure Bottom Spk Ampl"
+				" gpio %u\n", __func__, bottom_spk_pamp_gpio);
+		else {
+			pr_debug("%s: enable Bottom spkr amp gpio\n", __func__);
+			gpio_direction_output(bottom_spk_pamp_gpio, 1);
+		}
+
+	} else if (spk_amp_gpio == top_spk_pamp_gpio) {
+
+		ret = gpio_request(top_spk_pamp_gpio, "TOP_SPK_AMP");
+		if (ret) {
+			pr_err("%s: Error requesting GPIO %d\n", __func__,
+				top_spk_pamp_gpio);
+			return;
+		}
+		ret = pm8xxx_gpio_config(top_spk_pamp_gpio, &param);
+		if (ret)
+			pr_err("%s: Failed to configure Top Spk Ampl"
+				" gpio %u\n", __func__, top_spk_pamp_gpio);
+		else {
+			pr_debug("%s: enable Top spkr amp gpio\n", __func__);
+			gpio_direction_output(top_spk_pamp_gpio, 1);
+		}
+	} else {
+		pr_err("%s: ERROR : Invalid External Speaker Ampl GPIO."
+			" gpio = %u\n", __func__, spk_amp_gpio);
+		return;
+	}
+}
+
+static void msm_ext_spk_power_amp_on(u32 spk)
+{
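+	/*
+	 * The amplifier GPIO is driven high only once requests for both the
+	 * positive and negative speaker terminals have been accumulated.
+	 */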
+	if (spk & (BOTTOM_SPK_AMP_POS | BOTTOM_SPK_AMP_NEG)) {
+
+		if ((msm_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_POS) &&
+			(msm_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_NEG)) {
+
+			pr_debug("%s() External Bottom Speaker Ampl already "
+				"turned on. spk = 0x%08x\n", __func__, spk);
+			return;
+		}
+
+		msm_ext_bottom_spk_pamp |= spk;
+
+		if ((msm_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_POS) &&
+			(msm_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_NEG)) {
+
+			msm_enable_ext_spk_amp_gpio(bottom_spk_pamp_gpio);
+			pr_debug("%s: sleeping 4 ms after turning on external"
+				" Bottom Speaker Ampl\n", __func__);
+			usleep_range(4000, 4000);
+		}
+
+	} else if (spk & (TOP_SPK_AMP_POS | TOP_SPK_AMP_NEG)) {
+
+		if ((msm_ext_top_spk_pamp & TOP_SPK_AMP_POS) &&
+			(msm_ext_top_spk_pamp & TOP_SPK_AMP_NEG)) {
+
+			pr_debug("%s() External Top Speaker Ampl already"
+				" turned on. spk = 0x%08x\n", __func__, spk);
+			return;
+		}
+
+		msm_ext_top_spk_pamp |= spk;
+
+		if ((msm_ext_top_spk_pamp & TOP_SPK_AMP_POS) &&
+			(msm_ext_top_spk_pamp & TOP_SPK_AMP_NEG)) {
+
+			msm_enable_ext_spk_amp_gpio(top_spk_pamp_gpio);
+			pr_debug("%s: sleeping 4 ms after turning on"
+				" external Top Speaker Ampl\n", __func__);
+			usleep_range(4000, 4000);
+		}
+	} else  {
+
+		pr_err("%s: ERROR : Invalid External Speaker Ampl. spk = 0x%08x\n",
+			__func__, spk);
+		return;
+	}
+}
+
+static void msm_ext_spk_power_amp_off(u32 spk)
+{
+	if (spk & (BOTTOM_SPK_AMP_POS | BOTTOM_SPK_AMP_NEG)) {
+
+		if (!msm_ext_bottom_spk_pamp)
+			return;
+
+		gpio_direction_output(bottom_spk_pamp_gpio, 0);
+		gpio_free(bottom_spk_pamp_gpio);
+		msm_ext_bottom_spk_pamp = 0;
+
+		pr_debug("%s: sleeping 4 ms after turning off external Bottom"
+			" Speaker Ampl\n", __func__);
+
+		usleep_range(4000, 4000);
+
+	} else if (spk & (TOP_SPK_AMP_POS | TOP_SPK_AMP_NEG)) {
+
+		if (!msm_ext_top_spk_pamp)
+			return;
+
+		gpio_direction_output(top_spk_pamp_gpio, 0);
+		gpio_free(top_spk_pamp_gpio);
+		msm_ext_top_spk_pamp = 0;
+
+		pr_debug("%s: sleeping 4 ms after turning off external Top"
+			" Speaker Ampl\n", __func__);
+
+		usleep_range(4000, 4000);
+	} else  {
+
+		pr_err("%s: ERROR : Invalid Ext Spk Ampl. spk = 0x%08x\n",
+			__func__, spk);
+		return;
+	}
+}
+
+static void msm_ext_control(struct snd_soc_codec *codec)
+{
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	pr_debug("%s: msm_spk_control = %d", __func__, msm_spk_control);
+	if (msm_spk_control == MSM8974_SPK_ON) {
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Pos");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Neg");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Pos");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Neg");
+	} else {
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk Bottom Pos");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk Bottom Neg");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk Top Pos");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk Top Neg");
+	}
+
+	snd_soc_dapm_sync(dapm);
+}
+
+static int msm_get_spk(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: msm_spk_control = %d", __func__, msm_spk_control);
+	ucontrol->value.integer.value[0] = msm_spk_control;
+	return 0;
+}
+static int msm_set_spk(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+
+	pr_debug("%s()\n", __func__);
+	if (msm_spk_control == ucontrol->value.integer.value[0])
+		return 0;
+
+	msm_spk_control = ucontrol->value.integer.value[0];
+	msm_ext_control(codec);
+	return 1;
+}
+static int msm_spkramp_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *k, int event)
+{
+	pr_debug("%s() %x\n", __func__, SND_SOC_DAPM_EVENT_ON(event));
+
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		if (!strncmp(w->name, "Ext Spk Bottom Pos", 18))
+			msm_ext_spk_power_amp_on(BOTTOM_SPK_AMP_POS);
+		else if (!strncmp(w->name, "Ext Spk Bottom Neg", 18))
+			msm_ext_spk_power_amp_on(BOTTOM_SPK_AMP_NEG);
+		else if (!strncmp(w->name, "Ext Spk Top Pos", 15))
+			msm_ext_spk_power_amp_on(TOP_SPK_AMP_POS);
+		else if  (!strncmp(w->name, "Ext Spk Top Neg", 15))
+			msm_ext_spk_power_amp_on(TOP_SPK_AMP_NEG);
+		else {
+			pr_err("%s() Invalid Speaker Widget = %s\n",
+					__func__, w->name);
+			return -EINVAL;
+		}
+
+	} else {
+		if (!strncmp(w->name, "Ext Spk Bottom Pos", 18))
+			msm_ext_spk_power_amp_off(BOTTOM_SPK_AMP_POS);
+		else if (!strncmp(w->name, "Ext Spk Bottom Neg", 18))
+			msm_ext_spk_power_amp_off(BOTTOM_SPK_AMP_NEG);
+		else if (!strncmp(w->name, "Ext Spk Top Pos", 15))
+			msm_ext_spk_power_amp_off(TOP_SPK_AMP_POS);
+		else if  (!strncmp(w->name, "Ext Spk Top Neg", 15))
+			msm_ext_spk_power_amp_off(TOP_SPK_AMP_NEG);
+		else {
+			pr_err("%s() Invalid Speaker Widget = %s\n",
+					__func__, w->name);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int msm_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
+				    bool dapm)
+{
+	return 0;
+}
+
+static int msm_mclk_event(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *kcontrol, int event)
+{
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget msm_dapm_widgets[] = {
+
+	SND_SOC_DAPM_SUPPLY("MCLK",  SND_SOC_NOPM, 0, 0,
+	msm_mclk_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SPK("Ext Spk Bottom Pos", msm_spkramp_event),
+	SND_SOC_DAPM_SPK("Ext Spk Bottom Neg", msm_spkramp_event),
+
+	SND_SOC_DAPM_SPK("Ext Spk Top Pos", msm_spkramp_event),
+	SND_SOC_DAPM_SPK("Ext Spk Top Neg", msm_spkramp_event),
+
+	SND_SOC_DAPM_MIC("Handset Mic", NULL),
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("ANCRight Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("ANCLeft Headset Mic", NULL),
+
+	SND_SOC_DAPM_MIC("Digital Mic1", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic2", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic3", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic4", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic5", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic6", NULL),
+
+};
+
+static const struct snd_soc_dapm_route common_audio_map[] = {
+
+	{"RX_BIAS", NULL, "MCLK"},
+	{"LDO_H", NULL, "MCLK"},
+
+	/* Speaker path */
+	{"Ext Spk Bottom Pos", NULL, "LINEOUT1"},
+	{"Ext Spk Bottom Neg", NULL, "LINEOUT3"},
+
+	{"Ext Spk Top Pos", NULL, "LINEOUT2"},
+	{"Ext Spk Top Neg", NULL, "LINEOUT4"},
+
+	/* Microphone path */
+	{"AMIC1", NULL, "MIC BIAS1 Internal1"},
+	{"MIC BIAS1 Internal1", NULL, "Handset Mic"},
+
+	{"AMIC2", NULL, "MIC BIAS2 External"},
+	{"MIC BIAS2 External", NULL, "Headset Mic"},
+
+	/**
+	 * AMIC3 and AMIC4 inputs are connected to ANC microphones.
+	 * These mics are biased differently on CDP and FLUID; the
+	 * routing entries below are based on the bias arrangement
+	 * on FLUID.
+	 */
+	{"AMIC3", NULL, "MIC BIAS3 Internal1"},
+	{"MIC BIAS3 Internal1", NULL, "ANCRight Headset Mic"},
+
+	{"AMIC4", NULL, "MIC BIAS1 Internal2"},
+	{"MIC BIAS1 Internal2", NULL, "ANCLeft Headset Mic"},
+
+	{"HEADPHONE", NULL, "LDO_H"},
+
+	/**
+	 * The digital Mic routes are setup considering
+	 * fluid as default device.
+	 */
+
+	/**
+	 * Digital Mic1. Front Bottom left Digital Mic on Fluid and MTP.
+	 * Digital Mic GM5 on CDP mainboard.
+	 * Connected to DMIC2 Input on Tabla codec.
+	 */
+	{"DMIC2", NULL, "MIC BIAS1 External"},
+	{"MIC BIAS1 External", NULL, "Digital Mic1"},
+
+	/**
+	 * Digital Mic2. Front Bottom right Digital Mic on Fluid and MTP.
+	 * Digital Mic GM6 on CDP mainboard.
+	 * Connected to DMIC1 Input on Tabla codec.
+	 */
+	{"DMIC1", NULL, "MIC BIAS1 External"},
+	{"MIC BIAS1 External", NULL, "Digital Mic2"},
+
+	/**
+	 * Digital Mic3. Back Bottom Digital Mic on Fluid.
+	 * Digital Mic GM1 on CDP mainboard.
+	 * Connected to DMIC4 Input on Tabla codec.
+	 */
+	{"DMIC4", NULL, "MIC BIAS3 External"},
+	{"MIC BIAS3 External", NULL, "Digital Mic3"},
+
+	/**
+	 * Digital Mic4. Back top Digital Mic on Fluid.
+	 * Digital Mic GM2 on CDP mainboard.
+	 * Connected to DMIC3 Input on Tabla codec.
+	 */
+	{"DMIC3", NULL, "MIC BIAS3 External"},
+	{"MIC BIAS3 External", NULL, "Digital Mic4"},
+
+	/**
+	 * Digital Mic5. Front top Digital Mic on Fluid.
+	 * Digital Mic GM3 on CDP mainboard.
+	 * Connected to DMIC5 Input on Tabla codec.
+	 */
+	{"DMIC5", NULL, "MIC BIAS4 External"},
+	{"MIC BIAS4 External", NULL, "Digital Mic5"},
+
+	/* Tabla digital Mic6 - back bottom digital Mic on Liquid and
+	 * bottom mic on CDP. FLUID/MTP do not have dmic6 installed.
+	 */
+	{"DMIC6", NULL, "MIC BIAS4 External"},
+	{"MIC BIAS4 External", NULL, "Digital Mic6"},
+};
+
+static const char *spk_function[] = {"Off", "On"};
+static const char *slim0_rx_ch_text[] = {"One", "Two"};
+static const char *slim0_tx_ch_text[] = {"One", "Two", "Three", "Four"};
+
+static const struct soc_enum msm_enum[] = {
+	SOC_ENUM_SINGLE_EXT(2, spk_function),
+	SOC_ENUM_SINGLE_EXT(2, slim0_rx_ch_text),
+	SOC_ENUM_SINGLE_EXT(4, slim0_tx_ch_text),
+};
+
+static const char *btsco_rate_text[] = {"8000", "16000"};
+static const struct soc_enum msm_btsco_enum[] = {
+		SOC_ENUM_SINGLE_EXT(2, btsco_rate_text),
+};
+
+static int msm_slim_0_rx_ch_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: msm_slim_0_rx_ch  = %d\n", __func__,
+			msm_slim_0_rx_ch);
+	ucontrol->value.integer.value[0] = msm_slim_0_rx_ch - 1;
+	return 0;
+}
+
+static int msm_slim_0_rx_ch_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	msm_slim_0_rx_ch = ucontrol->value.integer.value[0] + 1;
+
+	pr_debug("%s: msm_slim_0_rx_ch = %d\n", __func__,
+			msm_slim_0_rx_ch);
+	return 1;
+}
+
+static int msm_slim_0_tx_ch_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: msm_slim_0_tx_ch  = %d\n", __func__,
+			msm_slim_0_tx_ch);
+	ucontrol->value.integer.value[0] = msm_slim_0_tx_ch - 1;
+	return 0;
+}
+
+static int msm_slim_0_tx_ch_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	msm_slim_0_tx_ch = ucontrol->value.integer.value[0] + 1;
+
+	pr_debug("%s: msm_slim_0_tx_ch = %d\n", __func__,
+			msm_slim_0_tx_ch);
+	return 1;
+}
+
+static int msm_btsco_rate_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: msm_btsco_rate  = %d", __func__,
+					msm_btsco_rate);
+	ucontrol->value.integer.value[0] = msm_btsco_rate;
+	return 0;
+}
+
+static int msm_btsco_rate_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		msm_btsco_rate = BTSCO_RATE_8KHZ;
+		break;
+	case 1:
+		msm_btsco_rate = BTSCO_RATE_16KHZ;
+		break;
+	default:
+		msm_btsco_rate = BTSCO_RATE_8KHZ;
+		break;
+	}
+	pr_debug("%s: msm_btsco_rate = %d\n", __func__,
+					msm_btsco_rate);
+	return 0;
+}
+
+static const struct snd_kcontrol_new tabla_msm_controls[] = {
+	SOC_ENUM_EXT("Speaker Function", msm_enum[0], msm_get_spk,
+		msm_set_spk),
+	SOC_ENUM_EXT("SLIM_0_RX Channels", msm_enum[1],
+		msm_slim_0_rx_ch_get, msm_slim_0_rx_ch_put),
+	SOC_ENUM_EXT("SLIM_0_TX Channels", msm_enum[2],
+		msm_slim_0_tx_ch_get, msm_slim_0_tx_ch_put),
+};
+
+static const struct snd_kcontrol_new int_btsco_rate_mixer_controls[] = {
+	SOC_ENUM_EXT("Internal BTSCO SampleRate", msm_btsco_enum[0],
+		msm_btsco_rate_get, msm_btsco_rate_put),
+};
+
+static struct snd_soc_dsp_link lpa_fe_media = {
+	.playback = true,
+	.trigger = {
+		SND_SOC_DSP_TRIGGER_POST,
+		SND_SOC_DSP_TRIGGER_POST
+	},
+};
+static struct snd_soc_dsp_link fe_media = {
+	.playback = true,
+	.capture = true,
+	.trigger = {
+		SND_SOC_DSP_TRIGGER_POST,
+		SND_SOC_DSP_TRIGGER_POST
+	},
+};
+static int msm_auxpcm_be_params_fixup(struct snd_soc_pcm_runtime *rtd,
+					struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate = hw_param_interval(params,
+					SNDRV_PCM_HW_PARAM_RATE);
+
+	struct snd_interval *channels = hw_param_interval(params,
+					SNDRV_PCM_HW_PARAM_CHANNELS);
+
+	/* PCM only supports mono output with 8khz sample rate */
+	rate->min = rate->max = 8000;
+	channels->min = channels->max = 1;
+
+	return 0;
+}
+static int msm_aux_pcm_get_gpios(void)
+{
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+
+	ret = gpio_request(GPIO_AUX_PCM_DOUT, "AUX PCM DOUT");
+	if (ret < 0) {
+		pr_err("%s: Failed to request gpio(%d): AUX PCM DOUT",
+				__func__, GPIO_AUX_PCM_DOUT);
+		goto fail_dout;
+	}
+
+	ret = gpio_request(GPIO_AUX_PCM_DIN, "AUX PCM DIN");
+	if (ret < 0) {
+		pr_err("%s: Failed to request gpio(%d): AUX PCM DIN",
+				__func__, GPIO_AUX_PCM_DIN);
+		goto fail_din;
+	}
+
+	ret = gpio_request(GPIO_AUX_PCM_SYNC, "AUX PCM SYNC");
+	if (ret < 0) {
+		pr_err("%s: Failed to request gpio(%d): AUX PCM SYNC",
+				__func__, GPIO_AUX_PCM_SYNC);
+		goto fail_sync;
+	}
+	ret = gpio_request(GPIO_AUX_PCM_CLK, "AUX PCM CLK");
+	if (ret < 0) {
+		pr_err("%s: Failed to request gpio(%d): AUX PCM CLK",
+				__func__, GPIO_AUX_PCM_CLK);
+		goto fail_clk;
+	}
+
+	return 0;
+
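+/* Error unwind: release any GPIOs acquired before the failure. */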
+fail_clk:
+	gpio_free(GPIO_AUX_PCM_SYNC);
+fail_sync:
+	gpio_free(GPIO_AUX_PCM_DIN);
+fail_din:
+	gpio_free(GPIO_AUX_PCM_DOUT);
+fail_dout:
+
+	return ret;
+}
+static int msm_aux_pcm_free_gpios(void)
+{
+	gpio_free(GPIO_AUX_PCM_DIN);
+	gpio_free(GPIO_AUX_PCM_DOUT);
+	gpio_free(GPIO_AUX_PCM_SYNC);
+	gpio_free(GPIO_AUX_PCM_CLK);
+
+	return 0;
+}
+
+static int msm_auxpcm_startup(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+
+	pr_debug("%s(): substream = %s\n", __func__, substream->name);
+	ret = msm_aux_pcm_get_gpios();
+	if (ret < 0) {
+		pr_err("%s: Aux PCM GPIO request failed\n", __func__);
+		return -EINVAL;
+	}
+	return ret;
+}
+
+static void msm_auxpcm_shutdown(struct snd_pcm_substream *substream)
+{
+
+	pr_debug("%s(): substream = %s\n", __func__, substream->name);
+	msm_aux_pcm_free_gpios();
+}
+static struct snd_soc_ops msm_auxpcm_be_ops = {
+	.startup = msm_auxpcm_startup,
+	.shutdown = msm_auxpcm_shutdown,
+};
+/* Digital audio interface glue - connects codec <---> CPU */
+static struct snd_soc_dai_link msm_dai[] = {
+	/* FrontEnd DAI Links */
+	{
+		.name = "MSM8974 Media1",
+		.stream_name = "MultiMedia1",
+		.cpu_dai_name	= "MultiMedia1",
+		.platform_name  = "msm-pcm-dsp",
+		.dynamic = 1,
+		.dsp_link = &fe_media,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA1
+	},
+	{
+		.name = "MSM8974 LPA",
+		.stream_name = "LPA",
+		.cpu_dai_name	= "MultiMedia3",
+		.platform_name  = "msm-pcm-lpa",
+		.dynamic = 1,
+		.dsp_link = &lpa_fe_media,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA3,
+	},
+
+	/* AUX PCM Backend DAI Links */
+	{
+		.name = LPASS_BE_AUXPCM_RX,
+		.stream_name = "AUX PCM Playback",
+		.cpu_dai_name = "msm-dai-q6.4106",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_AUXPCM_RX,
+		.be_hw_params_fixup = msm_auxpcm_be_params_fixup,
+		.ops = &msm_auxpcm_be_ops,
+	},
+	{
+		.name = LPASS_BE_AUXPCM_TX,
+		.stream_name = "AUX PCM Capture",
+		.cpu_dai_name = "msm-dai-q6.4107",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_AUXPCM_TX,
+		.be_hw_params_fixup = msm_auxpcm_be_params_fixup,
+	},
+
+};
+
+struct snd_soc_card snd_soc_card_msm = {
+	.name		= "msm8974-taiko-snd-card",
+	.dai_link	= msm_dai,
+	.num_links	= ARRAY_SIZE(msm_dai),
+};
+
+static struct platform_device *msm_snd_device;
+
+static void msm_free_headset_mic_gpios(void)
+{
+	if (msm_headset_gpios_configured) {
+		gpio_free(PM8921_GPIO_PM_TO_SYS(23));
+		gpio_free(PM8921_GPIO_PM_TO_SYS(35));
+	}
+}
+
+static int __init msm_audio_init(void)
+{
+	int ret = 0;
+	if (!machine_is_copper_sim()) {
+		pr_err("%s: Not the right machine type\n", __func__);
+		return -ENODEV;
+	}
+	msm_snd_device = platform_device_alloc("soc-audio", 0);
+	if (!msm_snd_device) {
+		pr_err("Platform device allocation failed\n");
+		kfree(mbhc_cfg.calibration);
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(msm_snd_device, &snd_soc_card_msm);
+	ret = platform_device_add(msm_snd_device);
+	if (ret) {
+		platform_device_put(msm_snd_device);
+		kfree(mbhc_cfg.calibration);
+		return ret;
+	}
+	return ret;
+
+}
+module_init(msm_audio_init);
+
+static void __exit msm_audio_exit(void)
+{
+	if (!machine_is_copper_sim()) {
+		pr_err("%s: Not the right machine type\n", __func__);
+		return;
+	}
+	msm_free_headset_mic_gpios();
+	platform_device_unregister(msm_snd_device);
+	kfree(mbhc_cfg.calibration);
+}
+module_exit(msm_audio_exit);
+
+MODULE_DESCRIPTION("ALSA SoC msm");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6/q6adm.c b/sound/soc/msm/qdsp6/q6adm.c
index 676ecf1..bf6f743 100644
--- a/sound/soc/msm/qdsp6/q6adm.c
+++ b/sound/soc/msm/qdsp6/q6adm.c
@@ -45,6 +45,189 @@
 
 static struct adm_ctl			this_adm;
 
+int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params)
+{
+	struct asm_pp_params_command *open = NULL;
+	int ret = 0, sz = 0;
+	int index;
+
+	pr_debug("SRS - %s", __func__);
+	switch (srs_tech_id) {
+	case SRS_ID_GLOBAL: {
+		struct srs_trumedia_params_GLOBAL *glb_params = NULL;
+		sz = sizeof(struct asm_pp_params_command) +
+			sizeof(struct srs_trumedia_params_GLOBAL);
+		open = kzalloc(sz, GFP_KERNEL);
+		open->payload_size = sizeof(struct srs_trumedia_params_GLOBAL) +
+					sizeof(struct asm_pp_param_data_hdr);
+		open->params.param_id = SRS_TRUMEDIA_PARAMS;
+		open->params.param_size =
+				sizeof(struct srs_trumedia_params_GLOBAL);
+		glb_params = (struct srs_trumedia_params_GLOBAL *)((u8 *)open +
+				sizeof(struct asm_pp_params_command));
+		memcpy(glb_params, srs_params,
+			sizeof(struct srs_trumedia_params_GLOBAL));
+		pr_debug("SRS - %s: Global params - 1 = %x, 2 = %x, 3 = %x,"
+				" 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x\n",
+				__func__, (int)glb_params->v1,
+				(int)glb_params->v2, (int)glb_params->v3,
+				(int)glb_params->v4, (int)glb_params->v5,
+				(int)glb_params->v6, (int)glb_params->v7,
+				(int)glb_params->v8);
+		break;
+	}
+	case SRS_ID_WOWHD: {
+		struct srs_trumedia_params_WOWHD *whd_params = NULL;
+		sz = sizeof(struct asm_pp_params_command) +
+			sizeof(struct srs_trumedia_params_WOWHD);
+		open = kzalloc(sz, GFP_KERNEL);
+		open->payload_size = sizeof(struct srs_trumedia_params_WOWHD) +
+					sizeof(struct asm_pp_param_data_hdr);
+		open->params.param_id = SRS_TRUMEDIA_PARAMS_WOWHD;
+		open->params.param_size =
+				sizeof(struct srs_trumedia_params_WOWHD);
+		whd_params = (struct srs_trumedia_params_WOWHD *)((u8 *)open +
+				sizeof(struct asm_pp_params_command));
+		memcpy(whd_params, srs_params,
+				sizeof(struct srs_trumedia_params_WOWHD));
+		pr_debug("SRS - %s: WOWHD params - 1 = %x, 2 = %x, 3 = %x,"
+			" 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x, 9 = %x,"
+			" 10 = %x, 11 = %x\n", __func__, (int)whd_params->v1,
+			(int)whd_params->v2, (int)whd_params->v3,
+			(int)whd_params->v4, (int)whd_params->v5,
+			(int)whd_params->v6, (int)whd_params->v7,
+			(int)whd_params->v8, (int)whd_params->v9,
+			(int)whd_params->v10, (int)whd_params->v11);
+		break;
+	}
+	case SRS_ID_CSHP: {
+		struct srs_trumedia_params_CSHP *chp_params = NULL;
+		sz = sizeof(struct asm_pp_params_command) +
+			sizeof(struct srs_trumedia_params_CSHP);
+		open = kzalloc(sz, GFP_KERNEL);
+		open->payload_size = sizeof(struct srs_trumedia_params_CSHP) +
+					sizeof(struct asm_pp_param_data_hdr);
+		open->params.param_id = SRS_TRUMEDIA_PARAMS_CSHP;
+		open->params.param_size =
+					sizeof(struct srs_trumedia_params_CSHP);
+		chp_params = (struct srs_trumedia_params_CSHP *)((u8 *)open +
+				sizeof(struct asm_pp_params_command));
+		memcpy(chp_params, srs_params,
+				sizeof(struct srs_trumedia_params_CSHP));
+		pr_debug("SRS - %s: CSHP params - 1 = %x, 2 = %x, 3 = %x,"
+				" 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x,"
+				" 9 = %x\n", __func__, (int)chp_params->v1,
+				(int)chp_params->v2, (int)chp_params->v3,
+				(int)chp_params->v4, (int)chp_params->v5,
+				(int)chp_params->v6, (int)chp_params->v7,
+				(int)chp_params->v8, (int)chp_params->v9);
+		break;
+	}
+	case SRS_ID_HPF: {
+		struct srs_trumedia_params_HPF *hpf_params = NULL;
+		sz = sizeof(struct asm_pp_params_command) +
+			sizeof(struct srs_trumedia_params_HPF);
+		open = kzalloc(sz, GFP_KERNEL);
+		open->payload_size = sizeof(struct srs_trumedia_params_HPF) +
+					sizeof(struct asm_pp_param_data_hdr);
+		open->params.param_id = SRS_TRUMEDIA_PARAMS_HPF;
+		open->params.param_size =
+					sizeof(struct srs_trumedia_params_HPF);
+		hpf_params = (struct srs_trumedia_params_HPF *)((u8 *)open +
+				sizeof(struct asm_pp_params_command));
+		memcpy(hpf_params, srs_params,
+			sizeof(struct srs_trumedia_params_HPF));
+		pr_debug("SRS - %s: HPF params - 1 = %x\n", __func__,
+				(int)hpf_params->v1);
+		break;
+	}
+	case SRS_ID_PEQ: {
+		struct srs_trumedia_params_PEQ *peq_params = NULL;
+		sz = sizeof(struct asm_pp_params_command) +
+			sizeof(struct srs_trumedia_params_PEQ);
+		open = kzalloc(sz, GFP_KERNEL);
+		open->payload_size = sizeof(struct srs_trumedia_params_PEQ) +
+					sizeof(struct asm_pp_param_data_hdr);
+		open->params.param_id = SRS_TRUMEDIA_PARAMS_PEQ;
+		open->params.param_size =
+					sizeof(struct srs_trumedia_params_PEQ);
+		peq_params = (struct srs_trumedia_params_PEQ *)((u8 *)open +
+				sizeof(struct asm_pp_params_command));
+		memcpy(peq_params, srs_params,
+				sizeof(struct srs_trumedia_params_PEQ));
+		pr_debug("SRS - %s: PEQ params - 1 = %x 2 = %x, 3 = %x,"
+			" 4 = %x\n", __func__, (int)peq_params->v1,
+			(int)peq_params->v2, (int)peq_params->v3,
+			(int)peq_params->v4);
+		break;
+	}
+	case SRS_ID_HL: {
+		struct srs_trumedia_params_HL *hl_params = NULL;
+		sz = sizeof(struct asm_pp_params_command) +
+			sizeof(struct srs_trumedia_params_HL);
+		open = kzalloc(sz, GFP_KERNEL);
+		open->payload_size = sizeof(struct srs_trumedia_params_HL) +
+					sizeof(struct asm_pp_param_data_hdr);
+		open->params.param_id = SRS_TRUMEDIA_PARAMS_HL;
+		open->params.param_size = sizeof(struct srs_trumedia_params_HL);
+		hl_params = (struct srs_trumedia_params_HL *)((u8 *)open +
+				sizeof(struct asm_pp_params_command));
+		memcpy(hl_params, srs_params,
+				sizeof(struct srs_trumedia_params_HL));
+		pr_debug("SRS - %s: HL params - 1 = %x, 2 = %x, 3 = %x, 4 = %x,"
+				" 5 = %x, 6 = %x, 7 = %x\n", __func__,
+				(int)hl_params->v1, (int)hl_params->v2,
+				(int)hl_params->v3, (int)hl_params->v4,
+				(int)hl_params->v5, (int)hl_params->v6,
+				(int)hl_params->v7);
+		break;
+	}
+	default:
+		goto fail_cmd;
+	}
+
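+	/*
+	 * Fill in the common ADM set-params header; the technology specific
+	 * payload was copied directly after the command structure above.
+	 */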
+	open->payload = NULL;
+	open->params.module_id = SRS_TRUMEDIA_MODULE_ID;
+	open->params.reserved = 0;
+	open->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	open->hdr.pkt_size = sz;
+	open->hdr.src_svc = APR_SVC_ADM;
+	open->hdr.src_domain = APR_DOMAIN_APPS;
+	open->hdr.src_port = port_id;
+	open->hdr.dest_svc = APR_SVC_ADM;
+	open->hdr.dest_domain = APR_DOMAIN_ADSP;
+	index = afe_get_port_index(port_id);
+	open->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
+	open->hdr.token = port_id;
+	open->hdr.opcode = ADM_CMD_SET_PARAMS;
+	pr_debug("SRS - %s: Command was sent now check Q6 - port id = %d,"
+			" size %d, module id %x, param id %x.\n", __func__,
+			open->hdr.dest_port, open->payload_size,
+			open->params.module_id, open->params.param_id);
+
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *)open);
+	if (ret < 0) {
+		pr_err("SRS - %s: ADM enable for port %d failed\n", __func__,
+			port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	/* Wait for the callback with copp id */
+	ret = wait_event_timeout(this_adm.wait, 1,
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("SRS - %s: ADM open failed for port %d\n", __func__,
+			port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+fail_cmd:
+	kfree(open);
+	return ret;
+}
+
 static int32_t adm_callback(struct apr_client_data *data, void *priv)
 {
 	uint32_t *payload;
@@ -528,9 +711,7 @@
 			if ((open.topology_id ==
 				VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
 			    (open.topology_id ==
-				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY) ||
-			    (open.topology_id ==
-				VPM_TX_QMIC_FLUENCE_COPP_TOPOLOGY))
+				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
 				rate = 16000;
 		}
 
diff --git a/sound/soc/msm/qdsp6/q6voice.c b/sound/soc/msm/qdsp6/q6voice.c
index 18a1f43..0c30dc9 100644
--- a/sound/soc/msm/qdsp6/q6voice.c
+++ b/sound/soc/msm/qdsp6/q6voice.c
@@ -3383,7 +3383,7 @@
 
 	mutex_lock(&v->lock);
 
-	if (v->voc_state == VOC_RUN) {
+	if (v->voc_state == VOC_RUN || v->voc_state == VOC_STANDBY) {
 		if (v->dev_tx.port_id != RT_PROXY_PORT_001_TX &&
 			v->dev_rx.port_id != RT_PROXY_PORT_001_RX)
 			afe_sidetone(v->dev_tx.port_id, v->dev_rx.port_id,
@@ -3399,6 +3399,51 @@
 	return ret;
 }
 
+int voc_resume_voice_call(uint16_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	struct apr_hdr mvm_start_voice_cmd;
+	int ret = 0;
+	void *apr_mvm;
+	u16 mvm_handle;
+
+	pr_debug("%s:\n", __func__);
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+
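+	/*
+	 * Resume reuses VSS_IMVM_CMD_START_VOICE to bring the session out of
+	 * standby and back into the run state.
+	 */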
+	mvm_start_voice_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	mvm_start_voice_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(mvm_start_voice_cmd) - APR_HDR_SIZE);
+	pr_debug("send mvm_start_voice_cmd pkt size = %d\n",
+				mvm_start_voice_cmd.pkt_size);
+	mvm_start_voice_cmd.src_port = v->session_id;
+	mvm_start_voice_cmd.dest_port = mvm_handle;
+	mvm_start_voice_cmd.token = 0;
+	mvm_start_voice_cmd.opcode = VSS_IMVM_CMD_START_VOICE;
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_start_voice_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VSS_IMVM_CMD_START_VOICE\n");
+		goto fail;
+	}
+	v->voc_state = VOC_RUN;
+	return 0;
+fail:
+	return -EINVAL;
+}
+
 int voc_start_voice_call(uint16_t session_id)
 {
 	struct voice_data *v = voice_get_session(session_id);
@@ -3452,11 +3497,62 @@
 		}
 
 		v->voc_state = VOC_RUN;
+	} else if (v->voc_state == VOC_STANDBY) {
+		pr_err("Error: PCM Prepare when in Standby\n");
+		ret = -EINVAL;
+		goto fail;
 	}
 fail:	mutex_unlock(&v->lock);
 	return ret;
 }
 
+int voc_standby_voice_call(uint16_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	struct apr_hdr mvm_standby_voice_cmd;
+	void *apr_mvm;
+	u16 mvm_handle;
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: voc state = %d\n", __func__, v->voc_state);
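+	/* Standby is only meaningful while the call is in the run state. */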
+	if (v->voc_state == VOC_RUN) {
+		apr_mvm = common.apr_q6_mvm;
+		if (!apr_mvm) {
+			pr_err("%s: apr_mvm is NULL.\n", __func__);
+			ret = -EINVAL;
+			goto fail;
+		}
+		mvm_handle = voice_get_mvm_handle(v);
+		mvm_standby_voice_cmd.hdr_field =
+			APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+		mvm_standby_voice_cmd.pkt_size =
+			APR_PKT_SIZE(APR_HDR_SIZE,
+			sizeof(mvm_standby_voice_cmd) - APR_HDR_SIZE);
+		pr_debug("send mvm_standby_voice_cmd pkt size = %d\n",
+					mvm_standby_voice_cmd.pkt_size);
+		mvm_standby_voice_cmd.src_port = v->session_id;
+		mvm_standby_voice_cmd.dest_port = mvm_handle;
+		mvm_standby_voice_cmd.token = 0;
+		mvm_standby_voice_cmd.opcode = VSS_IMVM_CMD_STANDBY_VOICE;
+		v->mvm_state = CMD_STATUS_FAIL;
+		ret = apr_send_pkt(apr_mvm,
+				(uint32_t *)&mvm_standby_voice_cmd);
+		if (ret < 0) {
+			pr_err("Fail in sending VSS_IMVM_CMD_STANDBY_VOICE\n");
+			ret = -EINVAL;
+			goto fail;
+		}
+		v->voc_state = VOC_STANDBY;
+	}
+fail:
+	return ret;
+}
+
 void voc_register_mvs_cb(ul_cb_fn ul_cb,
 			   dl_cb_fn dl_cb,
 			   void *private_data)
@@ -3551,6 +3647,7 @@
 			case VSS_ICOMMON_CMD_SET_VOICE_TIMING:
 			case VSS_IWIDEVOICE_CMD_SET_WIDEVOICE:
 			case VSS_IMVM_CMD_SET_POLICY_DUAL_CONTROL:
+			case VSS_IMVM_CMD_STANDBY_VOICE:
 				pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
 				v->mvm_state = CMD_STATUS_SUCCESS;
 				wake_up(&v->mvm_wait);
diff --git a/sound/soc/msm/qdsp6/q6voice.h b/sound/soc/msm/qdsp6/q6voice.h
index cf691c0..88ab0d5 100644
--- a/sound/soc/msm/qdsp6/q6voice.h
+++ b/sound/soc/msm/qdsp6/q6voice.h
@@ -58,6 +58,7 @@
 	VOC_RUN,
 	VOC_CHANGE,
 	VOC_RELEASE,
+	VOC_STANDBY,
 };
 
 /* Common */
@@ -139,7 +140,14 @@
 */
 
 #define VSS_IMVM_CMD_START_VOICE			0x00011190
-/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+/*
+ * Start Voice call command.
+ * Wait for APRV2_IBASIC_RSP_RESULT response.
+ * No payload.
+ */
+
+#define VSS_IMVM_CMD_STANDBY_VOICE	0x00011191
+/* No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
 
 #define VSS_IMVM_CMD_STOP_VOICE				0x00011192
 /**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
@@ -964,6 +972,8 @@
 uint8_t voc_get_tty_mode(uint16_t session_id);
 int voc_set_tty_mode(uint16_t session_id, uint8_t tty_mode);
 int voc_start_voice_call(uint16_t session_id);
+int voc_standby_voice_call(uint16_t session_id);
+int voc_resume_voice_call(uint16_t session_id);
 int voc_end_voice_call(uint16_t session_id);
 int voc_set_rxtx_port(uint16_t session_id,
 		      uint32_t dev_port_id,
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
new file mode 100644
index 0000000..6f765d1
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -0,0 +1,4 @@
+snd-soc-qdsp6v2-objs += msm-dai-q6-v2.o msm-pcm-q6-v2.o msm-pcm-routing-v2.o msm-compr-q6-v2.o  msm-multi-ch-pcm-q6-v2.o
+snd-soc-qdsp6v2-objs += msm-pcm-lpa-v2.o msm-pcm-afe-v2.o
+obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o
+obj-y += q6adm.o q6afe.o q6asm.o q6audio-v2.o
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
new file mode 100644
index 0000000..daba79d
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
@@ -0,0 +1,666 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <asm/dma.h>
+#include <linux/dma-mapping.h>
+#include <linux/android_pmem.h>
+
+#include "msm-compr-q6-v2.h"
+#include "msm-pcm-routing-v2.h"
+
+struct snd_msm {
+	struct msm_audio *prtd;
+	unsigned volume;
+};
+static struct snd_msm compressed_audio = {NULL, 0x2000};
+
+static struct audio_locks the_locks;
+
+static struct snd_pcm_hardware msm_compr_hardware_playback = {
+	.info =		 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+	.formats =	      SNDRV_PCM_FMTBIT_S16_LE,
+	.rates =		SNDRV_PCM_RATE_8000_48000,
+	.rate_min =	     8000,
+	.rate_max =	     48000,
+	.channels_min =	 1,
+	.channels_max =	 2,
+	.buffer_bytes_max =     1200 * 1024 * 2,
+	.period_bytes_min =	4800,
+	.period_bytes_max =     1200 * 1024,
+	.periods_min =	  2,
+	.periods_max =	  512,
+	.fifo_size =	    0,
+};
+
+/* Conventional and unconventional sample rate supported */
+static unsigned int supported_sample_rates[] = {
+	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
+};
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+	.count = ARRAY_SIZE(supported_sample_rates),
+	.list = supported_sample_rates,
+	.mask = 0,
+};
+
+static void compr_event_handler(uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv)
+{
+	struct compr_audio *compr = priv;
+	struct msm_audio *prtd = &compr->prtd;
+	struct snd_pcm_substream *substream = prtd->substream;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_aio_write_param param;
+	struct audio_buffer *buf = NULL;
+	int i = 0;
+
+	pr_debug("%s opcode =%08x\n", __func__, opcode);
+	switch (opcode) {
+	case ASM_DATA_EVENT_WRITE_DONE_V2: {
+		uint32_t *ptrmem = (uint32_t *)&param;
+		pr_debug("ASM_DATA_EVENT_WRITE_DONE\n");
+		pr_debug("Buffer Consumed = 0x%08x\n", *ptrmem);
+		prtd->pcm_irq_pos += prtd->pcm_count;
+		if (atomic_read(&prtd->start))
+			snd_pcm_period_elapsed(substream);
+		atomic_inc(&prtd->out_count);
+		wake_up(&the_locks.write_wait);
+		if (!atomic_read(&prtd->start)) {
+			atomic_set(&prtd->pending_buffer, 1);
+			break;
+		} else
+			atomic_set(&prtd->pending_buffer, 0);
+
+		if (runtime->status->hw_ptr >= runtime->control->appl_ptr)
+			break;
+		buf = prtd->audio_client->port[IN].buf;
+		pr_debug("%s:writing %d bytes of buffer[%d] to dsp 2\n",
+				__func__, prtd->pcm_count, prtd->out_head);
+		pr_debug("%s:writing buffer[%d] from 0x%08x\n",
+				__func__, prtd->out_head,
+				((unsigned int)buf[0].phys
+				+ (prtd->out_head * prtd->pcm_count)));
+
+		param.paddr = (unsigned long)buf[0].phys
+				+ (prtd->out_head * prtd->pcm_count);
+		param.len = prtd->pcm_count;
+		param.msw_ts = 0;
+		param.lsw_ts = 0;
+		param.flags = NO_TIMESTAMP;
+		param.uid =  (unsigned long)buf[0].phys
+				+ (prtd->out_head * prtd->pcm_count);
+		for (i = 0; i < sizeof(struct audio_aio_write_param)/4;
+					i++, ++ptrmem)
+			pr_debug("cmd[%d]=0x%08x\n", i, *ptrmem);
+		if (q6asm_async_write(prtd->audio_client,
+					&param) < 0)
+			pr_err("%s:q6asm_async_write failed\n",
+				__func__);
+		else
+			prtd->out_head =
+				(prtd->out_head + 1) & (runtime->periods - 1);
+		break;
+	}
+	case ASM_DATA_EVENT_RENDERED_EOS:
+		pr_debug("ASM_DATA_CMDRSP_EOS\n");
+		prtd->cmd_ack = 1;
+		wake_up(&the_locks.eos_wait);
+		break;
+	case APR_BASIC_RSP_RESULT: {
+		switch (payload[0]) {
+		case ASM_SESSION_CMD_RUN_V2: {
+			if (!atomic_read(&prtd->pending_buffer))
+				break;
+			pr_debug("%s:writing %d bytes"
+				" of buffer[%d] to dsp\n",
+				__func__, prtd->pcm_count, prtd->out_head);
+			buf = prtd->audio_client->port[IN].buf;
+			pr_debug("%s:writing buffer[%d] from 0x%08x\n",
+				__func__, prtd->out_head,
+				((unsigned int)buf[0].phys
+				+ (prtd->out_head * prtd->pcm_count)));
+			param.paddr = (unsigned long)buf[prtd->out_head].phys;
+			param.len = prtd->pcm_count;
+			param.msw_ts = 0;
+			param.lsw_ts = 0;
+			param.flags = NO_TIMESTAMP;
+			param.uid =  (unsigned long)buf[prtd->out_head].phys;
+			if (q6asm_async_write(prtd->audio_client,
+						&param) < 0)
+				pr_err("%s:q6asm_async_write failed\n",
+					__func__);
+			else
+				prtd->out_head =
+					(prtd->out_head + 1)
+					& (runtime->periods - 1);
+			atomic_set(&prtd->pending_buffer, 0);
+		}
+			break;
+		case ASM_STREAM_CMD_FLUSH:
+			pr_debug("ASM_STREAM_CMD_FLUSH\n");
+			prtd->cmd_ack = 1;
+			wake_up(&the_locks.eos_wait);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+	default:
+		pr_debug("Not Supported Event opcode[0x%x]\n", opcode);
+		break;
+	}
+}
+
+static int msm_compr_playback_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct compr_audio *compr = runtime->private_data;
+	struct msm_audio *prtd = &compr->prtd;
+	struct asm_aac_cfg aac_cfg;
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_irq_pos = 0;
+	/* rate and channels are sent to audio driver */
+	prtd->samp_rate = runtime->rate;
+	prtd->channel_mode = runtime->channels;
+	prtd->out_head = 0;
+	atomic_set(&prtd->out_count, runtime->periods);
+
+	if (prtd->enabled)
+		return 0;
+
+	switch (compr->info.codec_param.codec.id) {
+	case SND_AUDIOCODEC_MP3:
+		/* No media format block for mp3 */
+		break;
+	case SND_AUDIOCODEC_AAC:
+		pr_debug("SND_AUDIOCODEC_AAC\n");
+		memset(&aac_cfg, 0x0, sizeof(struct asm_aac_cfg));
+		aac_cfg.aot = AAC_ENC_MODE_EAAC_P;
+		aac_cfg.format = 0x03;
+		aac_cfg.ch_cfg = runtime->channels;
+		aac_cfg.sample_rate =  runtime->rate;
+		ret = q6asm_media_format_block_aac(prtd->audio_client,
+					&aac_cfg);
+		if (ret < 0)
+			pr_err("%s: CMD Format block failed\n", __func__);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	prtd->enabled = 1;
+	prtd->cmd_ack = 0;
+
+	return 0;
+}
+
+static int msm_compr_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct compr_audio *compr = runtime->private_data;
+	struct msm_audio *prtd = &compr->prtd;
+
+	pr_debug("%s\n", __func__);
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		prtd->pcm_irq_pos = 0;
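+		/* fall through: START also issues the run command below */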
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		pr_debug("%s: Trigger start\n", __func__);
+		q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
+		atomic_set(&prtd->start, 1);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+		atomic_set(&prtd->start, 0);
+		break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		pr_debug("SNDRV_PCM_TRIGGER_PAUSE\n");
+		q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
+		atomic_set(&prtd->start, 0);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static void populate_codec_list(struct compr_audio *compr,
+		struct snd_pcm_runtime *runtime)
+{
+	pr_debug("%s\n", __func__);
+	/* MP3 Block */
+	compr->info.compr_cap.num_codecs = 1;
+	compr->info.compr_cap.min_fragment_size = runtime->hw.period_bytes_min;
+	compr->info.compr_cap.max_fragment_size = runtime->hw.period_bytes_max;
+	compr->info.compr_cap.min_fragments = runtime->hw.periods_min;
+	compr->info.compr_cap.max_fragments = runtime->hw.periods_max;
+	compr->info.compr_cap.codecs[0] = SND_AUDIOCODEC_MP3;
+	compr->info.compr_cap.codecs[1] = SND_AUDIOCODEC_AAC;
+	/* Add new codecs here */
+}
+
+static int msm_compr_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct compr_audio *compr;
+	struct msm_audio *prtd;
+	int ret = 0;
+	struct asm_softpause_params softpause = {
+		.enable = SOFT_PAUSE_ENABLE,
+		.period = SOFT_PAUSE_PERIOD,
+		.step = SOFT_PAUSE_STEP,
+		.rampingcurve = SOFT_PAUSE_CURVE_LINEAR,
+	};
+	struct asm_softvolume_params softvol = {
+		.period = SOFT_VOLUME_PERIOD,
+		.step = SOFT_VOLUME_STEP,
+		.rampingcurve = SOFT_VOLUME_CURVE_LINEAR,
+	};
+
+	/* Capture path */
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		return -EINVAL;
+
+	pr_debug("%s\n", __func__);
+	compr = kzalloc(sizeof(struct compr_audio), GFP_KERNEL);
+	if (compr == NULL) {
+		pr_err("Failed to allocate memory for msm_audio\n");
+		return -ENOMEM;
+	}
+	prtd = &compr->prtd;
+	prtd->substream = substream;
+	prtd->audio_client = q6asm_audio_client_alloc(
+				(app_cb)compr_event_handler, compr);
+	if (!prtd->audio_client) {
+		pr_info("%s: Could not allocate memory\n", __func__);
+		kfree(prtd);
+		return -ENOMEM;
+	}
+	runtime->hw = msm_compr_hardware_playback;
+
+	pr_info("%s: session ID %d\n", __func__, prtd->audio_client->session);
+
+	prtd->session_id = prtd->audio_client->session;
+	msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
+			prtd->session_id, substream->stream);
+
+	prtd->cmd_ack = 1;
+
+	ret = snd_pcm_hw_constraint_list(runtime, 0,
+			SNDRV_PCM_HW_PARAM_RATE,
+			&constraints_sample_rates);
+	if (ret < 0)
+		pr_info("snd_pcm_hw_constraint_list failed\n");
+	/* Ensure that buffer size is a multiple of period size */
+	ret = snd_pcm_hw_constraint_integer(runtime,
+			    SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret < 0)
+		pr_info("snd_pcm_hw_constraint_integer failed\n");
+
+	prtd->dsp_cnt = 0;
+	atomic_set(&prtd->pending_buffer, 1);
+	compr->codec = FORMAT_MP3;
+	populate_codec_list(compr, runtime);
+	runtime->private_data = compr;
+	compressed_audio.prtd =  &compr->prtd;
+	ret = compressed_set_volume(compressed_audio.volume);
+	if (ret < 0)
+		pr_err("%s: Set Volume failed: %d\n", __func__, ret);
+
+	ret = q6asm_set_softpause(compressed_audio.prtd->audio_client,
+								&softpause);
+	if (ret < 0)
+		pr_err("%s: Send SoftPause Param failed ret=%d\n",
+			__func__, ret);
+	ret = q6asm_set_softvolume(compressed_audio.prtd->audio_client,
+								&softvol);
+	if (ret < 0)
+		pr_err("%s: Send SoftVolume Param failed ret=%d\n",
+			__func__, ret);
+
+	return 0;
+}
+
+int compressed_set_volume(unsigned volume)
+{
+	int rc = 0;
+	if (compressed_audio.prtd && compressed_audio.prtd->audio_client) {
+		rc = q6asm_set_volume(compressed_audio.prtd->audio_client,
+								 volume);
+		if (rc < 0) {
+			pr_err("%s: Send Volume command failed"
+					" rc=%d\n", __func__, rc);
+		}
+	}
+	compressed_audio.volume = volume;
+	return rc;
+}
+
+static int msm_compr_playback_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct compr_audio *compr = runtime->private_data;
+	struct msm_audio *prtd = &compr->prtd;
+	int dir = 0;
+
+	pr_debug("%s\n", __func__);
+
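+	/* playback uses the IN (write) direction of the ASM client */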
+	dir = IN;
+	atomic_set(&prtd->pending_buffer, 0);
+	q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+	compressed_audio.prtd = NULL;
+	q6asm_audio_client_buf_free_contiguous(dir,
+				prtd->audio_client);
+
+	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
+	SNDRV_PCM_STREAM_PLAYBACK);
+	q6asm_audio_client_free(prtd->audio_client);
+	kfree(prtd);
+	return 0;
+}
+
+static int msm_compr_close(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_compr_playback_close(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = -EINVAL;
+	return ret;
+}
+
+static int msm_compr_prepare(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_compr_playback_prepare(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = -EINVAL;
+	return ret;
+}
+
+static snd_pcm_uframes_t msm_compr_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct compr_audio *compr = runtime->private_data;
+	struct msm_audio *prtd = &compr->prtd;
+
+	if (prtd->pcm_irq_pos >= prtd->pcm_size)
+		prtd->pcm_irq_pos = 0;
+
+	pr_debug("pcm_irq_pos = %d\n", prtd->pcm_irq_pos);
+	return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
+}
+
+static int msm_compr_mmap(struct snd_pcm_substream *substream,
+				struct vm_area_struct *vma)
+{
+	int result = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct compr_audio *compr = runtime->private_data;
+	struct msm_audio *prtd = &compr->prtd;
+
+	pr_debug("%s\n", __func__);
+	prtd->mmap_flag = 1;
+	if (runtime->dma_addr && runtime->dma_bytes) {
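+		/* map the DSP audio buffer uncached into userspace */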
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+		result = remap_pfn_range(vma, vma->vm_start,
+				runtime->dma_addr >> PAGE_SHIFT,
+				runtime->dma_bytes,
+				vma->vm_page_prot);
+	} else {
+		pr_err("Physical address or size of buf is NULL\n");
+		return -EINVAL;
+	}
+	return result;
+}
+
+static int msm_compr_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct compr_audio *compr = runtime->private_data;
+	struct msm_audio *prtd = &compr->prtd;
+	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+	struct audio_buffer *buf;
+	int dir, ret;
+
+	pr_debug("%s\n", __func__);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dir = IN;
+	else
+		return -EINVAL;
+
+	ret = q6asm_open_write(prtd->audio_client, compr->codec);
+	if (ret < 0) {
+		pr_err("%s: Session out open failed\n", __func__);
+		return -ENOMEM;
+	}
+	ret = q6asm_set_io_mode(prtd->audio_client, ASYNC_IO_MODE);
+	if (ret < 0) {
+		pr_err("%s: Set IO mode failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret = q6asm_audio_client_buf_alloc_contiguous(dir,
+			prtd->audio_client,
+			runtime->hw.period_bytes_min,
+			runtime->hw.periods_max);
+	if (ret < 0) {
+		pr_err("Audio Start: Buffer Allocation failed "
+					"rc = %d\n", ret);
+		return -ENOMEM;
+	}
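+	/* expose the first contiguous ASM buffer to ALSA as the DMA area */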
+	buf = prtd->audio_client->port[dir].buf;
+
+	pr_debug("%s:buf = %p\n", __func__, buf);
+	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+	dma_buf->dev.dev = substream->pcm->card->dev;
+	dma_buf->private_data = NULL;
+	dma_buf->area = buf[0].data;
+	dma_buf->addr =  buf[0].phys;
+	dma_buf->bytes = runtime->hw.buffer_bytes_max;
+	if (!dma_buf->area)
+		return -ENOMEM;
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+	return 0;
+}
+
+static int msm_compr_ioctl(struct snd_pcm_substream *substream,
+		unsigned int cmd, void *arg)
+{
+	int rc = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct compr_audio *compr = runtime->private_data;
+	struct msm_audio *prtd = &compr->prtd;
+	uint64_t timestamp;
+	uint64_t temp;
+
+	switch (cmd) {
+	case SNDRV_COMPRESS_TSTAMP: {
+		struct snd_compr_tstamp tstamp;
+		pr_debug("SNDRV_COMPRESS_TSTAMP\n");
+
+		memset(&tstamp, 0x0, sizeof(struct snd_compr_tstamp));
+		timestamp = q6asm_get_session_time(prtd->audio_client);
+		if ((int64_t)timestamp < 0) {
+			pr_err("%s: Get Session Time return value =%lld\n",
+				__func__, timestamp);
+			return -EAGAIN;
+		}
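+		/* us -> bytes: us * 2 bytes * channels * rate / 10^6 */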
+		temp = (timestamp * 2 * runtime->channels);
+		temp = temp * (runtime->rate/1000);
+		temp = div_u64(temp, 1000);
+		tstamp.sampling_rate = runtime->rate;
+		tstamp.rendered = (size_t)(temp & 0xFFFFFFFF);
+		tstamp.decoded  = (size_t)((temp >> 32) & 0xFFFFFFFF);
+		tstamp.timestamp = timestamp;
+		pr_debug("%s: bytes_consumed:lsb = %d, msb = %d,"
+			"timestamp = %lld,\n",
+			 __func__, tstamp.rendered, tstamp.decoded,
+			tstamp.timestamp);
+		if (copy_to_user((void *) arg, &tstamp,
+			sizeof(struct snd_compr_tstamp)))
+				return -EFAULT;
+		return 0;
+	}
+	case SNDRV_COMPRESS_GET_CAPS:
+		pr_debug("SNDRV_COMPRESS_GET_CAPS\n");
+		if (copy_to_user((void *) arg, &compr->info.compr_cap,
+			sizeof(struct snd_compr_caps))) {
+			rc = -EFAULT;
+			pr_err("%s: ERROR: copy to user\n", __func__);
+			return rc;
+		}
+		return 0;
+	case SNDRV_COMPRESS_SET_PARAMS:
+		pr_debug("SNDRV_COMPRESS_SET_PARAMS: ");
+		if (copy_from_user(&compr->info.codec_param, (void *) arg,
+			sizeof(struct snd_compr_params))) {
+			rc = -EFAULT;
+			pr_err("%s: ERROR: copy from user\n", __func__);
+			return rc;
+		}
+		switch (compr->info.codec_param.codec.id) {
+		case SND_AUDIOCODEC_MP3:
+			/* For MP3 we don't need any other parameters */
+			pr_debug("SND_AUDIOCODEC_MP3\n");
+			compr->codec = FORMAT_MP3;
+			break;
+		case SND_AUDIOCODEC_AAC:
+			pr_debug("SND_AUDIOCODEC_AAC\n");
+			compr->codec = FORMAT_MPEG4_AAC;
+			break;
+		default:
+			pr_debug("FORMAT_LINEAR_PCM\n");
+			compr->codec = FORMAT_LINEAR_PCM;
+			break;
+		}
+		return 0;
+	case SNDRV_PCM_IOCTL1_RESET:
+		prtd->cmd_ack = 0;
+		rc = q6asm_cmd(prtd->audio_client, CMD_FLUSH);
+		if (rc < 0)
+			pr_err("%s: flush cmd failed rc=%d\n", __func__, rc);
+		rc = wait_event_timeout(the_locks.eos_wait,
+			prtd->cmd_ack, 5 * HZ);
+		if (!rc)
+			pr_err("Flush cmd timeout\n");
+		prtd->pcm_irq_pos = 0;
+		break;
+	default:
+		break;
+	}
+	return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
+
+static struct snd_pcm_ops msm_compr_ops = {
+	.open	   = msm_compr_open,
+	.hw_params	= msm_compr_hw_params,
+	.close	  = msm_compr_close,
+	.ioctl	  = msm_compr_ioctl,
+	.prepare	= msm_compr_prepare,
+	.trigger	= msm_compr_trigger,
+	.pointer	= msm_compr_pointer,
+	.mmap		= msm_compr_mmap,
+};
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	int ret = 0;
+
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	return ret;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops		= &msm_compr_ops,
+	.pcm_new	= msm_asoc_pcm_new,
+};
+
+static __devinit int msm_compr_probe(struct platform_device *pdev)
+{
+	pr_info("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+	return snd_soc_register_platform(&pdev->dev,
+				   &msm_soc_platform);
+}
+
+static int msm_compr_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver msm_compr_driver = {
+	.driver = {
+		.name = "msm-compr-dsp",
+		.owner = THIS_MODULE,
+	},
+	.probe = msm_compr_probe,
+	.remove = __devexit_p(msm_compr_remove),
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	init_waitqueue_head(&the_locks.enable_wait);
+	init_waitqueue_head(&the_locks.eos_wait);
+	init_waitqueue_head(&the_locks.write_wait);
+	init_waitqueue_head(&the_locks.read_wait);
+
+	return platform_driver_register(&msm_compr_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_compr_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("PCM module platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.h b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.h
new file mode 100644
index 0000000..2183690
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_COMPR_H
+#define _MSM_COMPR_H
+#include <sound/apr_audio-v2.h>
+#include <sound/q6asm-v2.h>
+#include <sound/snd_compress_params.h>
+#include <sound/compress_offload.h>
+#include <sound/compress_driver.h>
+
+#include "msm-pcm-q6-v2.h"
+
+struct compr_info {
+	struct snd_compr_caps compr_cap;
+	struct snd_compr_codec_caps codec_caps;
+	struct snd_compr_params codec_param;
+};
+
+struct compr_audio {
+	struct msm_audio prtd;
+	struct compr_info info;
+	uint32_t codec;
+};
+
+#endif /*_MSM_COMPR_H*/
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
new file mode 100644
index 0000000..1605062
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -0,0 +1,1229 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/q6afe-v2.h>
+#include <sound/msm-dai-q6-v2.h>
+#include <sound/pcm_params.h>
+#include <mach/clk.h>
+
+enum {
+	STATUS_PORT_STARTED, /* track if AFE port has started */
+	STATUS_MAX
+};
+
+struct msm_dai_q6_dai_data {
+	DECLARE_BITMAP(status_mask, STATUS_MAX);
+	u32 rate;
+	u32 channels;
+	union afe_port_config port_config;
+};
+
+static struct clk *pcm_clk;
+static DEFINE_MUTEX(aux_pcm_mutex);
+static int aux_pcm_count;
+static struct msm_dai_auxpcm_pdata *auxpcm_plat_data;
+
+static u8 num_of_bits_set(u8 sd_line_mask)
+{
+	u8 num_bits_set = 0;
+
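+	/* clear the lowest set bit each iteration (Kernighan's method) */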
+	while (sd_line_mask) {
+		num_bits_set++;
+		sd_line_mask = sd_line_mask & (sd_line_mask - 1);
+	}
+	return num_bits_set;
+}
+
+static int msm_dai_q6_cdc_hw_params(struct snd_pcm_hw_params *params,
+				    struct snd_soc_dai *dai, int stream)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	dai_data->channels = params_channels(params);
+	switch (dai_data->channels) {
+	case 2:
+		dai_data->port_config.i2s.mono_stereo = MSM_AFE_STEREO;
+		break;
+	case 1:
+		dai_data->port_config.i2s.mono_stereo = MSM_AFE_MONO;
+		break;
+	default:
+		return -EINVAL;
+	}
+	dai_data->rate = params_rate(params);
+	dai_data->port_config.i2s.sample_rate = dai_data->rate;
+	dai_data->port_config.i2s.i2s_cfg_minor_version =
+						AFE_API_VERSION_I2S_CONFIG;
+	dai_data->port_config.i2s.data_format =  AFE_LINEAR_PCM_DATA;
+	dev_dbg(dai->dev, " channel %d sample rate %d entered\n",
+	dai_data->channels, dai_data->rate);
+
+	/* Q6 only supports 16 bit as of now */
+	dai_data->port_config.i2s.bit_width = 16;
+	dai_data->port_config.i2s.channel_mode = 1;
+	return 0;
+}
+
+static int msm_dai_q6_i2s_hw_params(struct snd_pcm_hw_params *params,
+				    struct snd_soc_dai *dai, int stream)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	struct msm_i2s_data *i2s_pdata =
+			(struct msm_i2s_data *) dai->dev->platform_data;
+
+	dai_data->channels = params_channels(params);
+	if (num_of_bits_set(i2s_pdata->sd_lines) == 1) {
+		switch (dai_data->channels) {
+		case 2:
+			dai_data->port_config.i2s.mono_stereo = MSM_AFE_STEREO;
+			break;
+		case 1:
+			dai_data->port_config.i2s.mono_stereo = MSM_AFE_MONO;
+			break;
+		default:
+			pr_warn("more than 2 channels not validated\n");
+			break;
+		}
+	}
+	dai_data->rate = params_rate(params);
+	dai_data->port_config.i2s.sample_rate = dai_data->rate;
+	dai_data->port_config.i2s.i2s_cfg_minor_version =
+						AFE_API_VERSION_I2S_CONFIG;
+	dai_data->port_config.i2s.data_format =  AFE_LINEAR_PCM_DATA;
+	/* Q6 only supports 16 bit as of now */
+	dai_data->port_config.i2s.bit_width = 16;
+	dai_data->port_config.i2s.channel_mode = 1;
+
+	return 0;
+}
+
+static int msm_dai_q6_i2s_platform_data_validation(
+					struct snd_soc_dai *dai)
+{
+	u8 num_of_sd_lines;
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	struct msm_i2s_data *i2s_pdata =
+			(struct msm_i2s_data *)dai->dev->platform_data;
+	struct snd_soc_dai_driver *dai_driver =
+			(struct snd_soc_dai_driver *)dai->driver;
+
+	num_of_sd_lines = num_of_bits_set(i2s_pdata->sd_lines);
+
+	switch (num_of_sd_lines) {
+	case 1:
+		switch (i2s_pdata->sd_lines) {
+		case MSM_MI2S_SD0:
+			dai_data->port_config.i2s.channel_mode =
+					AFE_PORT_I2S_SD0;
+			break;
+		case MSM_MI2S_SD1:
+			dai_data->port_config.i2s.channel_mode =
+					AFE_PORT_I2S_SD1;
+			break;
+		case MSM_MI2S_SD2:
+			dai_data->port_config.i2s.channel_mode =
+					AFE_PORT_I2S_SD2;
+			break;
+		case MSM_MI2S_SD3:
+			dai_data->port_config.i2s.channel_mode =
+					AFE_PORT_I2S_SD3;
+			break;
+		default:
+			pr_err("%s: invalid SD line\n",
+				   __func__);
+			goto error_invalid_data;
+		}
+		break;
+	case 2:
+		switch (i2s_pdata->sd_lines) {
+		case MSM_MI2S_SD0 | MSM_MI2S_SD1:
+			dai_data->port_config.i2s.channel_mode =
+					AFE_PORT_I2S_QUAD01;
+			break;
+		case MSM_MI2S_SD2 | MSM_MI2S_SD3:
+			dai_data->port_config.i2s.channel_mode =
+					AFE_PORT_I2S_QUAD23;
+			break;
+		default:
+			pr_err("%s: invalid SD line\n",
+				   __func__);
+			goto error_invalid_data;
+		}
+		break;
+	case 3:
+		switch (i2s_pdata->sd_lines) {
+		case MSM_MI2S_SD0 | MSM_MI2S_SD1 | MSM_MI2S_SD2:
+			dai_data->port_config.i2s.channel_mode =
+					AFE_PORT_I2S_6CHS;
+			break;
+		default:
+			pr_err("%s: invalid SD lines\n",
+				   __func__);
+			goto error_invalid_data;
+		}
+		break;
+	case 4:
+		switch (i2s_pdata->sd_lines) {
+		case MSM_MI2S_SD0 | MSM_MI2S_SD1 | MSM_MI2S_SD2 | MSM_MI2S_SD3:
+			dai_data->port_config.i2s.channel_mode =
+					AFE_PORT_I2S_8CHS;
+			break;
+		default:
+			pr_err("%s: invalid SD lines\n",
+				   __func__);
+			goto error_invalid_data;
+		}
+		break;
+	default:
+		pr_err("%s: invalid SD lines\n", __func__);
+		goto error_invalid_data;
+	}
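+	/* each SD line carries a stereo pair: max channels = 2 * lines */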
+	if (i2s_pdata->capability == MSM_MI2S_CAP_RX)
+		dai_driver->playback.channels_max = num_of_sd_lines << 1;
+
+	return 0;
+
+error_invalid_data:
+	return -EINVAL;
+}
+
+static int msm_dai_q6_cdc_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFS:
+		dai_data->port_config.i2s.ws_src = 1; /* CPU is master */
+		break;
+	case SND_SOC_DAIFMT_CBM_CFM:
+		dai_data->port_config.i2s.ws_src = 0; /* CPU is slave */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int msm_dai_q6_slim_bus_hw_params(struct snd_pcm_hw_params *params,
+				    struct snd_soc_dai *dai, int stream)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	dai_data->channels = params_channels(params);
+	dai_data->rate = params_rate(params);
+
+	/* Q6 only supports 16 bit as of now */
+	dai_data->port_config.slim_sch.sb_cfg_minor_version =
+				AFE_API_VERSION_SLIMBUS_CONFIG;
+	dai_data->port_config.slim_sch.bit_width = 16;
+	dai_data->port_config.slim_sch.data_format = 0;
+	dai_data->port_config.slim_sch.num_channels = dai_data->channels;
+	dai_data->port_config.slim_sch.sample_rate = dai_data->rate;
+
+	dev_dbg(dai->dev, "%s:slimbus_dev_id[%hu] bit_wd[%hu] format[%hu]\n"
+		"num_channel %hu  shared_ch_mapping[0]  %hu\n"
+		"slave_port_mapping[1]  %hu slave_port_mapping[2]  %hu\n"
+		"sample_rate %d\n", __func__,
+		dai_data->port_config.slim_sch.slimbus_dev_id,
+		dai_data->port_config.slim_sch.bit_width,
+		dai_data->port_config.slim_sch.data_format,
+		dai_data->port_config.slim_sch.num_channels,
+		dai_data->port_config.slim_sch.shared_ch_mapping[0],
+		dai_data->port_config.slim_sch.shared_ch_mapping[1],
+		dai_data->port_config.slim_sch.shared_ch_mapping[2],
+		dai_data->rate);
+
+	return 0;
+}
+
+static int msm_dai_q6_bt_fm_hw_params(struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai, int stream)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	dai_data->channels = params_channels(params);
+	dai_data->rate = params_rate(params);
+
+	dev_dbg(dai->dev, "channels %d sample rate %d entered\n",
+		dai_data->channels, dai_data->rate);
+
+	memset(&dai_data->port_config, 0, sizeof(dai_data->port_config));
+
+	return 0;
+}
+
+static int msm_dai_q6_auxpcm_hw_params(
+				struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	struct msm_dai_auxpcm_pdata *auxpcm_pdata =
+			(struct msm_dai_auxpcm_pdata *) dai->dev->platform_data;
+
+	if (params_channels(params) != 1) {
+		dev_err(dai->dev, "AUX PCM supports only mono stream\n");
+		return -EINVAL;
+	}
+	dai_data->channels = params_channels(params);
+
+	if (params_rate(params) != 8000) {
+		dev_err(dai->dev, "AUX PCM supports only 8KHz sampling rate\n");
+		return -EINVAL;
+	}
+	dai_data->rate = params_rate(params);
+
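+	/* AUX PCM is fixed at 8 kHz mono 16 bit; the rest comes from pdata */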
+	dai_data->port_config.pcm.pcm_cfg_minor_version =
+				AFE_API_VERSION_PCM_CONFIG;
+	dai_data->port_config.pcm.aux_mode = auxpcm_pdata->mode;
+	dai_data->port_config.pcm.sync_src = auxpcm_pdata->sync;
+	dai_data->port_config.pcm.frame_setting = auxpcm_pdata->frame;
+	dai_data->port_config.pcm.quantype = auxpcm_pdata->quant;
+	dai_data->port_config.pcm.ctrl_data_out_enable = auxpcm_pdata->data;
+	dai_data->port_config.pcm.sample_rate = dai_data->rate;
+	dai_data->port_config.pcm.num_channels = dai_data->channels;
+	dai_data->port_config.pcm.bit_width = 16;
+	dai_data->port_config.pcm.slot_number_mapping[0] = auxpcm_pdata->slot;
+
+	return 0;
+}
+
+static int msm_dai_q6_afe_rtproxy_hw_params(struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	dai_data->rate = params_rate(params);
+	dai_data->port_config.rtproxy.num_channels = params_channels(params);
+	dai_data->port_config.rtproxy.sample_rate = params_rate(params);
+
+	pr_debug("channel %d entered,dai_id: %d,rate: %d\n",
+	dai_data->port_config.rtproxy.num_channels, dai->id, dai_data->rate);
+
+	dai_data->port_config.rtproxy.rt_proxy_cfg_minor_version =
+				AFE_API_VERSION_RT_PROXY_CONFIG;
+	dai_data->port_config.rtproxy.bit_width = 16; /* Q6 only supports 16 */
+	dai_data->port_config.rtproxy.interleaved = 1;
+	dai_data->port_config.rtproxy.frame_size = params_period_bytes(params);
+	dai_data->port_config.rtproxy.jitter_allowance =
+				dai_data->port_config.rtproxy.frame_size/2;
+	dai_data->port_config.rtproxy.low_water_mark = 0;
+	dai_data->port_config.rtproxy.high_water_mark = 0;
+
+	return 0;
+}
+
+/* The current implementation assumes hw_params is called only once per
+ * stream.  That may not always hold; it is unclear how a parameter change
+ * should be handled once the ADM and AFE ports are already open.
+ */
+static int msm_dai_q6_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	int rc = 0;
+
+	switch (dai->id) {
+	case PRIMARY_I2S_TX:
+	case PRIMARY_I2S_RX:
+	case SECONDARY_I2S_RX:
+		rc = msm_dai_q6_cdc_hw_params(params, dai, substream->stream);
+		break;
+	case MI2S_RX:
+		rc = msm_dai_q6_i2s_hw_params(params, dai, substream->stream);
+		break;
+	case SLIMBUS_0_RX:
+	case SLIMBUS_1_RX:
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_TX:
+		rc = msm_dai_q6_slim_bus_hw_params(params, dai,
+				substream->stream);
+		break;
+	case INT_BT_SCO_RX:
+	case INT_BT_SCO_TX:
+	case INT_FM_RX:
+	case INT_FM_TX:
+		rc = msm_dai_q6_bt_fm_hw_params(params, dai, substream->stream);
+		break;
+	case RT_PROXY_DAI_001_TX:
+	case RT_PROXY_DAI_001_RX:
+	case RT_PROXY_DAI_002_TX:
+	case RT_PROXY_DAI_002_RX:
+		rc = msm_dai_q6_afe_rtproxy_hw_params(params, dai);
+		break;
+	case VOICE_PLAYBACK_TX:
+	case VOICE_RECORD_RX:
+	case VOICE_RECORD_TX:
+		rc = 0;
+		break;
+	default:
+		dev_err(dai->dev, "invalid AFE port ID\n");
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static void msm_dai_q6_auxpcm_shutdown(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	int rc = 0;
+
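+	/* RX and TX DAIs share the AFE PCM ports; the last close shuts them */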
+	mutex_lock(&aux_pcm_mutex);
+
+	if (aux_pcm_count == 0) {
+		dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count is 0. Just"
+				" return\n", __func__, dai->id);
+		mutex_unlock(&aux_pcm_mutex);
+		return;
+	}
+
+	aux_pcm_count--;
+
+	if (aux_pcm_count > 0) {
+		dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count = %d\n",
+			__func__, dai->id, aux_pcm_count);
+		mutex_unlock(&aux_pcm_mutex);
+		return;
+	} else if (aux_pcm_count < 0) {
+		dev_err(dai->dev, "%s(): ERROR: dai->id %d"
+			" aux_pcm_count = %d < 0\n",
+			__func__, dai->id, aux_pcm_count);
+		aux_pcm_count = 0;
+		mutex_unlock(&aux_pcm_mutex);
+		return;
+	}
+
+	pr_debug("%s: dai->id = %d aux_pcm_count = %d\n", __func__,
+			dai->id, aux_pcm_count);
+
+	rc = afe_close(PCM_RX); /* can block */
+	if (IS_ERR_VALUE(rc))
+		dev_err(dai->dev, "fail to close PCM_RX  AFE port\n");
+
+	rc = afe_close(PCM_TX);
+	if (IS_ERR_VALUE(rc))
+		dev_err(dai->dev, "fail to close AUX PCM TX port\n");
+
+	mutex_unlock(&aux_pcm_mutex);
+}
+
+static void msm_dai_q6_shutdown(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	int rc = 0;
+
+	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		switch (dai->id) {
+		case VOICE_PLAYBACK_TX:
+		case VOICE_RECORD_TX:
+		case VOICE_RECORD_RX:
+			pr_debug("%s, stop pseudo port:%d\n",
+						__func__,  dai->id);
+			rc = afe_stop_pseudo_port(dai->id);
+			break;
+		default:
+			rc = afe_close(dai->id); /* can block */
+			break;
+		}
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to close AFE port\n");
+		pr_debug("%s: dai_data->status_mask = %ld\n", __func__,
+			*dai_data->status_mask);
+		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+	}
+}
+
+static int msm_dai_q6_auxpcm_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	int rc = 0;
+
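+	/* only the first of the RX/TX prepares opens the AFE PCM ports */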
+	mutex_lock(&aux_pcm_mutex);
+
+	if (aux_pcm_count == 2) {
+		dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count is 2. Just"
+			" return.\n", __func__, dai->id);
+		mutex_unlock(&aux_pcm_mutex);
+		return 0;
+	} else if (aux_pcm_count > 2) {
+		dev_err(dai->dev, "%s(): ERROR: dai->id %d"
+			" aux_pcm_count = %d > 2\n",
+			__func__, dai->id, aux_pcm_count);
+		mutex_unlock(&aux_pcm_mutex);
+		return 0;
+	}
+
+	aux_pcm_count++;
+	if (aux_pcm_count == 2)  {
+		dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count = %d after"
+			" increment\n", __func__, dai->id, aux_pcm_count);
+		mutex_unlock(&aux_pcm_mutex);
+		return 0;
+	}
+
+	pr_debug("%s:dai->id:%d  aux_pcm_count = %d. opening afe\n",
+			__func__, dai->id, aux_pcm_count);
+
+	rc = afe_q6_interface_prepare();
+	if (IS_ERR_VALUE(rc))
+		dev_err(dai->dev, "fail to open AFE APR\n");
+
+	/*
+	 * For AUX PCM Interface the below sequence of clk
+	 * settings and afe_open is a strict requirement.
+	 *
+	 * Also using afe_open instead of afe_port_start_nowait
+	 * to make sure the port is open before deasserting the
+	 * clock line. This is required because pcm register is
+	 * not written before clock deassert. Hence the hw does
+	 * not get updated with new setting if the below clock
+	 * assert/deasset and afe_open sequence is not followed.
+	 */
+
+	afe_open(PCM_RX, &dai_data->port_config, dai_data->rate);
+
+	afe_open(PCM_TX, &dai_data->port_config, dai_data->rate);
+
+	mutex_unlock(&aux_pcm_mutex);
+
+	return rc;
+}
+
+static int msm_dai_q6_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	int rc = 0;
+
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		/* PORT START should be set if prepare called in active state */
+		rc = afe_q6_interface_prepare();
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to open AFE APR\n");
+	}
+	return rc;
+}
+
+static int msm_dai_q6_auxpcm_trigger(struct snd_pcm_substream *substream,
+		int cmd, struct snd_soc_dai *dai)
+{
+	int rc = 0;
+
+	pr_debug("%s:port:%d  cmd:%d  aux_pcm_count= %d",
+		__func__, dai->id, cmd, aux_pcm_count);
+
+	switch (cmd) {
+
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		/* afe_open will be called from prepare */
+		return 0;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		return 0;
+
+	default:
+		rc = -EINVAL;
+	}
+
+	return rc;
+
+}
+
+static int msm_dai_q6_trigger(struct snd_pcm_substream *substream, int cmd,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	int rc = 0;
+
+	/* Start/stop the port without waiting for the Q6 AFE response.  The
+	 * native Q6 AFE driver would need to propagate the AFE response in
+	 * order to handle port start/stop command errors properly if they
+	 * arise.
+	 */
+	pr_debug("%s:port:%d  cmd:%d dai_data->status_mask = %ld",
+		__func__, dai->id, cmd, *dai_data->status_mask);
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+			switch (dai->id) {
+			case VOICE_PLAYBACK_TX:
+			case VOICE_RECORD_TX:
+			case VOICE_RECORD_RX:
+				afe_pseudo_port_start_nowait(dai->id);
+				break;
+			default:
+				afe_port_start_nowait(dai->id,
+					&dai_data->port_config, dai_data->rate);
+				break;
+			}
+			set_bit(STATUS_PORT_STARTED,
+				dai_data->status_mask);
+		}
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+			switch (dai->id) {
+			case VOICE_PLAYBACK_TX:
+			case VOICE_RECORD_TX:
+			case VOICE_RECORD_RX:
+				afe_pseudo_port_stop_nowait(dai->id);
+				break;
+			default:
+				afe_port_stop_nowait(dai->id);
+				break;
+			}
+			clear_bit(STATUS_PORT_STARTED,
+				dai_data->status_mask);
+		}
+		break;
+
+	default:
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int msm_dai_q6_dai_auxpcm_probe(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data;
+	int rc = 0;
+
+	struct msm_dai_auxpcm_pdata *auxpcm_pdata =
+			(struct msm_dai_auxpcm_pdata *) dai->dev->platform_data;
+
+	mutex_lock(&aux_pcm_mutex);
+
+	if (!auxpcm_plat_data)
+		auxpcm_plat_data = auxpcm_pdata;
+	else if (auxpcm_plat_data != auxpcm_pdata) {
+		dev_err(dai->dev, "AUX PCM RX and TX devices do not share"
+				" the same platform data\n");
+		mutex_unlock(&aux_pcm_mutex);
+		return -EINVAL;
+	}
+
+	/*
+	 * The clk name for AUX PCM operation is passed as platform
+	 * data to the cpu driver, since the cpu driver is unaware of any
+	 * board specific configuration.
+	 */
+	if (!pcm_clk)
+		pcm_clk = clk_get(dai->dev, auxpcm_pdata->clk);
+
+	mutex_unlock(&aux_pcm_mutex);
+
+	dai_data = kzalloc(sizeof(struct msm_dai_q6_dai_data), GFP_KERNEL);
+
+	if (!dai_data) {
+		dev_err(dai->dev, "DAI-%d: fail to allocate dai data\n",
+		dai->id);
+		rc = -ENOMEM;
+	} else
+		dev_set_drvdata(dai->dev, dai_data);
+
+	pr_debug("%s: probe done for dai->id %d\n", __func__, dai->id);
+	return rc;
+}
+
+static int msm_dai_q6_dai_auxpcm_remove(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data;
+	int rc;
+
+	dai_data = dev_get_drvdata(dai->dev);
+
+	mutex_lock(&aux_pcm_mutex);
+
+	if (aux_pcm_count == 0) {
+		dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count is 0. clean"
+				" up and return\n", __func__, dai->id);
+		goto done;
+	}
+
+	aux_pcm_count--;
+
+	if (aux_pcm_count > 0) {
+		dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count = %d\n",
+			__func__, dai->id, aux_pcm_count);
+		goto done;
+	} else if (aux_pcm_count < 0) {
+		dev_err(dai->dev, "%s(): ERROR: dai->id %d"
+			" aux_pcm_count = %d < 0\n",
+			__func__, dai->id, aux_pcm_count);
+		goto done;
+	}
+
+	dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count = %d."
+			" closing afe\n",
+		__func__, dai->id, aux_pcm_count);
+
+	rc = afe_close(PCM_RX); /* can block */
+	if (IS_ERR_VALUE(rc))
+		dev_err(dai->dev, "fail to close AUX PCM RX AFE port\n");
+
+	rc = afe_close(PCM_TX);
+	if (IS_ERR_VALUE(rc))
+		dev_err(dai->dev, "fail to close AUX PCM TX AFE port\n");
+
+done:
+	kfree(dai_data);
+	snd_soc_unregister_dai(dai->dev);
+
+	mutex_unlock(&aux_pcm_mutex);
+
+	return 0;
+}
+
+static int msm_dai_q6_dai_i2s_probe(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data;
+	int rc = 0;
+
+	dai_data = kzalloc(sizeof(struct msm_dai_q6_dai_data),
+		GFP_KERNEL);
+
+	if (!dai_data) {
+		dev_err(dai->dev, "DAI-%d: fail to allocate dai data\n",
+		dai->id);
+		rc = -ENOMEM;
+		goto rtn;
+	} else
+		dev_set_drvdata(dai->dev, dai_data);
+
+	rc = msm_dai_q6_i2s_platform_data_validation(dai);
+	if (rc != 0) {
+		pr_err("%s: The msm_dai_q6_i2s_platform_data_validation failed\n",
+			    __func__);
+		kfree(dai_data);
+	}
+rtn:
+	return rc;
+}
+
+static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data;
+	int rc = 0;
+
+	dai_data = kzalloc(sizeof(struct msm_dai_q6_dai_data),
+		GFP_KERNEL);
+
+	if (!dai_data) {
+		dev_err(dai->dev, "DAI-%d: fail to allocate dai data\n",
+		dai->id);
+		rc = -ENOMEM;
+	} else
+		dev_set_drvdata(dai->dev, dai_data);
+
+	return rc;
+}
+
+static int msm_dai_q6_dai_remove(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data;
+	int rc;
+
+	dai_data = dev_get_drvdata(dai->dev);
+
+	/* If AFE port is still up, close it */
+	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		switch (dai->id) {
+		case VOICE_PLAYBACK_TX:
+		case VOICE_RECORD_TX:
+		case VOICE_RECORD_RX:
+			pr_debug("%s, stop pseudo port:%d\n",
+						__func__,  dai->id);
+			rc = afe_stop_pseudo_port(dai->id);
+			break;
+		default:
+			rc = afe_close(dai->id); /* can block */
+		}
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to close AFE port\n");
+		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+	}
+	kfree(dai_data);
+	snd_soc_unregister_dai(dai->dev);
+
+	return 0;
+}
+
+static int msm_dai_q6_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	int rc = 0;
+
+	dev_dbg(dai->dev, "enter %s, id = %d fmt[%d]\n", __func__,
+							dai->id, fmt);
+	switch (dai->id) {
+	case PRIMARY_I2S_TX:
+	case PRIMARY_I2S_RX:
+	case MI2S_RX:
+	case SECONDARY_I2S_RX:
+		rc = msm_dai_q6_cdc_set_fmt(dai, fmt);
+		break;
+	default:
+		dev_err(dai->dev, "invalid cpu_dai set_fmt\n");
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int msm_dai_q6_set_channel_map(struct snd_soc_dai *dai,
+				unsigned int tx_num, unsigned int *tx_slot,
+				unsigned int rx_num, unsigned int *rx_slot)
+{
+	int rc = 0;
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	unsigned int i = 0;
+
+	dev_dbg(dai->dev, "enter %s, id = %d\n", __func__,
+							dai->id);
+	switch (dai->id) {
+	case SLIMBUS_0_RX:
+	case SLIMBUS_1_RX:
+		/* channel number to be between 128 and 255. For RX port
+		 * use channel numbers from 138 to 144, for TX port
+		 * use channel numbers from 128 to 137
+		 * For ports between MDM-APQ use channel numbers from 145
+		 */
+		if (!rx_slot)
+			return -EINVAL;
+		for (i = 0; i < rx_num; i++) {
+			dai_data->port_config.slim_sch.shared_ch_mapping[i] =
+							rx_slot[i];
+			pr_debug("%s: find number of channels[%d] ch[%d]\n",
+							__func__, i,
+							rx_slot[i]);
+		}
+		dai_data->port_config.slim_sch.num_channels = rx_num;
+		pr_debug("%s:SLIMBUS_0_RX cnt[%d] ch[%d %d]\n", __func__,
+		rx_num, dai_data->port_config.slim_sch.shared_ch_mapping[0],
+		dai_data->port_config.slim_sch.shared_ch_mapping[1]);
+
+		break;
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_TX:
+		/* channel number to be between 128 and 255. For RX port
+		 * use channel numbers from 138 to 144, for TX port
+		 * use channel numbers from 128 to 137
+		 * For ports between MDM-APQ use channel numbers from 145
+		 */
+		if (!tx_slot)
+			return -EINVAL;
+		for (i = 0; i < tx_num; i++) {
+			dai_data->port_config.slim_sch.shared_ch_mapping[i] =
+							tx_slot[i];
+			pr_debug("%s: find number of channels[%d] ch[%d]\n",
+						__func__, i, tx_slot[i]);
+		}
+		dai_data->port_config.slim_sch.num_channels = tx_num;
+		pr_debug("%s:SLIMBUS_0_TX cnt[%d] ch[%d %d]\n", __func__,
+		tx_num, dai_data->port_config.slim_sch.shared_ch_mapping[0],
+		dai_data->port_config.slim_sch.shared_ch_mapping[1]);
+		break;
+	default:
+		dev_err(dai->dev, "invalid cpu_dai set_channel_map\n");
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static struct snd_soc_dai_ops msm_dai_q6_ops = {
+	.prepare	= msm_dai_q6_prepare,
+	.trigger	= msm_dai_q6_trigger,
+	.hw_params	= msm_dai_q6_hw_params,
+	.shutdown	= msm_dai_q6_shutdown,
+	.set_fmt	= msm_dai_q6_set_fmt,
+	.set_channel_map = msm_dai_q6_set_channel_map,
+};
+
+static struct snd_soc_dai_ops msm_dai_q6_auxpcm_ops = {
+	.prepare	= msm_dai_q6_auxpcm_prepare,
+	.trigger	= msm_dai_q6_auxpcm_trigger,
+	.hw_params	= msm_dai_q6_auxpcm_hw_params,
+	.shutdown	= msm_dai_q6_auxpcm_shutdown,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_i2s_rx_dai = {
+	.playback = {
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+		SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 4,
+		.rate_min =     8000,
+		.rate_max =	48000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_i2s_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_i2s_tx_dai = {
+	.capture = {
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+		SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min =     8000,
+		.rate_max =	48000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_afe_rx_dai = {
+	.playback = {
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+		SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min =     8000,
+		.rate_max =	48000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_afe_tx_dai = {
+	.capture = {
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+		SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min =     8000,
+		.rate_max =	48000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_voice_playback_tx_dai = {
+	.playback = {
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+		SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_max =     48000,
+		.rate_min =     8000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_rx_dai = {
+	.playback = {
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+		SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min =     8000,
+		.rate_max =	48000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_tx_dai = {
+	.capture = {
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+		SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min =     8000,
+		.rate_max =	48000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_incall_record_dai = {
+	.capture = {
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+		SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min =     8000,
+		.rate_max =     48000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_bt_sco_rx_dai = {
+	.playback = {
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 1,
+		.rate_max = 16000,
+		.rate_min = 8000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_bt_sco_tx_dai = {
+	.capture = {
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 1,
+		.rate_max = 16000,
+		.rate_min = 8000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_fm_rx_dai = {
+	.playback = {
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+		SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 2,
+		.channels_max = 2,
+		.rate_max = 48000,
+		.rate_min = 8000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_fm_tx_dai = {
+	.capture = {
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+		SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 2,
+		.channels_max = 2,
+		.rate_max = 48000,
+		.rate_min = 8000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_aux_pcm_rx_dai = {
+	.playback = {
+		.rates = SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 1,
+		.rate_max = 8000,
+		.rate_min = 8000,
+	},
+	.ops = &msm_dai_q6_auxpcm_ops,
+	.probe = msm_dai_q6_dai_auxpcm_probe,
+	.remove = msm_dai_q6_dai_auxpcm_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_aux_pcm_tx_dai = {
+	.capture = {
+		.rates = SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 1,
+		.rate_max = 8000,
+		.rate_min = 8000,
+	},
+	.ops = &msm_dai_q6_auxpcm_ops,
+	.probe = msm_dai_q6_dai_auxpcm_probe,
+	.remove = msm_dai_q6_dai_auxpcm_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_1_rx_dai = {
+	.playback = {
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 1,
+		.rate_min = 8000,
+		.rate_max = 16000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_1_tx_dai = {
+	.capture = {
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 1,
+		.rate_min = 8000,
+		.rate_max = 16000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static __devinit int msm_dai_q6_dev_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));
+
+	switch (pdev->id) {
+	case PRIMARY_I2S_RX:
+	case SECONDARY_I2S_RX:
+		rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_i2s_rx_dai);
+		break;
+	case PRIMARY_I2S_TX:
+		rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_i2s_tx_dai);
+		break;
+	case AFE_PORT_ID_PRIMARY_PCM_RX:
+		rc = snd_soc_register_dai(&pdev->dev,
+				&msm_dai_q6_aux_pcm_rx_dai);
+		break;
+	case AFE_PORT_ID_PRIMARY_PCM_TX:
+		rc = snd_soc_register_dai(&pdev->dev,
+				&msm_dai_q6_aux_pcm_tx_dai);
+		break;
+	case MI2S_RX:
+		rc = snd_soc_register_dai(&pdev->dev,
+					&msm_dai_q6_i2s_rx_dai);
+		break;
+	case SLIMBUS_0_RX:
+		rc = snd_soc_register_dai(&pdev->dev,
+				&msm_dai_q6_slimbus_rx_dai);
+		break;
+	case SLIMBUS_0_TX:
+		rc = snd_soc_register_dai(&pdev->dev,
+				&msm_dai_q6_slimbus_tx_dai);
+		break;
+
+	case SLIMBUS_1_RX:
+		rc = snd_soc_register_dai(&pdev->dev,
+				&msm_dai_q6_slimbus_1_rx_dai);
+		break;
+	case SLIMBUS_1_TX:
+		rc = snd_soc_register_dai(&pdev->dev,
+				&msm_dai_q6_slimbus_1_tx_dai);
+		break;
+	case INT_BT_SCO_RX:
+		rc = snd_soc_register_dai(&pdev->dev,
+					&msm_dai_q6_bt_sco_rx_dai);
+		break;
+	case INT_BT_SCO_TX:
+		rc = snd_soc_register_dai(&pdev->dev,
+					&msm_dai_q6_bt_sco_tx_dai);
+		break;
+	case INT_FM_RX:
+		rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_fm_rx_dai);
+		break;
+	case INT_FM_TX:
+		rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_fm_tx_dai);
+		break;
+	case RT_PROXY_DAI_001_RX:
+	case RT_PROXY_DAI_002_RX:
+		rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_afe_rx_dai);
+		break;
+	case RT_PROXY_DAI_001_TX:
+	case RT_PROXY_DAI_002_TX:
+		rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_afe_tx_dai);
+		break;
+	case VOICE_PLAYBACK_TX:
+		rc = snd_soc_register_dai(&pdev->dev,
+					&msm_dai_q6_voice_playback_tx_dai);
+		break;
+	case VOICE_RECORD_RX:
+	case VOICE_RECORD_TX:
+		rc = snd_soc_register_dai(&pdev->dev,
+						&msm_dai_q6_incall_record_dai);
+		break;
+	default:
+		rc = -ENODEV;
+		break;
+	}
+	return rc;
+}
+
+static __devexit int msm_dai_q6_dev_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_dai(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver msm_dai_q6_driver = {
+	.probe  = msm_dai_q6_dev_probe,
+	.remove = msm_dai_q6_dev_remove,
+	.driver = {
+		.name = "msm-dai-q6",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init msm_dai_q6_init(void)
+{
+	return platform_driver_register(&msm_dai_q6_driver);
+}
+module_init(msm_dai_q6_init);
+
+static void __exit msm_dai_q6_exit(void)
+{
+	platform_driver_unregister(&msm_dai_q6_driver);
+}
+module_exit(msm_dai_q6_exit);
+
+/* Module information */
+MODULE_DESCRIPTION("MSM DSP DAI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6v2/msm-multi-ch-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-multi-ch-pcm-q6-v2.c
new file mode 100644
index 0000000..cab689d
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-multi-ch-pcm-q6-v2.c
@@ -0,0 +1,777 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/android_pmem.h>
+#include <asm/dma.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+
+#include "msm-pcm-q6-v2.h"
+#include "msm-pcm-routing-v2.h"
+
+static struct audio_locks the_locks;
+
+struct snd_msm {
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+};
+
+struct snd_msm_volume {
+	struct msm_audio *prtd;
+	unsigned volume;
+};
+static struct snd_msm_volume multi_ch_pcm_audio = {NULL, 0x2000};
+
+#define PLAYBACK_NUM_PERIODS	8
+#define PLAYBACK_PERIOD_SIZE	4032
+#define CAPTURE_NUM_PERIODS	16
+#define CAPTURE_PERIOD_SIZE	320
+
+static struct snd_pcm_hardware msm_pcm_hardware_capture = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE,
+	.rates =                SNDRV_PCM_RATE_8000_48000,
+	.rate_min =             8000,
+	.rate_max =             48000,
+	.channels_min =         1,
+	.channels_max =         2,
+	.buffer_bytes_max =     CAPTURE_NUM_PERIODS * CAPTURE_PERIOD_SIZE,
+	.period_bytes_min =	CAPTURE_PERIOD_SIZE,
+	.period_bytes_max =     CAPTURE_PERIOD_SIZE,
+	.periods_min =          CAPTURE_NUM_PERIODS,
+	.periods_max =          CAPTURE_NUM_PERIODS,
+	.fifo_size =            0,
+};
+
+static struct snd_pcm_hardware msm_pcm_hardware_playback = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE,
+	.rates =                SNDRV_PCM_RATE_8000_48000,
+	.rate_min =             8000,
+	.rate_max =             48000,
+	.channels_min =         1,
+	.channels_max =         6,
+	.buffer_bytes_max =     PLAYBACK_NUM_PERIODS * PLAYBACK_PERIOD_SIZE,
+	.period_bytes_min =	PLAYBACK_PERIOD_SIZE,
+	.period_bytes_max =     PLAYBACK_PERIOD_SIZE,
+	.periods_min =          PLAYBACK_NUM_PERIODS,
+	.periods_max =          PLAYBACK_NUM_PERIODS,
+	.fifo_size =            0,
+};
+
+/* Conventional and unconventional sample rates supported */
+static unsigned int supported_sample_rates[] = {
+	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
+};
+
+static uint32_t in_frame_info[CAPTURE_NUM_PERIODS][2];
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+	.count = ARRAY_SIZE(supported_sample_rates),
+	.list = supported_sample_rates,
+	.mask = 0,
+};
+
+static void event_handler(uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv)
+{
+	struct msm_audio *prtd = priv;
+	struct snd_pcm_substream *substream = prtd->substream;
+	uint32_t *ptrmem = (uint32_t *)payload;
+	int i = 0;
+	uint32_t idx = 0;
+	uint32_t size = 0;
+
+	pr_debug("%s\n", __func__);
+	switch (opcode) {
+	case ASM_DATA_EVENT_WRITE_DONE_V2: {
+		pr_debug("ASM_DATA_EVENT_WRITE_DONE\n");
+		pr_debug("Buffer Consumed = 0x%08x\n", *ptrmem);
+		prtd->pcm_irq_pos += prtd->pcm_count;
+		if (atomic_read(&prtd->start))
+			snd_pcm_period_elapsed(substream);
+		atomic_inc(&prtd->out_count);
+		wake_up(&the_locks.write_wait);
+		if (!atomic_read(&prtd->start))
+			break;
+		if (!prtd->mmap_flag)
+			break;
+		if (q6asm_is_cpu_buf_avail_nolock(IN,
+				prtd->audio_client,
+				&size, &idx)) {
+			pr_debug("%s:writing %d bytes of buffer to dsp 2\n",
+					__func__, prtd->pcm_count);
+			q6asm_write_nolock(prtd->audio_client,
+				prtd->pcm_count, 0, 0, NO_TIMESTAMP);
+		}
+		break;
+	}
+	case ASM_DATA_EVENT_RENDERED_EOS:
+		pr_debug("ASM_DATA_CMDRSP_EOS\n");
+		prtd->cmd_ack = 1;
+		wake_up(&the_locks.eos_wait);
+		break;
+	case ASM_DATA_EVENT_READ_DONE_V2: {
+		pr_debug("ASM_DATA_EVENT_READ_DONE\n");
+		pr_debug("token = 0x%08x\n", token);
+		for (i = 0; i < 8; i++, ++ptrmem)
+			pr_debug("cmd[%d]=0x%08x\n", i, *ptrmem);
+		in_frame_info[token][0] = payload[2];
+		in_frame_info[token][1] = payload[3];
+		prtd->pcm_irq_pos += in_frame_info[token][0];
+		pr_debug("pcm_irq_pos=%d\n", prtd->pcm_irq_pos);
+		if (atomic_read(&prtd->start))
+			snd_pcm_period_elapsed(substream);
+		if (atomic_read(&prtd->in_count) <= prtd->periods)
+			atomic_inc(&prtd->in_count);
+		wake_up(&the_locks.read_wait);
+		if (prtd->mmap_flag
+			&& q6asm_is_cpu_buf_avail_nolock(OUT,
+				prtd->audio_client,
+				&size, &idx))
+			q6asm_read_nolock(prtd->audio_client);
+		break;
+	}
+	case APR_BASIC_RSP_RESULT: {
+		switch (payload[0]) {
+		case ASM_SESSION_CMD_RUN_V2:
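+			/* RUN acked: prime the DSP with queued data */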
+			if (substream->stream
+				!= SNDRV_PCM_STREAM_PLAYBACK) {
+				atomic_set(&prtd->start, 1);
+				break;
+			}
+			if (prtd->mmap_flag) {
+				pr_debug("%s:writing %d bytes"
+					" of buffer to dsp\n",
+					__func__,
+					prtd->pcm_count);
+				q6asm_write_nolock(prtd->audio_client,
+					prtd->pcm_count,
+					0, 0, NO_TIMESTAMP);
+			} else {
+				while (atomic_read(&prtd->out_needed)) {
+					pr_debug("%s:writing %d bytes"
+						 " of buffer to dsp\n",
+						__func__,
+						prtd->pcm_count);
+					q6asm_write_nolock(prtd->audio_client,
+						prtd->pcm_count,
+						0, 0, NO_TIMESTAMP);
+					atomic_dec(&prtd->out_needed);
+					wake_up(&the_locks.write_wait);
+				}
+			}
+			atomic_set(&prtd->start, 1);
+			break;
+		default:
+			break;
+		}
+	}
+	break;
+	default:
+		pr_debug("Not Supported Event opcode[0x%x]\n", opcode);
+		break;
+	}
+}
+
+static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_irq_pos = 0;
+	/* rate and channels are sent to audio driver */
+	prtd->samp_rate = runtime->rate;
+	prtd->channel_mode = runtime->channels;
+	if (prtd->enabled)
+		return 0;
+
+	ret = q6asm_media_format_block_pcm(prtd->audio_client,
+			runtime->rate, runtime->channels);
+	if (ret < 0)
+		pr_info("%s: CMD Format block failed\n", __func__);
+
+	atomic_set(&prtd->out_count, runtime->periods);
+
+	prtd->enabled = 1;
+	prtd->cmd_ack = 0;
+
+	return 0;
+}
+
+static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	int ret = 0;
+	int i = 0;
+	pr_debug("%s\n", __func__);
+	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_irq_pos = 0;
+
+	/* rate and channels are sent to audio driver */
+	prtd->samp_rate = runtime->rate;
+	prtd->channel_mode = runtime->channels;
+
+	if (prtd->enabled)
+		return 0;
+
+	pr_debug("Samp_rate = %d\n", prtd->samp_rate);
+	pr_debug("Channel = %d\n", prtd->channel_mode);
+	ret = q6asm_enc_cfg_blk_pcm(prtd->audio_client, prtd->samp_rate,
+					prtd->channel_mode);
+	if (ret < 0)
+		pr_debug("%s: cmd cfg pcm block failed\n", __func__);
+
+	for (i = 0; i < runtime->periods; i++)
+		q6asm_read(prtd->audio_client);
+	prtd->periods = runtime->periods;
+
+	prtd->enabled = 1;
+
+	return ret;
+}
+
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		pr_debug("%s: Trigger start\n", __func__);
+		q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+		atomic_set(&prtd->start, 0);
+		if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
+			break;
+		prtd->cmd_ack = 0;
+		q6asm_cmd_nowait(prtd->audio_client, CMD_EOS);
+		break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		pr_debug("SNDRV_PCM_TRIGGER_PAUSE\n");
+		q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
+		atomic_set(&prtd->start, 0);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int msm_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd;
+	int ret = 0;
+	struct asm_softpause_params softpause = {
+		.enable = SOFT_PAUSE_ENABLE,
+		.period = SOFT_PAUSE_PERIOD,
+		.step = SOFT_PAUSE_STEP,
+		.rampingcurve = SOFT_PAUSE_CURVE_LINEAR,
+	};
+	struct asm_softvolume_params softvol = {
+		.period = SOFT_VOLUME_PERIOD,
+		.step = SOFT_VOLUME_STEP,
+		.rampingcurve = SOFT_VOLUME_CURVE_LINEAR,
+	};
+
+	pr_debug("%s\n", __func__);
+	prtd = kzalloc(sizeof(struct msm_audio), GFP_KERNEL);
+	if (prtd == NULL) {
+		pr_err("Failed to allocate memory for msm_audio\n");
+		return -ENOMEM;
+	}
+	prtd->substream = substream;
+	prtd->audio_client = q6asm_audio_client_alloc(
+				(app_cb)event_handler, prtd);
+	if (!prtd->audio_client) {
+		pr_err("%s: Could not allocate memory\n", __func__);
+		kfree(prtd);
+		return -ENOMEM;
+	}
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		runtime->hw = msm_pcm_hardware_playback;
+		ret = q6asm_open_write(prtd->audio_client,
+				FORMAT_MULTI_CHANNEL_LINEAR_PCM);
+		if (ret < 0) {
+			pr_err("%s: pcm out open failed\n", __func__);
+			q6asm_audio_client_free(prtd->audio_client);
+			kfree(prtd);
+			return -ENOMEM;
+		}
+	}
+	/* Capture path */
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		runtime->hw = msm_pcm_hardware_capture;
+		ret = q6asm_open_read(prtd->audio_client, FORMAT_LINEAR_PCM);
+		if (ret < 0) {
+			pr_err("%s: pcm in open failed\n", __func__);
+			q6asm_audio_client_free(prtd->audio_client);
+			kfree(prtd);
+			return -ENOMEM;
+		}
+	}
+
+	pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session);
+
+	prtd->session_id = prtd->audio_client->session;
+	msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
+			prtd->session_id, substream->stream);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		prtd->cmd_ack = 1;
+
+	ret = snd_pcm_hw_constraint_list(runtime, 0,
+				SNDRV_PCM_HW_PARAM_RATE,
+				&constraints_sample_rates);
+	if (ret < 0)
+		pr_err("snd_pcm_hw_constraint_list failed\n");
+	/* Ensure that buffer size is a multiple of period size */
+	ret = snd_pcm_hw_constraint_integer(runtime,
+					    SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret < 0)
+		pr_err("snd_pcm_hw_constraint_integer failed\n");
+
+	prtd->dsp_cnt = 0;
+	runtime->private_data = prtd;
+	pr_debug("substream->pcm->device = %d\n", substream->pcm->device);
+	pr_debug("soc_prtd->dai_link->be_id = %d\n", soc_prtd->dai_link->be_id);
+	multi_ch_pcm_audio.prtd = prtd;
+	ret = multi_ch_pcm_set_volume(multi_ch_pcm_audio.volume);
+	if (ret < 0)
+		pr_err("%s : Set Volume failed : %d", __func__, ret);
+
+	ret = q6asm_set_softpause(multi_ch_pcm_audio.prtd->audio_client,
+								 &softpause);
+	if (ret < 0)
+		pr_err("%s: Send SoftPause Param failed ret=%d\n",
+			__func__, ret);
+	ret = q6asm_set_softvolume(multi_ch_pcm_audio.prtd->audio_client,
+								 &softvol);
+	if (ret < 0)
+		pr_err("%s: Send SoftVolume Param failed ret=%d\n",
+			__func__, ret);
+
+	return 0;
+}
+
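+/*
+ * Cache the requested volume and apply it to the ASM session if a
+ * multi-channel stream is currently open.
+ */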
+int multi_ch_pcm_set_volume(unsigned volume)
+{
+	int rc = 0;
+	pr_err("multi_ch_pcm_set_volume\n");
+
+	if (multi_ch_pcm_audio.prtd && multi_ch_pcm_audio.prtd->audio_client) {
+		pr_err("%s q6asm_set_volume\n", __func__);
+		rc = q6asm_set_volume(multi_ch_pcm_audio.prtd->audio_client,
+								volume);
+		if (rc < 0) {
+			pr_err("%s: Send Volume command failed"
+				" rc=%d\n", __func__, rc);
+		}
+	}
+	multi_ch_pcm_audio.volume = volume;
+	return rc;
+}
+
+
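+/*
+ * Wait for a free DSP buffer, copy one period from user space into it and
+ * queue it with q6asm_write(); if the stream is not running yet, the write
+ * is deferred via out_needed until the RUN acknowledgement arrives.
+ */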
+static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
+	snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	int fbytes = 0;
+	int xfer = 0;
+	char *bufptr = NULL;
+	void *data = NULL;
+	uint32_t idx = 0;
+	uint32_t size = 0;
+
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	fbytes = frames_to_bytes(runtime, frames);
+	pr_debug("%s: prtd->out_count = %d\n",
+				__func__, atomic_read(&prtd->out_count));
+	ret = wait_event_timeout(the_locks.write_wait,
+			(atomic_read(&prtd->out_count)), 5 * HZ);
+	if (ret < 0) {
+		pr_err("%s: wait_event_timeout failed\n", __func__);
+		goto fail;
+	}
+
+	if (!atomic_read(&prtd->out_count)) {
+		pr_err("%s: pcm stopped out_count 0\n", __func__);
+		return 0;
+	}
+
+	data = q6asm_is_cpu_buf_avail(IN, prtd->audio_client, &size, &idx);
+	bufptr = data;
+	if (bufptr) {
+		pr_debug("%s:fbytes =%d: xfer=%d size=%d\n",
+					__func__, fbytes, xfer, size);
+		xfer = fbytes;
+		if (copy_from_user(bufptr, buf, xfer)) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		buf += xfer;
+		fbytes -= xfer;
+		pr_debug("%s:fbytes = %d: xfer=%d\n", __func__, fbytes, xfer);
+		if (atomic_read(&prtd->start)) {
+			pr_debug("%s:writing %d bytes of buffer to dsp\n",
+					__func__, xfer);
+			ret = q6asm_write(prtd->audio_client, xfer,
+						0, 0, NO_TIMESTAMP);
+			if (ret < 0) {
+				ret = -EFAULT;
+				goto fail;
+			}
+		} else
+			atomic_inc(&prtd->out_needed);
+		atomic_dec(&prtd->out_count);
+	}
+fail:
+	return  ret;
+}
+
+static int msm_pcm_playback_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd = runtime->private_data;
+	int dir = 0;
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+
+	dir = IN;
+	ret = wait_event_timeout(the_locks.eos_wait,
+				prtd->cmd_ack, 5 * HZ);
+	if (ret < 0)
+		pr_err("%s: CMD_EOS failed\n", __func__);
+	q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+	q6asm_audio_client_buf_free_contiguous(dir,
+				prtd->audio_client);
+
+	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
+	SNDRV_PCM_STREAM_PLAYBACK);
+	multi_ch_pcm_audio.prtd = NULL;
+	q6asm_audio_client_free(prtd->audio_client);
+	kfree(prtd);
+	return 0;
+}
+
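+/*
+ * Wait for a filled DSP buffer, copy it to user space and hand the buffer
+ * back to the DSP with q6asm_read().
+ */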
+static int msm_pcm_capture_copy(struct snd_pcm_substream *substream,
+		 int channel, snd_pcm_uframes_t hwoff, void __user *buf,
+						 snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	int fbytes = 0;
+	int xfer;
+	char *bufptr;
+	void *data = NULL;
+	static uint32_t idx;
+	static uint32_t size;
+	uint32_t offset = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = substream->runtime->private_data;
+
+
+	pr_debug("%s\n", __func__);
+	fbytes = frames_to_bytes(runtime, frames);
+
+	pr_debug("appl_ptr %d\n", (int)runtime->control->appl_ptr);
+	pr_debug("hw_ptr %d\n", (int)runtime->status->hw_ptr);
+	pr_debug("avail_min %d\n", (int)runtime->control->avail_min);
+
+	ret = wait_event_timeout(the_locks.read_wait,
+			(atomic_read(&prtd->in_count)), 5 * HZ);
+	if (ret < 0) {
+		pr_debug("%s: wait_event_timeout failed\n", __func__);
+		goto fail;
+	}
+	if (!atomic_read(&prtd->in_count)) {
+		pr_debug("%s: pcm stopped in_count 0\n", __func__);
+		return 0;
+	}
+	pr_debug("Checking if valid buffer is available...%08x\n",
+						(unsigned int) data);
+	data = q6asm_is_cpu_buf_avail(OUT, prtd->audio_client, &size, &idx);
+	bufptr = data;
+	pr_debug("Size = %d\n", size);
+	pr_debug("fbytes = %d\n", fbytes);
+	pr_debug("idx = %d\n", idx);
+	if (bufptr) {
+		xfer = fbytes;
+		if (xfer > size)
+			xfer = size;
+		offset = in_frame_info[idx][1];
+		pr_debug("Offset value = %d\n", offset);
+		if (copy_to_user(buf, bufptr+offset, xfer)) {
+			pr_err("Failed to copy buf to user\n");
+			ret = -EFAULT;
+			goto fail;
+		}
+		fbytes -= xfer;
+		size -= xfer;
+		in_frame_info[idx][1] += xfer;
+		pr_debug("%s:fbytes = %d: size=%d: xfer=%d\n",
+					__func__, fbytes, size, xfer);
+		pr_debug(" Sending next buffer to dsp\n");
+		memset(&in_frame_info[idx], 0,
+			sizeof(uint32_t) * 2);
+		atomic_dec(&prtd->in_count);
+		ret = q6asm_read(prtd->audio_client);
+		if (ret < 0) {
+			pr_err("q6asm read failed\n");
+			ret = -EFAULT;
+			goto fail;
+		}
+	} else
+		pr_err("No valid buffer\n");
+
+	pr_debug("Returning from capture_copy... %d\n", ret);
+fail:
+	return ret;
+}
+
+static int msm_pcm_capture_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd = runtime->private_data;
+	int dir = OUT;
+
+	pr_debug("%s\n", __func__);
+	q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+	q6asm_audio_client_buf_free_contiguous(dir,
+				prtd->audio_client);
+	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
+	SNDRV_PCM_STREAM_CAPTURE);
+	q6asm_audio_client_free(prtd->audio_client);
+	kfree(prtd);
+
+	return 0;
+}
+
+static int msm_pcm_copy(struct snd_pcm_substream *substream, int a,
+	 snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_copy(substream, a, hwoff, buf, frames);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_copy(substream, a, hwoff, buf, frames);
+	return ret;
+}
+
+static int msm_pcm_close(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_close(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_close(substream);
+	return ret;
+}
+static int msm_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_prepare(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_prepare(substream);
+	return ret;
+}
+
+static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
+{
+
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	if (prtd->pcm_irq_pos >= prtd->pcm_size)
+		prtd->pcm_irq_pos = 0;
+
+	pr_debug("pcm_irq_pos = %d\n", prtd->pcm_irq_pos);
+	return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
+}
+
+static int msm_pcm_mmap(struct snd_pcm_substream *substream,
+				struct vm_area_struct *vma)
+{
+	int result = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	pr_debug("%s\n", __func__);
+	prtd->mmap_flag = 1;
+
+	if (runtime->dma_addr && runtime->dma_bytes) {
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+		result = remap_pfn_range(vma, vma->vm_start,
+				runtime->dma_addr >> PAGE_SHIFT,
+				runtime->dma_bytes,
+				vma->vm_page_prot);
+	} else {
+		pr_err("Physical address or size of buf is NULL");
+		return -EINVAL;
+	}
+
+	return result;
+}
+
+static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+	struct audio_buffer *buf;
+	int dir, ret;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dir = IN;
+	else
+		dir = OUT;
+
+	ret = q6asm_audio_client_buf_alloc_contiguous(dir,
+			prtd->audio_client,
+			runtime->hw.period_bytes_min,
+			runtime->hw.periods_max);
+	if (ret < 0) {
+		pr_err("Audio Start: Buffer Allocation failed rc = %d\n", ret);
+		return -ENOMEM;
+	}
+	buf = prtd->audio_client->port[dir].buf;
+
+	pr_debug("%s:buf = %p\n", __func__, buf);
+	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+	dma_buf->dev.dev = substream->pcm->card->dev;
+	dma_buf->private_data = NULL;
+	dma_buf->area = buf[0].data;
+	dma_buf->addr =  buf[0].phys;
+	dma_buf->bytes = runtime->hw.buffer_bytes_max;
+	if (!dma_buf->area)
+		return -ENOMEM;
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+	return 0;
+}
+
+static struct snd_pcm_ops msm_pcm_ops = {
+	.open           = msm_pcm_open,
+	.copy		= msm_pcm_copy,
+	.hw_params	= msm_pcm_hw_params,
+	.close          = msm_pcm_close,
+	.ioctl          = snd_pcm_lib_ioctl,
+	.prepare        = msm_pcm_prepare,
+	.trigger        = msm_pcm_trigger,
+	.pointer        = msm_pcm_pointer,
+	.mmap		= msm_pcm_mmap,
+};
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	int ret = 0;
+
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	return ret;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops		= &msm_pcm_ops,
+	.pcm_new	= msm_asoc_pcm_new,
+};
+
+static __devinit int msm_pcm_probe(struct platform_device *pdev)
+{
+	pr_info("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+	return snd_soc_register_platform(&pdev->dev,
+				   &msm_soc_platform);
+}
+
+static int msm_pcm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver msm_pcm_driver = {
+	.driver = {
+		.name = "msm-multi-ch-pcm-dsp",
+		.owner = THIS_MODULE,
+	},
+	.probe = msm_pcm_probe,
+	.remove = __devexit_p(msm_pcm_remove),
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	init_waitqueue_head(&the_locks.enable_wait);
+	init_waitqueue_head(&the_locks.eos_wait);
+	init_waitqueue_head(&the_locks.write_wait);
+	init_waitqueue_head(&the_locks.read_wait);
+
+	return platform_driver_register(&msm_pcm_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_pcm_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("Multi channel PCM module platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
new file mode 100644
index 0000000..4593784
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
@@ -0,0 +1,581 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 and
+* only version 2 as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*/
+
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/android_pmem.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <sound/q6adm-v2.h>
+#include <asm/dma.h>
+#include "msm-pcm-afe-v2.h"
+
+#define MIN_PERIOD_SIZE (128 * 2)
+#define MAX_PERIOD_SIZE (128 * 2 * 2 * 6)
+static struct snd_pcm_hardware msm_afe_hardware = {
+	.info =			(SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED),
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE,
+	.rates =                (SNDRV_PCM_RATE_8000 |
+				SNDRV_PCM_RATE_16000 |
+				SNDRV_PCM_RATE_48000),
+	.rate_min =             8000,
+	.rate_max =             48000,
+	.channels_min =         1,
+	.channels_max =         2,
+	.buffer_bytes_max =     MAX_PERIOD_SIZE * 32,
+	.period_bytes_min =     MIN_PERIOD_SIZE,
+	.period_bytes_max =     MAX_PERIOD_SIZE,
+	.periods_min =          32,
+	.periods_max =          384,
+	.fifo_size =            0,
+};
+static enum hrtimer_restart afe_hrtimer_callback(struct hrtimer *hrt);
+static enum hrtimer_restart afe_hrtimer_rec_callback(struct hrtimer *hrt);
+
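+/*
+ * Playback poll timer: every poll_time microseconds, push one period of
+ * audio from the DMA buffer to the AFE RT proxy port.
+ */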
+static enum hrtimer_restart afe_hrtimer_callback(struct hrtimer *hrt)
+{
+	struct pcm_afe_info *prtd =
+		container_of(hrt, struct pcm_afe_info, hrt);
+	struct snd_pcm_substream *substream = prtd->substream;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	u32 mem_map_handle = 0;
+	if (prtd->start) {
+		pr_debug("sending frame to DSP: poll_time: %d\n",
+				prtd->poll_time);
+		if (prtd->dsp_cnt == runtime->periods)
+			prtd->dsp_cnt = 0;
+		pr_debug("%s: mem_map_handle 0x%x\n", __func__, mem_map_handle);
+		afe_rt_proxy_port_write(
+		(prtd->dma_addr +
+		(prtd->dsp_cnt *
+		snd_pcm_lib_period_bytes(prtd->substream))), mem_map_handle,
+		snd_pcm_lib_period_bytes(prtd->substream));
+		prtd->dsp_cnt++;
+		hrtimer_forward_now(hrt, ns_to_ktime(prtd->poll_time
+					* 1000));
+
+		return HRTIMER_RESTART;
+	} else
+		return HRTIMER_NORESTART;
+}
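+
+/*
+ * Capture poll timer: every poll_time microseconds, pull one period of
+ * audio from the AFE RT proxy port into the DMA buffer.
+ */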
+static enum hrtimer_restart afe_hrtimer_rec_callback(struct hrtimer *hrt)
+{
+	struct pcm_afe_info *prtd =
+		container_of(hrt, struct pcm_afe_info, hrt);
+	struct snd_pcm_substream *substream = prtd->substream;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	u32 mem_map_handle = 0;
+	if (prtd->start) {
+		if (prtd->dsp_cnt == runtime->periods)
+			prtd->dsp_cnt = 0;
+		pr_err("%s: mem_map_handle 0x%x\n", __func__, mem_map_handle);
+		afe_rt_proxy_port_read(
+		(prtd->dma_addr + (prtd->dsp_cnt
+		* snd_pcm_lib_period_bytes(prtd->substream))), mem_map_handle,
+		snd_pcm_lib_period_bytes(prtd->substream));
+		prtd->dsp_cnt++;
+		pr_debug("sending frame rec to DSP: poll_time: %d\n",
+				prtd->poll_time);
+		hrtimer_forward_now(hrt, ns_to_ktime(prtd->poll_time
+				* 1000));
+
+		return HRTIMER_RESTART;
+	} else
+		return HRTIMER_NORESTART;
+}
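+
+/*
+ * APR callback for the playback proxy port.  On RTPORT_START the poll
+ * interval is derived from one period of 16-bit samples at the stream rate
+ * and the hrtimer is started; each write completion advances the interrupt
+ * position and reports a period elapsed to ALSA.
+ */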
+static void pcm_afe_process_tx_pkt(uint32_t opcode,
+		uint32_t token, uint32_t *payload,
+		 void *priv)
+{
+	struct pcm_afe_info *prtd = priv;
+	unsigned long dsp_flags;
+	struct snd_pcm_substream *substream = NULL;
+	struct snd_pcm_runtime *runtime = NULL;
+	uint16_t event;
+
+	if (prtd == NULL)
+		return;
+	substream =  prtd->substream;
+	runtime = substream->runtime;
+	pr_debug("%s\n", __func__);
+	spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+	switch (opcode) {
+	case AFE_EVENT_RT_PROXY_PORT_STATUS: {
+		event = (uint16_t)((0xFFFF0000 & payload[0]) >> 0x10);
+		switch (event) {
+		case AFE_EVENT_RTPORT_START: {
+			prtd->dsp_cnt = 0;
+			prtd->poll_time = ((unsigned long)((
+					snd_pcm_lib_period_bytes
+					(prtd->substream) *
+					1000 * 1000)/
+					(runtime->rate *
+					runtime->channels * 2)));
+			pr_debug("prtd->poll_time: %d\n",
+					prtd->poll_time);
+			hrtimer_start(&prtd->hrt,
+				ns_to_ktime(0),
+				HRTIMER_MODE_REL);
+			break;
+		}
+		case AFE_EVENT_RTPORT_STOP:
+			pr_debug("%s: AFE_EVENT_RTPORT_STOP\n", __func__);
+			prtd->start = 0;
+			snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
+			break;
+		case AFE_EVENT_RTPORT_LOW_WM:
+			pr_debug("%s: Underrun\n", __func__);
+			break;
+		case AFE_EVENT_RTPORT_HI_WM:
+			pr_debug("%s: Overrun\n", __func__);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+	case APR_BASIC_RSP_RESULT: {
+		switch (payload[0]) {
+		case AFE_PORT_DATA_CMD_RT_PROXY_PORT_WRITE_V2:
+			pr_debug("write done\n");
+			prtd->pcm_irq_pos += snd_pcm_lib_period_bytes
+							(prtd->substream);
+			snd_pcm_period_elapsed(prtd->substream);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+}
+
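+/*
+ * APR callback for the capture proxy port: mirrors the playback handler,
+ * starting the poll timer on RTPORT_START and advancing the interrupt
+ * position on each read completion.
+ */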
+static void pcm_afe_process_rx_pkt(uint32_t opcode,
+		uint32_t token, uint32_t *payload,
+		 void *priv)
+{
+	struct pcm_afe_info *prtd = priv;
+	unsigned long dsp_flags;
+	struct snd_pcm_substream *substream = NULL;
+	struct snd_pcm_runtime *runtime = NULL;
+	uint16_t event;
+
+	if (prtd == NULL)
+		return;
+	substream =  prtd->substream;
+	runtime = substream->runtime;
+	pr_debug("%s\n", __func__);
+	spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+	switch (opcode) {
+	case AFE_EVENT_RT_PROXY_PORT_STATUS: {
+		event = (uint16_t)((0xFFFF0000 & payload[0]) >> 0x10);
+		switch (event) {
+		case AFE_EVENT_RTPORT_START: {
+			prtd->dsp_cnt = 0;
+			prtd->poll_time = ((unsigned long)((
+				snd_pcm_lib_period_bytes(prtd->substream)
+					* 1000 * 1000)/(runtime->rate
+					* runtime->channels * 2)));
+			hrtimer_start(&prtd->hrt,
+				ns_to_ktime(0),
+				HRTIMER_MODE_REL);
+			pr_debug("prtd->poll_time : %d", prtd->poll_time);
+			break;
+		}
+		case AFE_EVENT_RTPORT_STOP:
+			pr_debug("%s: event!=0\n", __func__);
+			prtd->start = 0;
+			snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
+			break;
+		case AFE_EVENT_RTPORT_LOW_WM:
+			pr_debug("%s: Underrun\n", __func__);
+			break;
+		case AFE_EVENT_RTPORT_HI_WM:
+			pr_debug("%s: Overrun\n", __func__);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+	case APR_BASIC_RSP_RESULT: {
+		switch (payload[0]) {
+		case AFE_PORT_DATA_CMD_RT_PROXY_PORT_READ_V2:
+			pr_debug("Read done\n");
+			prtd->pcm_irq_pos += snd_pcm_lib_period_bytes
+							(prtd->substream);
+			snd_pcm_period_elapsed(prtd->substream);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+}
+
+static int msm_afe_playback_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *dai = rtd->cpu_dai;
+	int ret = 0;
+
+	pr_debug("%s: sample_rate=%d\n", __func__, runtime->rate);
+
+	pr_debug("%s: dai->id =%x\n", __func__, dai->id);
+	ret = afe_register_get_events(dai->id,
+			pcm_afe_process_tx_pkt, prtd);
+	if (ret < 0) {
+		pr_err("afe-pcm:register for events failed\n");
+		return ret;
+	}
+	pr_debug("%s:success\n", __func__);
+	prtd->prepared++;
+	return ret;
+}
+
+static int msm_afe_capture_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *dai = rtd->cpu_dai;
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+
+	pr_debug("%s: dai->id =%x\n", __func__, dai->id);
+	ret = afe_register_get_events(dai->id,
+			pcm_afe_process_rx_pkt, prtd);
+	if (ret < 0) {
+		pr_err("afe-pcm:register for events failed\n");
+		return ret;
+	}
+	pr_debug("%s:success\n", __func__);
+	prtd->prepared++;
+	return 0;
+}
+
+/* Conventional and unconventional sample rates supported */
+static unsigned int supported_sample_rates[] = {
+	8000, 16000, 48000
+};
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+	.count = ARRAY_SIZE(supported_sample_rates),
+	.list = supported_sample_rates,
+	.mask = 0,
+};
+
+static int msm_afe_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = NULL;
+	int ret = 0;
+
+	prtd = kzalloc(sizeof(struct pcm_afe_info), GFP_KERNEL);
+	if (prtd == NULL) {
+		pr_err("Failed to allocate memory for msm_audio\n");
+		return -ENOMEM;
+	} else
+		pr_debug("prtd %x\n", (unsigned int)prtd);
+
+	mutex_init(&prtd->lock);
+	spin_lock_init(&prtd->dsp_lock);
+	prtd->dsp_cnt = 0;
+
+	mutex_lock(&prtd->lock);
+
+	runtime->hw = msm_afe_hardware;
+	prtd->substream = substream;
+	runtime->private_data = prtd;
+	mutex_unlock(&prtd->lock);
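+	/*
+	 * Period-sized transfers to and from the RT proxy port are paced by
+	 * an hrtimer rather than by DMA interrupts.
+	 */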
+	hrtimer_init(&prtd->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		prtd->hrt.function = afe_hrtimer_callback;
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		prtd->hrt.function = afe_hrtimer_rec_callback;
+
+	ret = snd_pcm_hw_constraint_list(runtime, 0,
+				SNDRV_PCM_HW_PARAM_RATE,
+				&constraints_sample_rates);
+	if (ret < 0)
+		pr_err("snd_pcm_hw_constraint_list failed\n");
+	/* Ensure that buffer size is a multiple of period size */
+	ret = snd_pcm_hw_constraint_integer(runtime,
+					    SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret < 0)
+		pr_err("snd_pcm_hw_constraint_integer failed\n");
+
+	return 0;
+}
+
+static int msm_afe_close(struct snd_pcm_substream *substream)
+{
+	int rc = 0;
+	struct snd_dma_buffer *dma_buf;
+	struct snd_pcm_runtime *runtime;
+	struct pcm_afe_info *prtd;
+	struct snd_soc_pcm_runtime *rtd = NULL;
+	struct snd_soc_dai *dai = NULL;
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+	if (substream == NULL) {
+		pr_err("substream is NULL\n");
+		return -EINVAL;
+	}
+	rtd = substream->private_data;
+	dai = rtd->cpu_dai;
+	runtime = substream->runtime;
+	prtd = runtime->private_data;
+
+	mutex_lock(&prtd->lock);
+
+	ret = afe_unregister_get_events(dai->id);
+	if (ret < 0)
+		pr_err("AFE unregister for events failed\n");
+	hrtimer_cancel(&prtd->hrt);
+
+	rc = afe_cmd_memory_unmap(runtime->dma_addr);
+	if (rc < 0)
+		pr_err("AFE memory unmap failed\n");
+
+	pr_debug("release all buffer\n");
+	dma_buf = &substream->dma_buffer;
+	if (dma_buf == NULL) {
+		pr_debug("dma_buf is NULL\n");
+		goto done;
+	}
+	if (dma_buf->area != NULL) {
+		dma_free_coherent(substream->pcm->card->dev,
+			runtime->hw.buffer_bytes_max, dma_buf->area,
+			dma_buf->addr);
+		dma_buf->area = NULL;
+	}
+done:
+	pr_debug("%s: dai->id =%x\n", __func__, dai->id);
+	mutex_unlock(&prtd->lock);
+	prtd->prepared--;
+	kfree(prtd);
+	return 0;
+}
+static int msm_afe_prepare(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = runtime->private_data;
+
+	prtd->pcm_irq_pos = 0;
+	if (prtd->prepared)
+		return 0;
+	mutex_lock(&prtd->lock);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_afe_playback_prepare(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_afe_capture_prepare(substream);
+	mutex_unlock(&prtd->lock);
+	return ret;
+}
+static int msm_afe_mmap(struct snd_pcm_substream *substream,
+				struct vm_area_struct *vma)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = runtime->private_data;
+
+	pr_debug("%s\n", __func__);
+	prtd->mmap_flag = 1;
+	return dma_mmap_coherent(substream->pcm->card->dev, vma,
+				runtime->dma_area,
+				runtime->dma_addr,
+				runtime->dma_bytes);
+}
+static int msm_afe_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = runtime->private_data;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		pr_debug("%s: SNDRV_PCM_TRIGGER_START\n", __func__);
+		prtd->start = 1;
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		pr_debug("%s: SNDRV_PCM_TRIGGER_STOP\n", __func__);
+		prtd->start = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+static int msm_afe_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+	struct pcm_afe_info *prtd = runtime->private_data;
+	int rc;
+
+	pr_debug("%s:\n", __func__);
+
+	mutex_lock(&prtd->lock);
+
+	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+	dma_buf->dev.dev = substream->pcm->card->dev;
+	dma_buf->private_data = NULL;
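+	/*
+	 * A single coherent allocation backs the whole ring buffer; it is
+	 * mapped to the DSP via afe_cmd_memory_map() below.
+	 */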
+	dma_buf->area = dma_alloc_coherent(dma_buf->dev.dev,
+				runtime->hw.buffer_bytes_max,
+				&dma_buf->addr, GFP_KERNEL);
+
+	pr_debug("%s: dma_buf->area: 0x%p, dma_buf->addr: 0x%x", __func__,
+			(unsigned int *) dma_buf->area, dma_buf->addr);
+	if (!dma_buf->area) {
+		pr_err("%s:MSM AFE memory allocation failed\n", __func__);
+		mutex_unlock(&prtd->lock);
+		return -ENOMEM;
+	}
+	dma_buf->bytes = runtime->hw.buffer_bytes_max;
+	memset(dma_buf->area, 0, runtime->hw.buffer_bytes_max);
+	prtd->dma_addr = (u32) dma_buf->addr;
+
+	mutex_unlock(&prtd->lock);
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+	rc = afe_cmd_memory_map(dma_buf->addr, dma_buf->bytes);
+	if (rc < 0)
+		pr_err("fail to map memory to DSP\n");
+
+	return rc;
+}
+static snd_pcm_uframes_t msm_afe_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct pcm_afe_info *prtd = runtime->private_data;
+
+	if (prtd->pcm_irq_pos >= snd_pcm_lib_buffer_bytes(substream))
+		prtd->pcm_irq_pos = 0;
+
+	pr_debug("pcm_irq_pos = %d\n", prtd->pcm_irq_pos);
+	return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
+}
+
+static struct snd_pcm_ops msm_afe_ops = {
+	.open           = msm_afe_open,
+	.hw_params	= msm_afe_hw_params,
+	.trigger	= msm_afe_trigger,
+	.close          = msm_afe_close,
+	.prepare        = msm_afe_prepare,
+	.mmap		= msm_afe_mmap,
+	.pointer	= msm_afe_pointer,
+};
+
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	return ret;
+}
+
+static int msm_afe_afe_probe(struct snd_soc_platform *platform)
+{
+	pr_debug("%s\n", __func__);
+	return 0;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops		= &msm_afe_ops,
+	.pcm_new	= msm_asoc_pcm_new,
+	.probe		= msm_afe_afe_probe,
+};
+
+static __devinit int msm_afe_probe(struct platform_device *pdev)
+{
+	pr_debug("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+	return snd_soc_register_platform(&pdev->dev,
+				   &msm_soc_platform);
+}
+
+static int msm_afe_remove(struct platform_device *pdev)
+{
+	pr_debug("%s\n", __func__);
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver msm_afe_driver = {
+	.driver = {
+		.name = "msm-pcm-afe",
+		.owner = THIS_MODULE,
+	},
+	.probe = msm_afe_probe,
+	.remove = __devexit_p(msm_afe_remove),
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	pr_debug("%s\n", __func__);
+	return platform_driver_register(&msm_afe_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	pr_debug("%s\n", __func__);
+	platform_driver_unregister(&msm_afe_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("AFE PCM module platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.h
new file mode 100644
index 0000000..20d6377
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_PCM_AFE_H
+#define _MSM_PCM_AFE_H
+#include <sound/apr_audio-v2.h>
+#include <sound/q6afe-v2.h>
+
+
+struct pcm_afe_info {
+	unsigned long dma_addr;
+	struct snd_pcm_substream *substream;
+	unsigned int pcm_irq_pos;       /* IRQ position */
+	struct mutex lock;
+	spinlock_t dsp_lock;
+	uint32_t samp_rate;
+	uint32_t channel_mode;
+	uint8_t start;
+	uint32_t dsp_cnt;
+	uint32_t buf_phys;
+	int32_t mmap_flag;
+	int prepared;
+	struct hrtimer hrt;
+	int poll_time;
+};
+
+
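+/* Initializer for a read/write mixer control using the given handlers */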
+#define MSM_EXT(xname, fp_info, fp_get, fp_put, addr) \
+	{.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+	.name = xname, \
+	.info = fp_info,\
+	.get = fp_get, .put = fp_put, \
+	.private_value = addr, \
+	}
+
+#endif /*_MSM_PCM_AFE_H*/
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
new file mode 100644
index 0000000..ee92753
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
@@ -0,0 +1,609 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <asm/dma.h>
+#include <linux/dma-mapping.h>
+#include <linux/android_pmem.h>
+#include <sound/snd_compress_params.h>
+#include <sound/compress_offload.h>
+#include <sound/compress_driver.h>
+#include <sound/timer.h>
+
+#include "msm-pcm-q6-v2.h"
+#include "msm-pcm-routing-v2.h"
+
+static struct audio_locks the_locks;
+
+struct snd_msm {
+	struct msm_audio *prtd;
+	unsigned volume;
+};
+static struct snd_msm lpa_audio;
+
+static struct snd_pcm_hardware msm_pcm_hardware = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE,
+	.rates =                SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_KNOT,
+	.rate_min =             8000,
+	.rate_max =             48000,
+	.channels_min =         1,
+	.channels_max =         2,
+	.buffer_bytes_max =     2 * 1024 * 1024,
+	.period_bytes_min =	128 * 1024,
+	.period_bytes_max =     512 * 1024,
+	.periods_min =          4,
+	.periods_max =          16,
+	.fifo_size =            0,
+};
+
+/* Conventional and unconventional sample rates supported */
+static unsigned int supported_sample_rates[] = {
+	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
+};
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+	.count = ARRAY_SIZE(supported_sample_rates),
+	.list = supported_sample_rates,
+	.mask = 0,
+};
+
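+/*
+ * ASM event callback for LPA playback: write completions advance the
+ * interrupt position and queue the next period via q6asm_async_write(),
+ * while EOS and FLUSH acknowledgements wake the waiters in the_locks.
+ */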
+static void event_handler(uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv)
+{
+	struct msm_audio *prtd = priv;
+	struct snd_pcm_substream *substream = prtd->substream;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_aio_write_param param;
+	struct audio_buffer *buf = NULL;
+	unsigned long flag = 0;
+	int i = 0;
+
+	pr_debug("%s\n", __func__);
+	spin_lock_irqsave(&the_locks.event_lock, flag);
+	switch (opcode) {
+	case ASM_DATA_EVENT_WRITE_DONE_V2: {
+		uint32_t *ptrmem = (uint32_t *)&param;
+		pr_debug("ASM_DATA_EVENT_WRITE_DONE_V2\n");
+		pr_debug("Buffer Consumed = 0x%08x\n", *ptrmem);
+		prtd->pcm_irq_pos += prtd->pcm_count;
+		if (prtd->pcm_irq_pos >= prtd->pcm_size)
+			prtd->pcm_irq_pos = 0;
+		if (atomic_read(&prtd->start))
+			snd_pcm_period_elapsed(substream);
+		else if (substream->timer_running)
+			snd_timer_interrupt(substream->timer, 1);
+
+		atomic_inc(&prtd->out_count);
+		wake_up(&the_locks.write_wait);
+		if (!atomic_read(&prtd->start)) {
+			atomic_set(&prtd->pending_buffer, 1);
+			break;
+		} else
+			atomic_set(&prtd->pending_buffer, 0);
+		if (runtime->status->hw_ptr >= runtime->control->appl_ptr)
+			break;
+		pr_debug("%s:writing %d bytes of buffer to dsp 2\n",
+				__func__, prtd->pcm_count);
+
+		buf = prtd->audio_client->port[IN].buf;
+		param.paddr = (unsigned long)buf[0].phys
+				+ (prtd->out_head * prtd->pcm_count);
+		param.len = prtd->pcm_count;
+		param.msw_ts = 0;
+		param.lsw_ts = 0;
+		param.flags = NO_TIMESTAMP;
+		param.uid =  (unsigned long)buf[0].phys
+				+ (prtd->out_head * prtd->pcm_count);
+		for (i = 0; i < sizeof(struct audio_aio_write_param)/4;
+					i++, ++ptrmem)
+			pr_debug("cmd[%d]=0x%08x\n", i, *ptrmem);
+		if (q6asm_async_write(prtd->audio_client,
+					&param) < 0)
+			pr_err("%s:q6asm_async_write failed\n",
+				__func__);
+		else
+			prtd->out_head =
+				(prtd->out_head + 1) & (runtime->periods - 1);
+		atomic_set(&prtd->pending_buffer, 0);
+		break;
+	}
+	case ASM_DATA_EVENT_RENDERED_EOS:
+		pr_debug("ASM_DATA_CMDRSP_EOS\n");
+		prtd->cmd_ack = 1;
+		wake_up(&the_locks.eos_wait);
+		break;
+	case APR_BASIC_RSP_RESULT: {
+		switch (payload[0]) {
+		case ASM_SESSION_CMD_RUN_V2: {
+			if (!atomic_read(&prtd->pending_buffer))
+				break;
+			if (runtime->status->hw_ptr >=
+				runtime->control->appl_ptr)
+				break;
+			pr_debug("%s:writing %d bytes"
+				" of buffer to dsp\n",
+				__func__, prtd->pcm_count);
+			buf = prtd->audio_client->port[IN].buf;
+			param.paddr = (unsigned long)buf[prtd->out_head].phys;
+			param.len = prtd->pcm_count;
+			param.msw_ts = 0;
+			param.lsw_ts = 0;
+			param.flags = NO_TIMESTAMP;
+			param.uid =  (unsigned long)buf[prtd->out_head].phys;
+			if (q6asm_async_write(prtd->audio_client,
+						&param) < 0)
+				pr_err("%s:q6asm_async_write failed\n",
+					__func__);
+			else
+				prtd->out_head =
+					(prtd->out_head + 1)
+					& (runtime->periods - 1);
+			atomic_set(&prtd->pending_buffer, 0);
+		}
+			break;
+		case ASM_STREAM_CMD_FLUSH:
+			pr_debug("ASM_STREAM_CMD_FLUSH\n");
+			prtd->cmd_ack = 1;
+			wake_up(&the_locks.eos_wait);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+	default:
+		pr_debug("Not Supported Event opcode[0x%x]\n", opcode);
+		break;
+	}
+	spin_unlock_irqrestore(&the_locks.event_lock, flag);
+}
+
+static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_irq_pos = 0;
+	/* rate and channels are sent to audio driver */
+	prtd->samp_rate = runtime->rate;
+	prtd->channel_mode = runtime->channels;
+	prtd->out_head = 0;
+	if (prtd->enabled)
+		return 0;
+
+	ret = q6asm_media_format_block_pcm(prtd->audio_client, runtime->rate,
+				runtime->channels);
+	if (ret < 0)
+		pr_debug("%s: CMD Format block failed\n", __func__);
+
+	atomic_set(&prtd->out_count, runtime->periods);
+	prtd->enabled = 1;
+	prtd->cmd_ack = 0;
+	return 0;
+}
+
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	pr_debug("%s\n", __func__);
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		prtd->pcm_irq_pos = 0;
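+		/* fall through: START also issues the run command below */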
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		pr_debug("SNDRV_PCM_TRIGGER_START\n");
+		q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
+		atomic_set(&prtd->start, 1);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+		atomic_set(&prtd->start, 0);
+		if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
+			break;
+		break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		pr_debug("SNDRV_PCM_TRIGGER_PAUSE\n");
+		q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
+		atomic_set(&prtd->start, 0);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int msm_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd;
+	struct asm_softpause_params softpause = {
+		.enable = SOFT_PAUSE_ENABLE,
+		.period = SOFT_PAUSE_PERIOD,
+		.step = SOFT_PAUSE_STEP,
+		.rampingcurve = SOFT_PAUSE_CURVE_LINEAR,
+	};
+	struct asm_softvolume_params softvol = {
+		.period = SOFT_VOLUME_PERIOD,
+		.step = SOFT_VOLUME_STEP,
+		.rampingcurve = SOFT_VOLUME_CURVE_LINEAR,
+	};
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+	prtd = kzalloc(sizeof(struct msm_audio), GFP_KERNEL);
+	if (prtd == NULL) {
+		pr_err("Failed to allocate memory for msm_audio\n");
+		return -ENOMEM;
+	}
+	runtime->hw = msm_pcm_hardware;
+	prtd->substream = substream;
+	prtd->audio_client = q6asm_audio_client_alloc(
+				(app_cb)event_handler, prtd);
+	if (!prtd->audio_client) {
+		pr_debug("%s: Could not allocate memory\n", __func__);
+		kfree(prtd);
+		return -ENOMEM;
+	}
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		ret = q6asm_open_write(prtd->audio_client, FORMAT_LINEAR_PCM);
+		if (ret < 0) {
+			pr_err("%s: pcm out open failed\n", __func__);
+			q6asm_audio_client_free(prtd->audio_client);
+			kfree(prtd);
+			return -ENOMEM;
+		}
+		ret = q6asm_set_io_mode(prtd->audio_client, ASYNC_IO_MODE);
+		if (ret < 0) {
+			pr_err("%s: Set IO mode failed\n", __func__);
+			q6asm_audio_client_free(prtd->audio_client);
+			kfree(prtd);
+			return -ENOMEM;
+		}
+	}
+	/* Capture path */
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		return -EPERM;
+	pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session);
+	prtd->session_id = prtd->audio_client->session;
+	msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
+		prtd->session_id, substream->stream);
+
+	ret = snd_pcm_hw_constraint_list(runtime, 0,
+				SNDRV_PCM_HW_PARAM_RATE,
+				&constraints_sample_rates);
+	if (ret < 0)
+		pr_debug("snd_pcm_hw_constraint_list failed\n");
+	/* Ensure that buffer size is a multiple of period size */
+	ret = snd_pcm_hw_constraint_integer(runtime,
+					    SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret < 0)
+		pr_debug("snd_pcm_hw_constraint_integer failed\n");
+
+	prtd->dsp_cnt = 0;
+	atomic_set(&prtd->pending_buffer, 1);
+	runtime->private_data = prtd;
+	lpa_audio.prtd = prtd;
+	lpa_set_volume(lpa_audio.volume);
+	ret = q6asm_set_softpause(lpa_audio.prtd->audio_client, &softpause);
+	if (ret < 0)
+		pr_err("%s: Send SoftPause Param failed ret=%d\n",
+			__func__, ret);
+	ret = q6asm_set_softvolume(lpa_audio.prtd->audio_client, &softvol);
+	if (ret < 0)
+		pr_err("%s: Send SoftVolume Param failed ret=%d\n",
+			__func__, ret);
+
+	return 0;
+}
+
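+/*
+ * Cache the requested LPA volume and apply it to the ASM session if a
+ * stream is currently open.
+ */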
+int lpa_set_volume(unsigned volume)
+{
+	int rc = 0;
+	if (lpa_audio.prtd && lpa_audio.prtd->audio_client) {
+		rc = q6asm_set_volume(lpa_audio.prtd->audio_client, volume);
+		if (rc < 0) {
+			pr_err("%s: Send Volume command failed"
+					" rc=%d\n", __func__, rc);
+		}
+	}
+	lpa_audio.volume = volume;
+	return rc;
+}
+
+static int msm_pcm_playback_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd = runtime->private_data;
+	int dir = 0;
+	int rc = 0;
+
+	/*
+	 * If routing is still enabled, issue an EOS to the DSP.  The session
+	 * must be in the RUN state for the EOS to be honored, so send a RUN
+	 * command first.
+	 */
+	if (msm_routing_check_backend_enabled(soc_prtd->dai_link->be_id)) {
+		rc = q6asm_run(prtd->audio_client, 0, 0, 0);
+		atomic_set(&prtd->pending_buffer, 0);
+		prtd->cmd_ack = 0;
+		q6asm_cmd_nowait(prtd->audio_client, CMD_EOS);
+		pr_debug("%s\n", __func__);
+		rc = wait_event_timeout(the_locks.eos_wait,
+			prtd->cmd_ack, 5 * HZ);
+		if (rc < 0)
+			pr_err("EOS cmd timeout\n");
+		prtd->pcm_irq_pos = 0;
+	}
+
+	dir = IN;
+	atomic_set(&prtd->pending_buffer, 0);
+	lpa_audio.prtd = NULL;
+	q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+	q6asm_audio_client_buf_free_contiguous(dir,
+				prtd->audio_client);
+
+	pr_debug("%s\n", __func__);
+	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
+		SNDRV_PCM_STREAM_PLAYBACK);
+	pr_debug("%s\n", __func__);
+	q6asm_audio_client_free(prtd->audio_client);
+	kfree(prtd);
+
+	return 0;
+}
+
+static int msm_pcm_close(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_close(substream);
+	return ret;
+}
+
+static int msm_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_prepare(substream);
+	return ret;
+}
+
+static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
+{
+
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	pr_debug("%s: pcm_irq_pos = %d\n", __func__, prtd->pcm_irq_pos);
+	return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
+}
+
+static int msm_pcm_mmap(struct snd_pcm_substream *substream,
+				struct vm_area_struct *vma)
+{
+	int result = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	pr_debug("%s\n", __func__);
+	prtd->mmap_flag = 1;
+
+	if (runtime->dma_addr && runtime->dma_bytes) {
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+		result = remap_pfn_range(vma, vma->vm_start,
+				runtime->dma_addr >> PAGE_SHIFT,
+				runtime->dma_bytes,
+				vma->vm_page_prot);
+	} else {
+		pr_err("Physical address or size of buf is NULL");
+		return -EINVAL;
+	}
+	return result;
+}
+
+static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+	struct audio_buffer *buf;
+	int dir, ret;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dir = IN;
+	else
+		return -EPERM;
+	ret = q6asm_audio_client_buf_alloc_contiguous(dir,
+			prtd->audio_client,
+			runtime->hw.period_bytes_min,
+			runtime->hw.periods_max);
+	if (ret < 0) {
+		pr_err("Audio Start: Buffer Allocation failed "
+					"rc = %d\n", ret);
+		return -ENOMEM;
+	}
+	buf = prtd->audio_client->port[dir].buf;
+
+	if (buf == NULL || buf[0].data == NULL)
+		return -ENOMEM;
+
+	pr_debug("%s:buf = %p\n", __func__, buf);
+	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+	dma_buf->dev.dev = substream->pcm->card->dev;
+	dma_buf->private_data = NULL;
+	dma_buf->area = buf[0].data;
+	dma_buf->addr =  buf[0].phys;
+	dma_buf->bytes = runtime->hw.buffer_bytes_max;
+	if (!dma_buf->area)
+		return -ENOMEM;
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+	return 0;
+}
+
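+/*
+ * SNDRV_COMPRESS_TSTAMP: query the DSP session time and return it as a
+ * byte count split across the rendered (LSB) and decoded (MSB) fields.
+ * SNDRV_PCM_IOCTL1_RESET: flush the ASM session and rewind the interrupt
+ * position before handing off to the generic ioctl handler.
+ */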
+static int msm_pcm_ioctl(struct snd_pcm_substream *substream,
+		unsigned int cmd, void *arg)
+{
+	int rc = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	int64_t timestamp;
+	uint64_t temp;
+
+	switch (cmd) {
+	case SNDRV_COMPRESS_TSTAMP: {
+		struct snd_compr_tstamp tstamp;
+		pr_debug("SNDRV_COMPRESS_TSTAMP\n");
+
+		memset(&tstamp, 0x0, sizeof(struct snd_compr_tstamp));
+		timestamp = q6asm_get_session_time(prtd->audio_client);
+		if (timestamp < 0) {
+			pr_err("%s: Get Session Time return value =%lld\n",
+				__func__, timestamp);
+			return -EAGAIN;
+		}
+		temp = (timestamp * 2 * runtime->channels);
+		temp = temp * (runtime->rate/1000);
+		temp = div_u64(temp, 1000);
+		tstamp.sampling_rate = runtime->rate;
+		tstamp.rendered = (size_t)(temp & 0xFFFFFFFF);
+		tstamp.decoded  = (size_t)((temp >> 32) & 0xFFFFFFFF);
+		tstamp.timestamp = timestamp;
+		pr_debug("%s: bytes_consumed:lsb = %d, msb = %d,"
+			"timestamp = %lld,\n",
+			__func__, tstamp.rendered, tstamp.decoded,
+			tstamp.timestamp);
+		if (copy_to_user((void *) arg, &tstamp,
+			sizeof(struct snd_compr_tstamp)))
+			return -EFAULT;
+		return 0;
+	}
+	case SNDRV_PCM_IOCTL1_RESET:
+		prtd->cmd_ack = 0;
+		rc = q6asm_cmd(prtd->audio_client, CMD_FLUSH);
+		if (rc < 0)
+			pr_err("%s: flush cmd failed rc=%d\n", __func__, rc);
+		rc = wait_event_timeout(the_locks.eos_wait,
+			prtd->cmd_ack, 5 * HZ);
+		if (rc < 0)
+			pr_err("Flush cmd timeout\n");
+		prtd->pcm_irq_pos = 0;
+		break;
+	default:
+		break;
+	}
+	return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
+
+static struct snd_pcm_ops msm_pcm_ops = {
+	.open           = msm_pcm_open,
+	.hw_params	= msm_pcm_hw_params,
+	.close          = msm_pcm_close,
+	.ioctl          = msm_pcm_ioctl,
+	.prepare        = msm_pcm_prepare,
+	.trigger        = msm_pcm_trigger,
+	.pointer        = msm_pcm_pointer,
+	.mmap		= msm_pcm_mmap,
+};
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	int ret = 0;
+
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	return ret;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops		= &msm_pcm_ops,
+	.pcm_new	= msm_asoc_pcm_new,
+};
+
+static __devinit int msm_pcm_probe(struct platform_device *pdev)
+{
+	dev_info(&pdev->dev, "%s: dev name %s\n",
+			__func__, dev_name(&pdev->dev));
+	return snd_soc_register_platform(&pdev->dev,
+				&msm_soc_platform);
+}
+
+static int msm_pcm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver msm_pcm_driver = {
+	.driver = {
+		.name = "msm-pcm-lpa",
+		.owner = THIS_MODULE,
+	},
+	.probe = msm_pcm_probe,
+	.remove = __devexit_p(msm_pcm_remove),
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	spin_lock_init(&the_locks.event_lock);
+	init_waitqueue_head(&the_locks.enable_wait);
+	init_waitqueue_head(&the_locks.eos_wait);
+	init_waitqueue_head(&the_locks.write_wait);
+	init_waitqueue_head(&the_locks.read_wait);
+
+	return platform_driver_register(&msm_pcm_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_pcm_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("PCM module platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
new file mode 100644
index 0000000..f94e6c1
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -0,0 +1,725 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <asm/dma.h>
+#include <linux/dma-mapping.h>
+#include <linux/android_pmem.h>
+
+#include "msm-pcm-q6-v2.h"
+#include "msm-pcm-routing-v2.h"
+
+static struct audio_locks the_locks;
+
+struct snd_msm {
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+};
+
+#define PLAYBACK_NUM_PERIODS	8
+#define PLAYBACK_PERIOD_SIZE	2048
+#define CAPTURE_NUM_PERIODS	16
+#define CAPTURE_PERIOD_SIZE	512
+
+static struct snd_pcm_hardware msm_pcm_hardware_capture = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE,
+	.rates =                SNDRV_PCM_RATE_8000_48000,
+	.rate_min =             8000,
+	.rate_max =             48000,
+	.channels_min =         1,
+	.channels_max =         2,
+	.buffer_bytes_max =     CAPTURE_NUM_PERIODS * CAPTURE_PERIOD_SIZE,
+	.period_bytes_min =	CAPTURE_PERIOD_SIZE,
+	.period_bytes_max =     CAPTURE_PERIOD_SIZE,
+	.periods_min =          CAPTURE_NUM_PERIODS,
+	.periods_max =          CAPTURE_NUM_PERIODS,
+	.fifo_size =            0,
+};
+
+static struct snd_pcm_hardware msm_pcm_hardware_playback = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE,
+	.rates =                SNDRV_PCM_RATE_8000_48000,
+	.rate_min =             8000,
+	.rate_max =             48000,
+	.channels_min =         1,
+	.channels_max =         2,
+	.buffer_bytes_max =     PLAYBACK_NUM_PERIODS * PLAYBACK_PERIOD_SIZE,
+	.period_bytes_min =	PLAYBACK_PERIOD_SIZE,
+	.period_bytes_max =     PLAYBACK_PERIOD_SIZE,
+	.periods_min =          PLAYBACK_NUM_PERIODS,
+	.periods_max =          PLAYBACK_NUM_PERIODS,
+	.fifo_size =            0,
+};
+
+/* Conventional and unconventional sample rates supported */
+static unsigned int supported_sample_rates[] = {
+	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
+};
+
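+/* Capture metadata from READ_DONE events: [0] = bytes, [1] = offset */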
+static uint32_t in_frame_info[CAPTURE_NUM_PERIODS][2];
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+	.count = ARRAY_SIZE(supported_sample_rates),
+	.list = supported_sample_rates,
+	.mask = 0,
+};
+
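+/*
+ * ASM event callback: write completions advance the playback interrupt
+ * position and refill the DSP buffer in mmap mode, read completions record
+ * the captured frame size/offset in in_frame_info and re-queue the buffer,
+ * and the RUN acknowledgement primes the first writes.
+ */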
+static void event_handler(uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv)
+{
+	struct msm_audio *prtd = priv;
+	struct snd_pcm_substream *substream = prtd->substream;
+	uint32_t *ptrmem = (uint32_t *)payload;
+	uint32_t idx = 0;
+	uint32_t size = 0;
+
+	pr_err("%s\n", __func__);
+	switch (opcode) {
+	case ASM_DATA_EVENT_WRITE_DONE_V2: {
+		pr_debug("ASM_DATA_EVENT_WRITE_DONE_V2\n");
+		pr_debug("Buffer Consumed = 0x%08x\n", *ptrmem);
+		prtd->pcm_irq_pos += prtd->pcm_count;
+		if (atomic_read(&prtd->start))
+			snd_pcm_period_elapsed(substream);
+		atomic_inc(&prtd->out_count);
+		wake_up(&the_locks.write_wait);
+		if (!atomic_read(&prtd->start))
+			break;
+		if (!prtd->mmap_flag)
+			break;
+		if (q6asm_is_cpu_buf_avail_nolock(IN,
+				prtd->audio_client,
+				&size, &idx)) {
+			pr_debug("%s:writing %d bytes of buffer to dsp 2\n",
+					__func__, prtd->pcm_count);
+			q6asm_write_nolock(prtd->audio_client,
+				prtd->pcm_count, 0, 0, NO_TIMESTAMP);
+		}
+		break;
+	}
+	case ASM_DATA_EVENT_RENDERED_EOS:
+		pr_debug("ASM_DATA_EVENT_RENDERED_EOS\n");
+		prtd->cmd_ack = 1;
+		wake_up(&the_locks.eos_wait);
+		break;
+	case ASM_DATA_EVENT_READ_DONE_V2: {
+		pr_debug("ASM_DATA_EVENT_READ_DONE_V2\n");
+		pr_debug("token = 0x%08x\n", token);
+		in_frame_info[token][0] = payload[4];
+		in_frame_info[token][1] = payload[5];
+		prtd->pcm_irq_pos += in_frame_info[token][0];
+		pr_debug("pcm_irq_pos=%d\n", prtd->pcm_irq_pos);
+		if (atomic_read(&prtd->start))
+			snd_pcm_period_elapsed(substream);
+		if (atomic_read(&prtd->in_count) <= prtd->periods)
+			atomic_inc(&prtd->in_count);
+		wake_up(&the_locks.read_wait);
+		if (prtd->mmap_flag
+			&& q6asm_is_cpu_buf_avail_nolock(OUT,
+				prtd->audio_client,
+				&size, &idx))
+			q6asm_read_nolock(prtd->audio_client);
+		break;
+	}
+	case APR_BASIC_RSP_RESULT: {
+		switch (payload[0]) {
+		case ASM_SESSION_CMD_RUN_V2:
+			if (substream->stream
+				!= SNDRV_PCM_STREAM_PLAYBACK) {
+				atomic_set(&prtd->start, 1);
+				break;
+			}
+			if (prtd->mmap_flag) {
+				pr_debug("%s:writing %d bytes"
+					" of buffer to dsp\n",
+					__func__,
+					prtd->pcm_count);
+				q6asm_write_nolock(prtd->audio_client,
+					prtd->pcm_count,
+					0, 0, NO_TIMESTAMP);
+			} else {
+				while (atomic_read(&prtd->out_needed)) {
+					pr_debug("%s:writing %d bytes"
+						 " of buffer to dsp\n",
+						__func__,
+						prtd->pcm_count);
+					q6asm_write_nolock(prtd->audio_client,
+						prtd->pcm_count,
+						0, 0, NO_TIMESTAMP);
+					atomic_dec(&prtd->out_needed);
+					wake_up(&the_locks.write_wait);
+				};
+			}
+			atomic_set(&prtd->start, 1);
+			break;
+		default:
+			pr_debug("%s:Payload = [0x%x]stat[0x%x]\n",
+				__func__, payload[0], payload[1]);
+			break;
+		}
+	}
+	break;
+	default:
+		pr_debug("Not Supported Event opcode[0x%x]\n", opcode);
+		break;
+	}
+}
+
+static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_irq_pos = 0;
+	/* rate and channels are sent to audio driver */
+	prtd->samp_rate = runtime->rate;
+	prtd->channel_mode = runtime->channels;
+	if (prtd->enabled)
+		return 0;
+
+	ret = q6asm_media_format_block_pcm(prtd->audio_client, runtime->rate,
+				runtime->channels);
+	if (ret < 0)
+		pr_info("%s: CMD Format block failed\n", __func__);
+
+	atomic_set(&prtd->out_count, runtime->periods);
+
+	prtd->enabled = 1;
+	prtd->cmd_ack = 0;
+
+	return 0;
+}
+
+static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	int ret = 0;
+	int i = 0;
+	pr_debug("%s\n", __func__);
+	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_irq_pos = 0;
+
+	/* rate and channels are sent to audio driver */
+	prtd->samp_rate = runtime->rate;
+	prtd->channel_mode = runtime->channels;
+
+	if (prtd->enabled)
+		return 0;
+
+	pr_debug("Samp_rate = %d\n", prtd->samp_rate);
+	pr_debug("Channel = %d\n", prtd->channel_mode);
+	ret = q6asm_enc_cfg_blk_pcm(prtd->audio_client, prtd->samp_rate,
+					prtd->channel_mode);
+	if (ret < 0)
+		pr_debug("%s: cmd cfg pcm was block failed", __func__);
+
+	for (i = 0; i < runtime->periods; i++)
+		q6asm_read(prtd->audio_client);
+	prtd->periods = runtime->periods;
+
+	prtd->enabled = 1;
+
+	return ret;
+}
+
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		pr_debug("%s: Trigger start\n", __func__);
+		q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+		atomic_set(&prtd->start, 0);
+		if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
+			break;
+		prtd->cmd_ack = 0;
+		q6asm_cmd_nowait(prtd->audio_client, CMD_EOS);
+		break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		pr_debug("SNDRV_PCM_TRIGGER_PAUSE\n");
+		q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
+		atomic_set(&prtd->start, 0);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int msm_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd;
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+	prtd = kzalloc(sizeof(struct msm_audio), GFP_KERNEL);
+	if (prtd == NULL) {
+		pr_err("Failed to allocate memory for msm_audio\n");
+		return -ENOMEM;
+	}
+	prtd->substream = substream;
+	prtd->audio_client = q6asm_audio_client_alloc(
+				(app_cb)event_handler, prtd);
+	if (!prtd->audio_client) {
+		pr_err("%s: could not allocate audio client\n", __func__);
+		kfree(prtd);
+		return -ENOMEM;
+	}
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		runtime->hw = msm_pcm_hardware_playback;
+		ret = q6asm_open_write(prtd->audio_client, FORMAT_LINEAR_PCM);
+		if (ret < 0) {
+			pr_err("%s: pcm out open failed\n", __func__);
+			q6asm_audio_client_free(prtd->audio_client);
+			kfree(prtd);
+			return -ENOMEM;
+		}
+	}
+	/* Capture path */
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		runtime->hw = msm_pcm_hardware_capture;
+		ret = q6asm_open_read(prtd->audio_client, FORMAT_LINEAR_PCM);
+		if (ret < 0) {
+			pr_err("%s: pcm in open failed\n", __func__);
+			q6asm_audio_client_free(prtd->audio_client);
+			kfree(prtd);
+			return -ENOMEM;
+		}
+	}
+
+	pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session);
+
+	prtd->session_id = prtd->audio_client->session;
+	msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
+			prtd->session_id, substream->stream);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		prtd->cmd_ack = 1;
+
+	ret = snd_pcm_hw_constraint_list(runtime, 0,
+				SNDRV_PCM_HW_PARAM_RATE,
+				&constraints_sample_rates);
+	if (ret < 0)
+		pr_info("snd_pcm_hw_constraint_list failed\n");
+	/* Ensure that buffer size is a multiple of period size */
+	ret = snd_pcm_hw_constraint_integer(runtime,
+					    SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret < 0)
+		pr_info("snd_pcm_hw_constraint_integer failed\n");
+
+	prtd->dsp_cnt = 0;
+	runtime->private_data = prtd;
+
+	return 0;
+}
+
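+/*
+ * Playback copy: wait for a free DSP buffer, copy the application data
+ * into it and, once the stream has been started, queue a write to the DSP.
+ */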
+static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
+	snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	int fbytes = 0;
+	int xfer = 0;
+	char *bufptr = NULL;
+	void *data = NULL;
+	uint32_t idx = 0;
+	uint32_t size = 0;
+
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	fbytes = frames_to_bytes(runtime, frames);
+	pr_debug("%s: prtd->out_count = %d\n",
+				__func__, atomic_read(&prtd->out_count));
+	ret = wait_event_timeout(the_locks.write_wait,
+			(atomic_read(&prtd->out_count)), 5 * HZ);
+	/* wait_event_timeout() returns 0 on timeout, never a negative value */
+	if (!ret) {
+		pr_err("%s: timed out waiting for a free buffer\n", __func__);
+		goto fail;
+	}
+
+	if (!atomic_read(&prtd->out_count)) {
+		pr_err("%s: pcm stopped out_count 0\n", __func__);
+		return 0;
+	}
+
+	data = q6asm_is_cpu_buf_avail(IN, prtd->audio_client, &size, &idx);
+	bufptr = data;
+	if (bufptr) {
+		pr_debug("%s:fbytes =%d: xfer=%d size=%d\n",
+					__func__, fbytes, xfer, size);
+		xfer = fbytes;
+		if (copy_from_user(bufptr, buf, xfer)) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		buf += xfer;
+		fbytes -= xfer;
+		pr_debug("%s:fbytes = %d: xfer=%d\n", __func__, fbytes, xfer);
+		if (atomic_read(&prtd->start)) {
+			pr_debug("%s:writing %d bytes of buffer to dsp\n",
+					__func__, xfer);
+			ret = q6asm_write(prtd->audio_client, xfer,
+						0, 0, NO_TIMESTAMP);
+			if (ret < 0) {
+				ret = -EFAULT;
+				goto fail;
+			}
+		} else
+			atomic_inc(&prtd->out_needed);
+		atomic_dec(&prtd->out_count);
+	}
+fail:
+	return  ret;
+}
+
+static int msm_pcm_playback_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd = runtime->private_data;
+	int dir = 0;
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+
+	dir = IN;
+	ret = wait_event_timeout(the_locks.eos_wait,
+				prtd->cmd_ack, 5 * HZ);
+	if (!ret)
+		pr_err("%s: timed out waiting for EOS ack\n", __func__);
+	q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+	q6asm_audio_client_buf_free_contiguous(dir,
+				prtd->audio_client);
+
+	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
+	SNDRV_PCM_STREAM_PLAYBACK);
+	q6asm_audio_client_free(prtd->audio_client);
+	kfree(prtd);
+	return 0;
+}
+
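+/*
+ * Capture copy: wait for a filled DSP buffer, copy it to user space and
+ * hand the buffer back to the DSP with another read command.
+ */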
+static int msm_pcm_capture_copy(struct snd_pcm_substream *substream,
+		 int channel, snd_pcm_uframes_t hwoff, void __user *buf,
+						 snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	int fbytes = 0;
+	int xfer;
+	char *bufptr;
+	void *data = NULL;
+	static uint32_t idx;
+	static uint32_t size;
+	uint32_t offset = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = substream->runtime->private_data;
+
+
+	pr_debug("%s\n", __func__);
+	fbytes = frames_to_bytes(runtime, frames);
+
+	pr_debug("appl_ptr %d\n", (int)runtime->control->appl_ptr);
+	pr_debug("hw_ptr %d\n", (int)runtime->status->hw_ptr);
+	pr_debug("avail_min %d\n", (int)runtime->control->avail_min);
+
+	ret = wait_event_timeout(the_locks.read_wait,
+			(atomic_read(&prtd->in_count)), 5 * HZ);
+	if (!ret) {
+		pr_debug("%s: timed out waiting for captured data\n", __func__);
+		goto fail;
+	}
+	if (!atomic_read(&prtd->in_count)) {
+		pr_debug("%s: pcm stopped in_count 0\n", __func__);
+		return 0;
+	}
+	data = q6asm_is_cpu_buf_avail(OUT, prtd->audio_client, &size, &idx);
+	bufptr = data;
+	pr_debug("Checking if valid buffer is available...%08x\n",
+						(unsigned int) data);
+	pr_debug("Size = %d\n", size);
+	pr_debug("fbytes = %d\n", fbytes);
+	pr_debug("idx = %d\n", idx);
+	if (bufptr) {
+		xfer = fbytes;
+		if (xfer > size)
+			xfer = size;
+		offset = in_frame_info[idx][1];
+		pr_debug("Offset value = %d\n", offset);
+		if (copy_to_user(buf, bufptr+offset, xfer)) {
+			pr_err("Failed to copy buf to user\n");
+			ret = -EFAULT;
+			goto fail;
+		}
+		fbytes -= xfer;
+		size -= xfer;
+		in_frame_info[idx][1] += xfer;
+		pr_debug("%s:fbytes = %d: size=%d: xfer=%d\n",
+					__func__, fbytes, size, xfer);
+		pr_debug(" Sending next buffer to dsp\n");
+		memset(&in_frame_info[idx], 0,
+			sizeof(uint32_t) * 2);
+		atomic_dec(&prtd->in_count);
+		ret = q6asm_read(prtd->audio_client);
+		if (ret < 0) {
+			pr_err("q6asm read failed\n");
+			ret = -EFAULT;
+			goto fail;
+		}
+	} else
+		pr_err("No valid buffer\n");
+
+	pr_debug("Returning from capture_copy... %d\n", ret);
+fail:
+	return ret;
+}
+
+static int msm_pcm_capture_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd = runtime->private_data;
+	int dir = OUT;
+
+	pr_debug("%s\n", __func__);
+	q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+	q6asm_audio_client_buf_free_contiguous(dir,
+				prtd->audio_client);
+	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
+	SNDRV_PCM_STREAM_CAPTURE);
+	q6asm_audio_client_free(prtd->audio_client);
+	kfree(prtd);
+
+	return 0;
+}
+
+static int msm_pcm_copy(struct snd_pcm_substream *substream, int a,
+	 snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_copy(substream, a, hwoff, buf, frames);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_copy(substream, a, hwoff, buf, frames);
+	return ret;
+}
+
+static int msm_pcm_close(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_close(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_close(substream);
+	return ret;
+}
+static int msm_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_prepare(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_prepare(substream);
+	return ret;
+}
+
+static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
+{
+
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	if (prtd->pcm_irq_pos >= prtd->pcm_size)
+		prtd->pcm_irq_pos = 0;
+
+	pr_debug("pcm_irq_pos = %d\n", prtd->pcm_irq_pos);
+	return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
+}
+
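+/* Map the contiguous DMA buffer into user space as non-cached memory. */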
+static int msm_pcm_mmap(struct snd_pcm_substream *substream,
+				struct vm_area_struct *vma)
+{
+	int result = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	pr_debug("%s\n", __func__);
+	prtd->mmap_flag = 1;
+
+	if (runtime->dma_addr && runtime->dma_bytes) {
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+		result = remap_pfn_range(vma, vma->vm_start,
+				runtime->dma_addr >> PAGE_SHIFT,
+				runtime->dma_bytes,
+				vma->vm_page_prot);
+	} else {
+		pr_err("Physical address or size of buf is NULL\n");
+		return -EINVAL;
+	}
+
+	return result;
+}
+
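+/*
+ * hw_params: allocate contiguous buffers through the Q6ASM client and
+ * publish the first buffer as the substream's DMA area.
+ */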
+static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+	struct audio_buffer *buf;
+	int dir, ret;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dir = IN;
+	else
+		dir = OUT;
+	pr_debug("%s: before buf alloc\n", __func__);
+	ret = q6asm_audio_client_buf_alloc_contiguous(dir,
+			prtd->audio_client,
+			runtime->hw.period_bytes_min,
+			runtime->hw.periods_max);
+	if (ret < 0) {
+		pr_err("Audio Start: Buffer Allocation failed rc = %d\n", ret);
+		return -ENOMEM;
+	}
+	pr_debug("%s: after buf alloc\n", __func__);
+	buf = prtd->audio_client->port[dir].buf;
+	if (buf == NULL || buf[0].data == NULL)
+		return -ENOMEM;
+
+	pr_debug("%s:buf = %p\n", __func__, buf);
+	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+	dma_buf->dev.dev = substream->pcm->card->dev;
+	dma_buf->private_data = NULL;
+	dma_buf->area = buf[0].data;
+	dma_buf->addr =  buf[0].phys;
+	dma_buf->bytes = runtime->hw.buffer_bytes_max;
+	if (!dma_buf->area)
+		return -ENOMEM;
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+	return 0;
+}
+
+static struct snd_pcm_ops msm_pcm_ops = {
+	.open           = msm_pcm_open,
+	.copy		= msm_pcm_copy,
+	.hw_params	= msm_pcm_hw_params,
+	.close          = msm_pcm_close,
+	.ioctl          = snd_pcm_lib_ioctl,
+	.prepare        = msm_pcm_prepare,
+	.trigger        = msm_pcm_trigger,
+	.pointer        = msm_pcm_pointer,
+	.mmap		= msm_pcm_mmap,
+};
+
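+/* Set a 32-bit coherent DMA mask on the card device if none is set yet. */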
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	int ret = 0;
+
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	return ret;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops		= &msm_pcm_ops,
+	.pcm_new	= msm_asoc_pcm_new,
+};
+
+static __devinit int msm_pcm_probe(struct platform_device *pdev)
+{
+	pr_info("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+	return snd_soc_register_platform(&pdev->dev,
+				   &msm_soc_platform);
+}
+
+static int msm_pcm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver msm_pcm_driver = {
+	.driver = {
+		.name = "msm-pcm-dsp",
+		.owner = THIS_MODULE,
+	},
+	.probe = msm_pcm_probe,
+	.remove = __devexit_p(msm_pcm_remove),
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	init_waitqueue_head(&the_locks.enable_wait);
+	init_waitqueue_head(&the_locks.eos_wait);
+	init_waitqueue_head(&the_locks.write_wait);
+	init_waitqueue_head(&the_locks.read_wait);
+
+	return platform_driver_register(&msm_pcm_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_pcm_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("PCM module platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h
new file mode 100644
index 0000000..44395b7
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (c) 2012 Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org.
+ */
+
+#ifndef _MSM_PCM_H
+#define _MSM_PCM_H
+#include <sound/apr_audio-v2.h>
+#include <sound/q6asm-v2.h>
+
+
+
+/* Support unconventional sample rates 12000, 24000 as well */
+#define USE_RATE                \
+			(SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_KNOT)
+
+extern int copy_count;
+
+struct buffer {
+	void *data;
+	unsigned size;
+	unsigned used;
+	unsigned addr;
+};
+
+struct buffer_rec {
+	void *data;
+	unsigned int size;
+	unsigned int read;
+	unsigned int addr;
+};
+
+struct audio_locks {
+	spinlock_t event_lock;
+	wait_queue_head_t read_wait;
+	wait_queue_head_t write_wait;
+	wait_queue_head_t eos_wait;
+	wait_queue_head_t enable_wait;
+};
+
+struct msm_audio {
+	struct snd_pcm_substream *substream;
+	unsigned int pcm_size;
+	unsigned int pcm_count;
+	unsigned int pcm_irq_pos;       /* IRQ position */
+	uint16_t source; /* Encoding source bit mask */
+
+	struct audio_client *audio_client;
+
+	uint16_t session_id;
+
+	uint32_t samp_rate;
+	uint32_t channel_mode;
+	uint32_t dsp_cnt;
+
+	int abort; /* set when error, like sample rate mismatch */
+
+	int enabled;
+	int close_ack;
+	int cmd_ack;
+	atomic_t start;
+	atomic_t out_count;
+	atomic_t in_count;
+	atomic_t out_needed;
+	int out_head;
+	int periods;
+	int mmap_flag;
+	atomic_t pending_buffer;
+};
+
+#endif /*_MSM_PCM_H*/
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
new file mode 100644
index 0000000..2eebae5
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -0,0 +1,1834 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <sound/q6adm-v2.h>
+#include <sound/q6asm-v2.h>
+#include <sound/q6afe-v2.h>
+#include <sound/tlv.h>
+#include "msm-pcm-routing-v2.h"
+#include "../qdsp6/q6voice.h"
+
+struct msm_pcm_routing_bdai_data {
+	u16 port_id; /* AFE port ID */
+	u8 active; /* track if this backend is enabled */
+	struct snd_pcm_hw_params *hw_params; /* to get freq and channel mode */
+	unsigned long fe_sessions; /* Front-end sessions */
+	unsigned long port_sessions; /* track Tx BE ports -> Rx BE */
+};
+
+#define INVALID_SESSION -1
+#define SESSION_TYPE_RX 0
+#define SESSION_TYPE_TX 1
+
+static struct mutex routing_lock;
+
+static int fm_switch_enable;
+
+#define INT_FM_RX_VOL_MAX_STEPS 100
+#define INT_FM_RX_VOL_GAIN 2000
+
+static int msm_route_fm_vol_control;
+static const DECLARE_TLV_DB_SCALE(fm_rx_vol_gain, 0,
+			INT_FM_RX_VOL_MAX_STEPS, 0);
+
+#define INT_RX_VOL_MAX_STEPS 100
+#define INT_RX_VOL_GAIN 0x2000
+
+static int msm_route_lpa_vol_control;
+static const DECLARE_TLV_DB_SCALE(lpa_rx_vol_gain, 0,
+			INT_RX_VOL_MAX_STEPS, 0);
+
+static int msm_route_multimedia2_vol_control;
+static const DECLARE_TLV_DB_SCALE(multimedia2_rx_vol_gain, 0,
+			INT_RX_VOL_MAX_STEPS, 0);
+
+static int msm_route_compressed_vol_control;
+static const DECLARE_TLV_DB_SCALE(compressed_rx_vol_gain, 0,
+			INT_RX_VOL_MAX_STEPS, 0);
+
+
+
+/* Number of EQ sessions: the front-end DAI ID after the last MULTIMEDIA session */
+#define MAX_EQ_SESSIONS		MSM_FRONTEND_DAI_CS_VOICE
+
+enum {
+	EQ_BAND1 = 0,
+	EQ_BAND2,
+	EQ_BAND3,
+	EQ_BAND4,
+	EQ_BAND5,
+	EQ_BAND6,
+	EQ_BAND7,
+	EQ_BAND8,
+	EQ_BAND9,
+	EQ_BAND10,
+	EQ_BAND11,
+	EQ_BAND12,
+	EQ_BAND_MAX,
+};
+
+struct msm_audio_eq_band {
+	uint16_t     band_idx; /* The band index, 0 .. 11 */
+	uint32_t     filter_type; /* Filter band type */
+	uint32_t     center_freq_hz; /* Filter band center frequency */
+	uint32_t     filter_gain; /* Filter band initial gain (dB) */
+			/* Range is +12 dB to -12 dB with 1dB increments. */
+	uint32_t     q_factor;
+} __packed;
+
+struct msm_audio_eq_stream_config {
+	uint32_t	enable; /* Non-zero when the equalizer is enabled */
+	uint32_t	num_bands; /* Number of consecutive bands specified */
+	struct msm_audio_eq_band	eq_bands[EQ_BAND_MAX];
+} __packed;
+
+static struct msm_audio_eq_stream_config	eq_data[MAX_EQ_SESSIONS];
+
+static void msm_send_eq_values(int eq_idx);
+/* This array is indexed by back-end DAI ID defined in msm-pcm-routing.h
+ * If new back-end is defined, add new back-end DAI ID at the end of enum
+ */
+static struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = {
+	{ PRIMARY_I2S_RX, 0, NULL, 0, 0},
+	{ PRIMARY_I2S_TX, 0, NULL, 0, 0},
+	{ SLIMBUS_0_RX, 0, NULL, 0, 0},
+	{ SLIMBUS_0_TX, 0, NULL, 0, 0},
+	{ HDMI_RX, 0, NULL,  0, 0},
+	{ INT_BT_SCO_RX, 0, NULL, 0, 0},
+	{ INT_BT_SCO_TX, 0, NULL, 0, 0},
+	{ INT_FM_RX, 0, NULL, 0, 0},
+	{ INT_FM_TX, 0, NULL, 0, 0},
+	{ RT_PROXY_PORT_001_RX, 0, NULL, 0, 0},
+	{ RT_PROXY_PORT_001_TX, 0, NULL, 0, 0},
+	{ PCM_RX, 0, NULL, 0, 0},
+	{ PCM_TX, 0, NULL, 0, 0},
+	{ VOICE_PLAYBACK_TX, 0, NULL, 0, 0},
+	{ VOICE_RECORD_RX, 0, NULL, 0, 0},
+	{ VOICE_RECORD_TX, 0, NULL, 0, 0},
+	{ MI2S_RX, 0, NULL, 0, 0},
+	{ SECONDARY_I2S_RX, 0, NULL, 0, 0},
+	{ SLIMBUS_1_RX, 0, NULL, 0, 0},
+	{ SLIMBUS_1_TX, 0, NULL, 0, 0},
+	{ SLIMBUS_INVALID, 0, NULL, 0, 0},
+};
+
+
+/* Track ASM playback & capture sessions of DAI */
+static int fe_dai_map[MSM_FRONTEND_DAI_MM_SIZE][2] = {
+	/* MULTIMEDIA1 */
+	{INVALID_SESSION, INVALID_SESSION},
+	/* MULTIMEDIA2 */
+	{INVALID_SESSION, INVALID_SESSION},
+	/* MULTIMEDIA3 */
+	{INVALID_SESSION, INVALID_SESSION},
+	/* MULTIMEDIA4 */
+	{INVALID_SESSION, INVALID_SESSION},
+};
+
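+/*
+ * Build the ADM matrix-map payload from every active back end that has this
+ * front end routed to it, then program the routing matrix for the session.
+ */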
+static void msm_pcm_routing_build_matrix(int fedai_id, int dspst_id,
+	int path_type)
+{
+	int i, port_type;
+	struct route_payload payload;
+
+	payload.num_copps = 0;
+	port_type = (path_type == ADM_PATH_PLAYBACK ?
+		MSM_AFE_PORT_TYPE_RX : MSM_AFE_PORT_TYPE_TX);
+
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if ((afe_get_port_type(msm_bedais[i].port_id) ==
+			port_type) &&
+			msm_bedais[i].active && (test_bit(fedai_id,
+				&msm_bedais[i].fe_sessions)))
+			payload.copp_ids[payload.num_copps++] =
+					msm_bedais[i].port_id;
+	}
+
+	if (payload.num_copps)
+		adm_matrix_map(dspst_id, path_type,
+			payload.num_copps, payload.copp_ids, 0);
+}
+
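+/*
+ * Register an ASM session against a front-end DAI: open an ADM COPP on each
+ * active back end routed to it and map the session into the routing matrix.
+ */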
+void msm_pcm_routing_reg_phy_stream(int fedai_id, int dspst_id, int stream_type)
+{
+	int i, session_type, path_type, port_type;
+	struct route_payload payload;
+	u32 channels;
+
+	if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+		/* bad ID assigned in machine driver */
+		pr_err("%s: bad MM ID %d\n", __func__, fedai_id);
+		return;
+	}
+
+	if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) {
+		session_type = SESSION_TYPE_RX;
+		path_type = ADM_PATH_PLAYBACK;
+		port_type = MSM_AFE_PORT_TYPE_RX;
+	} else {
+		session_type = SESSION_TYPE_TX;
+		path_type = ADM_PATH_LIVE_REC;
+		port_type = MSM_AFE_PORT_TYPE_TX;
+	}
+
+	mutex_lock(&routing_lock);
+
+	payload.num_copps = 0; /* only RX needs to use payload */
+	fe_dai_map[fedai_id][session_type] = dspst_id;
+	/* re-enable EQ if active */
+	if (eq_data[fedai_id].enable)
+		msm_send_eq_values(fedai_id);
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if ((afe_get_port_type(msm_bedais[i].port_id) ==
+			port_type) && msm_bedais[i].active &&
+			(test_bit(fedai_id,
+			&msm_bedais[i].fe_sessions))) {
+
+			channels = params_channels(msm_bedais[i].hw_params);
+
+			if ((stream_type == SNDRV_PCM_STREAM_PLAYBACK) &&
+				(channels > 2))
+				adm_multi_ch_copp_open(msm_bedais[i].port_id,
+				path_type,
+				params_rate(msm_bedais[i].hw_params),
+				channels,
+				DEFAULT_COPP_TOPOLOGY);
+			else
+				adm_open(msm_bedais[i].port_id,
+				path_type,
+				params_rate(msm_bedais[i].hw_params),
+				params_channels(msm_bedais[i].hw_params),
+				DEFAULT_COPP_TOPOLOGY);
+
+			payload.copp_ids[payload.num_copps++] =
+				msm_bedais[i].port_id;
+		}
+	}
+	if (payload.num_copps)
+		adm_matrix_map(dspst_id, path_type,
+			payload.num_copps, payload.copp_ids, 0);
+
+	mutex_unlock(&routing_lock);
+}
+
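+/* Close the COPPs opened for this front-end session and invalidate it. */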
+void msm_pcm_routing_dereg_phy_stream(int fedai_id, int stream_type)
+{
+	int i, port_type, session_type;
+
+	if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+		/* bad ID assigned in machine driver */
+		pr_err("%s: bad MM ID\n", __func__);
+		return;
+	}
+
+	if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) {
+		port_type = MSM_AFE_PORT_TYPE_RX;
+		session_type = SESSION_TYPE_RX;
+	} else {
+		port_type = MSM_AFE_PORT_TYPE_TX;
+		session_type = SESSION_TYPE_TX;
+	}
+
+	mutex_lock(&routing_lock);
+
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if ((afe_get_port_type(msm_bedais[i].port_id) ==
+			port_type) && msm_bedais[i].active &&
+			(test_bit(fedai_id,
+			&msm_bedais[i].fe_sessions)))
+			adm_close(msm_bedais[i].port_id);
+	}
+
+	fe_dai_map[fedai_id][session_type] = INVALID_SESSION;
+
+	mutex_unlock(&routing_lock);
+}
+
+/* Check if FE/BE route is set */
+static bool msm_pcm_routing_route_is_set(u16 be_id, u16 fe_id)
+{
+	bool rc = false;
+
+	if (fe_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+		/* recheck FE ID in the mixer control defined in this file */
+		pr_err("%s: bad MM ID\n", __func__);
+		return rc;
+	}
+
+	if (test_bit(fe_id, &msm_bedais[be_id].fe_sessions))
+		rc = true;
+
+	return rc;
+}
+
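+/*
+ * Handle a front-end/back-end route toggle from the mixer: open or close the
+ * ADM COPP on the back end and rebuild the matrix if a session is live.
+ */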
+static void msm_pcm_routing_process_audio(u16 reg, u16 val, int set)
+{
+	int session_type, path_type;
+	u32 channels;
+
+	pr_debug("%s: reg %x val %x set %x\n", __func__, reg, val, set);
+
+	if (val > MSM_FRONTEND_DAI_MM_MAX_ID) {
+		/* recheck FE ID in the mixer control defined in this file */
+		pr_err("%s: bad MM ID\n", __func__);
+		return;
+	}
+
+	if (afe_get_port_type(msm_bedais[reg].port_id) ==
+		MSM_AFE_PORT_TYPE_RX) {
+		session_type = SESSION_TYPE_RX;
+		path_type = ADM_PATH_PLAYBACK;
+	} else {
+		session_type = SESSION_TYPE_TX;
+		path_type = ADM_PATH_LIVE_REC;
+	}
+
+	mutex_lock(&routing_lock);
+
+	if (set) {
+		set_bit(val, &msm_bedais[reg].fe_sessions);
+		if (msm_bedais[reg].active && fe_dai_map[val][session_type] !=
+			INVALID_SESSION) {
+
+			channels = params_channels(msm_bedais[reg].hw_params);
+
+			if ((session_type == SESSION_TYPE_RX) && (channels > 2))
+				adm_multi_ch_copp_open(msm_bedais[reg].port_id,
+				path_type,
+				params_rate(msm_bedais[reg].hw_params),
+				channels,
+				DEFAULT_COPP_TOPOLOGY);
+			else
+				adm_open(msm_bedais[reg].port_id,
+				path_type,
+				params_rate(msm_bedais[reg].hw_params),
+				params_channels(msm_bedais[reg].hw_params),
+				DEFAULT_COPP_TOPOLOGY);
+
+			msm_pcm_routing_build_matrix(val,
+				fe_dai_map[val][session_type], path_type);
+		}
+	} else {
+		clear_bit(val, &msm_bedais[reg].fe_sessions);
+		if (msm_bedais[reg].active && fe_dai_map[val][session_type] !=
+			INVALID_SESSION) {
+			adm_close(msm_bedais[reg].port_id);
+			msm_pcm_routing_build_matrix(val,
+				fe_dai_map[val][session_type], path_type);
+		}
+	}
+	mutex_unlock(&routing_lock);
+}
+
+static int msm_routing_get_audio_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+	(struct soc_mixer_control *)kcontrol->private_value;
+
+	if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions))
+		ucontrol->value.integer.value[0] = 1;
+	else
+		ucontrol->value.integer.value[0] = 0;
+
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+		ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+
+
+	if (ucontrol->value.integer.value[0] &&
+	    msm_pcm_routing_route_is_set(mc->reg, mc->shift) == false) {
+		msm_pcm_routing_process_audio(mc->reg, mc->shift, 1);
+		snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
+	} else if (!ucontrol->value.integer.value[0] &&
+		   msm_pcm_routing_route_is_set(mc->reg, mc->shift) == true) {
+		msm_pcm_routing_process_audio(mc->reg, mc->shift, 0);
+		snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
+	}
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+					ucontrol->value.integer.value[0]);
+
+	return 1;
+}
+
+static void msm_pcm_routing_process_voice(u16 reg, u16 val, int set)
+{
+	return;
+}
+
+static int msm_routing_get_voice_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+	(struct soc_mixer_control *)kcontrol->private_value;
+
+	mutex_lock(&routing_lock);
+
+	if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions))
+		ucontrol->value.integer.value[0] = 1;
+	else
+		ucontrol->value.integer.value[0] = 0;
+
+	mutex_unlock(&routing_lock);
+
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+			ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int msm_routing_put_voice_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+
+	if (ucontrol->value.integer.value[0]) {
+		msm_pcm_routing_process_voice(mc->reg, mc->shift, 1);
+		snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
+	} else {
+		msm_pcm_routing_process_voice(mc->reg, mc->shift, 0);
+		snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
+	}
+
+	return 1;
+}
+
+static int msm_routing_get_voice_stub_mixer(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+
+	mutex_lock(&routing_lock);
+
+	if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions))
+		ucontrol->value.integer.value[0] = 1;
+	else
+		ucontrol->value.integer.value[0] = 0;
+
+	mutex_unlock(&routing_lock);
+
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+		ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int msm_routing_put_voice_stub_mixer(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+
+	if (ucontrol->value.integer.value[0]) {
+		mutex_lock(&routing_lock);
+		set_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions);
+		mutex_unlock(&routing_lock);
+
+		snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
+	} else {
+		mutex_lock(&routing_lock);
+		clear_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions);
+		mutex_unlock(&routing_lock);
+
+		snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
+	}
+
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+		ucontrol->value.integer.value[0]);
+
+	return 1;
+}
+
+static int msm_routing_get_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = fm_switch_enable;
+	pr_debug("%s: FM Switch enable %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_put_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+
+	pr_debug("%s: FM Switch enable %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	if (ucontrol->value.integer.value[0])
+		snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
+	else
+		snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
+	fm_switch_enable = ucontrol->value.integer.value[0];
+	return 1;
+}
+
+static int msm_routing_get_port_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+	(struct soc_mixer_control *)kcontrol->private_value;
+
+	if (test_bit(mc->shift, &msm_bedais[mc->reg].port_sessions))
+		ucontrol->value.integer.value[0] = 1;
+	else
+		ucontrol->value.integer.value[0] = 0;
+
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+	ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int msm_routing_put_port_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg,
+		mc->shift, ucontrol->value.integer.value[0]);
+
+	if (ucontrol->value.integer.value[0]) {
+		afe_loopback(1, msm_bedais[mc->reg].port_id,
+			     msm_bedais[mc->shift].port_id);
+		set_bit(mc->shift,
+		&msm_bedais[mc->reg].port_sessions);
+	} else {
+		afe_loopback(0, msm_bedais[mc->reg].port_id,
+			     msm_bedais[mc->shift].port_id);
+		clear_bit(mc->shift,
+		&msm_bedais[mc->reg].port_sessions);
+	}
+
+	return 1;
+}
+
+static int msm_routing_get_fm_vol_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_route_fm_vol_control;
+	return 0;
+}
+
+static int msm_routing_set_fm_vol_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	afe_loopback_gain(INT_FM_TX, ucontrol->value.integer.value[0]);
+
+	msm_route_fm_vol_control = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+static int msm_routing_get_lpa_vol_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = msm_route_lpa_vol_control;
+	return 0;
+}
+
+static int msm_routing_set_lpa_vol_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	if (!lpa_set_volume(ucontrol->value.integer.value[0]))
+		msm_route_lpa_vol_control =
+			ucontrol->value.integer.value[0];
+
+	return 0;
+
+}
+
+static int msm_routing_get_multimedia2_vol_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+
+	ucontrol->value.integer.value[0] = msm_route_multimedia2_vol_control;
+	return 0;
+}
+
+static int msm_routing_set_multimedia2_vol_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	if (!multi_ch_pcm_set_volume(ucontrol->value.integer.value[0]))
+		msm_route_multimedia2_vol_control =
+			ucontrol->value.integer.value[0];
+	return 0;
+}
+
+static int msm_routing_get_compressed_vol_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+
+	ucontrol->value.integer.value[0] = msm_route_compressed_vol_control;
+	return 0;
+}
+
+static int msm_routing_set_compressed_vol_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	if (!compressed_set_volume(ucontrol->value.integer.value[0]))
+		msm_route_compressed_vol_control =
+			ucontrol->value.integer.value[0];
+	return 0;
+}
+
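+/* Push the cached equalizer settings to the ASM session of this front end. */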
+static void msm_send_eq_values(int eq_idx)
+{
+	int result;
+	struct audio_client *ac =
+		q6asm_get_audio_client(fe_dai_map[eq_idx][SESSION_TYPE_RX]);
+
+	if (ac == NULL) {
+		pr_err("%s: Could not get audio client for session: %d\n",
+		       __func__, fe_dai_map[eq_idx][SESSION_TYPE_RX]);
+		goto done;
+	}
+
+	result = q6asm_equalizer(ac, &eq_data[eq_idx]);
+
+	if (result < 0)
+		pr_err("%s: Call to ASM equalizer failed, returned = %d\n",
+		       __func__, result);
+done:
+	return;
+}
+
+static int msm_routing_get_eq_enable_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int eq_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+
+	ucontrol->value.integer.value[0] = eq_data[eq_idx].enable;
+
+	pr_debug("%s: EQ #%d enable %d\n", __func__,
+		eq_idx, eq_data[eq_idx].enable);
+	return 0;
+}
+
+static int msm_routing_put_eq_enable_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int eq_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int value = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: EQ #%d enable %d\n", __func__,
+		eq_idx, value);
+	eq_data[eq_idx].enable = value;
+
+	msm_send_eq_values(eq_idx);
+	return 0;
+}
+
+static int msm_routing_get_eq_band_count_audio_mixer(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int eq_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+
+	ucontrol->value.integer.value[0] = eq_data[eq_idx].num_bands;
+
+	pr_debug("%s: EQ #%d bands %d\n", __func__,
+		eq_idx, eq_data[eq_idx].num_bands);
+	return 0;
+}
+
+static int msm_routing_put_eq_band_count_audio_mixer(
+					struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int eq_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int value = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: EQ #%d bands %d\n", __func__,
+		eq_idx, value);
+	eq_data[eq_idx].num_bands = value;
+	return 0;
+}
+
+static int msm_routing_get_eq_band_audio_mixer(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int eq_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int band_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+
+	ucontrol->value.integer.value[0] =
+			eq_data[eq_idx].eq_bands[band_idx].band_idx;
+	ucontrol->value.integer.value[1] =
+			eq_data[eq_idx].eq_bands[band_idx].filter_type;
+	ucontrol->value.integer.value[2] =
+			eq_data[eq_idx].eq_bands[band_idx].center_freq_hz;
+	ucontrol->value.integer.value[3] =
+			eq_data[eq_idx].eq_bands[band_idx].filter_gain;
+	ucontrol->value.integer.value[4] =
+			eq_data[eq_idx].eq_bands[band_idx].q_factor;
+
+	pr_debug("%s: band_idx = %d\n", __func__,
+			eq_data[eq_idx].eq_bands[band_idx].band_idx);
+	pr_debug("%s: filter_type = %d\n", __func__,
+			eq_data[eq_idx].eq_bands[band_idx].filter_type);
+	pr_debug("%s: center_freq_hz = %d\n", __func__,
+			eq_data[eq_idx].eq_bands[band_idx].center_freq_hz);
+	pr_debug("%s: filter_gain = %d\n", __func__,
+			eq_data[eq_idx].eq_bands[band_idx].filter_gain);
+	pr_debug("%s: q_factor = %d\n", __func__,
+			eq_data[eq_idx].eq_bands[band_idx].q_factor);
+	return 0;
+}
+
+static int msm_routing_put_eq_band_audio_mixer(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int eq_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->reg;
+	int band_idx = ((struct soc_multi_mixer_control *)
+					kcontrol->private_value)->shift;
+
+	eq_data[eq_idx].eq_bands[band_idx].band_idx =
+					ucontrol->value.integer.value[0];
+	eq_data[eq_idx].eq_bands[band_idx].filter_type =
+					ucontrol->value.integer.value[1];
+	eq_data[eq_idx].eq_bands[band_idx].center_freq_hz =
+					ucontrol->value.integer.value[2];
+	eq_data[eq_idx].eq_bands[band_idx].filter_gain =
+					ucontrol->value.integer.value[3];
+	eq_data[eq_idx].eq_bands[band_idx].q_factor =
+					ucontrol->value.integer.value[4];
+	return 0;
+}
+
+static const struct snd_kcontrol_new pri_i2s_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new sec_i2s_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mi2s_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new hdmi_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+	/* incall music delivery mixer */
+static const struct snd_kcontrol_new incall_music_delivery_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new int_bt_sco_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new int_fm_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new afe_pcm_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new auxpcm_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul1_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_TX", MSM_BACKEND_DAI_PRI_I2S_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul2_mixer_controls[] = {
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new pri_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new sec_i2s_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new bt_sco_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+};
+
+static const struct snd_kcontrol_new afe_pcm_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+};
+
+static const struct snd_kcontrol_new aux_pcm_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+};
+
+static const struct snd_kcontrol_new hdmi_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new stub_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_INVALID,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+};
+
+static const struct snd_kcontrol_new slimbus_1_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SLIMBUS_1_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+};
+
+static const struct snd_kcontrol_new tx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_TX_Voice", MSM_BACKEND_DAI_PRI_I2S_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX_Voice", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_Voice",
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_CS_VOICE, 1, 0,
+	msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX_Voice", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_TX_Voice", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new tx_voip_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_TX_Voip", MSM_BACKEND_DAI_PRI_I2S_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX_Voip", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_Voip", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX_Voip", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_TX_Voip", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new tx_voice_stub_mixer_controls[] = {
+	SOC_SINGLE_EXT("STUB_TX_HL", MSM_BACKEND_DAI_INVALID,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+};
+
+static const struct snd_kcontrol_new sbus_0_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new auxpcm_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sbus_1_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_SLIMBUS_1_RX,
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new bt_sco_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new fm_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_switch_mixer,
+	msm_routing_put_switch_mixer);
+
+static const struct snd_kcontrol_new int_fm_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("Internal FM RX Volume", SND_SOC_NOPM, 0,
+	INT_FM_RX_VOL_GAIN, 0, msm_routing_get_fm_vol_mixer,
+	msm_routing_set_fm_vol_mixer, fm_rx_vol_gain),
+};
+
+static const struct snd_kcontrol_new lpa_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("LPA RX Volume", SND_SOC_NOPM, 0,
+	INT_RX_VOL_GAIN, 0, msm_routing_get_lpa_vol_mixer,
+	msm_routing_set_lpa_vol_mixer, lpa_rx_vol_gain),
+};
+
+static const struct snd_kcontrol_new multimedia2_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("HIFI2 RX Volume", SND_SOC_NOPM, 0,
+	INT_RX_VOL_GAIN, 0, msm_routing_get_multimedia2_vol_mixer,
+	msm_routing_set_multimedia2_vol_mixer, multimedia2_rx_vol_gain),
+};
+
+static const struct snd_kcontrol_new compressed_vol_mixer_controls[] = {
+	SOC_SINGLE_EXT_TLV("COMPRESSED RX Volume", SND_SOC_NOPM, 0,
+	INT_RX_VOL_GAIN, 0, msm_routing_get_compressed_vol_mixer,
+	msm_routing_set_compressed_vol_mixer, compressed_rx_vol_gain),
+};
+
+static const struct snd_kcontrol_new eq_enable_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1 EQ Enable", SND_SOC_NOPM,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_eq_enable_mixer,
+	msm_routing_put_eq_enable_mixer),
+	SOC_SINGLE_EXT("MultiMedia2 EQ Enable", SND_SOC_NOPM,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_eq_enable_mixer,
+	msm_routing_put_eq_enable_mixer),
+	SOC_SINGLE_EXT("MultiMedia3 EQ Enable", SND_SOC_NOPM,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_eq_enable_mixer,
+	msm_routing_put_eq_enable_mixer),
+};
+
+static const struct snd_kcontrol_new eq_band_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1 EQ Band Count", SND_SOC_NOPM,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 11, 0,
+	msm_routing_get_eq_band_count_audio_mixer,
+	msm_routing_put_eq_band_count_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2 EQ Band Count", SND_SOC_NOPM,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 11, 0,
+	msm_routing_get_eq_band_count_audio_mixer,
+	msm_routing_put_eq_band_count_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3 EQ Band Count", SND_SOC_NOPM,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 11, 0,
+	msm_routing_get_eq_band_count_audio_mixer,
+	msm_routing_put_eq_band_count_audio_mixer),
+};
+
+static const struct snd_kcontrol_new eq_coeff_mixer_controls[] = {
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band1", EQ_BAND1,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band2", EQ_BAND2,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band3", EQ_BAND3,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band4", EQ_BAND4,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band5", EQ_BAND5,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band6", EQ_BAND6,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band7", EQ_BAND7,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band8", EQ_BAND8,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band9", EQ_BAND9,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band10", EQ_BAND10,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band11", EQ_BAND11,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia1 EQ Band12", EQ_BAND12,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band1", EQ_BAND1,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band2", EQ_BAND2,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band3", EQ_BAND3,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band4", EQ_BAND4,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band5", EQ_BAND5,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band6", EQ_BAND6,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band7", EQ_BAND7,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band8", EQ_BAND8,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band9", EQ_BAND9,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band10", EQ_BAND10,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band11", EQ_BAND11,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia2 EQ Band12", EQ_BAND12,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band1", EQ_BAND1,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band2", EQ_BAND2,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band3", EQ_BAND3,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band4", EQ_BAND4,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band5", EQ_BAND5,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band6", EQ_BAND6,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band7", EQ_BAND7,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band8", EQ_BAND8,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band9", EQ_BAND9,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band10", EQ_BAND10,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band11", EQ_BAND11,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+	SOC_SINGLE_MULTI_EXT("MultiMedia3 EQ Band12", EQ_BAND12,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 255, 0, 5,
+	msm_routing_get_eq_band_audio_mixer,
+	msm_routing_put_eq_band_audio_mixer),
+};
+
+static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
+	/* Frontend AIF */
+	/* Widget names are expected to match the front-end DAI names, and
+	 * the stream name must contain a substring of the front-end DAI
+	 * name.
+	 */
+	SND_SOC_DAPM_AIF_IN("MM_DL1", "MultiMedia1 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL2", "MultiMedia2 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL3", "MultiMedia3 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL4", "MultiMedia4 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("VOIP_DL", "VoIP Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("CS-VOICE_DL1", "CS-VOICE Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("CS-VOICE_UL1", "CS-VOICE Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VOIP_UL", "VoIP Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIM0_DL_HL", "SLIMBUS0_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIM0_UL_HL", "SLIMBUS0_HOSTLESS Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("INTFM_DL_HL", "INT_FM_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("INTFM_UL_HL", "INT_FM_HOSTLESS Capture",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("HDMI_DL_HL", "HDMI_HOSTLESS Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("AUXPCM_DL_HL", "AUXPCM_HOSTLESS Playback",
+		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AUXPCM_UL_HL", "AUXPCM_HOSTLESS Capture",
+		0, 0, 0, 0),
+
+	/* Backend AIF */
+	/* The stream name must match the back-end DAI link stream name. */
+	SND_SOC_DAPM_AIF_OUT("PRI_I2S_RX", "Primary I2S Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SEC_I2S_RX", "Secondary I2S Playback",
+				0, 0, 0 , 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_RX", "Slimbus Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("HDMI", "HDMI Playback", 0, 0, 0 , 0),
+	SND_SOC_DAPM_AIF_OUT("MI2S_RX", "MI2S Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PRI_I2S_TX", "Primary I2S Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_0_TX", "Slimbus Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("INT_BT_SCO_RX", "Internal BT-SCO Playback",
+				0, 0, 0 , 0),
+	SND_SOC_DAPM_AIF_IN("INT_BT_SCO_TX", "Internal BT-SCO Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("INT_FM_RX", "Internal FM Playback",
+				0, 0, 0 , 0),
+	SND_SOC_DAPM_AIF_IN("INT_FM_TX", "Internal FM Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PCM_RX", "AFE Playback",
+				0, 0, 0 , 0),
+	SND_SOC_DAPM_AIF_IN("PCM_TX", "AFE Capture",
+				0, 0, 0 , 0),
+	/* incall */
+	SND_SOC_DAPM_AIF_OUT("VOICE_PLAYBACK_TX", "Voice Farend Playback",
+				0, 0, 0 , 0),
+	SND_SOC_DAPM_AIF_IN("INCALL_RECORD_TX", "Voice Uplink Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("INCALL_RECORD_RX", "Voice Downlink Capture",
+				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AUX_PCM_RX", "AUX PCM Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("AUX_PCM_TX", "AUX PCM Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("VOICE_STUB_DL", "VOICE_STUB Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VOICE_STUB_UL", "VOICE_STUB Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("STUB_RX", "Stub Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("STUB_TX", "Stub Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_RX", "Slimbus1 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_1_TX", "Slimbus1 Capture", 0, 0, 0, 0),
+
+	/* Switch Definitions */
+	SND_SOC_DAPM_SWITCH("SLIMBUS_DL_HL", SND_SOC_NOPM, 0, 0,
+				&fm_switch_mixer_controls),
+	/* Mixer definitions */
+	SND_SOC_DAPM_MIXER("PRI_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	pri_i2s_rx_mixer_controls, ARRAY_SIZE(pri_i2s_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	sec_i2s_rx_mixer_controls, ARRAY_SIZE(sec_i2s_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_0_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	slimbus_rx_mixer_controls, ARRAY_SIZE(slimbus_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("HDMI Mixer", SND_SOC_NOPM, 0, 0,
+	hdmi_mixer_controls, ARRAY_SIZE(hdmi_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	mi2s_rx_mixer_controls, ARRAY_SIZE(mi2s_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia1 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul1_mixer_controls, ARRAY_SIZE(mmul1_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia2 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul2_mixer_controls, ARRAY_SIZE(mmul2_mixer_controls)),
+	SND_SOC_DAPM_MIXER("AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	auxpcm_rx_mixer_controls, ARRAY_SIZE(auxpcm_rx_mixer_controls)),
+	/* incall */
+	SND_SOC_DAPM_MIXER("Incall_Music Audio Mixer", SND_SOC_NOPM, 0, 0,
+			incall_music_delivery_mixer_controls,
+			ARRAY_SIZE(incall_music_delivery_mixer_controls)),
+	/* Voice Mixer */
+	SND_SOC_DAPM_MIXER("PRI_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0, pri_rx_voice_mixer_controls,
+				ARRAY_SIZE(pri_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				sec_i2s_rx_voice_mixer_controls,
+				ARRAY_SIZE(sec_i2s_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIM_0_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				slimbus_rx_voice_mixer_controls,
+				ARRAY_SIZE(slimbus_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INTERNAL_BT_SCO_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				bt_sco_rx_voice_mixer_controls,
+				ARRAY_SIZE(bt_sco_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("AFE_PCM_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				afe_pcm_rx_voice_mixer_controls,
+				ARRAY_SIZE(afe_pcm_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("AUX_PCM_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				aux_pcm_rx_voice_mixer_controls,
+				ARRAY_SIZE(aux_pcm_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("HDMI_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				hdmi_rx_voice_mixer_controls,
+				ARRAY_SIZE(hdmi_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("Voice_Tx Mixer",
+				SND_SOC_NOPM, 0, 0, tx_voice_mixer_controls,
+				ARRAY_SIZE(tx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("Voip_Tx Mixer",
+				SND_SOC_NOPM, 0, 0, tx_voip_mixer_controls,
+				ARRAY_SIZE(tx_voip_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INTERNAL_BT_SCO_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	int_bt_sco_rx_mixer_controls, ARRAY_SIZE(int_bt_sco_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INTERNAL_FM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	int_fm_rx_mixer_controls, ARRAY_SIZE(int_fm_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("AFE_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+	afe_pcm_rx_mixer_controls, ARRAY_SIZE(afe_pcm_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("Voice Stub Tx Mixer", SND_SOC_NOPM, 0, 0,
+	tx_voice_stub_mixer_controls, ARRAY_SIZE(tx_voice_stub_mixer_controls)),
+	SND_SOC_DAPM_MIXER("STUB_RX Mixer", SND_SOC_NOPM, 0, 0,
+	stub_rx_mixer_controls, ARRAY_SIZE(stub_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_1_RX Mixer", SND_SOC_NOPM, 0, 0,
+	slimbus_1_rx_mixer_controls, ARRAY_SIZE(slimbus_1_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_0_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, sbus_0_rx_port_mixer_controls,
+	ARRAY_SIZE(sbus_0_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("AUXPCM_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, auxpcm_rx_port_mixer_controls,
+	ARRAY_SIZE(auxpcm_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_1_RX Port Mixer", SND_SOC_NOPM, 0, 0,
+	sbus_1_rx_port_mixer_controls,
+	ARRAY_SIZE(sbus_1_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("INTERNAL_BT_SCO_RX Port Mixer", SND_SOC_NOPM, 0, 0,
+	bt_sco_rx_port_mixer_controls,
+	ARRAY_SIZE(bt_sco_rx_port_mixer_controls)),
+};
+
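+/* Each route below is a {sink widget, control name, source widget} triple.
+ * A non-NULL control name means the connection is gated by the named mixer
+ * switch; a NULL control name describes a direct, always-on path.
+ */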
+static const struct snd_soc_dapm_route intercon[] = {
+	{"PRI_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"PRI_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"PRI_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"PRI_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"PRI_I2S_RX", NULL, "PRI_RX Audio Mixer"},
+
+	{"SEC_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SEC_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SEC_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SEC_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SEC_I2S_RX", NULL, "SEC_RX Audio Mixer"},
+
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"SLIMBUS_0_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"SLIMBUS_0_RX", NULL, "SLIMBUS_0_RX Audio Mixer"},
+
+	{"HDMI Mixer", "MultiMedia1", "MM_DL1"},
+	{"HDMI Mixer", "MultiMedia2", "MM_DL2"},
+	{"HDMI Mixer", "MultiMedia3", "MM_DL3"},
+	{"HDMI Mixer", "MultiMedia4", "MM_DL4"},
+	{"HDMI", NULL, "HDMI Mixer"},
+
+	/* incall */
+	{"Incall_Music Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"Incall_Music Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"VOICE_PLAYBACK_TX", NULL, "Incall_Music Audio Mixer"},
+
+	{"MultiMedia1 Mixer", "VOC_REC_UL", "INCALL_RECORD_TX"},
+	{"MultiMedia1 Mixer", "VOC_REC_DL", "INCALL_RECORD_RX"},
+	{"MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"MI2S_RX", NULL, "MI2S_RX Audio Mixer"},
+
+	{"MultiMedia1 Mixer", "PRI_TX", "PRI_I2S_TX"},
+	{"MultiMedia1 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"MultiMedia1 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"INT_BT_SCO_RX", NULL, "INTERNAL_BT_SCO_RX Audio Mixer"},
+
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"INTERNAL_FM_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"INT_FM_RX", NULL, "INTERNAL_FM_RX Audio Mixer"},
+
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"AFE_PCM_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"PCM_RX", NULL, "AFE_PCM_RX Audio Mixer"},
+
+	{"MultiMedia1 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"MultiMedia1 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+
+	{"MultiMedia1 Mixer", "AFE_PCM_TX", "PCM_TX"},
+	{"MM_UL1", NULL, "MultiMedia1 Mixer"},
+	{"MultiMedia2 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"MM_UL2", NULL, "MultiMedia2 Mixer"},
+
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"AUX_PCM_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"AUX_PCM_RX", NULL, "AUX_PCM_RX Audio Mixer"},
+
+	{"PRI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"PRI_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"PRI_I2S_RX", NULL, "PRI_RX_Voice Mixer"},
+
+	{"SEC_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"SEC_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"SEC_I2S_RX", NULL, "SEC_RX_Voice Mixer"},
+
+	{"SLIM_0_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"SLIM_0_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"SLIMBUS_0_RX", NULL, "SLIM_0_RX_Voice Mixer"},
+
+	{"INTERNAL_BT_SCO_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"INT_BT_SCO_RX", NULL, "INTERNAL_BT_SCO_RX_Voice Mixer"},
+
+	{"AFE_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"AFE_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"PCM_RX", NULL, "AFE_PCM_RX_Voice Mixer"},
+
+	{"AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"AUX_PCM_RX", NULL, "AUX_PCM_RX_Voice Mixer"},
+
+	{"HDMI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"HDMI_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"HDMI", NULL, "HDMI_RX_Voice Mixer"},
+	{"HDMI", NULL, "HDMI_DL_HL"},
+
+	{"Voice_Tx Mixer", "PRI_TX_Voice", "PRI_I2S_TX"},
+	{"Voice_Tx Mixer", "SLIM_0_TX_Voice", "SLIMBUS_0_TX"},
+	{"Voice_Tx Mixer", "INTERNAL_BT_SCO_TX_Voice", "INT_BT_SCO_TX"},
+	{"Voice_Tx Mixer", "AFE_PCM_TX_Voice", "PCM_TX"},
+	{"Voice_Tx Mixer", "AUX_PCM_TX_Voice", "AUX_PCM_TX"},
+	{"CS-VOICE_UL1", NULL, "Voice_Tx Mixer"},
+	{"Voip_Tx Mixer", "PRI_TX_Voip", "PRI_I2S_TX"},
+	{"Voip_Tx Mixer", "SLIM_0_TX_Voip", "SLIMBUS_0_TX"},
+	{"Voip_Tx Mixer", "INTERNAL_BT_SCO_TX_Voip", "INT_BT_SCO_TX"},
+	{"Voip_Tx Mixer", "AFE_PCM_TX_Voip", "PCM_TX"},
+	{"Voip_Tx Mixer", "AUX_PCM_TX_Voip", "AUX_PCM_TX"},
+
+	{"VOIP_UL", NULL, "Voip_Tx Mixer"},
+	{"SLIMBUS_DL_HL", "Switch", "SLIM0_DL_HL"},
+	{"SLIMBUS_0_RX", NULL, "SLIMBUS_DL_HL"},
+	{"SLIM0_UL_HL", NULL, "SLIMBUS_0_TX"},
+	{"INT_FM_RX", NULL, "INTFM_DL_HL"},
+	{"INTFM_UL_HL", NULL, "INT_FM_TX"},
+	{"AUX_PCM_RX", NULL, "AUXPCM_DL_HL"},
+	{"AUXPCM_UL_HL", NULL, "AUX_PCM_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"SLIMBUS_0_RX", NULL, "SLIMBUS_0_RX Port Mixer"},
+
+	{"AUXPCM_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"AUXPCM_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"AUX_PCM_RX", NULL, "AUXPCM_RX Port Mixer"},
+
+	{"Voice Stub Tx Mixer", "STUB_TX_HL", "STUB_TX"},
+	{"Voice Stub Tx Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+	{"Voice Stub Tx Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"VOICE_STUB_UL", NULL, "Voice Stub Tx Mixer"},
+
+	{"STUB_RX Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"STUB_RX", NULL, "STUB_RX Mixer"},
+	{"SLIMBUS_1_RX Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"SLIMBUS_1_RX", NULL, "SLIMBUS_1_RX Mixer"},
+	{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+
+	{"SLIMBUS_1_RX Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"SLIMBUS_1_RX", NULL, "SLIMBUS_1_RX Port Mixer"},
+	{"INTERNAL_BT_SCO_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+	{"INT_BT_SCO_RX", NULL, "INTERNAL_BT_SCO_RX Port Mixer"},
+};
+
+static int msm_pcm_routing_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	unsigned int be_id = rtd->dai_link->be_id;
+
+	if (be_id >= MSM_BACKEND_DAI_MAX) {
+		pr_err("%s: unexpected be_id %d\n", __func__, be_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&routing_lock);
+	msm_bedais[be_id].hw_params = params;
+	mutex_unlock(&routing_lock);
+	return 0;
+}
+
+static int msm_pcm_routing_close(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	unsigned int be_id = rtd->dai_link->be_id;
+	int i, session_type;
+	struct msm_pcm_routing_bdai_data *bedai;
+
+	if (be_id >= MSM_BACKEND_DAI_MAX) {
+		pr_err("%s: unexpected be_id %d\n", __func__, be_id);
+		return -EINVAL;
+	}
+
+	bedai = &msm_bedais[be_id];
+
+	session_type = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+		0 : 1);
+
+	mutex_lock(&routing_lock);
+
+	for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) {
+		if (fe_dai_map[i][session_type] != INVALID_SESSION)
+			adm_close(bedai->port_id);
+	}
+
+	bedai->active = 0;
+	bedai->hw_params = NULL;
+
+	mutex_unlock(&routing_lock);
+
+	return 0;
+}
+
+static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	unsigned int be_id = rtd->dai_link->be_id;
+	int i, path_type, session_type;
+	struct msm_pcm_routing_bdai_data *bedai;
+	u32 channels;
+
+	if (be_id >= MSM_BACKEND_DAI_MAX) {
+		pr_err("%s: unexpected be_id %d\n", __func__, be_id);
+		return -EINVAL;
+	}
+
+
+	bedai = &msm_bedais[be_id];
+
+	if (bedai->hw_params == NULL) {
+		pr_err("%s: hw_params are not configured\n", __func__);
+		return -EINVAL;
+	}
+
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		path_type = ADM_PATH_PLAYBACK;
+		session_type = SESSION_TYPE_RX;
+	} else {
+		path_type = ADM_PATH_LIVE_REC;
+		session_type = SESSION_TYPE_TX;
+	}
+
+	mutex_lock(&routing_lock);
+
+	if (bedai->active == 1)
+		goto done; /* Ignore prepare if back-end already active */
+
+	/* AFE port is not active at this point. However, still
+	 * go ahead setting active flag under the notion that
+	 * QDSP6 is able to handle ADM starting before AFE port
+	 * is started.
+	 */
+	bedai->active = 1;
+
+	for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) {
+		if (fe_dai_map[i][session_type] != INVALID_SESSION) {
+
+			channels = params_channels(bedai->hw_params);
+			if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) &&
+				(channels > 2))
+				adm_multi_ch_copp_open(bedai->port_id,
+				path_type,
+				params_rate(bedai->hw_params),
+				channels,
+				DEFAULT_COPP_TOPOLOGY);
+			else
+				adm_open(bedai->port_id,
+				path_type,
+				params_rate(bedai->hw_params),
+				params_channels(bedai->hw_params),
+				DEFAULT_COPP_TOPOLOGY);
+
+			msm_pcm_routing_build_matrix(i,
+				fe_dai_map[i][session_type], path_type);
+		}
+	}
+
+done:
+	mutex_unlock(&routing_lock);
+
+	return 0;
+}
+
+static struct snd_pcm_ops msm_routing_pcm_ops = {
+	.hw_params	= msm_pcm_routing_hw_params,
+	.close          = msm_pcm_routing_close,
+	.prepare        = msm_pcm_routing_prepare,
+};
+
+static unsigned int msm_routing_read(struct snd_soc_platform *platform,
+				 unsigned int reg)
+{
+	dev_dbg(platform->dev, "reg %x\n", reg);
+	return 0;
+}
+
+/* Not used, but the ASoC framework seems to require it */
+static int msm_routing_write(struct snd_soc_platform *platform,
+	unsigned int reg, unsigned int val)
+{
+	dev_dbg(platform->dev, "reg %x val %x\n", reg, val);
+	return 0;
+}
+
+/* Not used, but the ASoC framework seems to require it */
+static int msm_routing_probe(struct snd_soc_platform *platform)
+{
+	snd_soc_dapm_new_controls(&platform->dapm, msm_qdsp6_widgets,
+			    ARRAY_SIZE(msm_qdsp6_widgets));
+	snd_soc_dapm_add_routes(&platform->dapm, intercon,
+		ARRAY_SIZE(intercon));
+
+	snd_soc_dapm_new_widgets(&platform->dapm);
+
+	snd_soc_add_platform_controls(platform,
+				int_fm_vol_mixer_controls,
+			ARRAY_SIZE(int_fm_vol_mixer_controls));
+
+	snd_soc_add_platform_controls(platform,
+				lpa_vol_mixer_controls,
+			ARRAY_SIZE(lpa_vol_mixer_controls));
+
+	snd_soc_add_platform_controls(platform,
+				eq_enable_mixer_controls,
+			ARRAY_SIZE(eq_enable_mixer_controls));
+
+	snd_soc_add_platform_controls(platform,
+				eq_band_mixer_controls,
+			ARRAY_SIZE(eq_band_mixer_controls));
+
+	snd_soc_add_platform_controls(platform,
+				eq_coeff_mixer_controls,
+			ARRAY_SIZE(eq_coeff_mixer_controls));
+
+	snd_soc_add_platform_controls(platform,
+				multimedia2_vol_mixer_controls,
+			ARRAY_SIZE(multimedia2_vol_mixer_controls));
+
+	snd_soc_add_platform_controls(platform,
+				compressed_vol_mixer_controls,
+			ARRAY_SIZE(compressed_vol_mixer_controls));
+
+	return 0;
+}
+
+static struct snd_soc_platform_driver msm_soc_routing_platform = {
+	.ops		= &msm_routing_pcm_ops,
+	.probe		= msm_routing_probe,
+	.read		= msm_routing_read,
+	.write		= msm_routing_write,
+};
+
+static __devinit int msm_routing_pcm_probe(struct platform_device *pdev)
+{
+	dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));
+	return snd_soc_register_platform(&pdev->dev,
+				   &msm_soc_routing_platform);
+}
+
+static int msm_routing_pcm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver msm_routing_pcm_driver = {
+	.driver = {
+		.name = "msm-pcm-routing",
+		.owner = THIS_MODULE,
+	},
+	.probe = msm_routing_pcm_probe,
+	.remove = __devexit_p(msm_routing_pcm_remove),
+};
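+
+/* Illustrative only: this driver binds to a platform device named
+ * "msm-pcm-routing".  A board or machine file would typically provide that
+ * device roughly as sketched below (the device name comes from this file;
+ * the variable name is a hypothetical example):
+ *
+ *	static struct platform_device msm_pcm_routing_dev = {
+ *		.name = "msm-pcm-routing",
+ *		.id = -1,
+ *	};
+ *	platform_device_register(&msm_pcm_routing_dev);
+ */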
+
+int msm_routing_check_backend_enabled(int fedai_id)
+{
+	int i;
+	if (fedai_id >= MSM_FRONTEND_DAI_MM_MAX_ID) {
+		/* bad ID assigned in machine driver */
+		pr_err("%s: bad MM ID\n", __func__);
+		return 0;
+	}
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if ((test_bit(fedai_id,
+			&msm_bedais[i].fe_sessions))) {
+			return msm_bedais[i].active;
+		}
+	}
+	return 0;
+}
+
+static int __init msm_soc_routing_platform_init(void)
+{
+	mutex_init(&routing_lock);
+	return platform_driver_register(&msm_routing_pcm_driver);
+}
+module_init(msm_soc_routing_platform_init);
+
+static void __exit msm_soc_routing_platform_exit(void)
+{
+	platform_driver_unregister(&msm_routing_pcm_driver);
+}
+module_exit(msm_soc_routing_platform_exit);
+
+MODULE_DESCRIPTION("MSM routing platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
new file mode 100644
index 0000000..b971787
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_PCM_ROUTING_H
+#define _MSM_PCM_ROUTING_H
+#include <sound/apr_audio-v2.h>
+
+#define LPASS_BE_PRI_I2S_RX "(Backend) PRIMARY_I2S_RX"
+#define LPASS_BE_PRI_I2S_TX "(Backend) PRIMARY_I2S_TX"
+#define LPASS_BE_SLIMBUS_0_RX "(Backend) SLIMBUS_0_RX"
+#define LPASS_BE_SLIMBUS_0_TX "(Backend) SLIMBUS_0_TX"
+#define LPASS_BE_HDMI "(Backend) HDMI"
+#define LPASS_BE_INT_BT_SCO_RX "(Backend) INT_BT_SCO_RX"
+#define LPASS_BE_INT_BT_SCO_TX "(Backend) INT_BT_SCO_TX"
+#define LPASS_BE_INT_FM_RX "(Backend) INT_FM_RX"
+#define LPASS_BE_INT_FM_TX "(Backend) INT_FM_TX"
+#define LPASS_BE_AFE_PCM_RX "(Backend) RT_PROXY_DAI_001_RX"
+#define LPASS_BE_AFE_PCM_TX "(Backend) RT_PROXY_DAI_002_TX"
+#define LPASS_BE_AUXPCM_RX "(Backend) AUX_PCM_RX"
+#define LPASS_BE_AUXPCM_TX "(Backend) AUX_PCM_TX"
+#define LPASS_BE_VOICE_PLAYBACK_TX "(Backend) VOICE_PLAYBACK_TX"
+#define LPASS_BE_INCALL_RECORD_RX "(Backend) INCALL_RECORD_TX"
+#define LPASS_BE_INCALL_RECORD_TX "(Backend) INCALL_RECORD_RX"
+#define LPASS_BE_SEC_I2S_RX "(Backend) SECONDARY_I2S_RX"
+
+#define LPASS_BE_MI2S_RX "(Backend) MI2S_RX"
+#define LPASS_BE_STUB_RX "(Backend) STUB_RX"
+#define LPASS_BE_STUB_TX "(Backend) STUB_TX"
+#define LPASS_BE_SLIMBUS_1_RX "(Backend) SLIMBUS_1_RX"
+#define LPASS_BE_SLIMBUS_1_TX "(Backend) SLIMBUS_1_TX"
+
+/* ASM sessions are allocated dynamically for multimedia front-ends, so the
+ * ASM session to multimedia front-end mapping has to be maintained.  For this
+ * reason, any additional multimedia front-ends must be placed before the
+ * non-multimedia front-ends in the enum below.
+ */
+
+enum {
+	MSM_FRONTEND_DAI_MULTIMEDIA1 = 0,
+	MSM_FRONTEND_DAI_MULTIMEDIA2,
+	MSM_FRONTEND_DAI_MULTIMEDIA3,
+	MSM_FRONTEND_DAI_MULTIMEDIA4,
+	MSM_FRONTEND_DAI_CS_VOICE,
+	MSM_FRONTEND_DAI_VOIP,
+	MSM_FRONTEND_DAI_AFE_RX,
+	MSM_FRONTEND_DAI_AFE_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB,
+	MSM_FRONTEND_DAI_MAX,
+};
+
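+/* MSM_FRONTEND_DAI_MM_SIZE is the number of multimedia front-ends and is
+ * used as the scan limit for each back end's fe_sessions bitmap;
+ * MSM_FRONTEND_DAI_MM_MAX_ID is the highest multimedia front-end index.
+ */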
+#define MSM_FRONTEND_DAI_MM_SIZE (MSM_FRONTEND_DAI_MULTIMEDIA4 + 1)
+#define MSM_FRONTEND_DAI_MM_MAX_ID MSM_FRONTEND_DAI_MULTIMEDIA4
+
+enum {
+	MSM_BACKEND_DAI_PRI_I2S_RX = 0,
+	MSM_BACKEND_DAI_PRI_I2S_TX,
+	MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_BACKEND_DAI_HDMI_RX,
+	MSM_BACKEND_DAI_INT_BT_SCO_RX,
+	MSM_BACKEND_DAI_INT_BT_SCO_TX,
+	MSM_BACKEND_DAI_INT_FM_RX,
+	MSM_BACKEND_DAI_INT_FM_TX,
+	MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+	MSM_BACKEND_DAI_INCALL_RECORD_RX,
+	MSM_BACKEND_DAI_INCALL_RECORD_TX,
+	MSM_BACKEND_DAI_MI2S_RX,
+	MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_1_RX,
+	MSM_BACKEND_DAI_SLIMBUS_1_TX,
+	MSM_BACKEND_DAI_INVALID,
+	MSM_BACKEND_DAI_MAX,
+};
+
+/* fedai_id: front-end DAI ID
+ * dspst_id: DSP audio stream (ASM session) ID
+ * stream_type: playback or capture
+ */
+void msm_pcm_routing_reg_phy_stream(int fedai_id, int dspst_id,
+	int stream_type);
+void msm_pcm_routing_dereg_phy_stream(int fedai_id, int stream_type);
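+
+/* Illustrative only: a front-end PCM driver is expected to register its DSP
+ * stream with the router once the stream is set up and deregister it on
+ * close, roughly as in the hypothetical sketch below (session_id stands for
+ * the ASM session handle obtained by that driver):
+ *
+ *	msm_pcm_routing_reg_phy_stream(MSM_FRONTEND_DAI_MULTIMEDIA1,
+ *			session_id, SNDRV_PCM_STREAM_PLAYBACK);
+ *	...
+ *	msm_pcm_routing_dereg_phy_stream(MSM_FRONTEND_DAI_MULTIMEDIA1,
+ *			SNDRV_PCM_STREAM_PLAYBACK);
+ */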
+
+int lpa_set_volume(unsigned volume);
+
+int msm_routing_check_backend_enabled(int fedai_id);
+
+int multi_ch_pcm_set_volume(unsigned volume);
+
+int compressed_set_volume(unsigned volume);
+
+#endif /*_MSM_PCM_ROUTING_H*/
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
new file mode 100644
index 0000000..691ca21
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -0,0 +1,621 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+
+
+#include <mach/qdsp6v2/audio_acdb.h>
+#include <mach/qdsp6v2/rtac.h>
+
+#include <sound/apr_audio-v2.h>
+#include <mach/qdsp6v2/apr.h>
+#include <sound/q6adm-v2.h>
+#include <sound/q6audio-v2.h>
+
+
+#define TIMEOUT_MS 1000
+
+#define RESET_COPP_ID 99
+#define INVALID_COPP_ID 0xFF
+
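+/* Per-AFE-port ADM state: copp_id holds the COPP handle returned by the DSP
+ * for each port index, copp_cnt reference-counts users of that COPP, and
+ * copp_stat is the completion flag paired with the per-port wait queue.
+ */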
+struct adm_ctl {
+	void *apr;
+	atomic_t copp_id[Q6_AFE_MAX_PORTS];
+	atomic_t copp_cnt[Q6_AFE_MAX_PORTS];
+	atomic_t copp_stat[Q6_AFE_MAX_PORTS];
+	u32      mem_map_handle[Q6_AFE_MAX_PORTS];
+	wait_queue_head_t wait[Q6_AFE_MAX_PORTS];
+};
+
+static struct adm_ctl			this_adm;
+
+static int32_t adm_callback(struct apr_client_data *data, void *priv)
+{
+	uint32_t *payload;
+	int i, index;
+	payload = data->payload;
+
+	if (data->opcode == RESET_EVENTS) {
+		pr_debug("adm_callback: Reset event is received: %d %d apr[%p]\n",
+				data->reset_event, data->reset_proc,
+				this_adm.apr);
+		if (this_adm.apr) {
+			apr_reset(this_adm.apr);
+			for (i = 0; i < Q6_AFE_MAX_PORTS; i++) {
+				atomic_set(&this_adm.copp_id[i],
+							RESET_COPP_ID);
+				atomic_set(&this_adm.copp_cnt[i], 0);
+				atomic_set(&this_adm.copp_stat[i], 0);
+			}
+			this_adm.apr = NULL;
+		}
+		return 0;
+	}
+
+	pr_debug("%s: code = 0x%x PL#0[%x], PL#1[%x], size = %d\n", __func__,
+			data->opcode, payload[0], payload[1],
+					data->payload_size);
+
+	if (data->payload_size) {
+		index = q6audio_get_port_index(data->token);
+		if (index < 0 || index >= Q6_AFE_MAX_PORTS) {
+			pr_err("%s: invalid port idx %d token %d\n",
+					__func__, index, data->token);
+			return 0;
+		}
+		if (data->opcode == APR_BASIC_RSP_RESULT) {
+			pr_debug("APR_BASIC_RSP_RESULT\n");
+			switch (payload[0]) {
+			case ADM_CMD_SET_PP_PARAMS_V5:
+				if (rtac_make_adm_callback(
+					payload, data->payload_size))
+					pr_debug("%s: payload[0]: 0x%x\n",
+						__func__, payload[0]);
+				break;
+			case ADM_CMD_DEVICE_CLOSE_V5:
+			case ADM_CMD_SHARED_MEM_UNMAP_REGIONS:
+			case ADM_CMD_SHARED_MEM_MAP_REGIONS:
+			case ADM_CMD_MATRIX_MAP_ROUTINGS_V5:
+				pr_debug("ADM_CMD_MATRIX_MAP_ROUTINGS\n");
+				atomic_set(&this_adm.copp_stat[index], 1);
+				wake_up(&this_adm.wait[index]);
+				break;
+			default:
+				pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
+								payload[0]);
+				break;
+			}
+			return 0;
+		}
+
+		switch (data->opcode) {
+		case ADM_CMDRSP_DEVICE_OPEN_V5: {
+			struct adm_cmd_rsp_device_open_v5 *open =
+			(struct adm_cmd_rsp_device_open_v5 *)data->payload;
+			if (open->copp_id == INVALID_COPP_ID) {
+				pr_err("%s: invalid coppid rxed %d\n",
+					__func__, open->copp_id);
+				atomic_set(&this_adm.copp_stat[index], 1);
+				wake_up(&this_adm.wait[index]);
+				break;
+			}
+			atomic_set(&this_adm.copp_id[index], open->copp_id);
+			atomic_set(&this_adm.copp_stat[index], 1);
+			pr_debug("%s: coppid rxed=%d\n", __func__,
+							open->copp_id);
+			wake_up(&this_adm.wait[index]);
+			}
+			break;
+		case ADM_CMD_GET_PP_PARAMS_V5:
+			pr_debug("%s: ADM_CMD_GET_PP_PARAMS_V5\n", __func__);
+			rtac_make_adm_callback(payload,
+				data->payload_size);
+			break;
+		default:
+			pr_err("%s: Unknown cmd:0x%x\n", __func__,
+							data->opcode);
+			break;
+		}
+	}
+	return 0;
+}
+
+/* TODO: define send_adm_cal_block() once calibration support is
+ * available for 8974.
+ */
+static void send_adm_cal(int port_id, int path)
+{
+	/* to be implemented once calibration support is available for 8974 */
+	pr_debug("%s\n", __func__);
+}
+
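+/* Open (or reuse) the COPP behind an AFE port.  The first caller for a given
+ * port sends ADM_CMD_DEVICE_OPEN_V5 and waits for the COPP id from the
+ * callback; subsequent callers only increment the per-port reference count.
+ */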
+int adm_open(int port_id, int path, int rate, int channel_mode, int topology)
+{
+	struct adm_cmd_device_open_v5	open;
+	int ret = 0;
+	int index;
+	int tmp_port = q6audio_get_port_id(port_id);
+
+	pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
+				port_id, path, rate, channel_mode);
+
+	port_id = q6audio_convert_virtual_to_portid(port_id);
+
+	if (q6audio_validate_port(port_id) < 0) {
+		pr_err("%s: port id[%d] is invalid\n", __func__, port_id);
+		return -ENODEV;
+	}
+
+	index = q6audio_get_port_index(port_id);
+	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);
+
+	if (this_adm.apr == NULL) {
+		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
+						0xFFFFFFFF, &this_adm);
+		if (this_adm.apr == NULL) {
+			pr_err("%s: Unable to register ADM\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+		rtac_set_adm_handle(this_adm.apr);
+	}
+
+
+	/* Create a COPP if port id are not enabled */
+	if (atomic_read(&this_adm.copp_cnt[index]) == 0) {
+
+		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+		open.hdr.pkt_size = sizeof(open);
+		open.hdr.src_svc = APR_SVC_ADM;
+		open.hdr.src_domain = APR_DOMAIN_APPS;
+		open.hdr.src_port = tmp_port;
+		open.hdr.dest_svc = APR_SVC_ADM;
+		open.hdr.dest_domain = APR_DOMAIN_ADSP;
+		open.hdr.dest_port = tmp_port;
+		open.hdr.token = port_id;
+		open.hdr.opcode = ADM_CMD_DEVICE_OPEN_V5;
+
+		open.mode_of_operation = path;
+		/* Reserved for future use, need to set this to 0 */
+		open.flags = 0x00;
+		open.endpoint_id_1 = tmp_port;
+		open.endpoint_id_2 = 0xFFFF;
+
+		/* convert path to acdb path */
+		if (path == ADM_PATH_PLAYBACK)
+			open.topology_id = get_adm_rx_topology();
+		else {
+			open.topology_id = get_adm_tx_topology();
+			if ((open.topology_id ==
+				VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
+			    (open.topology_id ==
+				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
+				rate = 16000;
+		}
+
+		if (open.topology_id  == 0)
+			open.topology_id = topology;
+
+		open.dev_num_channel = channel_mode & 0x00FF;
+		open.bit_width = 16;
+		open.sample_rate  = rate;
+		memset(open.dev_channel_mapping, 0, 8);
+
+		if (channel_mode == 1)	{
+			open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
+		} else if (channel_mode == 2) {
+			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
+			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
+		} else if (channel_mode == 6) {
+			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
+			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
+			open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
+			open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
+			open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
+			open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
+		} else {
+			pr_err("%s invalid num_chan %d\n", __func__,
+					channel_mode);
+			return -EINVAL;
+		}
+
+		pr_debug("%s: port_id=%d rate=%d"
+			"topology_id=0x%X\n", __func__,	open.endpoint_id_1, \
+				  open.sample_rate, open.topology_id);
+
+		atomic_set(&this_adm.copp_stat[index], 0);
+
+		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
+		if (ret < 0) {
+			pr_err("%s:ADM enable for port %d for[%d] failed\n",
+						__func__, tmp_port, port_id);
+			ret = -EINVAL;
+			goto fail_cmd;
+		}
+		/* Wait for the callback with copp id */
+		ret = wait_event_timeout(this_adm.wait[index],
+			atomic_read(&this_adm.copp_stat[index]),
+			msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s ADM open failed for port %d"
+			"for [%d]\n", __func__, tmp_port, port_id);
+			ret = -EINVAL;
+			goto fail_cmd;
+		}
+	}
+	atomic_inc(&this_adm.copp_cnt[index]);
+	return 0;
+
+fail_cmd:
+
+	return ret;
+}
+
+
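+/* Thin wrapper kept for multi-channel COPP users; it currently just
+ * forwards to adm_open() with the same arguments.
+ */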
+int adm_multi_ch_copp_open(int port_id, int path, int rate, int channel_mode,
+				int topology)
+{
+	int ret = 0;
+
+	ret = adm_open(port_id, path, rate, channel_mode, topology);
+
+	return ret;
+}
+
+int adm_matrix_map(int session_id, int path, int num_copps,
+			unsigned int *port_id, int copp_id)
+{
+	struct adm_cmd_matrix_map_routings_v5	*route;
+	struct adm_session_map_node_v5 *node;
+	uint32_t *copps_list;
+	int cmd_size = 0;
+	int ret = 0, i = 0;
+	void *payload = NULL;
+	void *matrix_map = NULL;
+
+	/* Assumes port_ids have already been validated during adm_open */
+	int index = q6audio_get_port_index(copp_id);
+	if (index < 0 || index >= Q6_AFE_MAX_PORTS) {
+		pr_err("%s: invalid port idx %d token %d\n",
+					__func__, index, copp_id);
+		return 0;
+	}
+	cmd_size = (sizeof(struct adm_cmd_matrix_map_routings_v5) +
+			sizeof(struct adm_session_map_node_v5) +
+			(sizeof(uint32_t) * num_copps));
+	matrix_map = kzalloc(cmd_size, GFP_KERNEL);
+	if (matrix_map == NULL) {
+		pr_err("%s: Mem alloc failed\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+	route = (struct adm_cmd_matrix_map_routings_v5 *)matrix_map;
+
+	pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0] :%d coppid[%d]\n",
+		 __func__, session_id, path, num_copps, port_id[0], copp_id);
+
+	route->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	route->hdr.pkt_size = cmd_size;
+	route->hdr.src_svc = 0;
+	route->hdr.src_domain = APR_DOMAIN_APPS;
+	route->hdr.src_port = copp_id;
+	route->hdr.dest_svc = APR_SVC_ADM;
+	route->hdr.dest_domain = APR_DOMAIN_ADSP;
+	route->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
+	route->hdr.token = copp_id;
+	route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
+	route->num_sessions = 1;
+
+	switch (path) {
+	case 0x1:
+		route->matrix_id = ADM_MATRIX_ID_AUDIO_RX;
+		break;
+	case 0x2:
+	case 0x3:
+		route->matrix_id = ADM_MATRIX_ID_AUDIO_TX;
+		break;
+	default:
+		pr_err("%s: Wrong path set[%d]\n", __func__, path);
+		break;
+	}
+	payload = ((u8 *)matrix_map +
+			sizeof(struct adm_cmd_matrix_map_routings_v5));
+	node = (struct adm_session_map_node_v5 *)payload;
+
+	node->session_id = session_id;
+	node->num_copps = num_copps;
+	payload = (u8 *)node + sizeof(struct adm_session_map_node_v5);
+	copps_list = (uint32_t *)payload;
+	for (i = 0; i < num_copps; i++) {
+		int tmp;
+		port_id[i] = q6audio_convert_virtual_to_portid(port_id[i]);
+
+		tmp = q6audio_get_port_index(port_id[i]);
+
+
+		if (tmp >= 0 && tmp < Q6_AFE_MAX_PORTS)
+			copps_list[i] =
+					atomic_read(&this_adm.copp_id[tmp]);
+		pr_debug("%s: port_id[%d]: %d, index: %d act coppid[0x%x]\n",
+			__func__, i, port_id[i], tmp,
+			atomic_read(&this_adm.copp_id[tmp]));
+	}
+	atomic_set(&this_adm.copp_stat[index], 0);
+
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *)matrix_map);
+	if (ret < 0) {
+		pr_err("%s: ADM routing for port %d failed\n",
+					__func__, port_id[0]);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	ret = wait_event_timeout(this_adm.wait[index],
+				atomic_read(&this_adm.copp_stat[index]),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: ADM cmd Route failed for port %d\n",
+					__func__, port_id[0]);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	for (i = 0; i < num_copps; i++)
+		send_adm_cal(port_id[i], path);
+
+fail_cmd:
+	kfree(matrix_map);
+	return ret;
+}
+
+int adm_memory_map_regions(int port_id,
+		uint32_t *buf_add, uint32_t mempool_id,
+		uint32_t *bufsz, uint32_t bufcnt)
+{
+	struct  avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
+	struct  avs_shared_map_region_payload *mregions = NULL;
+	void    *mmap_region_cmd = NULL;
+	void    *payload = NULL;
+	int     ret = 0;
+	int     i = 0;
+	int     cmd_size = 0;
+	int     index = 0;
+
+	pr_debug("%s\n", __func__);
+	if (this_adm.apr == NULL) {
+		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
+						0xFFFFFFFF, &this_adm);
+		if (this_adm.apr == NULL) {
+			pr_err("%s: Unable to register ADM\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+		rtac_set_adm_handle(this_adm.apr);
+	}
+
+	port_id = q6audio_convert_virtual_to_portid(port_id);
+
+	if (q6audio_validate_port(port_id) < 0) {
+		pr_err("%s port id[%d] is invalid\n", __func__, port_id);
+		return -ENODEV;
+	}
+
+	index = q6audio_get_port_index(port_id);
+
+	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
+			+ sizeof(struct avs_shared_map_region_payload)
+			* bufcnt;
+
+	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if (!mmap_region_cmd) {
+		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
+		return -ENOMEM;
+	}
+	mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
+	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+								APR_PKT_VER);
+	mmap_regions->hdr.pkt_size = cmd_size;
+	mmap_regions->hdr.src_port = 0;
+	mmap_regions->hdr.dest_port = 0;
+	mmap_regions->hdr.token = 0;
+	mmap_regions->hdr.opcode = ADM_CMD_SHARED_MEM_MAP_REGIONS;
+	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_EBI_POOL & 0x00ff;
+	mmap_regions->num_regions = bufcnt & 0x00ff;
+	mmap_regions->property_flag = 0x00;
+
+	pr_debug("%s: map_regions->num_regions = %d\n", __func__,
+				mmap_regions->num_regions);
+	payload = ((u8 *) mmap_region_cmd +
+				sizeof(struct avs_cmd_shared_mem_map_regions));
+	mregions = (struct avs_shared_map_region_payload *)payload;
+
+	for (i = 0; i < bufcnt; i++) {
+		mregions->shm_addr_lsw = buf_add[i];
+		mregions->shm_addr_msw = 0x00;
+		mregions->mem_size_bytes = bufsz[i];
+		++mregions;
+	}
+
+	atomic_set(&this_adm.copp_stat[0], 0);
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
+	if (ret < 0) {
+		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
+					mmap_regions->hdr.opcode, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_adm.wait[index],
+			atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
+	if (!ret) {
+		pr_err("%s: timeout. waited for memory_map\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+fail_cmd:
+	kfree(mmap_region_cmd);
+	return ret;
+}
+
+int adm_memory_unmap_regions(int32_t port_id, uint32_t *buf_add,
+			uint32_t *bufsz, uint32_t bufcnt)
+{
+	struct  avs_cmd_shared_mem_unmap_regions unmap_regions;
+	int     ret = 0;
+	int     index = 0;
+
+	pr_debug("%s\n", __func__);
+
+	if (this_adm.apr == NULL) {
+		pr_err("%s APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	port_id = q6audio_convert_virtual_to_portid(port_id);
+
+	if (q6audio_validate_port(port_id) < 0) {
+		pr_err("%s: port id[%d] is invalid\n", __func__, port_id);
+		return -ENODEV;
+	}
+
+	index = q6audio_get_port_index(port_id);
+
+	unmap_regions.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+							APR_PKT_VER);
+	unmap_regions.hdr.pkt_size = sizeof(unmap_regions);
+	unmap_regions.hdr.src_port = 0;
+	unmap_regions.hdr.dest_port = 0;
+	unmap_regions.hdr.token = 0;
+	unmap_regions.hdr.opcode = ADM_CMD_SHARED_MEM_UNMAP_REGIONS;
+	unmap_regions.mem_map_handle = this_adm.mem_map_handle[index];
+	atomic_set(&this_adm.copp_stat[0], 0);
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *) &unmap_regions);
+	if (ret < 0) {
+		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
+				unmap_regions.hdr.opcode, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_adm.wait[index],
+			atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
+	if (!ret) {
+		pr_err("%s: timeout. waited for memory_unmap\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+fail_cmd:
+	return ret;
+}
+
+int adm_get_copp_id(int port_index)
+{
+	pr_debug("%s\n", __func__);
+
+	if (port_index < 0) {
+		pr_err("%s: invalid port_id = %d\n", __func__, port_index);
+		return -EINVAL;
+	}
+
+	return atomic_read(&this_adm.copp_id[port_index]);
+}
+
+int adm_close(int port_id)
+{
+	struct apr_hdr close;
+
+	int ret = 0;
+	int index = 0;
+
+	port_id = q6audio_convert_virtual_to_portid(port_id);
+
+	index = q6audio_get_port_index(port_id);
+	if (q6audio_validate_port(port_id) < 0)
+		return -EINVAL;
+
+	pr_debug("%s port_id=%d index %d\n", __func__, port_id, index);
+
+	if (!(atomic_read(&this_adm.copp_cnt[index]))) {
+		pr_err("%s: copp count for port[%d]is 0\n", __func__, port_id);
+
+		goto fail_cmd;
+	}
+	atomic_dec(&this_adm.copp_cnt[index]);
+	if (!(atomic_read(&this_adm.copp_cnt[index]))) {
+
+		close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+		close.pkt_size = sizeof(close);
+		close.src_svc = APR_SVC_ADM;
+		close.src_domain = APR_DOMAIN_APPS;
+		close.src_port = port_id;
+		close.dest_svc = APR_SVC_ADM;
+		close.dest_domain = APR_DOMAIN_ADSP;
+		close.dest_port = atomic_read(&this_adm.copp_id[index]);
+		close.token = port_id;
+		close.opcode = ADM_CMD_DEVICE_CLOSE_V5;
+
+		atomic_set(&this_adm.copp_id[index], RESET_COPP_ID);
+		atomic_set(&this_adm.copp_stat[index], 0);
+
+
+		pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n",
+				__func__,
+				atomic_read(&this_adm.copp_id[index]),
+				port_id, index,
+				atomic_read(&this_adm.copp_cnt[index]));
+
+		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
+		if (ret < 0) {
+			pr_err("%s ADM close failed\n", __func__);
+			ret = -EINVAL;
+			goto fail_cmd;
+		}
+
+		ret = wait_event_timeout(this_adm.wait[index],
+				atomic_read(&this_adm.copp_stat[index]),
+				msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: ADM cmd Route failed for port %d\n",
+						__func__, port_id);
+			ret = -EINVAL;
+			goto fail_cmd;
+		}
+
+		rtac_remove_adm_device(port_id);
+	}
+
+fail_cmd:
+	return ret;
+}
+
+static int __init adm_init(void)
+{
+	int i = 0;
+	this_adm.apr = NULL;
+
+	for (i = 0; i < Q6_AFE_MAX_PORTS; i++) {
+		atomic_set(&this_adm.copp_id[i], RESET_COPP_ID);
+		atomic_set(&this_adm.copp_cnt[i], 0);
+		atomic_set(&this_adm.copp_stat[i], 0);
+		init_waitqueue_head(&this_adm.wait[i]);
+	}
+	return 0;
+}
+
+device_initcall(adm_init);
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
new file mode 100644
index 0000000..5b30e8e
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -0,0 +1,1584 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <mach/qdsp6v2/audio_acdb.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/q6afe-v2.h>
+
+#include <sound/q6audio-v2.h>
+
+
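+/* Global AFE client state: a single APR handle, state/status flags for the
+ * command in flight, per-port wait queues for command completion, and the
+ * RT proxy port read/write callbacks with their private data.
+ */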
+struct afe_ctl {
+	void *apr;
+	atomic_t state;
+	atomic_t status;
+	wait_queue_head_t wait[AFE_MAX_PORTS];
+	void (*tx_cb) (uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv);
+	void (*rx_cb) (uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv);
+	void *tx_private_data;
+	void *rx_private_data;
+};
+
+static struct afe_ctl this_afe;
+
+static struct acdb_cal_block afe_cal_addr[MAX_AUDPROC_TYPES];
+
+#define TIMEOUT_MS 1000
+#define Q6AFE_MAX_VOLUME 0x3FFF
+
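+/* Size of a per-port configuration command: APR header plus a u16 plus the
+ * interface-specific config struct 'y'.  Used by afe_sizeof_cfg_cmd() below.
+ */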
+#define SIZEOF_CFG_CMD(y) \
+		(sizeof(struct apr_hdr) + sizeof(u16) + (sizeof(struct y)))
+
+static int32_t afe_callback(struct apr_client_data *data, void *priv)
+{
+	if (data->opcode == RESET_EVENTS) {
+		pr_debug("q6afe: reset event = %d %d apr[%p]\n",
+			data->reset_event, data->reset_proc, this_afe.apr);
+		if (this_afe.apr) {
+			apr_reset(this_afe.apr);
+			atomic_set(&this_afe.state, 0);
+			this_afe.apr = NULL;
+		}
+		return 0;
+	}
+	pr_debug("%s:opcode = 0x%x cmd = 0x%x status = 0x%x\n",
+				__func__, data->opcode,
+				((uint32_t *)(data->payload))[0],
+				((uint32_t *)(data->payload))[1]);
+	if (data->payload_size) {
+		uint32_t *payload;
+		uint16_t port_id = 0;
+		payload = data->payload;
+		pr_debug("%s:opcode = 0x%x cmd = 0x%x status = 0x%x token=%d\n",
+					__func__, data->opcode,
+					payload[0], payload[1], data->token);
+		/* payload[1] contains the error status for response */
+		if (payload[1] != 0) {
+			atomic_set(&this_afe.status, -1);
+			pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+					__func__, payload[0], payload[1]);
+		}
+		if (data->opcode == APR_BASIC_RSP_RESULT) {
+			switch (payload[0]) {
+			case AFE_PORT_CMD_DEVICE_STOP:
+			case AFE_PORT_CMD_DEVICE_START:
+			case AFE_PORT_CMD_SET_PARAM_V2:
+			case AFE_PSEUDOPORT_CMD_START:
+			case AFE_PSEUDOPORT_CMD_STOP:
+			case AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS:
+			case AFE_SERVICE_CMD_SHARED_MEM_UNMAP_REGIONS:
+			case AFE_SERVICE_CMD_UNREGISTER_RT_PORT_DRIVER:
+				atomic_set(&this_afe.state, 0);
+				wake_up(&this_afe.wait[data->token]);
+				break;
+			case AFE_SERVICE_CMD_REGISTER_RT_PORT_DRIVER:
+				break;
+			case AFE_PORT_DATA_CMD_RT_PROXY_PORT_WRITE_V2:
+				port_id = RT_PROXY_PORT_001_TX;
+				break;
+			case AFE_PORT_DATA_CMD_RT_PROXY_PORT_READ_V2:
+				port_id = RT_PROXY_PORT_001_RX;
+				break;
+			default:
+				pr_err("%s:Unknown cmd 0x%x\n", __func__,
+						payload[0]);
+				break;
+			}
+		} else if (data->opcode == AFE_EVENT_RT_PROXY_PORT_STATUS) {
+			port_id = (uint16_t)(0x0000FFFF & payload[0]);
+		}
+		pr_debug("%s:port_id = %x\n", __func__, port_id);
+		switch (port_id) {
+		case RT_PROXY_PORT_001_TX: {
+			if (this_afe.tx_cb) {
+				this_afe.tx_cb(data->opcode, data->token,
+					data->payload,
+					this_afe.tx_private_data);
+			}
+			break;
+		}
+		case RT_PROXY_PORT_001_RX: {
+			if (this_afe.rx_cb) {
+				this_afe.rx_cb(data->opcode, data->token,
+					data->payload,
+					this_afe.rx_private_data);
+			}
+			break;
+		}
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+
+int afe_get_port_type(u16 port_id)
+{
+	int ret;
+
+	switch (port_id) {
+	case PRIMARY_I2S_RX:
+	case PCM_RX:
+	case SECONDARY_I2S_RX:
+	case MI2S_RX:
+	case HDMI_RX:
+	case SLIMBUS_0_RX:
+	case SLIMBUS_1_RX:
+	case INT_BT_SCO_RX:
+	case INT_BT_A2DP_RX:
+	case INT_FM_RX:
+	case VOICE_PLAYBACK_TX:
+	case RT_PROXY_PORT_001_RX:
+		ret = MSM_AFE_PORT_TYPE_RX;
+		break;
+
+	case PRIMARY_I2S_TX:
+	case PCM_TX:
+	case SECONDARY_I2S_TX:
+	case MI2S_TX:
+	case DIGI_MIC_TX:
+	case VOICE_RECORD_TX:
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_TX:
+	case INT_FM_TX:
+	case VOICE_RECORD_RX:
+	case INT_BT_SCO_TX:
+	case RT_PROXY_PORT_001_TX:
+		ret = MSM_AFE_PORT_TYPE_TX;
+		break;
+
+	default:
+		pr_err("%s: invalid port id %d\n", __func__, port_id);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+int afe_sizeof_cfg_cmd(u16 port_id)
+{
+	int ret_size;
+	switch (port_id) {
+	case PRIMARY_I2S_RX:
+	case PRIMARY_I2S_TX:
+	case SECONDARY_I2S_RX:
+	case SECONDARY_I2S_TX:
+	case MI2S_RX:
+	case MI2S_TX:
+		ret_size = SIZEOF_CFG_CMD(afe_param_id_i2s_cfg);
+		break;
+	case HDMI_RX:
+		ret_size =
+		SIZEOF_CFG_CMD(afe_param_id_hdmi_multi_chan_audio_cfg);
+		break;
+	case SLIMBUS_0_RX:
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_RX:
+	case SLIMBUS_1_TX:
+		ret_size = SIZEOF_CFG_CMD(afe_param_id_slimbus_cfg);
+		break;
+	case RT_PROXY_PORT_001_RX:
+	case RT_PROXY_PORT_001_TX:
+		ret_size = SIZEOF_CFG_CMD(afe_param_id_rt_proxy_port_cfg);
+		break;
+	case PCM_RX:
+	case PCM_TX:
+	default:
+		ret_size = SIZEOF_CFG_CMD(afe_param_id_pcm_cfg);
+		break;
+	}
+	return ret_size;
+}
+
+int afe_q6_interface_prepare(void)
+{
+	int ret = 0;
+
+	pr_debug("%s:", __func__);
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+			0xFFFFFFFF, &this_afe);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+		}
+	}
+	return ret;
+}
+static void afe_send_cal_block(int32_t path, u16 port_id)
+{
+	/* to be implemented */
+}
+
+void afe_send_cal(u16 port_id)
+{
+	pr_debug("%s\n", __func__);
+
+	if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_TX)
+		afe_send_cal_block(TX_CAL, port_id);
+	else if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_RX)
+		afe_send_cal_block(RX_CAL, port_id);
+}
+
+int afe_port_start_nowait(u16 port_id, union afe_port_config *afe_config,
+	u32 rate) /* This function is non-blocking */
+{
+	struct afe_port_cmd_device_start start;
+	struct afe_audioif_config_command config;
+	int ret;
+	int cfg_type;
+	int index = 0;
+
+	if (!afe_config) {
+		pr_err("%s: Error, no configuration data\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+	pr_err("%s: %d %d\n", __func__, port_id, rate);
+	index = q6audio_get_port_index(port_id);
+	if (q6audio_validate_port(port_id) < 0)
+		return -EINVAL;
+
+	if ((port_id == RT_PROXY_DAI_001_RX) ||
+		(port_id == RT_PROXY_DAI_002_TX))
+		return -EINVAL;
+	if ((port_id == RT_PROXY_DAI_002_RX) ||
+		(port_id == RT_PROXY_DAI_001_TX))
+		port_id = VIRTUAL_ID_TO_PORTID(port_id);
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0)
+		return ret;
+	if (q6audio_validate_port(port_id) < 0) {
+		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
+				port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+
+	config.hdr.token = index;
+	switch (port_id) {
+	case PRIMARY_I2S_RX:
+	case PRIMARY_I2S_TX:
+		cfg_type = AFE_PARAM_ID_I2S_CONFIG;
+		break;
+	case PCM_RX:
+	case PCM_TX:
+		cfg_type = AFE_PARAM_ID_PCM_CONFIG;
+		break;
+	case SECONDARY_I2S_RX:
+	case SECONDARY_I2S_TX:
+	case MI2S_RX:
+	case MI2S_TX:
+		cfg_type = AFE_PARAM_ID_I2S_CONFIG;
+		break;
+	case HDMI_RX:
+		cfg_type = AFE_PARAM_ID_HDMI_CONFIG;
+		break;
+	case SLIMBUS_0_RX:
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_RX:
+	case SLIMBUS_1_TX:
+	case SLIMBUS_2_RX:
+	case SLIMBUS_2_TX:
+	case SLIMBUS_3_RX:
+	case SLIMBUS_3_TX:
+	case SLIMBUS_4_RX:
+	case SLIMBUS_4_TX:
+		cfg_type = AFE_PARAM_ID_SLIMBUS_CONFIG;
+		break;
+	default:
+		pr_err("%s: Invalid port id 0x%x\n", __func__, port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	config.param.port_id = port_id;
+	config.param.payload_size = (afe_sizeof_cfg_cmd(port_id) +
+				sizeof(struct afe_port_param_data_v2));
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	config.pdata.param_id = cfg_type;
+	config.pdata.param_size =  afe_sizeof_cfg_cmd(port_id);
+
+	config.port = *afe_config;
+
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
+	if (ret < 0) {
+		pr_err("%s: AFE enable for port %d failed\n", __func__,
+				port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	/* send AFE cal */
+	afe_send_cal(port_id);
+
+	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	start.hdr.pkt_size = sizeof(start);
+	start.hdr.src_port = 0;
+	start.hdr.dest_port = 0;
+	start.hdr.token = 0;
+	start.hdr.opcode = AFE_PORT_CMD_DEVICE_START;
+	start.port_id = port_id;
+
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
+
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("%s: AFE enable for port %d failed\n", __func__,
+				port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	return 0;
+
+fail_cmd:
+	return ret;
+}
+
+int afe_open(u16 port_id,
+		union afe_port_config *afe_config, int rate)
+{
+	struct afe_port_cmd_device_start start;
+	struct afe_audioif_config_command config;
+	int ret = 0;
+	int cfg_type;
+	int index = 0;
+
+	if (!afe_config) {
+		pr_err("%s: Error, no configuration data\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	pr_err("%s: %d %d\n", __func__, port_id, rate);
+
+	index = q6audio_get_port_index(port_id);
+	if (q6audio_validate_port(port_id) < 0)
+		return -EINVAL;
+
+	if ((port_id == RT_PROXY_DAI_001_RX) ||
+		(port_id == RT_PROXY_DAI_002_TX))
+		return -EINVAL;
+	if ((port_id == RT_PROXY_DAI_002_RX) ||
+		(port_id == RT_PROXY_DAI_001_TX))
+		port_id = VIRTUAL_ID_TO_PORTID(port_id);
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0)
+		return ret;
+
+	if (q6audio_validate_port(port_id) < 0) {
+		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
+				port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	config.hdr.pkt_size = sizeof(config);
+	config.hdr.src_port = 0;
+	config.hdr.dest_port = 0;
+	config.hdr.token = index;
+	switch (port_id) {
+	case PRIMARY_I2S_RX:
+	case PRIMARY_I2S_TX:
+		cfg_type = AFE_PARAM_ID_I2S_CONFIG;
+		break;
+	case PCM_RX:
+	case PCM_TX:
+		cfg_type = AFE_PARAM_ID_PCM_CONFIG;
+		break;
+	case SECONDARY_I2S_RX:
+	case SECONDARY_I2S_TX:
+	case MI2S_RX:
+	case MI2S_TX:
+		cfg_type = AFE_PARAM_ID_I2S_CONFIG;
+		break;
+	case HDMI_RX:
+		cfg_type = AFE_PARAM_ID_HDMI_CONFIG;
+		break;
+	case SLIMBUS_0_RX:
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_RX:
+	case SLIMBUS_1_TX:
+	case SLIMBUS_2_RX:
+	case SLIMBUS_2_TX:
+	case SLIMBUS_3_RX:
+	case SLIMBUS_3_TX:
+	case SLIMBUS_4_RX:
+	case SLIMBUS_4_TX:
+		cfg_type = AFE_PARAM_ID_SLIMBUS_CONFIG;
+		break;
+	default:
+		pr_err("%s: Invalid port id 0x%x\n", __func__, port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	config.param.port_id = q6audio_get_port_id(port_id);
+	config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr)
+				 - sizeof(config.param);
+	config.param.payload_address_lsw = 0x00;
+	config.param.payload_address_msw = 0x00;
+	config.param.mem_map_handle = 0x00;
+	config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+	config.pdata.param_id = cfg_type;
+	config.pdata.param_size =  sizeof(config.port);
+
+	config.port = *afe_config;
+	pr_debug("%s: param PL size=%d iparam_size[%d][%zd %zd %zd %zd] param_id[0x%x]\n",
+		__func__, config.param.payload_size, config.pdata.param_size,
+		sizeof(config), sizeof(config.param), sizeof(config.port),
+		sizeof(struct apr_hdr), config.pdata.param_id);
+	atomic_set(&this_afe.state, 1);
+	atomic_set(&this_afe.status, 0);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
+	if (ret < 0) {
+		pr_err("%s: AFE enable for port %d opcode[0x%x]failed\n",
+			__func__, port_id, cfg_type);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+			(atomic_read(&this_afe.state) == 0),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (atomic_read(&this_afe.status) != 0) {
+		pr_err("%s: config cmd failed\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	start.hdr.pkt_size = sizeof(start);
+	start.hdr.src_port = 0;
+	start.hdr.dest_port = 0;
+	start.hdr.token = index;
+	start.hdr.opcode = AFE_PORT_CMD_DEVICE_START;
+	start.port_id = q6audio_get_port_id(port_id);
+	pr_debug("%s: cmd device start opcode[0x%x] port id[0x%x]\n",
+		__func__, start.hdr.opcode, start.port_id);
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
+	if (ret < 0) {
+		pr_err("%s: AFE enable for port %d failed\n", __func__,
+				port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	ret = wait_event_timeout(this_afe.wait[index],
+			(atomic_read(&this_afe.state) == 0),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	return 0;
+fail_cmd:
+	return ret;
+}
+
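+/*
+ * Enable or disable an AFE loopback from tx_port to rx_port using the
+ * AFE_MODULE_LOOPBACK configuration applied on the TX port.
+ */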
+int afe_loopback(u16 enable, u16 rx_port, u16 tx_port)
+{
+	struct afe_loopback_cfg_v1 lb_cmd;
+	int ret = 0;
+	int index = 0;
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0)
+		return ret;
+
+	index = q6audio_get_port_index(rx_port);
+	if (q6audio_validate_port(rx_port) < 0)
+		return -EINVAL;
+
+	lb_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(20), APR_PKT_VER);
+	lb_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+						sizeof(lb_cmd) - APR_HDR_SIZE);
+	lb_cmd.hdr.src_port = 0;
+	lb_cmd.hdr.dest_port = 0;
+	lb_cmd.hdr.token = 0;
+	lb_cmd.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	lb_cmd.param.port_id = tx_port;
+	lb_cmd.param.payload_size = (sizeof(lb_cmd) -
+			sizeof(struct apr_hdr) -
+			sizeof(struct afe_port_cmd_set_param_v2));
+	lb_cmd.param.payload_address_lsw = 0x00;
+	lb_cmd.param.payload_address_msw = 0x00;
+	lb_cmd.param.mem_map_handle = 0x00;
+	lb_cmd.pdata.module_id = AFE_MODULE_LOOPBACK;
+	lb_cmd.pdata.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG;
+	lb_cmd.pdata.param_size =  lb_cmd.param.payload_size -
+				sizeof(struct afe_port_param_data_v2);
+
+	lb_cmd.dst_port_id = rx_port;
+	lb_cmd.routing_mode = LB_MODE_DEFAULT;
+	lb_cmd.enable = (enable ? 1 : 0);
+	lb_cmd.loopback_cfg_minor_version =
+					AFE_API_VERSION_LOOPBACK_CONFIG;
+	atomic_set(&this_afe.state, 1);
+
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &lb_cmd);
+	if (ret < 0) {
+		pr_err("%s: AFE loopback failed\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	ret = wait_event_timeout(this_afe.wait[index],
+		(atomic_read(&this_afe.state) == 0),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+	}
+done:
+	return ret;
+}
+
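+/* Set the loopback gain on a TX port; RX (even-numbered) ports are rejected. */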
+int afe_loopback_gain(u16 port_id, u16 volume)
+{
+	struct afe_loopback_gain_per_path_param set_param;
+	int ret = 0;
+	int index = 0;
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+					0xFFFFFFFF, &this_afe);
+		pr_debug("%s: Register AFE\n", __func__);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+	}
+
+	if (q6audio_validate_port(port_id) < 0) {
+		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
+				port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	index = q6audio_get_port_index(port_id);
+
+	/* RX port numbers are even; TX port numbers are odd. */
+	if (port_id % 2 == 0) {
+		pr_err("%s: Failed : afe loopback gain only for TX ports. port_id %d\n",
+			__func__, port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	pr_debug("%s: %d %hX\n", __func__, port_id, volume);
+
+	set_param.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	set_param.hdr.pkt_size = sizeof(set_param);
+	set_param.hdr.src_port = 0;
+	set_param.hdr.dest_port = 0;
+	set_param.hdr.token = 0;
+	set_param.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+
+	set_param.param.port_id		= port_id;
+	set_param.param.payload_size		=
+		(sizeof(struct afe_loopback_gain_per_path_param) -
+		 sizeof(struct apr_hdr) -
+		sizeof(struct afe_port_cmd_set_param_v2));
+	set_param.param.payload_address_lsw	= 0;
+	set_param.param.payload_address_msw	= 0;
+	set_param.param.mem_map_handle        = 0;
+
+	set_param.pdata.module_id	= AFE_MODULE_LOOPBACK;
+	set_param.pdata.param_id	= AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH;
+	set_param.pdata.param_size = (set_param.param.payload_size -
+				sizeof(struct afe_port_param_data_v2));
+	set_param.rx_port_id = port_id;
+	set_param.gain	= volume;
+
+	set_param.hdr.token = index;
+
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &set_param);
+	if (ret < 0) {
+		pr_err("%s: AFE param set failed for port %d\n",
+					__func__, port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+		(atomic_read(&this_afe.state) == 0),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return ret;
+}
+
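+/* Start a pseudo port without waiting for the DSP acknowledgement. */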
+int afe_pseudo_port_start_nowait(u16 port_id)
+{
+	struct afe_pseudoport_start_command start;
+	int ret = 0;
+
+	pr_debug("%s: port_id=%d\n", __func__, port_id);
+	if (this_afe.apr == NULL) {
+		pr_err("%s: AFE APR is not registered\n", __func__);
+		return -ENODEV;
+	}
+
+	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	start.hdr.pkt_size = sizeof(start);
+	start.hdr.src_port = 0;
+	start.hdr.dest_port = 0;
+	start.hdr.token = 0;
+	start.hdr.opcode = AFE_PSEUDOPORT_CMD_START;
+	start.port_id = port_id;
+	start.timing = 1;
+
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
+	if (ret < 0) {
+		pr_err("%s: AFE enable for port %d failed %d\n",
+		       __func__, port_id, ret);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int afe_start_pseudo_port(u16 port_id)
+{
+	int ret = 0;
+	struct afe_pseudoport_start_command start;
+	int index = 0;
+
+	pr_debug("%s: port_id=%d\n", __func__, port_id);
+
+	ret = afe_q6_interface_prepare();
+	if (ret != 0)
+		return ret;
+
+	index = q6audio_get_port_index(port_id);
+	if (q6audio_validate_port(port_id) < 0)
+		return -EINVAL;
+
+	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	start.hdr.pkt_size = sizeof(start);
+	start.hdr.src_port = 0;
+	start.hdr.dest_port = 0;
+	start.hdr.token = 0;
+	start.hdr.opcode = AFE_PSEUDOPORT_CMD_START;
+	start.port_id = port_id;
+	start.timing = 1;
+
+	start.hdr.token = index;
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
+	if (ret < 0) {
+		pr_err("%s: AFE enable for port %d failed %d\n",
+		       __func__, port_id, ret);
+		return -EINVAL;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+				 (atomic_read(&this_afe.state) == 0),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int afe_pseudo_port_stop_nowait(u16 port_id)
+{
+	int ret = 0;
+	struct afe_pseudoport_stop_command stop;
+	int index = 0;
+
+	pr_debug("%s: port_id=%d\n", __func__, port_id);
+
+	if (this_afe.apr == NULL) {
+		pr_err("%s: AFE is already closed\n", __func__);
+		return -EINVAL;
+	}
+	index = q6audio_get_port_index(port_id);
+	if (q6audio_validate_port(port_id) < 0)
+		return -EINVAL;
+
+	stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	stop.hdr.pkt_size = sizeof(stop);
+	stop.hdr.src_port = 0;
+	stop.hdr.dest_port = 0;
+	stop.hdr.token = 0;
+	stop.hdr.opcode = AFE_PSEUDOPORT_CMD_STOP;
+	stop.port_id = port_id;
+	stop.reserved = 0;
+
+	stop.hdr.token = index;
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop);
+	if (ret < 0) {
+		pr_err("%s: AFE close failed %d\n", __func__, ret);
+		return -EINVAL;
+	}
+
+	return 0;
+
+}
+
+int afe_stop_pseudo_port(u16 port_id)
+{
+	int ret = 0;
+	struct afe_pseudoport_stop_command stop;
+	int index = 0;
+
+	pr_debug("%s: port_id=%d\n", __func__, port_id);
+
+	if (this_afe.apr == NULL) {
+		pr_err("%s: AFE is already closed\n", __func__);
+		return -EINVAL;
+	}
+
+	index = q6audio_get_port_index(port_id);
+	if (q6audio_validate_port(port_id) < 0)
+		return -EINVAL;
+
+	stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	stop.hdr.pkt_size = sizeof(stop);
+	stop.hdr.src_port = 0;
+	stop.hdr.dest_port = 0;
+	stop.hdr.token = 0;
+	stop.hdr.opcode = AFE_PSEUDOPORT_CMD_STOP;
+	stop.port_id = port_id;
+	stop.reserved = 0;
+
+	stop.hdr.token = index;
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop);
+	if (ret < 0) {
+		pr_err("%s: AFE close failed %d\n", __func__, ret);
+		return -EINVAL;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+				 (atomic_read(&this_afe.state) == 0),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* TODO: the memory map handle returned by the DSP needs to be stored by the AFE client */
+int afe_cmd_memory_map(u32 dma_addr_p, u32 dma_buf_sz)
+{
+	int ret = 0;
+	int cmd_size = 0;
+	void    *payload = NULL;
+	void    *mmap_region_cmd = NULL;
+	struct afe_service_cmd_shared_mem_map_regions *mregion = NULL;
+	struct  afe_service_shared_map_region_payload *mregion_pl = NULL;
+	int index = 0;
+
+	pr_debug("%s:\n", __func__);
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+					0xFFFFFFFF, &this_afe);
+		pr_debug("%s: Register AFE\n", __func__);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+	}
+
+	cmd_size = sizeof(struct afe_service_cmd_shared_mem_map_regions)
+		+ sizeof(struct afe_service_shared_map_region_payload);
+
+	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if (!mmap_region_cmd) {
+		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	mregion = (struct afe_service_cmd_shared_mem_map_regions *)
+							mmap_region_cmd;
+	mregion->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mregion->hdr.pkt_size = cmd_size;
+	mregion->hdr.src_port = 0;
+	mregion->hdr.dest_port = 0;
+	mregion->hdr.token = 0;
+	mregion->hdr.opcode = AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS;
+	mregion->mem_pool_id = ADSP_MEMORY_MAP_EBI_POOL;
+	mregion->num_regions = 1;
+	mregion->property_flag = 0x00;
+	/* Todo */
+	index = mregion->hdr.token = IDX_RSVD_2;
+
+	payload = ((u8 *) mmap_region_cmd +
+		   sizeof(struct afe_service_cmd_shared_mem_map_regions));
+
+	mregion_pl = (struct afe_service_shared_map_region_payload *)payload;
+
+	mregion_pl->shm_addr_lsw = dma_addr_p;
+	mregion_pl->shm_addr_msw = 0x00;
+	mregion_pl->mem_size_bytes = dma_buf_sz;
+
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) mmap_region_cmd);
+	/* the packet has been handed to APR, so the local command
+	 * buffer is no longer needed
+	 */
+	kfree(mmap_region_cmd);
+	if (ret < 0) {
+		pr_err("%s: AFE memory map cmd failed %d\n",
+		       __func__, ret);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+				 (atomic_read(&this_afe.state) == 0),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	return 0;
+}
+
+int afe_cmd_memory_map_nowait(int port_id, u32 dma_addr_p, u32 dma_buf_sz)
+{
+	int ret = 0;
+	int cmd_size = 0;
+	void    *payload = NULL;
+	void    *mmap_region_cmd = NULL;
+	struct afe_service_cmd_shared_mem_map_regions *mregion = NULL;
+	struct  afe_service_shared_map_region_payload *mregion_pl = NULL;
+	int index = 0;
+
+	pr_debug("%s:\n", __func__);
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+					0xFFFFFFFF, &this_afe);
+		pr_debug("%s: Register AFE\n", __func__);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+	}
+	index = q6audio_get_port_index(port_id);
+	if (q6audio_validate_port(port_id) < 0)
+		return -EINVAL;
+
+	cmd_size = sizeof(struct afe_service_cmd_shared_mem_map_regions)
+		+ sizeof(struct afe_service_shared_map_region_payload);
+
+	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if (!mmap_region_cmd) {
+		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
+		return -ENOMEM;
+	}
+	mregion = (struct afe_service_cmd_shared_mem_map_regions *)
+						mmap_region_cmd;
+	mregion->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mregion->hdr.pkt_size = cmd_size;
+	mregion->hdr.src_port = 0;
+	mregion->hdr.dest_port = 0;
+	mregion->hdr.token = 0;
+	mregion->hdr.opcode = AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS;
+	mregion->mem_pool_id = ADSP_MEMORY_MAP_EBI_POOL;
+	mregion->num_regions = 1;
+	mregion->property_flag = 0x00;
+
+	payload = ((u8 *) mmap_region_cmd +
+		sizeof(struct afe_service_cmd_shared_mem_map_regions));
+	mregion_pl = (struct afe_service_shared_map_region_payload *)payload;
+
+	mregion_pl->shm_addr_lsw = dma_addr_p;
+	mregion_pl->shm_addr_msw = 0x00;
+	mregion_pl->mem_size_bytes = dma_buf_sz;
+
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) mmap_region_cmd);
+	kfree(mmap_region_cmd);
+	if (ret < 0) {
+		pr_err("%s: AFE memory map cmd failed %d\n",
+		       __func__, ret);
+		ret = -EINVAL;
+		return ret;
+	}
+	return 0;
+}
+
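+/* Unmap a shared memory region identified by the handle returned by the DSP. */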
+int afe_cmd_memory_unmap(u32 mem_map_handle)
+{
+	int ret = 0;
+	struct afe_service_cmd_shared_mem_unmap_regions mregion;
+	int index = 0;
+
+	pr_debug("%s:\n", __func__);
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+					0xFFFFFFFF, &this_afe);
+		pr_debug("%s: Register AFE\n", __func__);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+	}
+
+	mregion.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mregion.hdr.pkt_size = sizeof(mregion);
+	mregion.hdr.src_port = 0;
+	mregion.hdr.dest_port = 0;
+	mregion.hdr.token = 0;
+	mregion.hdr.opcode = AFE_SERVICE_CMD_SHARED_MEM_UNMAP_REGIONS;
+	mregion.mem_map_handle = mem_map_handle;
+
+	/* Todo */
+	index = mregion.hdr.token = IDX_RSVD_2;
+
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &mregion);
+	if (ret < 0) {
+		pr_err("%s: AFE memory unmap cmd failed %d\n",
+		       __func__, ret);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+				 (atomic_read(&this_afe.state) == 0),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+	return 0;
+}
+
+int afe_cmd_memory_unmap_nowait(u32 mem_map_handle)
+{
+	int ret = 0;
+	struct afe_service_cmd_shared_mem_unmap_regions mregion;
+
+	pr_debug("%s:\n", __func__);
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+					0xFFFFFFFF, &this_afe);
+		pr_debug("%s: Register AFE\n", __func__);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+	}
+
+	mregion.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mregion.hdr.pkt_size = sizeof(mregion);
+	mregion.hdr.src_port = 0;
+	mregion.hdr.dest_port = 0;
+	mregion.hdr.token = 0;
+	mregion.hdr.opcode = AFE_SERVICE_CMD_SHARED_MEM_UNMAP_REGIONS;
+	mregion.mem_map_handle = mem_map_handle;
+
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &mregion);
+	if (ret < 0) {
+		pr_err("%s: AFE memory unmap cmd failed %d\n",
+			__func__, ret);
+		return -EINVAL;
+	}
+	return 0;
+}
+
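+/*
+ * Register a callback for real-time proxy port events; only the RT proxy
+ * DAI ports are accepted.
+ */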
+int afe_register_get_events(u16 port_id,
+		void (*cb) (uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv),
+		void *private_data)
+{
+	int ret = 0;
+	struct afe_service_cmd_register_rt_port_driver rtproxy;
+
+	pr_debug("%s:\n", __func__);
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+					0xFFFFFFFF, &this_afe);
+		pr_debug("%s: Register AFE\n", __func__);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+	}
+	if ((port_id == RT_PROXY_DAI_002_RX) ||
+		(port_id == RT_PROXY_DAI_001_TX))
+		port_id = VIRTUAL_ID_TO_PORTID(port_id);
+	else
+		return -EINVAL;
+
+	if (port_id == RT_PROXY_PORT_001_TX) {
+		this_afe.tx_cb = cb;
+		this_afe.tx_private_data = private_data;
+	} else if (port_id == RT_PROXY_PORT_001_RX) {
+		this_afe.rx_cb = cb;
+		this_afe.rx_private_data = private_data;
+	}
+
+	rtproxy.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	rtproxy.hdr.pkt_size = sizeof(rtproxy);
+	rtproxy.hdr.src_port = 1;
+	rtproxy.hdr.dest_port = 1;
+	rtproxy.hdr.opcode = AFE_SERVICE_CMD_REGISTER_RT_PORT_DRIVER;
+	rtproxy.port_id = port_id;
+	rtproxy.reserved = 0;
+
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &rtproxy);
+	if (ret < 0) {
+		pr_err("%s: AFE register rtproxy event failed %d\n",
+			   __func__, ret);
+		ret = -EINVAL;
+		return ret;
+	}
+	return 0;
+}
+
+int afe_unregister_get_events(u16 port_id)
+{
+	int ret = 0;
+	struct afe_service_cmd_unregister_rt_port_driver rtproxy;
+	int index = 0;
+
+	pr_debug("%s:\n", __func__);
+
+	if (this_afe.apr == NULL) {
+		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+					0xFFFFFFFF, &this_afe);
+		pr_debug("%s: Register AFE\n", __func__);
+		if (this_afe.apr == NULL) {
+			pr_err("%s: Unable to register AFE\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+	}
+	index = q6audio_get_port_index(port_id);
+	if (q6audio_validate_port(port_id) < 0)
+		return -EINVAL;
+
+	if ((port_id == RT_PROXY_DAI_002_RX) ||
+		(port_id == RT_PROXY_DAI_001_TX))
+		port_id = VIRTUAL_ID_TO_PORTID(port_id);
+	else
+		return -EINVAL;
+
+	rtproxy.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	rtproxy.hdr.pkt_size = sizeof(rtproxy);
+	rtproxy.hdr.src_port = 0;
+	rtproxy.hdr.dest_port = 0;
+	rtproxy.hdr.token = 0;
+	rtproxy.hdr.opcode = AFE_SERVICE_CMD_UNREGISTER_RT_PORT_DRIVER;
+	rtproxy.port_id = port_id;
+	rtproxy.reserved = 0;
+
+	rtproxy.hdr.token = index;
+
+	if (port_id == RT_PROXY_PORT_001_TX) {
+		this_afe.tx_cb = NULL;
+		this_afe.tx_private_data = NULL;
+	} else if (port_id == RT_PROXY_PORT_001_RX) {
+		this_afe.rx_cb = NULL;
+		this_afe.rx_private_data = NULL;
+	}
+
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &rtproxy);
+	if (ret < 0) {
+		pr_err("%s: AFE unregister rtproxy event failed %d\n",
+			   __func__, ret);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+				 (atomic_read(&this_afe.state) == 0),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+	return 0;
+}
+
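+/* Queue a filled buffer of 'bytes' bytes at buf_addr_p on the RT proxy TX port. */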
+int afe_rt_proxy_port_write(u32 buf_addr_p, u32 mem_map_handle, int bytes)
+{
+	int ret = 0;
+	struct afe_port_data_cmd_rt_proxy_port_write_v2 afecmd_wr;
+
+	if (this_afe.apr == NULL) {
+		pr_err("%s: register to AFE is not done\n", __func__);
+		ret = -ENODEV;
+		return ret;
+	}
+	pr_debug("%s: buf_addr_p = 0x%08x bytes = %d\n", __func__,
+						buf_addr_p, bytes);
+
+	afecmd_wr.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	afecmd_wr.hdr.pkt_size = sizeof(afecmd_wr);
+	afecmd_wr.hdr.src_port = 0;
+	afecmd_wr.hdr.dest_port = 0;
+	afecmd_wr.hdr.token = 0;
+	afecmd_wr.hdr.opcode = AFE_PORT_DATA_CMD_RT_PROXY_PORT_WRITE_V2;
+	afecmd_wr.port_id = RT_PROXY_PORT_001_TX;
+	afecmd_wr.buffer_address_lsw = (uint32_t)buf_addr_p;
+	afecmd_wr.buffer_address_msw = 0x00;
+	afecmd_wr.mem_map_handle = mem_map_handle;
+	afecmd_wr.available_bytes = bytes;
+	afecmd_wr.reserved = 0;
+
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &afecmd_wr);
+	if (ret < 0) {
+		pr_err("%s: AFE rtproxy write to port 0x%x failed %d\n",
+			   __func__, afecmd_wr.port_id, ret);
+		ret = -EINVAL;
+		return ret;
+	}
+	return 0;
+
+}
+
+int afe_rt_proxy_port_read(u32 buf_addr_p, u32 mem_map_handle, int bytes)
+{
+	int ret = 0;
+	struct afe_port_data_cmd_rt_proxy_port_read_v2 afecmd_rd;
+
+	if (this_afe.apr == NULL) {
+		pr_err("%s: register to AFE is not done\n", __func__);
+		ret = -ENODEV;
+		return ret;
+	}
+	pr_debug("%s: buf_addr_p = 0x%08x bytes = %d\n", __func__,
+						buf_addr_p, bytes);
+
+	afecmd_rd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	afecmd_rd.hdr.pkt_size = sizeof(afecmd_rd);
+	afecmd_rd.hdr.src_port = 0;
+	afecmd_rd.hdr.dest_port = 0;
+	afecmd_rd.hdr.token = 0;
+	afecmd_rd.hdr.opcode = AFE_PORT_DATA_CMD_RT_PROXY_PORT_READ_V2;
+	afecmd_rd.port_id = RT_PROXY_PORT_001_RX;
+	afecmd_rd.buffer_address_lsw = (uint32_t)buf_addr_p;
+	afecmd_rd.buffer_address_msw = 0x00;
+	afecmd_rd.available_bytes = bytes;
+
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &afecmd_rd);
+	if (ret < 0) {
+		pr_err("%s: AFE rtproxy read cmd to port 0x%x failed %d\n",
+			   __func__, afecmd_rd.port_id, ret);
+		ret = -EINVAL;
+		return ret;
+	}
+	return 0;
+}
+
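+/*
+ * debugfs entries used to exercise AFE loopback and loopback gain from
+ * user space for testing.
+ */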
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_afelb;
+static struct dentry *debugfs_afelb_gain;
+
+static int afe_debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	pr_info("debug intf %s\n", (char *) file->private_data);
+	return 0;
+}
+
+static int afe_get_parameters(char *buf, long int *param1, int num_of_par)
+{
+	char *token;
+	int base, cnt;
+
+	token = strsep(&buf, " ");
+
+	for (cnt = 0; cnt < num_of_par; cnt++) {
+		if (token != NULL) {
+			if ((token[1] == 'x') || (token[1] == 'X'))
+				base = 16;
+			else
+				base = 10;
+
+			if (strict_strtoul(token, base, &param1[cnt]) != 0)
+				return -EINVAL;
+
+			token = strsep(&buf, " ");
+		} else
+			return -EINVAL;
+	}
+	return 0;
+}
+#define AFE_LOOPBACK_ON (1)
+#define AFE_LOOPBACK_OFF (0)
+static ssize_t afe_debug_write(struct file *filp,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char *lb_str = filp->private_data;
+	char lbuf[32];
+	int rc;
+	unsigned long param[5];
+
+	if (cnt > sizeof(lbuf) - 1)
+		return -EINVAL;
+
+	rc = copy_from_user(lbuf, ubuf, cnt);
+	if (rc)
+		return -EFAULT;
+
+	lbuf[cnt] = '\0';
+
+	if (!strncmp(lb_str, "afe_loopback", 12)) {
+		rc = afe_get_parameters(lbuf, param, 3);
+		if (!rc) {
+			pr_info("%s %lu %lu %lu\n", lb_str, param[0], param[1],
+				param[2]);
+
+			if ((param[0] != AFE_LOOPBACK_ON) && (param[0] !=
+				AFE_LOOPBACK_OFF)) {
+				pr_err("%s: Error, parameter 0 incorrect\n",
+					__func__);
+				rc = -EINVAL;
+				goto afe_error;
+			}
+			if ((q6audio_validate_port(param[1]) < 0) ||
+			    (q6audio_validate_port(param[2]) < 0)) {
+				pr_err("%s: Error, invalid afe port\n",
+					__func__);
+				rc = -EINVAL;
+				goto afe_error;
+			}
+			if (this_afe.apr == NULL) {
+				pr_err("%s: Error, AFE not opened\n", __func__);
+				rc = -EINVAL;
+			} else {
+				rc = afe_loopback(param[0], param[1], param[2]);
+			}
+		} else {
+			pr_err("%s: Error, invalid parameters\n", __func__);
+			rc = -EINVAL;
+		}
+
+	} else if (!strncmp(lb_str, "afe_loopback_gain", 17)) {
+		rc = afe_get_parameters(lbuf, param, 2);
+		if (!rc) {
+			pr_info("%s %lu %lu\n", lb_str, param[0], param[1]);
+
+			if (q6audio_validate_port(param[0]) < 0) {
+				pr_err("%s: Error, invalid afe port\n",
+					__func__);
+				rc = -EINVAL;
+				goto afe_error;
+			}
+
+			if (param[1] > 100) {
+				pr_err("%s: Error, volume should be 0 to 100 percentage param = %lu\n",
+					__func__, param[1]);
+				rc = -EINVAL;
+				goto afe_error;
+			}
+
+			param[1] = (Q6AFE_MAX_VOLUME * param[1]) / 100;
+
+			if (this_afe.apr == NULL) {
+				pr_err("%s: Error, AFE not opened\n", __func__);
+				rc = -EINVAL;
+			} else {
+				rc = afe_loopback_gain(param[0], param[1]);
+			}
+		} else {
+			pr_err("%s: Error, invalid parameters\n", __func__);
+			rc = -EINVAL;
+		}
+	}
+
+afe_error:
+	if (rc == 0)
+		rc = cnt;
+	else
+		pr_err("%s: rc = %d\n", __func__, rc);
+
+	return rc;
+}
+
+static const struct file_operations afe_debug_fops = {
+	.open = afe_debug_open,
+	.write = afe_debug_write
+};
+
+static void config_debug_fs_init(void)
+{
+	debugfs_afelb = debugfs_create_file("afe_loopback",
+	S_IFREG | S_IWUGO, NULL, (void *) "afe_loopback",
+	&afe_debug_fops);
+
+	debugfs_afelb_gain = debugfs_create_file("afe_loopback_gain",
+	S_IFREG | S_IWUGO, NULL, (void *) "afe_loopback_gain",
+	&afe_debug_fops);
+}
+static void config_debug_fs_exit(void)
+{
+	if (debugfs_afelb)
+		debugfs_remove(debugfs_afelb);
+	if (debugfs_afelb_gain)
+		debugfs_remove(debugfs_afelb_gain);
+}
+#else
+static void config_debug_fs_init(void)
+{
+	return;
+}
+static void config_debug_fs_exit(void)
+{
+	return;
+}
+#endif
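+
+/* Configure sidetone routing from tx_port_id to rx_port_id via AFE loopback. */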
+int afe_sidetone(u16 tx_port_id, u16 rx_port_id, u16 enable, uint16_t gain)
+{
+	struct afe_loopback_cfg_v1 cmd_sidetone;
+	int ret = 0;
+	int index = 0;
+
+	pr_info("%s: tx_port_id:%d rx_port_id:%d enable:%d gain:%d\n", __func__,
+					tx_port_id, rx_port_id, enable, gain);
+	index = q6audio_get_port_index(rx_port_id);
+	if (q6audio_validate_port(rx_port_id) < 0)
+		return -EINVAL;
+
+	cmd_sidetone.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cmd_sidetone.hdr.pkt_size = sizeof(cmd_sidetone);
+	cmd_sidetone.hdr.src_port = 0;
+	cmd_sidetone.hdr.dest_port = 0;
+	cmd_sidetone.hdr.token = 0;
+	cmd_sidetone.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+	/* TODO: confirm whether the RX or the TX port ID should be used here */
+	cmd_sidetone.param.port_id = tx_port_id;
+	/* size of data param & payload */
+	cmd_sidetone.param.payload_size = (sizeof(cmd_sidetone) -
+			sizeof(struct apr_hdr) -
+			sizeof(struct afe_port_cmd_set_param_v2));
+	cmd_sidetone.param.payload_address_lsw = 0x00;
+	cmd_sidetone.param.payload_address_msw = 0x00;
+	cmd_sidetone.param.mem_map_handle = 0x00;
+	cmd_sidetone.pdata.module_id = AFE_MODULE_LOOPBACK;
+	cmd_sidetone.pdata.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG;
+	/* size of actual payload only */
+	cmd_sidetone.pdata.param_size =  cmd_sidetone.param.payload_size -
+				sizeof(struct afe_port_param_data_v2);
+
+	cmd_sidetone.loopback_cfg_minor_version =
+					AFE_API_VERSION_LOOPBACK_CONFIG;
+	cmd_sidetone.dst_port_id = rx_port_id;
+	cmd_sidetone.routing_mode = LB_MODE_SIDETONE;
+	cmd_sidetone.enable = enable;
+
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &cmd_sidetone);
+	if (ret < 0) {
+		pr_err("%s: AFE sidetone failed for tx_port:%d rx_port:%d\n",
+					__func__, tx_port_id, rx_port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+		(atomic_read(&this_afe.state) == 0),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return ret;
+}
+
+int afe_port_stop_nowait(int port_id)
+{
+	struct afe_port_cmd_device_stop stop;
+	int ret = 0;
+
+	if (this_afe.apr == NULL) {
+		pr_err("%s: AFE is already closed\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	pr_debug("%s: port_id=%d\n", __func__, port_id);
+	port_id = q6audio_convert_virtual_to_portid(port_id);
+
+	stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	stop.hdr.pkt_size = sizeof(stop);
+	stop.hdr.src_port = 0;
+	stop.hdr.dest_port = 0;
+	stop.hdr.token = 0;
+	stop.hdr.opcode = AFE_PORT_CMD_DEVICE_STOP;
+	stop.port_id = port_id;
+	stop.reserved = 0;
+
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop);
+
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("%s: AFE close failed\n", __func__);
+		ret = -EINVAL;
+	}
+
+fail_cmd:
+	return ret;
+
+}
+
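+/* Stop an AFE port and wait for the DSP to acknowledge the stop command. */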
+int afe_close(int port_id)
+{
+	struct afe_port_cmd_device_stop stop;
+	int ret = 0;
+	int index = 0;
+
+	if (this_afe.apr == NULL) {
+		pr_err("%s: AFE is already closed\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	pr_debug("%s: port_id=%d\n", __func__, port_id);
+
+	index = q6audio_get_port_index(port_id);
+	if (q6audio_validate_port(port_id) < 0)
+		return -EINVAL;
+
+	port_id = q6audio_convert_virtual_to_portid(port_id);
+
+	stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	stop.hdr.pkt_size = sizeof(stop);
+	stop.hdr.src_port = 0;
+	stop.hdr.dest_port = 0;
+	stop.hdr.token = index;
+	stop.hdr.opcode = AFE_PORT_CMD_DEVICE_STOP;
+	stop.port_id = q6audio_get_port_id(port_id);
+	stop.reserved = 0;
+
+	atomic_set(&this_afe.state, 1);
+	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop);
+
+	if (ret < 0) {
+		pr_err("%s: AFE close failed\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_afe.wait[index],
+			(atomic_read(&this_afe.state) == 0),
+					msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+fail_cmd:
+	return ret;
+}
+
+static int __init afe_init(void)
+{
+	int i = 0;
+	atomic_set(&this_afe.state, 0);
+	atomic_set(&this_afe.status, 0);
+	this_afe.apr = NULL;
+	for (i = 0; i < AFE_MAX_PORTS; i++)
+		init_waitqueue_head(&this_afe.wait[i]);
+
+	config_debug_fs_init();
+	return 0;
+}
+
+static void __exit afe_exit(void)
+{
+	int i;
+
+	config_debug_fs_exit();
+	for (i = 0; i < MAX_AUDPROC_TYPES; i++) {
+		if (afe_cal_addr[i].cal_paddr != 0)
+			afe_cmd_memory_unmap_nowait(
+				afe_cal_addr[i].cal_paddr);
+	}
+}
+
+device_initcall(afe_init);
+__exitcall(afe_exit);
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
new file mode 100644
index 0000000..f982134
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -0,0 +1,3342 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/msm_audio.h>
+#include <linux/android_pmem.h>
+#include <linux/memory_alloc.h>
+#include <linux/debugfs.h>
+#include <linux/time.h>
+#include <linux/atomic.h>
+
+#include <asm/ioctls.h>
+
+#include <mach/memory.h>
+#include <mach/debug_mm.h>
+#include <mach/peripheral-loader.h>
+#include <mach/qdsp6v2/audio_acdb.h>
+#include <mach/qdsp6v2/rtac.h>
+#include <mach/msm_subsystem_map.h>
+
+#include <sound/apr_audio-v2.h>
+#include <sound/q6asm-v2.h>
+
+#define TRUE        0x01
+#define FALSE       0x00
+#define READDONE_IDX_STATUS 0
+#define READDONE_IDX_BUFADD_LSW 1
+#define READDONE_IDX_BUFADD_MSW 2
+#define READDONE_IDX_MEMMAP_HDL 3
+#define READDONE_IDX_SIZE 4
+#define READDONE_IDX_OFFSET 5
+#define READDONE_IDX_LSW_TS 6
+#define READDONE_IDX_MSW_TS 7
+#define READDONE_IDX_FLAGS 8
+#define READDONE_IDX_NUMFRAMES 9
+#define READDONE_IDX_SEQ_ID 10
+
+/* TODO, combine them together */
+static DEFINE_MUTEX(session_lock);
+struct asm_mmap {
+	atomic_t ref_cnt;
+	void *apr;
+};
+
+static struct asm_mmap this_mmap;
+/* session id: 0 reserved */
+static struct audio_client *session[SESSION_MAX+1];
+
+struct asm_buffer_node {
+	struct list_head list;
+	uint32_t  buf_addr_lsw;
+	uint32_t  mmap_hdl;
+};
+static int32_t q6asm_mmapcallback(struct apr_client_data *data, void *priv);
+static int32_t q6asm_callback(struct apr_client_data *data, void *priv);
+static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
+			uint32_t pkt_size, uint32_t cmd_flg);
+static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr,
+			uint32_t pkt_size, uint32_t cmd_flg);
+static int q6asm_memory_map_regions(struct audio_client *ac, int dir,
+				uint32_t bufsz, uint32_t bufcnt);
+static int q6asm_memory_unmap_regions(struct audio_client *ac, int dir,
+				uint32_t bufsz, uint32_t bufcnt);
+static void q6asm_reset_buf_state(struct audio_client *ac);
+
+
+#ifdef CONFIG_DEBUG_FS
+#define OUT_BUFFER_SIZE 56
+#define IN_BUFFER_SIZE 24
+
+static struct timeval out_cold_tv;
+static struct timeval out_warm_tv;
+static struct timeval out_cont_tv;
+static struct timeval in_cont_tv;
+static long out_enable_flag;
+static long in_enable_flag;
+static struct dentry *out_dentry;
+static struct dentry *in_dentry;
+static int in_cont_index;
+/*This var is used to keep track of first write done for cold output latency */
+static int out_cold_index;
+static char *out_buffer;
+static char *in_buffer;
+static int audio_output_latency_dbgfs_open(struct inode *inode,
+							struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+static ssize_t audio_output_latency_dbgfs_read(struct file *file,
+				char __user *buf, size_t count, loff_t *ppos)
+{
+	snprintf(out_buffer, OUT_BUFFER_SIZE, "%ld,%ld,%ld,%ld,%ld,%ld,",\
+		out_cold_tv.tv_sec, out_cold_tv.tv_usec, out_warm_tv.tv_sec,\
+		out_warm_tv.tv_usec, out_cont_tv.tv_sec, out_cont_tv.tv_usec);
+	return  simple_read_from_buffer(buf, OUT_BUFFER_SIZE, ppos,
+						out_buffer, OUT_BUFFER_SIZE);
+}
+static ssize_t audio_output_latency_dbgfs_write(struct file *file,
+			const char __user *buf, size_t count, loff_t *ppos)
+{
+	char *temp;
+
+	if (count > 2*sizeof(char))
+		return -EINVAL;
+
+	/* one extra byte keeps the string NUL-terminated for strict_strtol */
+	temp = kzalloc(2*sizeof(char) + 1, GFP_KERNEL);
+
+	out_cold_index = 0;
+
+	if (temp) {
+		if (copy_from_user(temp, buf, count)) {
+			kfree(temp);
+			return -EFAULT;
+		}
+		if (!strict_strtol(temp, 10, &out_enable_flag)) {
+			kfree(temp);
+			return count;
+		}
+		kfree(temp);
+	}
+	return -EINVAL;
+}
+static const struct file_operations audio_output_latency_debug_fops = {
+	.open = audio_output_latency_dbgfs_open,
+	.read = audio_output_latency_dbgfs_read,
+	.write = audio_output_latency_dbgfs_write
+};
+static int audio_input_latency_dbgfs_open(struct inode *inode,
+							struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+static ssize_t audio_input_latency_dbgfs_read(struct file *file,
+				char __user *buf, size_t count, loff_t *ppos)
+{
+	snprintf(in_buffer, IN_BUFFER_SIZE, "%ld,%ld,",\
+				in_cont_tv.tv_sec, in_cont_tv.tv_usec);
+	return  simple_read_from_buffer(buf, IN_BUFFER_SIZE, ppos,
+						in_buffer, IN_BUFFER_SIZE);
+}
+static ssize_t audio_input_latency_dbgfs_write(struct file *file,
+			const char __user *buf, size_t count, loff_t *ppos)
+{
+	char *temp;
+
+	if (count > 2*sizeof(char))
+		return -EINVAL;
+
+	/* one extra byte keeps the string NUL-terminated for strict_strtol */
+	temp = kzalloc(2*sizeof(char) + 1, GFP_KERNEL);
+	if (temp) {
+		if (copy_from_user(temp, buf, count)) {
+			kfree(temp);
+			return -EFAULT;
+		}
+		if (!strict_strtol(temp, 10, &in_enable_flag)) {
+			kfree(temp);
+			return count;
+		}
+		kfree(temp);
+	}
+	return -EINVAL;
+}
+static const struct file_operations audio_input_latency_debug_fops = {
+	.open = audio_input_latency_dbgfs_open,
+	.read = audio_input_latency_dbgfs_read,
+	.write = audio_input_latency_dbgfs_write
+};
+
+static void config_debug_fs_write_cb(void)
+{
+	if (out_enable_flag) {
+		/* Log the time of the first write done and set
+		 * out_cold_index so subsequent writes are not logged.
+		 */
+		if (out_cold_index != 1) {
+			do_gettimeofday(&out_cold_tv);
+			pr_debug("COLD: apr_send_pkt at %ld sec %ld microsec\n",
+				out_cold_tv.tv_sec, out_cold_tv.tv_usec);
+			out_cold_index = 1;
+		}
+		pr_debug("out_enable_flag %ld\n", out_enable_flag);
+	}
+}
+static void config_debug_fs_read_cb(void)
+{
+	if (in_enable_flag) {
+		/* When in_cont_index == 7, the DSP is writing into the 8th
+		 * 512-byte buffer and this timestamp is tapped here. Once
+		 * done, it writes to the 9th 512-byte buffer. These two
+		 * buffers (8th, 9th) reach the test application in the 5th
+		 * iteration and that timestamp is tapped at user level. The
+		 * difference of the two timestamps gives the time between
+		 * the DSP starting to fill the required sample and it
+		 * reaching the test application, i.e. the continuous input
+		 * latency.
+		 */
+		if (in_cont_index == 7) {
+			do_gettimeofday(&in_cont_tv);
+			pr_err("In_CONT: previous read buffer done at %ld sec %ld microsec\n",
+				in_cont_tv.tv_sec, in_cont_tv.tv_usec);
+		}
+		in_cont_index++;
+	}
+}
+
+static void config_debug_fs_reset_index(void)
+{
+	in_cont_index = 0;
+}
+
+static void config_debug_fs_run(void)
+{
+	if (out_enable_flag) {
+		do_gettimeofday(&out_cold_tv);
+		pr_debug("COLD: apr_send_pkt at %ld sec %ld microsec\n",\
+				out_cold_tv.tv_sec, out_cold_tv.tv_usec);
+	}
+}
+
+static void config_debug_fs_write(struct audio_buffer *ab)
+{
+	if (out_enable_flag) {
+		char zero_pattern[2] = {0x00, 0x00};
+		/* If the first two bytes are non-zero and the last two
+		 * bytes are zero, it is the warm output pattern.
+		 */
+		if ((strncmp(((char *)ab->data), zero_pattern, 2)) &&
+		(!strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
+			do_gettimeofday(&out_warm_tv);
+			pr_debug("WARM: apr_send_pkt at %ld sec %ld microsec\n",
+				out_warm_tv.tv_sec, out_warm_tv.tv_usec);
+			pr_debug("Warm Pattern Matched\n");
+		}
+		/* If the first two bytes are zero and the last two bytes
+		 * are non-zero, it is the continuous output pattern.
+		 */
+		else if ((!strncmp(((char *)ab->data), zero_pattern, 2))
+		&& (strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
+			do_gettimeofday(&out_cont_tv);
+			pr_debug("CONT: apr_send_pkt at %ld sec %ld microsec\n",
+				out_cont_tv.tv_sec, out_cont_tv.tv_usec);
+			pr_debug("Cont Pattern Matched\n");
+		}
+	}
+}
+static void config_debug_fs_init(void)
+{
+	out_buffer = kmalloc(OUT_BUFFER_SIZE, GFP_KERNEL);
+	out_dentry = debugfs_create_file("audio_out_latency_measurement_node",\
+				S_IFREG | S_IRUGO | S_IWUGO,\
+				NULL, NULL, &audio_output_latency_debug_fops);
+	if (IS_ERR(out_dentry))
+		pr_err("debugfs_create_file failed\n");
+	in_buffer = kmalloc(IN_BUFFER_SIZE, GFP_KERNEL);
+	in_dentry = debugfs_create_file("audio_in_latency_measurement_node",\
+				S_IFREG | S_IRUGO | S_IWUGO,\
+				NULL, NULL, &audio_input_latency_debug_fops);
+	if (IS_ERR(in_dentry))
+		pr_err("debugfs_create_file failed\n");
+}
+#else
+static void config_debug_fs_write(struct audio_buffer *ab)
+{
+	return;
+}
+static void config_debug_fs_run(void)
+{
+	return;
+}
+static void config_debug_fs_reset_index(void)
+{
+	return;
+}
+static void config_debug_fs_read_cb(void)
+{
+	return;
+}
+static void config_debug_fs_write_cb(void)
+{
+	return;
+}
+static void config_debug_fs_init(void)
+{
+	return;
+}
+#endif
+
+
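+/* Allocate a free ASM session slot (1..SESSION_MAX) for this audio client. */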
+static int q6asm_session_alloc(struct audio_client *ac)
+{
+	int n;
+	mutex_lock(&session_lock);
+	for (n = 1; n <= SESSION_MAX; n++) {
+		if (!session[n]) {
+			session[n] = ac;
+			mutex_unlock(&session_lock);
+			return n;
+		}
+	}
+	mutex_unlock(&session_lock);
+	return -ENOMEM;
+}
+
+static void q6asm_session_free(struct audio_client *ac)
+{
+	pr_debug("%s: sessionid[%d]\n", __func__, ac->session);
+	rtac_remove_popp_from_adm_devices(ac->session);
+	mutex_lock(&session_lock);
+	session[ac->session] = 0;
+	mutex_unlock(&session_lock);
+	ac->session = 0;
+	return;
+}
+
+int q6asm_audio_client_buf_free(unsigned int dir,
+			struct audio_client *ac)
+{
+	struct audio_port_data *port;
+	int cnt = 0;
+	int rc = 0;
+	pr_debug("%s: Session id %d\n", __func__, ac->session);
+	mutex_lock(&ac->cmd_lock);
+	if (ac->io_mode == SYNC_IO_MODE) {
+		port = &ac->port[dir];
+		if (!port->buf) {
+			mutex_unlock(&ac->cmd_lock);
+			return 0;
+		}
+		cnt = port->max_buf_cnt - 1;
+
+		if (cnt >= 0) {
+			rc = q6asm_memory_unmap_regions(ac, dir,
+							port->buf[0].size,
+							port->max_buf_cnt);
+			if (rc < 0)
+				pr_err("%s CMD Memory_unmap_regions failed\n",
+								__func__);
+		}
+
+		while (cnt >= 0) {
+			if (port->buf[cnt].data) {
+				ion_unmap_kernel(port->buf[cnt].client,
+						port->buf[cnt].handle);
+				ion_free(port->buf[cnt].client,
+						port->buf[cnt].handle);
+				ion_client_destroy(port->buf[cnt].client);
+				port->buf[cnt].data = NULL;
+				port->buf[cnt].phys = 0;
+				--(port->max_buf_cnt);
+			}
+			--cnt;
+		}
+		kfree(port->buf);
+		port->buf = NULL;
+	}
+	mutex_unlock(&ac->cmd_lock);
+	return 0;
+}
+
+int q6asm_audio_client_buf_free_contiguous(unsigned int dir,
+			struct audio_client *ac)
+{
+	struct audio_port_data *port;
+	int cnt = 0;
+	int rc = 0;
+	pr_debug("%s: Session id %d\n", __func__, ac->session);
+	mutex_lock(&ac->cmd_lock);
+	port = &ac->port[dir];
+	if (!port->buf) {
+		mutex_unlock(&ac->cmd_lock);
+		return 0;
+	}
+	cnt = port->max_buf_cnt - 1;
+
+	if (cnt >= 0) {
+		rc = q6asm_memory_unmap(ac, port->buf[0].phys, dir);
+		if (rc < 0)
+			pr_err("%s CMD Memory_unmap_regions failed\n",
+							__func__);
+	}
+
+	if (port->buf[0].data) {
+		ion_unmap_kernel(port->buf[0].client, port->buf[0].handle);
+		ion_free(port->buf[0].client, port->buf[0].handle);
+		ion_client_destroy(port->buf[0].client);
+		pr_debug("%s:data[%p]phys[%p][%p]"
+			", client[%p] handle[%p]\n",
+			__func__,
+			(void *)port->buf[0].data,
+			(void *)port->buf[0].phys,
+			(void *)&port->buf[0].phys,
+			(void *)port->buf[0].client,
+			(void *)port->buf[0].handle);
+	}
+
+	while (cnt >= 0) {
+		port->buf[cnt].data = NULL;
+		port->buf[cnt].phys = 0;
+		cnt--;
+	}
+	port->max_buf_cnt = 0;
+	kfree(port->buf);
+	port->buf = NULL;
+	mutex_unlock(&ac->cmd_lock);
+	return 0;
+}
+
+int q6asm_mmap_apr_dereg(void)
+{
+	if (atomic_read(&this_mmap.ref_cnt) <= 0) {
+		pr_err("%s: APR Common Port Already Closed\n", __func__);
+		goto done;
+	}
+	atomic_dec(&this_mmap.ref_cnt);
+	if (atomic_read(&this_mmap.ref_cnt) == 0) {
+		apr_deregister(this_mmap.apr);
+		pr_debug("%s:APR De-Register common port\n", __func__);
+	}
+done:
+	return 0;
+}
+
+
+void q6asm_audio_client_free(struct audio_client *ac)
+{
+	int loopcnt;
+	struct audio_port_data *port;
+	if (!ac || !ac->session)
+		return;
+	pr_debug("%s: Session id %d\n", __func__, ac->session);
+	if (ac->io_mode == SYNC_IO_MODE) {
+		for (loopcnt = 0; loopcnt <= OUT; loopcnt++) {
+			port = &ac->port[loopcnt];
+			if (!port->buf)
+				continue;
+			pr_debug("%s:loopcnt = %d\n", __func__, loopcnt);
+			q6asm_audio_client_buf_free(loopcnt, ac);
+		}
+	}
+
+	apr_deregister(ac->apr);
+	ac->mmap_apr = NULL;
+	q6asm_session_free(ac);
+	q6asm_mmap_apr_dereg();
+
+	pr_debug("%s: APR De-Register\n", __func__);
+
+/*done:*/
+	kfree(ac);
+	return;
+}
+
+int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode)
+{
+	if (ac == NULL) {
+		pr_err("%s APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if ((mode == ASYNC_IO_MODE) || (mode == SYNC_IO_MODE)) {
+		ac->io_mode = mode;
+		pr_debug("%s:Set Mode to %d\n", __func__, ac->io_mode);
+		return 0;
+	} else {
+		pr_err("%s: Not a valid IO mode: %d\n", __func__, mode);
+		return -EINVAL;
+	}
+}
+
+void *q6asm_mmap_apr_reg(void)
+{
+	if (atomic_read(&this_mmap.ref_cnt) == 0) {
+		this_mmap.apr = apr_register("ADSP", "ASM", \
+					(apr_fn)q6asm_mmapcallback,\
+					0x0FFFFFFFF, &this_mmap);
+		if (this_mmap.apr == NULL) {
+			pr_debug("%s: Unable to register APR ASM common port\n",
+				__func__);
+			goto fail;
+		}
+	}
+	atomic_inc(&this_mmap.ref_cnt);
+	return this_mmap.apr;
+fail:
+	return NULL;
+}
+
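+/*
+ * Allocate an audio client, reserve an ASM session for it and register
+ * both its private APR handle and the shared mmap APR port.
+ */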
+struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv)
+{
+	struct audio_client *ac;
+	int n;
+	int lcnt = 0;
+
+	ac = kzalloc(sizeof(struct audio_client), GFP_KERNEL);
+	if (!ac)
+		return NULL;
+	n = q6asm_session_alloc(ac);
+	if (n <= 0)
+		goto fail_session;
+	ac->session = n;
+	ac->cb = cb;
+	ac->priv = priv;
+	ac->io_mode = SYNC_IO_MODE;
+	ac->apr = apr_register("ADSP", "ASM", \
+				(apr_fn)q6asm_callback,\
+				((ac->session) << 8 | 0x0001),\
+				ac);
+
+	if (ac->apr == NULL) {
+		pr_err("%s Registration with APR failed\n", __func__);
+			goto fail;
+	}
+	rtac_set_asm_handle(n, ac->apr);
+
+	pr_debug("%s Registering the common port with APR\n", __func__);
+	ac->mmap_apr = q6asm_mmap_apr_reg();
+	if (ac->mmap_apr == NULL)
+		goto fail;
+
+	init_waitqueue_head(&ac->cmd_wait);
+	INIT_LIST_HEAD(&ac->port[0].mem_map_handle);
+	INIT_LIST_HEAD(&ac->port[1].mem_map_handle);
+	pr_debug("%s: mem_map_handle list init'ed\n", __func__);
+	mutex_init(&ac->cmd_lock);
+	for (lcnt = 0; lcnt <= OUT; lcnt++) {
+		mutex_init(&ac->port[lcnt].lock);
+		spin_lock_init(&ac->port[lcnt].dsp_lock);
+	}
+	atomic_set(&ac->cmd_state, 0);
+
+	pr_debug("%s: session[%d]\n", __func__, ac->session);
+
+	return ac;
+fail:
+	q6asm_audio_client_free(ac);
+	return NULL;
+fail_session:
+	kfree(ac);
+	return NULL;
+}
+
+struct audio_client *q6asm_get_audio_client(int session_id)
+{
+	if ((session_id <= 0) || (session_id > SESSION_MAX)) {
+		pr_err("%s: invalid session: %d\n", __func__, session_id);
+		goto err;
+	}
+
+	if (!session[session_id]) {
+		pr_err("%s: session not active: %d\n", __func__, session_id);
+		goto err;
+	}
+
+	return session[session_id];
+err:
+	return NULL;
+}
+
+int q6asm_audio_client_buf_alloc(unsigned int dir,
+			struct audio_client *ac,
+			unsigned int bufsz,
+			unsigned int bufcnt)
+{
+	int cnt = 0;
+	int rc = 0;
+	struct audio_buffer *buf;
+	int len;
+
+	if (!(ac) || ((dir != IN) && (dir != OUT)))
+		return -EINVAL;
+
+	pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n", __func__, ac->session,
+		bufsz, bufcnt);
+
+	if (ac->session <= 0 || ac->session > 8)
+		goto fail;
+
+	if (ac->io_mode == SYNC_IO_MODE) {
+		if (ac->port[dir].buf) {
+			pr_debug("%s: buffer already allocated\n", __func__);
+			return 0;
+		}
+		mutex_lock(&ac->cmd_lock);
+		buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt),
+				GFP_KERNEL);
+
+		if (!buf) {
+			mutex_unlock(&ac->cmd_lock);
+			goto fail;
+		}
+
+		ac->port[dir].buf = buf;
+
+		while (cnt < bufcnt) {
+			if (bufsz > 0) {
+				if (!buf[cnt].data) {
+					buf[cnt].client = msm_ion_client_create
+						(UINT_MAX, "audio_client");
+					if (IS_ERR_OR_NULL((void *)
+						buf[cnt].client)) {
+						pr_err("%s: ION create client"
+						" for AUDIO failed\n",
+						__func__);
+						goto fail;
+					}
+					buf[cnt].handle = ion_alloc
+						(buf[cnt].client, bufsz, SZ_4K,
+						(0x1 << ION_AUDIO_HEAP_ID));
+					if (IS_ERR_OR_NULL((void *)
+						buf[cnt].handle)) {
+						pr_err("%s: ION memory"
+					" allocation for AUDIO failed\n",
+							__func__);
+						goto fail;
+					}
+
+					rc = ion_phys(buf[cnt].client,
+						buf[cnt].handle,
+						(ion_phys_addr_t *)
+						&buf[cnt].phys,
+						(size_t *)&len);
+					if (rc) {
+						pr_err("%s: ION Get Physical"
+						" for AUDIO failed, rc = %d\n",
+							__func__, rc);
+						goto fail;
+					}
+
+					buf[cnt].data = ion_map_kernel
+					(buf[cnt].client, buf[cnt].handle,
+							 0);
+					if (IS_ERR_OR_NULL((void *)
+						buf[cnt].data)) {
+						pr_err("%s: ION memory"
+				" mapping for AUDIO failed\n", __func__);
+						goto fail;
+					}
+					memset((void *)buf[cnt].data, 0, bufsz);
+					buf[cnt].used = 1;
+					buf[cnt].size = bufsz;
+					buf[cnt].actual_size = bufsz;
+					pr_debug("%s data[%p]phys[%p][%p]\n",
+						__func__,
+					   (void *)buf[cnt].data,
+					   (void *)buf[cnt].phys,
+					   (void *)&buf[cnt].phys);
+					cnt++;
+				}
+			}
+		}
+		ac->port[dir].max_buf_cnt = cnt;
+
+		mutex_unlock(&ac->cmd_lock);
+		rc = q6asm_memory_map_regions(ac, dir, bufsz, cnt);
+		if (rc < 0) {
+			pr_err("%s:CMD Memory_map_regions failed\n", __func__);
+			goto fail;
+		}
+	}
+	return 0;
+fail:
+	q6asm_audio_client_buf_free(dir, ac);
+	return -EINVAL;
+}
+
+int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir,
+			struct audio_client *ac,
+			unsigned int bufsz,
+			unsigned int bufcnt)
+{
+	int cnt = 0;
+	int rc = 0;
+	struct audio_buffer *buf;
+	int len;
+
+	if (!(ac) || ((dir != IN) && (dir != OUT)))
+		return -EINVAL;
+
+	pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n",
+			__func__, ac->session,
+			bufsz, bufcnt);
+
+	if (ac->session <= 0 || ac->session > 8)
+		goto fail;
+
+	if (ac->port[dir].buf) {
+		pr_debug("%s: buffer already allocated\n", __func__);
+		return 0;
+	}
+	mutex_lock(&ac->cmd_lock);
+	buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt),
+			GFP_KERNEL);
+
+	if (!buf) {
+		mutex_unlock(&ac->cmd_lock);
+		goto fail;
+	}
+
+	ac->port[dir].buf = buf;
+
+	buf[0].client = msm_ion_client_create(UINT_MAX, "audio_client");
+	if (IS_ERR_OR_NULL((void *)buf[0].client)) {
+		pr_err("%s: ION create client for AUDIO failed\n", __func__);
+		goto fail;
+	}
+	buf[0].handle = ion_alloc(buf[0].client, bufsz * bufcnt, SZ_4K,
+				  (0x1 << ION_AUDIO_HEAP_ID));
+	if (IS_ERR_OR_NULL((void *) buf[0].handle)) {
+		pr_err("%s: ION memory allocation for AUDIO failed\n",
+			__func__);
+		goto fail;
+	}
+
+	rc = ion_phys(buf[0].client, buf[0].handle,
+		  (ion_phys_addr_t *)&buf[0].phys, (size_t *)&len);
+	if (rc) {
+		pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+			__func__, rc);
+		goto fail;
+	}
+
+	buf[0].data = ion_map_kernel(buf[0].client, buf[0].handle, 0);
+	if (IS_ERR_OR_NULL((void *) buf[0].data)) {
+		pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+		goto fail;
+	}
+	if (!buf[0].data) {
+		pr_err("%s: invalid vaddr, ion_map_kernel failed\n", __func__);
+		mutex_unlock(&ac->cmd_lock);
+		goto fail;
+	}
+	memset((void *)buf[0].data, 0, (bufsz * bufcnt));
+
+	buf[0].used = dir ^ 1;
+	buf[0].size = bufsz;
+	buf[0].actual_size = bufsz;
+	cnt = 1;
+	while (cnt < bufcnt) {
+		if (bufsz > 0) {
+			buf[cnt].data =  buf[0].data + (cnt * bufsz);
+			buf[cnt].phys =  buf[0].phys + (cnt * bufsz);
+			if (!buf[cnt].data) {
+				pr_err("%s Buf alloc failed\n",
+							__func__);
+				mutex_unlock(&ac->cmd_lock);
+				goto fail;
+			}
+			buf[cnt].used = dir ^ 1;
+			buf[cnt].size = bufsz;
+			buf[cnt].actual_size = bufsz;
+			pr_debug("%s data[%p]phys[%p][%p]\n", __func__,
+				   (void *)buf[cnt].data,
+				   (void *)buf[cnt].phys,
+				   (void *)&buf[cnt].phys);
+		}
+		cnt++;
+	}
+	ac->port[dir].max_buf_cnt = cnt;
+	mutex_unlock(&ac->cmd_lock);
+	rc = q6asm_memory_map_regions(ac, dir, bufsz, cnt);
+	if (rc < 0) {
+		pr_err("%s:CMD Memory_map_regions failed\n", __func__);
+		goto fail;
+	}
+	return 0;
+fail:
+	q6asm_audio_client_buf_free_contiguous(dir, ac);
+	return -EINVAL;
+}
+
+static int32_t q6asm_mmapcallback(struct apr_client_data *data, void *priv)
+{
+	uint32_t sid = 0;
+	uint32_t dir = 0;
+	uint32_t *payload = data->payload;
+	unsigned long dsp_flags;
+
+	struct audio_client *ac = NULL;
+	struct audio_port_data *port;
+
+	if (!data) {
+		pr_err("%s: Invalid CB\n", __func__);
+		return 0;
+	}
+	if (data->opcode == RESET_EVENTS) {
+		pr_debug("%s: Reset event is received: %d %d apr[%p]\n",
+				__func__,
+				data->reset_event,
+				data->reset_proc,
+				this_mmap.apr);
+		apr_reset(this_mmap.apr);
+		atomic_set(&this_mmap.ref_cnt, 0);
+		this_mmap.apr = NULL;
+		return 0;
+	}
+	sid = (data->token >> 8) & 0x0F;
+	ac = q6asm_get_audio_client(sid);
+	if (!ac) {
+		pr_err("%s: audio client not found for session %d\n",
+			__func__, sid);
+		return 0;
+	}
+	pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x]"
+		"token[0x%x]payload_s[%d] src[%d] dest[%d]sid[%d]dir[%d]\n",
+		__func__, payload[0], payload[1], data->opcode, data->token,
+		data->payload_size, data->src_port, data->dest_port, sid, dir);
+	pr_debug("%s:Payload = [0x%x] status[0x%x]\n",
+			__func__, payload[0], payload[1]);
+
+	if (data->opcode == APR_BASIC_RSP_RESULT) {
+		switch (payload[0]) {
+		case ASM_CMD_SHARED_MEM_MAP_REGIONS:
+		case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:
+			if (atomic_read(&ac->cmd_state)) {
+				atomic_set(&ac->cmd_state, 0);
+				wake_up(&ac->cmd_wait);
+			}
+			pr_debug("%s:Payload = [0x%x] status[0x%x]\n",
+					__func__, payload[0], payload[1]);
+			break;
+		default:
+			pr_debug("%s:command[0x%x] not expecting rsp\n",
+						__func__, payload[0]);
+			break;
+		}
+		return 0;
+	}
+
+	dir = (data->token & 0x0F);
+	port = &ac->port[dir];
+
+	switch (data->opcode) {
+	case ASM_CMDRSP_SHARED_MEM_MAP_REGIONS:{
+		pr_debug("%s:PL#0[0x%x]PL#1 [0x%x] dir=%x s_id=%x\n",
+				__func__, payload[0], payload[1], dir, sid);
+		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
+		if (atomic_read(&ac->cmd_state)) {
+			ac->port[dir].tmp_hdl = payload[0];
+			atomic_set(&ac->cmd_state, 0);
+			wake_up(&ac->cmd_wait);
+		}
+		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
+		break;
+	}
+	case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:{
+		pr_debug("%s:PL#0[0x%x]PL#1 [0x%x]\n",
+					__func__, payload[0], payload[1]);
+		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
+		if (atomic_read(&ac->cmd_state)) {
+			atomic_set(&ac->cmd_state, 0);
+			wake_up(&ac->cmd_wait);
+		}
+		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
+
+		break;
+	}
+	default:
+		pr_debug("%s:command[0x%x]success [0x%x]\n",
+					__func__, payload[0], payload[1]);
+	}
+	if (ac->cb)
+		ac->cb(data->opcode, data->token,
+			data->payload, ac->priv);
+	return 0;
+}
+
+
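+/* APR callback for per-session ASM command responses and data events. */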
+static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
+{
+	int i = 0;
+	struct audio_client *ac = (struct audio_client *)priv;
+	uint32_t token;
+	unsigned long dsp_flags;
+	uint32_t *payload;
+
+
+	if ((ac == NULL) || (data == NULL)) {
+		pr_err("ac or priv NULL\n");
+		return -EINVAL;
+	}
+	if (ac->session <= 0 || ac->session > 8) {
+		pr_err("%s:Session ID is invalid, session = %d\n", __func__,
+			ac->session);
+		return -EINVAL;
+	}
+
+	payload = data->payload;
+
+	if (data->opcode == RESET_EVENTS) {
+		pr_debug("q6asm_callback: Reset event is received: %d %d apr[%p]\n",
+				data->reset_event, data->reset_proc, ac->apr);
+			if (ac->cb)
+				ac->cb(data->opcode, data->token,
+					(uint32_t *)data->payload, ac->priv);
+		apr_reset(ac->apr);
+		return 0;
+	}
+
+	pr_debug("%s: session[%d]opcode[0x%x]"
+		"token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__,
+		ac->session, data->opcode,
+		data->token, data->payload_size, data->src_port,
+		data->dest_port);
+	if ((data->opcode != ASM_DATA_EVENT_RENDERED_EOS) &&
+			(data->opcode != ASM_DATA_EVENT_EOS))
+		pr_debug("%s:Payload = [0x%x] status[0x%x]\n",
+			__func__, payload[0], payload[1]);
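+	/*
+	 * APR_BASIC_RSP_RESULT acknowledges a previously issued command;
+	 * clearing cmd_state wakes up the caller blocked on cmd_wait in
+	 * wait_event_timeout().
+	 */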
+	if (data->opcode == APR_BASIC_RSP_RESULT) {
+		token = data->token;
+		switch (payload[0]) {
+		case ASM_STREAM_CMD_SET_PP_PARAMS_V2:
+			if (rtac_make_asm_callback(ac->session, payload,
+					data->payload_size))
+				break;
+		case ASM_SESSION_CMD_PAUSE:
+		case ASM_DATA_CMD_EOS:
+		case ASM_STREAM_CMD_CLOSE:
+		case ASM_STREAM_CMD_FLUSH:
+		case ASM_SESSION_CMD_RUN_V2:
+		case ASM_SESSION_CMD_REGISTER_FORX_OVERFLOW_EVENTS:
+		case ASM_STREAM_CMD_FLUSH_READBUFS:
+			pr_debug("%s:Payload = [0x%x]\n", __func__, payload[0]);
+			if (token != ac->session) {
+				pr_err("%s:Invalid session[%d] rxed expected[%d]",
+						__func__, token, ac->session);
+				return -EINVAL;
+			}
+		case ASM_STREAM_CMD_OPEN_READ_V2:
+		case ASM_STREAM_CMD_OPEN_WRITE_V2:
+		case ASM_STREAM_CMD_OPEN_READWRITE_V2:
+		case ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2:
+		case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
+			pr_debug("%s:Payload = [0x%x]stat[0x%x]\n",
+					__func__, payload[0], payload[1]);
+			if (atomic_read(&ac->cmd_state)) {
+				atomic_set(&ac->cmd_state, 0);
+				wake_up(&ac->cmd_wait);
+			}
+			if (ac->cb)
+				ac->cb(data->opcode, data->token,
+					(uint32_t *)data->payload, ac->priv);
+			break;
+		default:
+			pr_debug("%s:command[0x%x] not expecting rsp\n",
+							__func__, payload[0]);
+			break;
+		}
+		return 0;
+	}
+
+	switch (data->opcode) {
+	case ASM_DATA_EVENT_WRITE_DONE_V2:{
+		struct audio_port_data *port = &ac->port[IN];
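+		/*
+		 * In sync I/O mode the write command's token was set to the
+		 * buffer index, so data->token identifies which buffer the
+		 * DSP has finished with.
+		 */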
+		pr_debug("%s: Rxed opcode[0x%x] status[0x%x] token[%d]",
+				__func__, payload[0], payload[1],
+				data->token);
+		if (ac->io_mode == SYNC_IO_MODE) {
+			if (port->buf == NULL) {
+				pr_err("%s: Unexpected Write Done\n",
+								__func__);
+				return -EINVAL;
+			}
+			spin_lock_irqsave(&port->dsp_lock, dsp_flags);
+			if (port->buf[data->token].phys !=
+				payload[0]) {
+				pr_err("Buf expected[%p]rxed[%p]\n",\
+				   (void *)port->buf[data->token].phys,\
+				   (void *)payload[0]);
+				spin_unlock_irqrestore(&port->dsp_lock,
+								dsp_flags);
+				return -EINVAL;
+			}
+			token = data->token;
+			port->buf[token].used = 1;
+			spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
+
+			config_debug_fs_write_cb();
+
+			for (i = 0; i < port->max_buf_cnt; i++)
+				pr_debug("%d ", port->buf[i].used);
+
+		}
+		break;
+	}
+	case ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2:
+		rtac_make_asm_callback(ac->session, payload,
+			data->payload_size);
+		break;
+	case ASM_DATA_EVENT_READ_DONE_V2:{
+
+		struct audio_port_data *port = &ac->port[OUT];
+
+		config_debug_fs_read_cb();
+
+		pr_debug("%s:R-D: status=%d buff_add=%x act_size=%d offset=%d\n",
+				__func__, payload[READDONE_IDX_STATUS],
+				payload[READDONE_IDX_BUFADD_LSW],
+				payload[READDONE_IDX_SIZE],
+				payload[READDONE_IDX_OFFSET]);
+
+		pr_debug("%s:R-D:msw_ts=%d lsw_ts=%d memmap_hdl=%x flags=%d id=%d num=%d\n",
+				__func__, payload[READDONE_IDX_MSW_TS],
+				payload[READDONE_IDX_LSW_TS],
+				payload[READDONE_IDX_MEMMAP_HDL],
+				payload[READDONE_IDX_FLAGS],
+				payload[READDONE_IDX_SEQ_ID],
+				payload[READDONE_IDX_NUMFRAMES]);
+
+		if (ac->io_mode == SYNC_IO_MODE) {
+			if (port->buf == NULL) {
+				pr_err("%s: Unexpected Read Done\n", __func__);
+				return -EINVAL;
+			}
+			spin_lock_irqsave(&port->dsp_lock, dsp_flags);
+			token = data->token;
+			port->buf[token].used = 0;
+			if (port->buf[token].phys !=
+				payload[READDONE_IDX_BUFADD_LSW]) {
+				pr_err("Buf expected[%p]rxed[%p]\n",\
+				   (void *)port->buf[token].phys,\
+				   (void *)payload[READDONE_IDX_BUFADD_LSW]);
+				spin_unlock_irqrestore(&port->dsp_lock,
+							dsp_flags);
+				break;
+			}
+			port->buf[token].actual_size =
+				payload[READDONE_IDX_SIZE];
+			spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
+		}
+		break;
+	}
+	case ASM_DATA_EVENT_EOS:
+	case ASM_DATA_EVENT_RENDERED_EOS:
+		pr_debug("%s:EOS ACK received: rxed opcode[0x%x]\n",
+				  __func__, data->opcode);
+		break;
+	case ASM_SESSION_EVENTX_OVERFLOW:
+		pr_err("ASM_SESSION_EVENTX_OVERFLOW\n");
+		break;
+	case ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3:
+		pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3, "
+				"payload[0] = %d, payload[1] = %d, "
+				"payload[2] = %d\n", __func__,
+				 payload[0], payload[1], payload[2]);
+		ac->time_stamp = (uint64_t)(((uint64_t)payload[1] << 32) |
+				payload[2]);
+		if (atomic_read(&ac->cmd_state)) {
+			atomic_set(&ac->cmd_state, 0);
+			wake_up(&ac->cmd_wait);
+		}
+		break;
+	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
+	case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY:
+		pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, "
+				"payload[0] = %d, payload[1] = %d, "
+				"payload[2] = %d, payload[3] = %d\n", __func__,
+				payload[0], payload[1], payload[2],
+				payload[3]);
+		break;
+	}
+	if (ac->cb)
+		ac->cb(data->opcode, data->token,
+			data->payload, ac->priv);
+
+	return 0;
+}
+
+void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac, uint32_t *size,
+				uint32_t *index)
+{
+	void *data;
+	unsigned char idx;
+	struct audio_port_data *port;
+
+	if (!ac || ((dir != IN) && (dir != OUT)))
+		return NULL;
+
+	if (ac->io_mode == SYNC_IO_MODE) {
+		port = &ac->port[dir];
+
+		mutex_lock(&port->lock);
+		idx = port->cpu_buf;
+		if (port->buf == NULL) {
+			pr_debug("%s:Buffer pointer null\n", __func__);
+			mutex_unlock(&port->lock);
+			return NULL;
+		}
+		/*
+		 * dir 0: used = 0 means buf in use
+		 * dir 1: used = 1 means buf in use
+		 */
+		if (port->buf[idx].used == dir) {
+			/*
+			 * To make this more robust we could loop and grab the
+			 * next available buffer, but that is risky.
+			 */
+			pr_debug("%s:Next buf idx[0x%x] not available,"
+				"dir[%d]\n", __func__, idx, dir);
+			mutex_unlock(&port->lock);
+			return NULL;
+		}
+		*size = port->buf[idx].actual_size;
+		*index = port->cpu_buf;
+		data = port->buf[idx].data;
+		pr_debug("%s:session[%d]index[%d] data[%p]size[%d]\n",
+						__func__,
+						ac->session,
+						port->cpu_buf,
+						data, *size);
+		/*
+		 * Advance cpu_buf whenever the user fetches a buffer through
+		 * this call, so no separate API is needed for that.
+		 */
+		port->buf[idx].used = dir;
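+		/*
+		 * The wrap-around below relies on max_buf_cnt being a
+		 * power of two.
+		 */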
+		port->cpu_buf = ((port->cpu_buf + 1) & (port->max_buf_cnt - 1));
+		mutex_unlock(&port->lock);
+		return data;
+	}
+	return NULL;
+}
+
+void *q6asm_is_cpu_buf_avail_nolock(int dir, struct audio_client *ac,
+					uint32_t *size, uint32_t *index)
+{
+	void *data;
+	unsigned char idx;
+	struct audio_port_data *port;
+
+	if (!ac || ((dir != IN) && (dir != OUT)))
+		return NULL;
+
+	port = &ac->port[dir];
+
+	idx = port->cpu_buf;
+	if (port->buf == NULL) {
+		pr_debug("%s:Buffer pointer null\n", __func__);
+		return NULL;
+	}
+	/*
+	 * dir 0: used = 0 means buf in use
+	 * dir 1: used = 1 means buf in use
+	 */
+	if (port->buf[idx].used == dir) {
+		/*
+		 * To make this more robust we could loop and grab the
+		 * next available buffer, but that is risky.
+		 */
+		pr_debug("%s:Next buf idx[0x%x] not available,"
+			"dir[%d]\n", __func__, idx, dir);
+		return NULL;
+	}
+	*size = port->buf[idx].actual_size;
+	*index = port->cpu_buf;
+	data = port->buf[idx].data;
+	pr_debug("%s:session[%d]index[%d] data[%p]size[%d]\n",
+		__func__, ac->session, port->cpu_buf,
+		data, *size);
+	/*
+	 * Advance cpu_buf whenever the user fetches a buffer through
+	 * this call, so no separate API is needed for that.
+	 */
+	port->buf[idx].used = dir;
+	port->cpu_buf = ((port->cpu_buf + 1) & (port->max_buf_cnt - 1));
+	return data;
+}
+
+int q6asm_is_dsp_buf_avail(int dir, struct audio_client *ac)
+{
+	int ret = -1;
+	struct audio_port_data *port;
+	uint32_t idx;
+
+	if (!ac || (dir != OUT))
+		return ret;
+
+	if (ac->io_mode == SYNC_IO_MODE) {
+		port = &ac->port[dir];
+
+		mutex_lock(&port->lock);
+		idx = port->dsp_buf;
+
+		if (port->buf[idx].used == (dir ^ 1)) {
+			/*
+			 * To make this more robust we could loop and grab the
+			 * next available buffer, but that is risky.
+			 */
+			pr_err("Next buf idx[0x%x] not available, dir[%d]\n",
+								idx, dir);
+			mutex_unlock(&port->lock);
+			return ret;
+		}
+		pr_debug("%s: session[%d]dsp_buf=%d cpu_buf=%d\n", __func__,
+			ac->session, port->dsp_buf, port->cpu_buf);
+		ret = ((port->dsp_buf != port->cpu_buf) ? 0 : -1);
+		mutex_unlock(&port->lock);
+	}
+	return ret;
+}
+
+static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
+			uint32_t pkt_size, uint32_t cmd_flg)
+{
+	pr_debug("%s:pkt_size=%d cmd_flg=%d session=%d\n", __func__, pkt_size,
+		cmd_flg, ac->session);
+	mutex_lock(&ac->cmd_lock);
+	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
+				APR_HDR_LEN(sizeof(struct apr_hdr)),\
+				APR_PKT_VER);
+	hdr->src_svc = ((struct apr_svc *)ac->apr)->id;
+	hdr->src_domain = APR_DOMAIN_APPS;
+	hdr->dest_svc = APR_SVC_ASM;
+	hdr->dest_domain = APR_DOMAIN_ADSP;
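+	/*
+	 * The session ID occupies the upper byte of the APR port fields;
+	 * the low byte is fixed to 0x01.
+	 */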
+	hdr->src_port = ((ac->session << 8) & 0xFF00) | 0x01;
+	hdr->dest_port = ((ac->session << 8) & 0xFF00) | 0x01;
+	if (cmd_flg) {
+		hdr->token = ac->session;
+		atomic_set(&ac->cmd_state, 1);
+	}
+	hdr->pkt_size  = pkt_size;
+	mutex_unlock(&ac->cmd_lock);
+	return;
+}
+
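+/* Same as q6asm_add_hdr() but without taking ac->cmd_lock. */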
+static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr,
+			uint32_t pkt_size, uint32_t cmd_flg)
+{
+	pr_debug("pkt_size = %d, cmd_flg = %d, session = %d\n",
+			pkt_size, cmd_flg, ac->session);
+	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
+				APR_HDR_LEN(sizeof(struct apr_hdr)),\
+				APR_PKT_VER);
+	hdr->src_svc = ((struct apr_svc *)ac->apr)->id;
+	hdr->src_domain = APR_DOMAIN_APPS;
+	hdr->dest_svc = APR_SVC_ASM;
+	hdr->dest_domain = APR_DOMAIN_ADSP;
+	hdr->src_port = ((ac->session << 8) & 0xFF00) | 0x01;
+	hdr->dest_port = ((ac->session << 8) & 0xFF00) | 0x01;
+	if (cmd_flg) {
+		hdr->token = ac->session;
+		atomic_set(&ac->cmd_state, 1);
+	}
+	hdr->pkt_size  = pkt_size;
+	return;
+}
+
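+/*
+ * Build the APR header for shared-memory map/unmap commands; callers
+ * pass the token (typically ((session << 8) | dir)) explicitly.
+ */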
+static void q6asm_add_mmaphdr(struct audio_client *ac, struct apr_hdr *hdr,
+			u32 pkt_size, u32 cmd_flg, u32 token)
+{
+	pr_debug("%s:pkt size=%d cmd_flg=%d\n", __func__, pkt_size, cmd_flg);
+	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	hdr->src_port = 0;
+	hdr->dest_port = 0;
+	if (cmd_flg) {
+		hdr->token = token;
+		atomic_set(&ac->cmd_state, 1);
+	}
+	hdr->pkt_size  = pkt_size;
+	return;
+}
+int q6asm_open_read(struct audio_client *ac,
+		uint32_t format)
+{
+	int rc = 0x00;
+	struct asm_stream_cmd_open_read_v2 open;
+
+	uint16_t bits_per_sample = 16;
+
+
+	config_debug_fs_reset_index();
+
+	if ((ac == NULL) || (ac->apr == NULL)) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s:session[%d]", __func__, ac->session);
+
+	q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+	open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ_V2;
+	/* Stream prio : High, provide meta info with encoded frames */
+	open.src_endpointype = ASM_END_POINT_DEVICE_MATRIX;
+
+	open.preprocopo_id = get_asm_topology();
+	if (open.preprocopo_id == 0)
+		open.preprocopo_id = ASM_STREAM_POSTPROC_TOPO_ID_DEFAULT;
+	open.bits_per_sample = bits_per_sample;
+
+	switch (format) {
+	case FORMAT_LINEAR_PCM:
+		open.mode_flags = 0x00;
+		open.enc_cfg_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+		break;
+	case FORMAT_MPEG4_AAC:
+		open.mode_flags = BUFFER_META_ENABLE;
+		open.enc_cfg_id = ASM_MEDIA_FMT_AAC_V2;
+		break;
+	case FORMAT_V13K:
+		open.mode_flags = BUFFER_META_ENABLE;
+		open.enc_cfg_id = ASM_MEDIA_FMT_V13K_FS;
+		break;
+	case FORMAT_EVRC:
+		open.mode_flags = BUFFER_META_ENABLE;
+		open.enc_cfg_id = ASM_MEDIA_FMT_EVRC_FS;
+		break;
+	case FORMAT_AMRNB:
+		open.mode_flags = BUFFER_META_ENABLE;
+		open.enc_cfg_id = ASM_MEDIA_FMT_AMRNB_FS;
+		break;
+	case FORMAT_AMRWB:
+		open.mode_flags = BUFFER_META_ENABLE;
+		open.enc_cfg_id = ASM_MEDIA_FMT_AMRWB_FS;
+		break;
+	default:
+		pr_err("Invalid format[%d]\n", format);
+		goto fail_cmd;
+	}
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+	if (rc < 0) {
+		pr_err("open failed op[0x%x]rc[%d]\n", \
+						open.hdr.opcode, rc);
+		goto fail_cmd;
+	}
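+	/*
+	 * q6asm_add_hdr() set cmd_state to 1; the APR callback clears it
+	 * when the response arrives, which releases this wait.
+	 */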
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for open read rc[%d]\n", __func__,
+			rc);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+int q6asm_open_write(struct audio_client *ac, uint32_t format)
+{
+	int rc = 0x00;
+	struct asm_stream_cmd_open_write_v2 open;
+
+	if ((ac == NULL) || (ac->apr == NULL)) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: session[%d] wr_format[0x%x]", __func__, ac->session,
+		format);
+
+	q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+
+	open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE_V2;
+	open.mode_flags = 0x00;
+	/* source endpoint : matrix */
+	open.sink_endpointype = ASM_END_POINT_DEVICE_MATRIX;
+	open.bits_per_sample = 16;
+
+	open.postprocopo_id = get_asm_topology();
+	if (open.postprocopo_id == 0)
+		open.postprocopo_id = ASM_STREAM_POSTPROC_TOPO_ID_DEFAULT;
+
+	switch (format) {
+	case FORMAT_LINEAR_PCM:
+		open.dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+		break;
+	case FORMAT_MPEG4_AAC:
+		open.dec_fmt_id = ASM_MEDIA_FMT_AAC_V2;
+		break;
+	case FORMAT_MPEG4_MULTI_AAC:
+		open.dec_fmt_id = ASM_MEDIA_FMT_DOLBY_AAC;
+		break;
+	case FORMAT_WMA_V9:
+		open.dec_fmt_id = ASM_MEDIA_FMT_WMA_V9_V2;
+		break;
+	case FORMAT_WMA_V10PRO:
+		open.dec_fmt_id = ASM_MEDIA_FMT_WMA_V10PRO_V2;
+		break;
+	case FORMAT_MP3:
+		open.dec_fmt_id = ASM_MEDIA_FMT_MP3;
+		break;
+	default:
+		pr_err("%s: Invalid format[%d]\n", __func__, format);
+		goto fail_cmd;
+	}
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+	if (rc < 0) {
+		pr_err("%s: open failed op[0x%x]rc[%d]\n", \
+					__func__, open.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for open write rc[%d]\n", __func__,
+			rc);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_open_read_write(struct audio_client *ac,
+			uint32_t rd_format,
+			uint32_t wr_format)
+{
+	int rc = 0x00;
+	struct asm_stream_cmd_open_readwrite_v2 open;
+
+	if ((ac == NULL) || (ac->apr == NULL)) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("%s: session[%d]", __func__, ac->session);
+	pr_debug("wr_format[0x%x]rd_format[0x%x]",
+				wr_format, rd_format);
+
+	q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+	open.hdr.opcode = ASM_STREAM_CMD_OPEN_READWRITE_V2;
+
+	open.mode_flags = BUFFER_META_ENABLE;
+	open.bits_per_sample = 16;
+	/* source endpoint : matrix */
+	open.postprocopo_id = get_asm_topology();
+	if (open.postprocopo_id == 0)
+		open.postprocopo_id = ASM_STREAM_POSTPROC_TOPO_ID_DEFAULT;
+
+	switch (wr_format) {
+	case FORMAT_LINEAR_PCM:
+		open.dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+		break;
+	case FORMAT_MPEG4_AAC:
+		open.dec_fmt_id = ASM_MEDIA_FMT_AAC_V2;
+		break;
+	case FORMAT_MPEG4_MULTI_AAC:
+		open.dec_fmt_id = ASM_MEDIA_FMT_DOLBY_AAC;
+		break;
+	case FORMAT_WMA_V9:
+		open.dec_fmt_id = ASM_MEDIA_FMT_WMA_V9_V2;
+		break;
+	case FORMAT_WMA_V10PRO:
+		open.dec_fmt_id = ASM_MEDIA_FMT_WMA_V10PRO_V2;
+		break;
+	case FORMAT_AMRNB:
+		open.dec_fmt_id = ASM_MEDIA_FMT_AMRNB_FS;
+		break;
+	case FORMAT_AMRWB:
+		open.dec_fmt_id = ASM_MEDIA_FMT_AMRWB_FS;
+		break;
+	case FORMAT_V13K:
+		open.dec_fmt_id = ASM_MEDIA_FMT_V13K_FS;
+		break;
+	case FORMAT_EVRC:
+		open.dec_fmt_id = ASM_MEDIA_FMT_EVRC_FS;
+		break;
+	case FORMAT_EVRCB:
+		open.dec_fmt_id = ASM_MEDIA_FMT_EVRCB_FS;
+		break;
+	case FORMAT_EVRCWB:
+		open.dec_fmt_id = ASM_MEDIA_FMT_EVRCWB_FS;
+		break;
+	case FORMAT_MP3:
+		open.dec_fmt_id = ASM_MEDIA_FMT_MP3;
+		break;
+	default:
+		pr_err("Invalid format[%d]\n", wr_format);
+		goto fail_cmd;
+	}
+
+	switch (rd_format) {
+	case FORMAT_LINEAR_PCM:
+		open.enc_cfg_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+		break;
+	case FORMAT_MPEG4_AAC:
+		open.enc_cfg_id = ASM_MEDIA_FMT_AAC_V2;
+		break;
+	case FORMAT_V13K:
+		open.enc_cfg_id = ASM_MEDIA_FMT_V13K_FS;
+		break;
+	case FORMAT_EVRC:
+		open.enc_cfg_id = ASM_MEDIA_FMT_EVRC_FS;
+		break;
+	case FORMAT_AMRNB:
+		open.enc_cfg_id = ASM_MEDIA_FMT_AMRNB_FS;
+		break;
+	case FORMAT_AMRWB:
+		open.enc_cfg_id = ASM_MEDIA_FMT_AMRWB_FS;
+		break;
+	default:
+		pr_err("Invalid format[%d]\n", rd_format);
+		goto fail_cmd;
+	}
+	pr_debug("%s:rdformat[0x%x]wrformat[0x%x]\n", __func__,
+			open.enc_cfg_id, open.dec_fmt_id);
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+	if (rc < 0) {
+		pr_err("open failed op[0x%x]rc[%d]\n", \
+						open.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout. waited for open read-write rc[%d]\n", rc);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_run(struct audio_client *ac, uint32_t flags,
+		uint32_t msw_ts, uint32_t lsw_ts)
+{
+	struct asm_session_cmd_run_v2 run;
+	int rc;
+	if (!ac || ac->apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("%s session[%d]", __func__, ac->session);
+	q6asm_add_hdr(ac, &run.hdr, sizeof(run), TRUE);
+
+	run.hdr.opcode = ASM_SESSION_CMD_RUN_V2;
+	run.flags    = flags;
+	run.time_lsw = lsw_ts;
+	run.time_msw = msw_ts;
+
+	config_debug_fs_run();
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &run);
+	if (rc < 0) {
+		pr_err("Command run failed[%d]", rc);
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout. waited for run success rc[%d]", rc);
+		goto fail_cmd;
+	}
+
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_run_nowait(struct audio_client *ac, uint32_t flags,
+		uint32_t msw_ts, uint32_t lsw_ts)
+{
+	struct asm_session_cmd_run_v2 run;
+	int rc;
+	if (!ac || ac->apr == NULL) {
+		pr_err("%s:APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("session[%d]", ac->session);
+	q6asm_add_hdr_async(ac, &run.hdr, sizeof(run), TRUE);
+
+	run.hdr.opcode = ASM_SESSION_CMD_RUN_V2;
+	run.flags    = flags;
+	run.time_lsw = lsw_ts;
+	run.time_msw = msw_ts;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &run);
+	if (rc < 0) {
+		pr_err("%s:Command run failed[%d]", __func__, rc);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+
+int q6asm_enc_cfg_blk_aac(struct audio_client *ac,
+			 uint32_t frames_per_buf,
+			uint32_t sample_rate, uint32_t channels,
+			uint32_t bit_rate, uint32_t mode, uint32_t format)
+{
+	struct asm_aac_enc_cfg_v2 enc_cfg;
+	int rc = 0;
+
+	pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d]"
+		"format[%d]", __func__, ac->session, frames_per_buf,
+		sample_rate, channels, bit_rate, mode, format);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(struct asm_aac_enc_cfg_v2) -
+				sizeof(struct asm_stream_cmd_set_encdec_param);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size  = enc_cfg.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+	enc_cfg.bit_rate = bit_rate;
+	enc_cfg.enc_mode = mode;
+	enc_cfg.aac_fmt_flag = format;
+	enc_cfg.channel_cfg = channels;
+	enc_cfg.sample_rate = sample_rate;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("Command %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout. waited for FORMAT_UPDATE\n");
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_set_encdec_chan_map(struct audio_client *ac,
+			uint32_t num_channels)
+{
+	/* TODO: configure the encoder/decoder channel map */
+	return 0;
+}
+
+int q6asm_enc_cfg_blk_pcm(struct audio_client *ac,
+			uint32_t rate, uint32_t channels)
+{
+	struct asm_multi_channel_pcm_enc_cfg_v2  enc_cfg;
+	u8 *channel_mapping;
+	u32 frames_per_buf = 0;
+
+	int rc = 0;
+
+	pr_debug("%s: Session %d, rate = %d, channels = %d\n", __func__,
+			 ac->session, rate, channels);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) -
+				sizeof(enc_cfg.encdec);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size  = enc_cfg.encdec.param_size -
+					sizeof(struct asm_enc_cfg_blk_param_v2);
+
+	enc_cfg.num_channels = channels;
+	enc_cfg.bits_per_sample = 16;
+	enc_cfg.sample_rate = rate;
+	enc_cfg.is_signed = 1;
+	channel_mapping = enc_cfg.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (channels == 1)  {
+		channel_mapping[0] = PCM_CHANNEL_FL;
+	} else if (channels == 2) {
+		channel_mapping[0] = PCM_CHANNEL_FL;
+		channel_mapping[1] = PCM_CHANNEL_FR;
+	} else if (channels == 6) {
+		channel_mapping[0] = PCM_CHANNEL_FC;
+		channel_mapping[1] = PCM_CHANNEL_FL;
+		channel_mapping[2] = PCM_CHANNEL_FR;
+		channel_mapping[3] = PCM_CHANNEL_LB;
+		channel_mapping[4] = PCM_CHANNEL_RB;
+		channel_mapping[5] = PCM_CHANNEL_LFE;
+	} else {
+		pr_err("%s: ERROR.unsupported num_ch = %u\n", __func__,
+				channels);
+		return -EINVAL;
+	}
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("Command open failed\n");
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_enable_sbrps(struct audio_client *ac,
+			uint32_t sbr_ps_enable)
+{
+	struct asm_aac_sbr_ps_flag_param  sbrps;
+	u32 frames_per_buf = 0;
+
+	int rc = 0;
+
+	pr_debug("%s: Session %d\n", __func__, ac->session);
+
+	q6asm_add_hdr(ac, &sbrps.hdr, sizeof(sbrps), TRUE);
+
+	sbrps.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	sbrps.encdec.param_id = ASM_PARAM_ID_AAC_SBR_PS_FLAG;
+	sbrps.encdec.param_size = sizeof(struct asm_aac_sbr_ps_flag_param) -
+				sizeof(struct asm_stream_cmd_set_encdec_param);
+	sbrps.encblk.frames_per_buf = frames_per_buf;
+	sbrps.encblk.enc_cfg_blk_size  = sbrps.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+
+	sbrps.sbr_ps_flag = sbr_ps_enable;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &sbrps);
+	if (rc < 0) {
+		pr_err("Command opcode[0x%x]paramid[0x%x] failed\n",
+				ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+				ASM_PARAM_ID_AAC_SBR_PS_FLAG);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout opcode[0x%x] ", sbrps.hdr.opcode);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_cfg_dual_mono_aac(struct audio_client *ac,
+			uint16_t sce_left, uint16_t sce_right)
+{
+	struct asm_aac_dual_mono_mapping_param dual_mono;
+	u32 frames_per_buf = 0;
+
+	int rc = 0;
+
+	pr_debug("%s: Session %d, sce_left = %d, sce_right = %d\n",
+			 __func__, ac->session, sce_left, sce_right);
+
+	q6asm_add_hdr(ac, &dual_mono.hdr, sizeof(dual_mono), TRUE);
+
+	dual_mono.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	dual_mono.encdec.param_id = ASM_PARAM_ID_AAC_DUAL_MONO_MAPPING;
+	dual_mono.encdec.param_size =
+			sizeof(struct asm_aac_dual_mono_mapping_param) -
+			sizeof(struct asm_stream_cmd_set_encdec_param);
+	dual_mono.encblk.frames_per_buf = frames_per_buf;
+	dual_mono.encblk.enc_cfg_blk_size  = dual_mono.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+	dual_mono.left_channel_sce = sce_left;
+	dual_mono.right_channel_sce = sce_right;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &dual_mono);
+	if (rc < 0) {
+		pr_err("%s:Command opcode[0x%x]paramid[0x%x] failed\n",
+				__func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+				ASM_PARAM_ID_AAC_DUAL_MONO_MAPPING);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s:timeout opcode[0x%x]\n", __func__,
+						dual_mono.hdr.opcode);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf,
+		uint16_t min_rate, uint16_t max_rate,
+		uint16_t reduced_rate_level, uint16_t rate_modulation_cmd)
+{
+	struct asm_v13k_enc_cfg enc_cfg;
+	int rc = 0;
+
+	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x]"
+		"reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]", __func__,
+		ac->session, frames_per_buf, min_rate, max_rate,
+		reduced_rate_level, rate_modulation_cmd);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(struct asm_v13k_enc_cfg) -
+				sizeof(struct asm_stream_cmd_set_encdec_param);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size  = enc_cfg.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+
+	enc_cfg.min_rate = min_rate;
+	enc_cfg.max_rate = max_rate;
+	enc_cfg.reduced_rate_cmd = reduced_rate_level;
+	enc_cfg.rate_mod_cmd = rate_modulation_cmd;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("Command %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout. waited for setencdec v13k resp\n");
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_enc_cfg_blk_evrc(struct audio_client *ac, uint32_t frames_per_buf,
+		uint16_t min_rate, uint16_t max_rate,
+		uint16_t rate_modulation_cmd)
+{
+	struct asm_evrc_enc_cfg enc_cfg;
+	int rc = 0;
+
+	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x]"
+		"rate_modulation_cmd[0x%4x]", __func__, ac->session,
+		frames_per_buf,	min_rate, max_rate, rate_modulation_cmd);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(struct asm_evrc_enc_cfg) -
+				sizeof(struct asm_stream_cmd_set_encdec_param);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size  = enc_cfg.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+
+	enc_cfg.min_rate = min_rate;
+	enc_cfg.max_rate = max_rate;
+	enc_cfg.rate_mod_cmd = rate_modulation_cmd;
+	enc_cfg.reserved = 0;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("Command %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout. waited for encdec evrc\n");
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_enc_cfg_blk_amrnb(struct audio_client *ac, uint32_t frames_per_buf,
+			uint16_t band_mode, uint16_t dtx_enable)
+{
+	struct asm_amrnb_enc_cfg enc_cfg;
+	int rc = 0;
+
+	pr_debug("%s:session[%d]frames[%d]band_mode[0x%4x]dtx_enable[0x%4x]",
+		__func__, ac->session, frames_per_buf, band_mode, dtx_enable);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(struct asm_amrnb_enc_cfg) -
+				sizeof(struct asm_stream_cmd_set_encdec_param);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size  = enc_cfg.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+
+	enc_cfg.enc_mode = band_mode;
+	enc_cfg.dtx_mode = dtx_enable;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("Command %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout. waited for set encdec amrnb\n");
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_enc_cfg_blk_amrwb(struct audio_client *ac, uint32_t frames_per_buf,
+			uint16_t band_mode, uint16_t dtx_enable)
+{
+	struct asm_amrwb_enc_cfg enc_cfg;
+	int rc = 0;
+
+	pr_debug("%s:session[%d]frames[%d]band_mode[0x%4x]dtx_enable[0x%4x]",
+		__func__, ac->session, frames_per_buf, band_mode, dtx_enable);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(struct asm_amrwb_enc_cfg) -
+				sizeof(struct asm_stream_cmd_set_encdec_param);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size  = enc_cfg.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+
+	enc_cfg.enc_mode = band_mode;
+	enc_cfg.dtx_mode = dtx_enable;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("Command %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout. waited for FORMAT_UPDATE\n");
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+
+int q6asm_media_format_block_aac(struct audio_client *ac,
+			struct asm_aac_cfg *cfg)
+{
+	return q6asm_media_format_block_multi_aac(ac, cfg);
+}
+
+int q6asm_media_format_block_pcm(struct audio_client *ac,
+				uint32_t rate, uint32_t channels)
+{
+	struct asm_multi_channel_pcm_fmt_blk_v2 fmt;
+	u8 *channel_mapping;
+	int rc = 0;
+
+	pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session, rate,
+		channels);
+
+	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+					sizeof(fmt.fmt_blk);
+	fmt.num_channels = channels;
+	fmt.bits_per_sample = 16;
+	fmt.sample_rate = rate;
+	fmt.is_signed = 1;
+
+	channel_mapping = fmt.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (channels == 1)  {
+		channel_mapping[0] = PCM_CHANNEL_FL;
+	} else if (channels == 2) {
+		channel_mapping[0] = PCM_CHANNEL_FL;
+		channel_mapping[1] = PCM_CHANNEL_FR;
+	} else if (channels == 6) {
+		channel_mapping[0] = PCM_CHANNEL_FC;
+		channel_mapping[1] = PCM_CHANNEL_FL;
+		channel_mapping[2] = PCM_CHANNEL_FR;
+		channel_mapping[3] = PCM_CHANNEL_LB;
+		channel_mapping[4] = PCM_CHANNEL_RB;
+		channel_mapping[5] = PCM_CHANNEL_LFE;
+	} else {
+		pr_err("%s: ERROR.unsupported num_ch = %u\n", __func__,
+				channels);
+		return -EINVAL;
+	}
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s:Command open failed\n", __func__);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s:timeout. waited for format update\n", __func__);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_media_format_block_multi_aac(struct audio_client *ac,
+				struct asm_aac_cfg *cfg)
+{
+	struct asm_aac_fmt_blk_v2 fmt;
+	int rc = 0;
+
+	pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session,
+		cfg->sample_rate, cfg->ch_cfg);
+
+	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+					sizeof(fmt.fmt_blk);
+	fmt.aac_fmt_flag = cfg->format;
+	fmt.audio_objype = cfg->aot;
+	/* If zero, PCE is assumed to be available in bitstream*/
+	fmt.total_size_of_PCE_bits = 0;
+	fmt.channel_config = cfg->ch_cfg;
+	fmt.sample_rate = cfg->sample_rate;
+
+	pr_info("%s:format=%x cfg_size=%d aac-cfg=%x aot=%d ch=%d sr=%d\n",
+			__func__, fmt.aac_fmt_flag, fmt.fmt_blk.fmt_blk_size,
+			fmt.aac_fmt_flag,
+			fmt.audio_objype,
+			fmt.channel_config,
+			fmt.sample_rate);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s:Command open failed\n", __func__);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_media_format_block_wma(struct audio_client *ac,
+				void *cfg)
+{
+	struct asm_wmastdv9_fmt_blk_v2 fmt;
+	struct asm_wma_cfg *wma_cfg = (struct asm_wma_cfg *)cfg;
+	int rc = 0;
+
+	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],"
+		"balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n",
+		ac->session, wma_cfg->format_tag, wma_cfg->sample_rate,
+		wma_cfg->ch_cfg, wma_cfg->avg_bytes_per_sec,
+		wma_cfg->block_align, wma_cfg->valid_bits_per_sample,
+		wma_cfg->ch_mask, wma_cfg->encode_opt);
+
+	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+
+	fmt.fmtag = wma_cfg->format_tag;
+	fmt.num_channels = wma_cfg->ch_cfg;
+	fmt.sample_rate = wma_cfg->sample_rate;
+	fmt.avg_bytes_per_sec = wma_cfg->avg_bytes_per_sec;
+	fmt.blk_align = wma_cfg->block_align;
+	fmt.bits_per_sample =
+			wma_cfg->valid_bits_per_sample;
+	fmt.channel_mask = wma_cfg->ch_mask;
+	fmt.enc_options = wma_cfg->encode_opt;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s:Command open failed\n", __func__);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_media_format_block_wmapro(struct audio_client *ac,
+				void *cfg)
+{
+	struct asm_wmaprov10_fmt_blk_v2 fmt;
+	struct asm_wmapro_cfg *wmapro_cfg = (struct asm_wmapro_cfg *)cfg;
+	int rc = 0;
+
+	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],"
+		"balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x],"
+		"adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n",
+		ac->session, wmapro_cfg->format_tag, wmapro_cfg->sample_rate,
+		wmapro_cfg->ch_cfg,  wmapro_cfg->avg_bytes_per_sec,
+		wmapro_cfg->block_align, wmapro_cfg->valid_bits_per_sample,
+		wmapro_cfg->ch_mask, wmapro_cfg->encode_opt,
+		wmapro_cfg->adv_encode_opt, wmapro_cfg->adv_encode_opt2);
+
+	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+
+	fmt.fmtag = wmapro_cfg->format_tag;
+	fmt.num_channels = wmapro_cfg->ch_cfg;
+	fmt.sample_rate = wmapro_cfg->sample_rate;
+	fmt.avg_bytes_per_sec =
+				wmapro_cfg->avg_bytes_per_sec;
+	fmt.blk_align = wmapro_cfg->block_align;
+	fmt.bits_per_sample = wmapro_cfg->valid_bits_per_sample;
+	fmt.channel_mask = wmapro_cfg->ch_mask;
+	fmt.enc_options = wmapro_cfg->encode_opt;
+	fmt.usAdvancedEncodeOpt = wmapro_cfg->adv_encode_opt;
+	fmt.advanced_enc_options2 = wmapro_cfg->adv_encode_opt2;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s:Command open failed\n", __func__);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_memory_map(struct audio_client *ac, uint32_t buf_add, int dir,
+				uint32_t bufsz, uint32_t bufcnt)
+{
+	struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
+	struct avs_shared_map_region_payload  *mregions = NULL;
+	struct audio_port_data *port = NULL;
+	struct audio_buffer *ab = NULL;
+	void	*mmap_region_cmd = NULL;
+	void	*payload = NULL;
+	struct asm_buffer_node *buffer_node = NULL;
+	int	rc = 0;
+	int	i = 0;
+	int	cmd_size = 0;
+
+	if (!ac || ac->apr == NULL || ac->mmap_apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("%s: Session[%d]\n", __func__, ac->session);
+
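+	/*
+	 * Each mapped buffer is tracked with an asm_buffer_node so that
+	 * later reads/writes and the unmap path can translate a physical
+	 * address back into the DSP's mem_map_handle.
+	 */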
+	buffer_node = kmalloc(sizeof(struct asm_buffer_node), GFP_KERNEL);
+	if (!buffer_node)
+		return -ENOMEM;
+	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
+			+ sizeof(struct avs_shared_map_region_payload) * bufcnt;
+
+	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if (mmap_region_cmd == NULL) {
+		pr_err("%s: Mem alloc failed\n", __func__);
+		kfree(buffer_node);
+		return -EINVAL;
+	}
+	mmap_regions = (struct avs_cmd_shared_mem_map_regions *)
+							mmap_region_cmd;
+	q6asm_add_mmaphdr(ac, &mmap_regions->hdr, cmd_size,
+			TRUE, ((ac->session << 8) | dir));
+	mmap_regions->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
+	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_EBI_POOL;
+	mmap_regions->num_regions = bufcnt & 0x00ff;
+	mmap_regions->property_flag = 0x00;
+	pr_debug("map_regions->nregions = %d\n", mmap_regions->num_regions);
+	payload = ((u8 *) mmap_region_cmd +
+		sizeof(struct avs_cmd_shared_mem_map_regions));
+	mregions = (struct avs_shared_map_region_payload *)payload;
+
+	ac->port[dir].tmp_hdl = 0;
+	port = &ac->port[dir];
+	for (i = 0; i < bufcnt; i++) {
+		ab = &port->buf[i];
+		mregions->shm_addr_lsw = ab->phys;
+		/* Using only 32 bit address */
+		mregions->shm_addr_msw = 0;
+		mregions->mem_size_bytes = ab->size;
+		++mregions;
+	}
+
+	rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) mmap_region_cmd);
+	if (rc < 0) {
+		pr_err("mmap op[0x%x]rc[%d]\n",
+					mmap_regions->hdr.opcode, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0 &&
+			 ac->port[dir].tmp_hdl), 5*HZ);
+	if (!rc) {
+		pr_err("timeout. waited for memory_map\n");
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	buffer_node->buf_addr_lsw = buf_add;
+	buffer_node->mmap_hdl = ac->port[dir].tmp_hdl;
+	list_add_tail(&buffer_node->list, &ac->port[dir].mem_map_handle);
+	ac->port[dir].tmp_hdl = 0;
+	rc = 0;
+
+fail_cmd:
+	if (rc)
+		kfree(buffer_node);
+	kfree(mmap_region_cmd);
+	return rc;
+}
+
+int q6asm_memory_unmap(struct audio_client *ac, uint32_t buf_add, int dir)
+{
+	struct avs_cmd_shared_mem_unmap_regions mem_unmap;
+	struct asm_buffer_node *buf_node = NULL;
+	struct list_head *ptr, *next;
+
+	int rc = 0;
+
+	if (!ac || ac->apr == NULL || this_mmap.apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("%s: Session[%d]\n", __func__, ac->session);
+
+	q6asm_add_mmaphdr(ac, &mem_unmap.hdr,
+			sizeof(struct avs_cmd_shared_mem_unmap_regions),
+			TRUE, ((ac->session << 8) | dir));
+
+	mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
+	mem_unmap.mem_map_handle = 0;
+	list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node,
+						list);
+		if (buf_node->buf_addr_lsw == buf_add) {
+			pr_info("%s: Found the element\n", __func__);
+			mem_unmap.mem_map_handle = buf_node->mmap_hdl;
+			break;
+		}
+	}
+	pr_debug("%s: mem_unmap-mem_map_handle: 0x%x",
+		__func__, mem_unmap.mem_map_handle);
+	rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) &mem_unmap);
+	if (rc < 0) {
+		pr_err("mem_unmap op[0x%x]rc[%d]\n",
+					mem_unmap.hdr.opcode, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5 * HZ);
+	if (!rc) {
+		pr_err("timeout. waited for memory_unmap\n");
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node,
+						list);
+		if (buf_node->buf_addr_lsw == buf_add) {
+			list_del(&buf_node->list);
+			kfree(buf_node);
+		}
+	}
+
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+
+static int q6asm_memory_map_regions(struct audio_client *ac, int dir,
+				uint32_t bufsz, uint32_t bufcnt)
+{
+	struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
+	struct avs_shared_map_region_payload  *mregions = NULL;
+	struct audio_port_data *port = NULL;
+	struct audio_buffer *ab = NULL;
+	void	*mmap_region_cmd = NULL;
+	void	*payload = NULL;
+	struct asm_buffer_node *buffer_node = NULL;
+	int	rc = 0;
+	int	i = 0;
+	int	cmd_size = 0;
+
+	if (!ac || ac->apr == NULL || ac->mmap_apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("%s: Session[%d]\n", __func__, ac->session);
+
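+	/*
+	 * Only a single region covering bufsz * bufcnt bytes is mapped
+	 * here; the buffers are assumed to be physically contiguous and
+	 * every buffer_node records the same map handle.
+	 */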
+	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
+			+ (sizeof(struct avs_shared_map_region_payload));
+
+	buffer_node = kzalloc(sizeof(struct asm_buffer_node) * bufcnt,
+				GFP_KERNEL);
+
+	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if ((mmap_region_cmd == NULL) || (buffer_node == NULL)) {
+		pr_err("%s: Mem alloc failed\n", __func__);
+		kfree(mmap_region_cmd);
+		kfree(buffer_node);
+		return -EINVAL;
+	}
+	mmap_regions = (struct avs_cmd_shared_mem_map_regions *)
+							mmap_region_cmd;
+	q6asm_add_mmaphdr(ac, &mmap_regions->hdr, cmd_size, TRUE,
+					((ac->session << 8) | dir));
+	pr_debug("mmap_region=0x%p token=0x%x\n",
+		mmap_regions, ((ac->session << 8) | dir));
+
+	mmap_regions->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
+	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_EBI_POOL;
+	mmap_regions->num_regions = 1; /*bufcnt & 0x00ff; */
+	mmap_regions->property_flag = 0x00;
+	pr_debug("map_regions->nregions = %d\n", mmap_regions->num_regions);
+	payload = ((u8 *) mmap_region_cmd +
+		sizeof(struct avs_cmd_shared_mem_map_regions));
+	mregions = (struct avs_shared_map_region_payload *)payload;
+
+	ac->port[dir].tmp_hdl = 0;
+	port = &ac->port[dir];
+	ab = &port->buf[0];
+	mregions->shm_addr_lsw = ab->phys;
+	/* Using only 32 bit address */
+	mregions->shm_addr_msw = 0;
+	mregions->mem_size_bytes = (bufsz * bufcnt);
+
+	rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) mmap_region_cmd);
+	if (rc < 0) {
+		pr_err("mmap_regions op[0x%x]rc[%d]\n",
+					mmap_regions->hdr.opcode, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout. waited for memory_map\n");
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	mutex_lock(&ac->cmd_lock);
+
+	for (i = 0; i < bufcnt; i++) {
+		ab = &port->buf[i];
+		buffer_node[i].buf_addr_lsw = ab->phys;
+		buffer_node[i].mmap_hdl = ac->port[dir].tmp_hdl;
+		list_add_tail(&buffer_node[i].list,
+			&ac->port[dir].mem_map_handle);
+		pr_debug("%s: i=%d, bufadd[i] = 0x%x, maphdl[i] = 0x%x\n",
+			__func__, i, buffer_node[i].buf_addr_lsw,
+			buffer_node[i].mmap_hdl);
+	}
+	ac->port[dir].tmp_hdl = 0;
+	mutex_unlock(&ac->cmd_lock);
+	rc = 0;
+	pr_debug("%s: exit\n", __func__);
+fail_cmd:
+	if (rc)
+		kfree(buffer_node);
+	kfree(mmap_region_cmd);
+	return rc;
+}
+
+static int q6asm_memory_unmap_regions(struct audio_client *ac, int dir,
+				uint32_t bufsz, uint32_t bufcnt)
+{
+	struct avs_cmd_shared_mem_unmap_regions mem_unmap;
+	struct audio_port_data *port = NULL;
+	struct asm_buffer_node *buf_node = NULL;
+	struct list_head *ptr, *next;
+	uint32_t buf_add;
+	int	rc = 0;
+	int	cmd_size = 0;
+
+	if (!ac || ac->apr == NULL || ac->mmap_apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("%s: Session[%d]\n", __func__, ac->session);
+
+	cmd_size = sizeof(struct avs_cmd_shared_mem_unmap_regions);
+	q6asm_add_mmaphdr(ac, &mem_unmap.hdr, cmd_size,
+			TRUE, ((ac->session << 8) | dir));
+	port = &ac->port[dir];
+	buf_add = (uint32_t)port->buf->phys;
+	mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
+	mem_unmap.mem_map_handle = 0;
+	list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node,
+						list);
+		if (buf_node->buf_addr_lsw == buf_add) {
+			pr_debug("%s: Found the element\n", __func__);
+			mem_unmap.mem_map_handle = buf_node->mmap_hdl;
+			break;
+		}
+	}
+
+	pr_debug("%s: mem_unmap-mem_map_handle: 0x%x",
+			__func__, mem_unmap.mem_map_handle);
+	rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) &mem_unmap);
+	if (rc < 0) {
+		pr_err("mem_unmap op[0x%x]rc[%d]\n",
+					mem_unmap.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout. waited for memory_unmap\n");
+		goto fail_cmd;
+	}
+	list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node,
+						list);
+		if (buf_node->buf_addr_lsw == buf_add) {
+			list_del(&buf_node->list);
+			kfree(buf_node);
+		}
+	}
+	rc = 0;
+
+fail_cmd:
+	return rc;
+}
+
+int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain)
+{
+	struct asm_volume_ctrl_lr_chan_gain lrgain;
+	int sz = 0;
+	int rc  = 0;
+
+	sz = sizeof(struct asm_volume_ctrl_lr_chan_gain);
+	q6asm_add_hdr_async(ac, &lrgain.hdr, sz, TRUE);
+	lrgain.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
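+	/*
+	 * In-band set-params: the payload address and map handle are zero
+	 * and the parameter data follows the header in the same packet.
+	 */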
+	lrgain.param.data_payload_addr_lsw = 0;
+	lrgain.param.data_payload_addr_msw = 0;
+	lrgain.param.mem_map_handle = 0;
+	lrgain.param.data_payload_size = sizeof(lrgain) -
+				sizeof(lrgain.hdr) - sizeof(lrgain.param);
+	lrgain.data.module_id = ASM_MODULE_ID_VOL_CTRL;
+	lrgain.data.param_id = ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN;
+	lrgain.data.param_size = lrgain.param.data_payload_size -
+				sizeof(lrgain.data);
+	lrgain.data.reserved = 0;
+	lrgain.l_chan_gain = left_gain;
+	lrgain.r_chan_gain = right_gain;
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &lrgain);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x]\n", __func__,
+						lrgain.data.param_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+						lrgain.data.param_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_set_mute(struct audio_client *ac, int muteflag)
+{
+	struct asm_volume_ctrl_mute_config mute;
+	int sz = 0;
+	int rc  = 0;
+
+	sz = sizeof(struct asm_volume_ctrl_mute_config);
+	q6asm_add_hdr_async(ac, &mute.hdr, sz, TRUE);
+	mute.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+	mute.param.data_payload_addr_lsw = 0;
+	mute.param.data_payload_addr_msw = 0;
+	mute.param.mem_map_handle = 0;
+	mute.param.data_payload_size = sizeof(mute) -
+				sizeof(mute.hdr) - sizeof(mute.param);
+	mute.data.module_id = ASM_MODULE_ID_VOL_CTRL;
+	mute.data.param_id = ASM_PARAM_ID_VOL_CTRL_MUTE_CONFIG;
+	mute.data.param_size = mute.param.data_payload_size - sizeof(mute.data);
+	mute.data.reserved = 0;
+	mute.mute_flag = muteflag;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &mute);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x]\n", __func__,
+						mute.data.param_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+						mute.data.param_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_set_volume(struct audio_client *ac, int volume)
+{
+	struct asm_volume_ctrl_master_gain vol;
+	int sz = 0;
+	int rc  = 0;
+
+	sz = sizeof(struct asm_volume_ctrl_master_gain);
+	q6asm_add_hdr_async(ac, &vol.hdr, sz, TRUE);
+	vol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+	vol.param.data_payload_addr_lsw = 0;
+	vol.param.data_payload_addr_msw = 0;
+
+
+	vol.param.mem_map_handle = 0;
+	vol.param.data_payload_size = sizeof(vol) -
+				sizeof(vol.hdr) - sizeof(vol.param);
+	vol.data.module_id = ASM_MODULE_ID_VOL_CTRL;
+	vol.data.param_id = ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN;
+	vol.data.param_size = vol.param.data_payload_size - sizeof(vol.data);
+	vol.data.reserved = 0;
+	vol.master_gain = volume;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &vol);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x]\n", __func__,
+						vol.data.param_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+						vol.data.param_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+int q6asm_set_softpause(struct audio_client *ac,
+			struct asm_softpause_params *pause_param)
+{
+	struct asm_soft_pause_params softpause;
+	int sz = 0;
+	int rc  = 0;
+
+	sz = sizeof(struct asm_soft_pause_params);
+	q6asm_add_hdr_async(ac, &softpause.hdr, sz, TRUE);
+	softpause.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+
+	softpause.param.data_payload_addr_lsw = 0;
+	softpause.param.data_payload_addr_msw = 0;
+	softpause.param.mem_map_handle = 0;
+	softpause.param.data_payload_size = sizeof(softpause) -
+				sizeof(softpause.hdr) - sizeof(softpause.param);
+	softpause.data.module_id = ASM_MODULE_ID_VOL_CTRL;
+	softpause.data.param_id = ASM_PARAM_ID_SOFT_PAUSE_PARAMETERS;
+	softpause.data.param_size = softpause.param.data_payload_size -
+				sizeof(softpause.data);
+	softpause.data.reserved = 0;
+	softpause.enable_flag = pause_param->enable;
+	softpause.period = pause_param->period;
+	softpause.step = pause_param->step;
+	softpause.ramping_curve = pause_param->rampingcurve;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &softpause);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x]\n", __func__,
+						softpause.data.param_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+						softpause.data.param_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_set_softvolume(struct audio_client *ac,
+			struct asm_softvolume_params *softvol_param)
+{
+	struct asm_soft_step_volume_params softvol;
+	int sz = 0;
+	int rc  = 0;
+
+	sz = sizeof(struct asm_soft_step_volume_params);
+	q6asm_add_hdr_async(ac, &softvol.hdr, sz, TRUE);
+	softvol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+	softvol.param.data_payload_addr_lsw = 0;
+	softvol.param.data_payload_addr_msw = 0;
+	softvol.param.mem_map_handle = 0;
+	softvol.param.data_payload_size = sizeof(softvol) -
+				sizeof(softvol.hdr) - sizeof(softvol.param);
+	softvol.data.module_id = ASM_MODULE_ID_VOL_CTRL;
+	softvol.data.param_id = ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS;
+	softvol.data.param_size = softvol.param.data_payload_size -
+				sizeof(softvol.data);
+	softvol.data.reserved = 0;
+	softvol.period = softvol_param->period;
+	softvol.step = softvol_param->step;
+	softvol.ramping_curve = softvol_param->rampingcurve;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &softvol);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x]\n", __func__,
+						softvol.data.param_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+						softvol.data.param_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_equalizer(struct audio_client *ac, void *eq_p)
+{
+	struct asm_eq_params eq;
+	struct msm_audio_eq_stream_config *eq_params = NULL;
+	int i  = 0;
+	int sz = 0;
+	int rc  = 0;
+
+	if (eq_p == NULL) {
+		pr_err("%s[%d]: Invalid Eq param\n", __func__, ac->session);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	sz = sizeof(struct asm_eq_params);
+	eq_params = (struct msm_audio_eq_stream_config *) eq_p;
+	q6asm_add_hdr(ac, &eq.hdr, sz, TRUE);
+
+	eq.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+	eq.param.data_payload_addr_lsw = 0;
+	eq.param.data_payload_addr_msw = 0;
+	eq.param.mem_map_handle = 0;
+	eq.param.data_payload_size = sizeof(eq) -
+				sizeof(eq.hdr) - sizeof(eq.param);
+	eq.data.module_id = ASM_MODULE_ID_EQUALIZER;
+	eq.data.param_id = ASM_PARAM_ID_EQUALIZER_PARAMETERS;
+	eq.data.param_size = eq.param.data_payload_size - sizeof(eq.data);
+	eq.enable_flag = eq_params->enable;
+	eq.num_bands = eq_params->num_bands;
+
+	pr_debug("%s: enable:%d numbands:%d\n", __func__, eq_params->enable,
+							eq_params->num_bands);
+	for (i = 0; i < eq_params->num_bands; i++) {
+		eq.eq_bands[i].band_idx =
+					eq_params->eq_bands[i].band_idx;
+		eq.eq_bands[i].filterype =
+					eq_params->eq_bands[i].filter_type;
+		eq.eq_bands[i].center_freq_hz =
+					eq_params->eq_bands[i].center_freq_hz;
+		eq.eq_bands[i].filter_gain =
+					eq_params->eq_bands[i].filter_gain;
+		eq.eq_bands[i].q_factor =
+					eq_params->eq_bands[i].q_factor;
+		pr_debug("%s: filter_type:%u bandnum:%d\n", __func__,
+				eq_params->eq_bands[i].filter_type, i);
+		pr_debug("%s: center_freq_hz:%u bandnum:%d\n", __func__,
+				eq_params->eq_bands[i].center_freq_hz, i);
+		pr_debug("%s: filter_gain:%d bandnum:%d\n", __func__,
+				eq_params->eq_bands[i].filter_gain, i);
+		pr_debug("%s: q_factor:%d bandnum:%d\n", __func__,
+				eq_params->eq_bands[i].q_factor, i);
+	}
+	rc = apr_send_pkt(ac->apr, (uint32_t *)&eq);
+	if (rc < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x]\n", __func__,
+						eq.data.param_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+						eq.data.param_id);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
+int q6asm_read(struct audio_client *ac)
+{
+	struct asm_data_cmd_read_v2 read;
+	struct asm_buffer_node *buf_node = NULL;
+	struct list_head *ptr, *next;
+	struct audio_buffer        *ab;
+	int dsp_buf;
+	struct audio_port_data     *port;
+	int rc;
+	if (!ac || ac->apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	if (ac->io_mode == SYNC_IO_MODE) {
+		port = &ac->port[OUT];
+
+		q6asm_add_hdr(ac, &read.hdr, sizeof(read), FALSE);
+
+		mutex_lock(&port->lock);
+
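+		/*
+		 * dsp_buf indexes the buffer handed to the DSP next, while
+		 * cpu_buf tracks the buffer the client consumes through
+		 * q6asm_is_cpu_buf_avail().
+		 */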
+		dsp_buf = port->dsp_buf;
+		ab = &port->buf[dsp_buf];
+
+		pr_debug("%s:session[%d]dsp-buf[%d][%p]cpu_buf[%d][%p]\n",
+					__func__,
+					ac->session,
+					dsp_buf,
+					(void *)port->buf[dsp_buf].data,
+					port->cpu_buf,
+					(void *)port->buf[port->cpu_buf].phys);
+
+		read.hdr.opcode = ASM_DATA_CMD_READ_V2;
+		read.buf_addr_lsw = ab->phys;
+		read.buf_addr_msw = 0;
+
+		list_for_each_safe(ptr, next, &ac->port[OUT].mem_map_handle) {
+			buf_node = list_entry(ptr, struct asm_buffer_node,
+						list);
+			if (buf_node->buf_addr_lsw == (uint32_t) ab->phys)
+				read.mem_map_handle = buf_node->mmap_hdl;
+		}
+		pr_debug("memory_map handle in q6asm_read: [%0x]:",
+			read.mem_map_handle);
+		read.buf_size = ab->size;
+		read.seq_id = port->dsp_buf;
+		read.hdr.token = port->dsp_buf;
+		port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1);
+		mutex_unlock(&port->lock);
+		pr_debug("%s:buf add[0x%x] token[%d] uid[%d]\n", __func__,
+						read.buf_addr_lsw,
+						read.hdr.token,
+						read.seq_id);
+		rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
+		if (rc < 0) {
+			pr_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc);
+			goto fail_cmd;
+		}
+		return 0;
+	}
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_read_nolock(struct audio_client *ac)
+{
+	struct asm_data_cmd_read_v2 read;
+	struct asm_buffer_node *buf_node = NULL;
+	struct list_head *ptr, *next;
+	struct audio_buffer        *ab;
+	int dsp_buf;
+	struct audio_port_data     *port;
+	int rc;
+	if (!ac || ac->apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	if (ac->io_mode == SYNC_IO_MODE) {
+		port = &ac->port[OUT];
+
+		q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE);
+
+
+		dsp_buf = port->dsp_buf;
+		ab = &port->buf[dsp_buf];
+
+		pr_debug("%s:session[%d]dsp-buf[%d][%p]cpu_buf[%d][%p]\n",
+					__func__,
+					ac->session,
+					dsp_buf,
+					(void *)port->buf[dsp_buf].data,
+					port->cpu_buf,
+					(void *)port->buf[port->cpu_buf].phys);
+
+		read.hdr.opcode = ASM_DATA_CMD_READ_V2;
+		read.buf_addr_lsw = ab->phys;
+		read.buf_addr_msw = 0;
+		read.buf_size = ab->size;
+		read.seq_id = port->dsp_buf;
+		read.hdr.token = port->dsp_buf;
+
+		list_for_each_safe(ptr, next, &ac->port[OUT].mem_map_handle) {
+			buf_node = list_entry(ptr, struct asm_buffer_node,
+						list);
+			if (buf_node->buf_addr_lsw == (uint32_t)ab->phys) {
+				read.mem_map_handle = buf_node->mmap_hdl;
+				break;
+			}
+		}
+
+		port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1);
+		pr_debug("%s:buf add[0x%x] token[%d] uid[%d]\n", __func__,
+					read.buf_addr_lsw,
+					read.hdr.token,
+					read.seq_id);
+		pr_debug("q6asm_read_nolock mem-map handle is %x",
+				read.mem_map_handle);
+		rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
+		if (rc < 0) {
+			pr_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc);
+			goto fail_cmd;
+		}
+		return 0;
+	}
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_async_write(struct audio_client *ac,
+					  struct audio_aio_write_param *param)
+{
+	int rc = 0;
+	struct asm_data_cmd_write_v2 write;
+	struct asm_buffer_node *buf_node = NULL;
+	struct list_head *ptr, *next;
+	struct audio_buffer        *ab;
+	struct audio_port_data     *port;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	q6asm_add_hdr_async(ac, &write.hdr, sizeof(write), FALSE);
+
+	port = &ac->port[IN];
+	ab = &port->buf[port->dsp_buf];
+
+	/* Pass the client-supplied uid as the token for the AIO scheme */
+	write.hdr.token = param->uid;
+	write.hdr.opcode = ASM_DATA_CMD_WRITE_V2;
+	write.buf_addr_lsw = param->paddr;
+	write.buf_addr_msw = 0x00;
+	write.buf_size = param->len;
+	write.timestamp_msw = param->msw_ts;
+	write.timestamp_lsw = param->lsw_ts;
+	pr_debug("%s: token[0x%x], buf_addr_lsw[0x%x], buf_size[0x%x],"
+		"ts_msw[0x%x], ts_lsw[0x%x]\n",
+		__func__, write.hdr.token, write.buf_addr_lsw,
+		write.buf_size, write.timestamp_msw,
+		write.timestamp_lsw);
+	/* Use 0xFF00 for disabling timestamps */
+	if (param->flags == 0xFF00)
+		write.flags = (0x00000000 | (param->flags & 0x800000FF));
+	else
+		write.flags = (0x80000000 | param->flags);
+
+	write.seq_id = param->uid;
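+	/*
+	 * Find the memory-map handle registered for the buffer that backs
+	 * this write request.
+	 */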
+	list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node,
+						list);
+		if (buf_node->buf_addr_lsw == (uint32_t)write.buf_addr_lsw) {
+			write.mem_map_handle = buf_node->mmap_hdl;
+			pr_debug("%s:buf_node->mmap_hdl = 0x%x,"
+				"write.mem_map_handle = 0x%x\n",
+					__func__,
+					buf_node->mmap_hdl,
+					(uint32_t)write.mem_map_handle);
+			break;
+		}
+	}
+
+	pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x],"
+			"mem_map_handle[0x%x]\n", __func__, ac->session,
+		write.buf_addr_lsw, write.buf_size, write.mem_map_handle);
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
+	if (rc < 0) {
+		pr_debug("[%s] write op[0x%x]rc[%d]\n", __func__,
+			write.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_async_read(struct audio_client *ac,
+					  struct audio_aio_read_param *param)
+{
+	int rc = 0;
+	struct asm_data_cmd_read_v2 read;
+	struct asm_buffer_node *buf_node = NULL;
+	struct list_head *ptr, *next;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE);
+
+	/* Pass physical address as token for AIO scheme */
+	read.hdr.token = param->paddr;
+	read.hdr.opcode = ASM_DATA_CMD_READ_V2;
+	read.buf_addr_lsw = param->paddr;
+	read.buf_addr_msw = 0;
+	read.buf_size = param->len;
+	read.seq_id = param->uid;
+
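+	/*
+	 * Find the memory-map handle registered for the buffer that will
+	 * receive this read.
+	 */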
+	list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node,
+						list);
+		if (buf_node->buf_addr_lsw == param->paddr)
+			read.mem_map_handle = buf_node->mmap_hdl;
+	}
+
+	pr_debug("%s: session[%d] buf_addr[0x%x] len[0x%x]\n", __func__,
+		ac->session, read.buf_addr_lsw, read.buf_size);
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
+	if (rc < 0) {
+		pr_debug("[%s] read op[0x%x]rc[%d]\n", __func__,
+			read.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+		uint32_t lsw_ts, uint32_t flags)
+{
+	int rc = 0;
+	struct asm_data_cmd_write_v2 write;
+	struct asm_buffer_node *buf_node = NULL;
+	struct audio_port_data *port;
+	struct audio_buffer    *ab;
+	int dsp_buf = 0;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("%s: session[%d] len=%d\n", __func__, ac->session, len);
+	if (ac->io_mode == SYNC_IO_MODE) {
+		port = &ac->port[IN];
+
+		q6asm_add_hdr(ac, &write.hdr, sizeof(write),
+				FALSE);
+		mutex_lock(&port->lock);
+
+		dsp_buf = port->dsp_buf;
+		ab = &port->buf[dsp_buf];
+
+		write.hdr.token = port->dsp_buf;
+		write.hdr.opcode = ASM_DATA_CMD_WRITE_V2;
+		write.buf_addr_lsw = ab->phys;
+		write.buf_addr_msw = 0;
+		write.buf_size = len;
+		write.seq_id = port->dsp_buf;
+		write.timestamp_lsw = lsw_ts;
+		write.timestamp_msw = msw_ts;
+		/* Use 0xFF00 for disabling timestamps */
+		if (flags == 0xFF00)
+			write.flags = (0x00000000 | (flags & 0x800000FF));
+		else
+			write.flags = (0x80000000 | flags);
+		port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1);
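+		/*
+		 * The IN port buffers are expected to share one contiguous
+		 * mapping, so the handle on the first list node covers this
+		 * buffer as well.
+		 */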
+		buf_node = list_first_entry(&ac->port[IN].mem_map_handle,
+						struct asm_buffer_node,
+						list);
+		write.mem_map_handle = buf_node->mmap_hdl;
+
+		pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]"
+			"token[0x%x]buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
+						, __func__,
+						ab->phys,
+						write.buf_addr_lsw,
+						write.hdr.token,
+						write.seq_id,
+						write.buf_size,
+						write.mem_map_handle);
+		mutex_unlock(&port->lock);
+
+		config_debug_fs_write(ab);
+
+		rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
+		if (rc < 0) {
+			pr_err("write op[0x%x]rc[%d]\n", write.hdr.opcode, rc);
+			goto fail_cmd;
+		}
+		pr_debug("%s: WRITE SUCCESS\n", __func__);
+		return 0;
+	}
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_write_nolock(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+			uint32_t lsw_ts, uint32_t flags)
+{
+	int rc = 0;
+	struct asm_data_cmd_write_v2 write;
+	struct asm_buffer_node *buf_node = NULL;
+	struct audio_port_data *port;
+	struct audio_buffer    *ab;
+	int dsp_buf = 0;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("%s: session[%d] len=%d\n", __func__, ac->session, len);
+	if (ac->io_mode == SYNC_IO_MODE) {
+		port = &ac->port[IN];
+
+		q6asm_add_hdr_async(ac, &write.hdr, sizeof(write),
+					FALSE);
+
+		dsp_buf = port->dsp_buf;
+		ab = &port->buf[dsp_buf];
+
+		write.hdr.token = port->dsp_buf;
+		write.hdr.opcode = ASM_DATA_CMD_WRITE_V2;
+		write.buf_addr_lsw = ab->phys;
+		write.buf_addr_msw = 0;
+		write.buf_size = len;
+		write.seq_id = port->dsp_buf;
+		write.timestamp_lsw = lsw_ts;
+		write.timestamp_msw = msw_ts;
+		buf_node = list_first_entry(&ac->port[IN].mem_map_handle,
+						struct asm_buffer_node,
+						list);
+		write.mem_map_handle = buf_node->mmap_hdl;
+		/* Use 0xFF00 for disabling timestamps */
+		if (flags == 0xFF00)
+			write.flags = (0x00000000 | (flags & 0x800000FF));
+		else
+			write.flags = (0x80000000 | flags);
+		port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1);
+
+		pr_debug("%s: ab->phys[0x%x] buf_addr[0x%x] token[0x%x] "
+			"buf_id[0x%x] buf_size[0x%x] mmaphdl[0x%x]\n"
+							, __func__,
+							ab->phys,
+							write.buf_addr_lsw,
+							write.hdr.token,
+							write.seq_id,
+							write.buf_size,
+							write.mem_map_handle);
+
+		rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
+		if (rc < 0) {
+			pr_err("write op[0x%x]rc[%d]\n", write.hdr.opcode, rc);
+			goto fail_cmd;
+		}
+		pr_debug("%s: WRITE SUCCESS\n", __func__);
+		return 0;
+	}
+fail_cmd:
+	return -EINVAL;
+}
+
+uint64_t q6asm_get_session_time(struct audio_client *ac)
+{
+	struct apr_hdr hdr;
+	int rc;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	q6asm_add_hdr(ac, &hdr, sizeof(hdr), TRUE);
+	hdr.opcode = ASM_SESSION_CMD_GET_SESSIONTIME_V3;
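+	/*
+	 * Arm cmd_state before sending; the APR response handler is expected
+	 * to clear it and wake cmd_wait once the DSP reports the session time.
+	 */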
+	atomic_set(&ac->cmd_state, 1);
+
+	pr_debug("%s: session[%d]opcode[0x%x]\n", __func__,
+						ac->session,
+						hdr.opcode);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr);
+	if (rc < 0) {
+		pr_err("Command 0x%x failed\n", hdr.opcode);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout in getting session time from DSP\n",
+			__func__);
+		goto fail_cmd;
+	}
+	return ac->time_stamp;
+
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_cmd(struct audio_client *ac, int cmd)
+{
+	struct apr_hdr hdr;
+	int rc;
+	atomic_t *state;
+	int cnt = 0;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	q6asm_add_hdr(ac, &hdr, sizeof(hdr), TRUE);
+	switch (cmd) {
+	case CMD_PAUSE:
+		pr_debug("%s:CMD_PAUSE\n", __func__);
+		hdr.opcode = ASM_SESSION_CMD_PAUSE;
+		state = &ac->cmd_state;
+		break;
+	case CMD_FLUSH:
+		pr_debug("%s:CMD_FLUSH\n", __func__);
+		hdr.opcode = ASM_STREAM_CMD_FLUSH;
+		state = &ac->cmd_state;
+		break;
+	case CMD_OUT_FLUSH:
+		pr_debug("%s:CMD_OUT_FLUSH\n", __func__);
+		hdr.opcode = ASM_STREAM_CMD_FLUSH_READBUFS;
+		state = &ac->cmd_state;
+		break;
+	case CMD_EOS:
+		pr_debug("%s:CMD_EOS\n", __func__);
+		hdr.opcode = ASM_DATA_CMD_EOS;
+		atomic_set(&ac->cmd_state, 0);
+		state = &ac->cmd_state;
+		break;
+	case CMD_CLOSE:
+		pr_debug("%s:CMD_CLOSE\n", __func__);
+		hdr.opcode = ASM_STREAM_CMD_CLOSE;
+		state = &ac->cmd_state;
+		break;
+	default:
+		pr_err("Invalid command[%d]\n", cmd);
+		goto fail_cmd;
+	}
+	pr_debug("%s: session[%d] opcode[0x%x]\n", __func__,
+						ac->session,
+						hdr.opcode);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr);
+	if (rc < 0) {
+		pr_err("Command 0x%x failed\n", hdr.opcode);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait, (atomic_read(state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout waiting for response, opcode[0x%x]\n",
+							hdr.opcode);
+		goto fail_cmd;
+	}
+	if (cmd == CMD_FLUSH)
+		q6asm_reset_buf_state(ac);
+	if (cmd == CMD_CLOSE) {
+		/* check if the DSP returned all buffers */
+		if (ac->port[IN].buf) {
+			for (cnt = 0; cnt < ac->port[IN].max_buf_cnt;
+								cnt++) {
+				if (ac->port[IN].buf[cnt].used == IN) {
+					pr_debug("Write Buf[%d] not returned\n",
+									cnt);
+				}
+			}
+		}
+		if (ac->port[OUT].buf) {
+			for (cnt = 0; cnt < ac->port[OUT].max_buf_cnt; cnt++) {
+				if (ac->port[OUT].buf[cnt].used == OUT) {
+					pr_debug("Read Buf[%d] not returned\n",
+									cnt);
+				}
+			}
+		}
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_cmd_nowait(struct audio_client *ac, int cmd)
+{
+	struct apr_hdr hdr;
+	int rc;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("%s:APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	q6asm_add_hdr_async(ac, &hdr, sizeof(hdr), TRUE);
+	switch (cmd) {
+	case CMD_PAUSE:
+		pr_debug("%s:CMD_PAUSE\n", __func__);
+		hdr.opcode = ASM_SESSION_CMD_PAUSE;
+		break;
+	case CMD_EOS:
+		pr_debug("%s:CMD_EOS\n", __func__);
+		hdr.opcode = ASM_DATA_CMD_EOS;
+		break;
+	default:
+		pr_err("%s: Invalid command[%d]\n", __func__, cmd);
+		goto fail_cmd;
+	}
+	pr_debug("%s: session[%d] opcode[0x%x]\n", __func__,
+						ac->session,
+						hdr.opcode);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr);
+	if (rc < 0) {
+		pr_err("%s: Command 0x%x failed\n", __func__, hdr.opcode);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+static void q6asm_reset_buf_state(struct audio_client *ac)
+{
+	int cnt = 0;
+	int loopcnt = 0;
+	struct audio_port_data *port = NULL;
+
+	if (ac->io_mode == SYNC_IO_MODE) {
+		mutex_lock(&ac->cmd_lock);
+		for (loopcnt = 0; loopcnt <= OUT; loopcnt++) {
+			port = &ac->port[loopcnt];
+			cnt = port->max_buf_cnt - 1;
+			port->dsp_buf = 0;
+			port->cpu_buf = 0;
+			/*
+			 * Skip ports with no buffer array; testing for this
+			 * inside the while loop without decrementing cnt
+			 * would spin forever.
+			 */
+			if (!port->buf)
+				continue;
+			while (cnt >= 0) {
+				port->buf[cnt].used = 1;
+				cnt--;
+			}
+		}
+		mutex_unlock(&ac->cmd_lock);
+	}
+}
+
+int q6asm_reg_tx_overflow(struct audio_client *ac, uint16_t enable)
+{
+	struct asm_session_cmd_regx_overflow tx_overflow;
+	int rc;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("%s:session[%d]enable[%d]\n", __func__,
+						ac->session, enable);
+	q6asm_add_hdr(ac, &tx_overflow.hdr, sizeof(tx_overflow), TRUE);
+
+	tx_overflow.hdr.opcode =
+			ASM_SESSION_CMD_REGISTER_FORX_OVERFLOW_EVENTS;
+	/* Non-zero enables TX overflow event reporting; zero disables it */
+	tx_overflow.enable_flag = enable;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &tx_overflow);
+	if (rc < 0) {
+		pr_err("tx overflow op[0x%x]rc[%d]\n",
+						tx_overflow.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout waiting for tx overflow command response\n");
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+int q6asm_get_apr_service_id(int session_id)
+{
+	pr_debug("%s\n", __func__);
+
+	if (session_id < 0 || session_id > SESSION_MAX) {
+		pr_err("%s: invalid session_id = %d\n", __func__, session_id);
+		return -EINVAL;
+	}
+
+	if (!session[session_id] || !session[session_id]->apr) {
+		pr_err("%s: session %d not active\n", __func__, session_id);
+		return -EINVAL;
+	}
+
+	return ((struct apr_svc *)session[session_id]->apr)->id;
+}
+
+
+static int __init q6asm_init(void)
+{
+	pr_debug("%s\n", __func__);
+	memset(session, 0, sizeof(session));
+
+	config_debug_fs_init();
+
+	return 0;
+}
+
+device_initcall(q6asm_init);
diff --git a/sound/soc/msm/qdsp6v2/q6audio-v2.c b/sound/soc/msm/qdsp6v2/q6audio-v2.c
new file mode 100644
index 0000000..8c524fa
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/q6audio-v2.c
@@ -0,0 +1,151 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+#include <sound/q6afe-v2.h>
+#include <sound/q6audio-v2.h>
+
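+/* Map a port ID to the driver's internal IDX_* value; -EINVAL if unknown. */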
+int q6audio_get_port_index(u16 port_id)
+{
+	switch (port_id) {
+	case PRIMARY_I2S_RX: return IDX_PRIMARY_I2S_RX;
+	case PRIMARY_I2S_TX: return IDX_PRIMARY_I2S_TX;
+	case PCM_RX: return IDX_PCM_RX;
+	case PCM_TX: return IDX_PCM_TX;
+	case SECONDARY_I2S_RX: return IDX_SECONDARY_I2S_RX;
+	case SECONDARY_I2S_TX: return IDX_SECONDARY_I2S_TX;
+	case MI2S_RX: return IDX_MI2S_RX;
+	case MI2S_TX: return IDX_MI2S_TX;
+	case HDMI_RX: return IDX_HDMI_RX;
+	case RSVD_2: return IDX_RSVD_2;
+	case RSVD_3: return IDX_RSVD_3;
+	case DIGI_MIC_TX: return IDX_DIGI_MIC_TX;
+	case VOICE_RECORD_RX: return IDX_VOICE_RECORD_RX;
+	case VOICE_RECORD_TX: return IDX_VOICE_RECORD_TX;
+	case VOICE_PLAYBACK_TX: return IDX_VOICE_PLAYBACK_TX;
+	case SLIMBUS_0_RX: return IDX_SLIMBUS_0_RX;
+	case SLIMBUS_0_TX: return IDX_SLIMBUS_0_TX;
+	case SLIMBUS_1_RX: return IDX_SLIMBUS_1_RX;
+	case SLIMBUS_1_TX: return IDX_SLIMBUS_1_TX;
+	case INT_BT_SCO_RX: return IDX_INT_BT_SCO_RX;
+	case INT_BT_SCO_TX: return IDX_INT_BT_SCO_TX;
+	case INT_BT_A2DP_RX: return IDX_INT_BT_A2DP_RX;
+	case INT_FM_RX: return IDX_INT_FM_RX;
+	case INT_FM_TX: return IDX_INT_FM_TX;
+	case RT_PROXY_PORT_001_RX: return IDX_RT_PROXY_PORT_001_RX;
+	case RT_PROXY_PORT_001_TX: return IDX_RT_PROXY_PORT_001_TX;
+
+	default: return -EINVAL;
+	}
+}
+
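+/* Map a port ID to the AFE_PORT_ID_* value used by the v2 AFE interface. */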
+int q6audio_get_port_id(u16 port_id)
+{
+	switch (port_id) {
+	case PRIMARY_I2S_RX: return AFE_PORT_ID_PRIMARY_MI2S_RX;
+	case PRIMARY_I2S_TX: return AFE_PORT_ID_PRIMARY_MI2S_TX;
+	case PCM_RX: return AFE_PORT_ID_PRIMARY_PCM_RX;
+	case PCM_TX: return AFE_PORT_ID_PRIMARY_PCM_TX;
+	case SECONDARY_I2S_RX: return AFE_PORT_ID_SECONDARY_MI2S_RX;
+	case SECONDARY_I2S_TX: return AFE_PORT_ID_SECONDARY_MI2S_TX;
+	case MI2S_RX: return AFE_PORT_ID_PRIMARY_MI2S_RX;
+	case MI2S_TX: return AFE_PORT_ID_PRIMARY_MI2S_TX;
+	case HDMI_RX: return AFE_PORT_ID_MULTICHAN_HDMI_RX;
+	case RSVD_2: return IDX_RSVD_2;
+	case RSVD_3: return IDX_RSVD_3;
+	case DIGI_MIC_TX: return AFE_PORT_ID_DIGITAL_MIC_TX;
+	case VOICE_RECORD_RX: return AFE_PORT_ID_VOICE_RECORD_RX;
+	case VOICE_RECORD_TX: return AFE_PORT_ID_VOICE_RECORD_TX;
+	case VOICE_PLAYBACK_TX: return AFE_PORT_ID_VOICE_PLAYBACK_TX;
+	case SLIMBUS_0_RX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_RX;
+	case SLIMBUS_0_TX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_TX;
+	case SLIMBUS_1_RX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX;
+	case SLIMBUS_1_TX: return AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_TX;
+	case INT_BT_SCO_RX: return AFE_PORT_ID_INTERNAL_BT_SCO_RX;
+	case INT_BT_SCO_TX: return AFE_PORT_ID_INTERNAL_BT_SCO_TX;
+	case INT_BT_A2DP_RX: return AFE_PORT_ID_INTERNAL_BT_A2DP_RX;
+	case INT_FM_RX: return AFE_PORT_ID_INTERNAL_FM_RX;
+	case INT_FM_TX: return AFE_PORT_ID_INTERNAL_FM_TX;
+	case RT_PROXY_PORT_001_RX: return AFE_PORT_ID_RT_PROXY_PORT_001_RX;
+	case RT_PROXY_PORT_001_TX: return AFE_PORT_ID_RT_PROXY_PORT_001_TX;
+
+	default: return -EINVAL;
+	}
+}
+
+int q6audio_convert_virtual_to_portid(u16 port_id)
+{
+	int ret;
+
+	/*
+	 * If port_id is a virtual port, convert it to the corresponding
+	 * physical port ID; if it is already physical, return it unchanged.
+	 */
+	if (q6audio_validate_port(port_id) < 0) {
+		if (port_id == RT_PROXY_DAI_001_RX ||
+			port_id == RT_PROXY_DAI_001_TX ||
+			port_id == RT_PROXY_DAI_002_RX ||
+			port_id == RT_PROXY_DAI_002_TX)
+			ret = VIRTUAL_ID_TO_PORTID(port_id);
+		else
+			ret = -EINVAL;
+	} else
+		ret = port_id;
+
+	return ret;
+}
+
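+/* Return 0 if port_id is a supported AFE port, -EINVAL otherwise. */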
+int q6audio_validate_port(u16 port_id)
+{
+	int ret;
+
+	switch (port_id) {
+	case PRIMARY_I2S_RX:
+	case PRIMARY_I2S_TX:
+	case PCM_RX:
+	case PCM_TX:
+	case SECONDARY_I2S_RX:
+	case SECONDARY_I2S_TX:
+	case MI2S_RX:
+	case MI2S_TX:
+	case HDMI_RX:
+	case RSVD_2:
+	case RSVD_3:
+	case DIGI_MIC_TX:
+	case VOICE_RECORD_RX:
+	case VOICE_RECORD_TX:
+	case VOICE_PLAYBACK_TX:
+	case SLIMBUS_0_RX:
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_RX:
+	case SLIMBUS_1_TX:
+	case INT_BT_SCO_RX:
+	case INT_BT_SCO_TX:
+	case INT_BT_A2DP_RX:
+	case INT_FM_RX:
+	case INT_FM_TX:
+	case RT_PROXY_PORT_001_RX:
+	case RT_PROXY_PORT_001_TX:
+	{
+		ret = 0;
+		break;
+	}
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}