Merge remote-tracking branch 'msm-4.4/tmp-8f70215' into msm-4.8

* msm-4.4/tmp-8f70215:
  defconfig: msmskunk: Turn on SCHED_TUNE related config options.
  sysctl: disallow setting sched_time_avg_ms to 0
  sysctl: define upper limit for sched_freq_reporting_policy
  sched: fix argument type in update_task_burst()
  sched: maintain group busy time counters in runqueue
  sched: set LBF_IGNORE_PREFERRED_CLUSTER_TASKS correctly
  cpumask: Correctly report CPU as not isolated in UP case
  sched: Update capacity and load scale factor for all clusters at boot
  sched: kill sync_cpu maintenance
  sched: hmp: Remove the global sysctl_sched_enable_colocation tunable
  sched: hmp: Ensure that best_cluster() never returns NULL
  sched: Initialize variables
  sched: Fix compilation errors when CFS_BANDWIDTH && !SCHED_HMP
  sched: fix compiler errors with !SCHED_HMP
  sched: Convert the global wake_up_idle flag to a per cluster flag
  sched: fix a bug in handling top task table rollover
  sched: fix stale predicted load in trace_sched_get_busy()
  sched: Delete heavy task heuristics in prediction code
  sched: Fix new task accounting bug in transfer_busy_time()
  sched: Fix deadlock between cpu hotplug and upmigrate change
  sched: Avoid packing tasks with low sleep time
  sched: Track average sleep time
  sched: Avoid waking idle cpu for short-burst tasks
  sched: Track burst length for tasks
  sched: Ensure proper task migration when a CPU is isolated
  sched/core: Fix race condition in clearing hmp request
  sched/core: Prevent (user) space tasks from affining to isolated cpus
  sched: pre-allocate colocation groups
  sched/core: Do not free task while holding rq lock
  sched: Disable interrupts while holding related_thread_group_lock
  sched: Ensure proper synch between isolation, hotplug, and suspend
  sched/hmp: Enhance co-location and scheduler boost features
  sched: revise boost logic when boost_type is SCHED_BOOST_ON_BIG
  sched: Remove thread group iteration from colocation
  core_ctl: Export boost function
  sched: core: Skip migrating tasks that aren't enqueued on dead_rq
  sched/core: Fix migrate tasks bail-out condition
  core_ctl: Synchronize access to cluster cpu list
  sched: Ensure watchdog is enabled before disabling
  sched/core: Keep rq online after cpu isolation
  sched: Fix race condition with active balance
  sched/hmp: Fix memory leak when task fork fails
  sched/hmp: Use GFP_KERNEL for top task memory allocations
  sched/hmp: Use improved information for frequency notifications
  sched/hmp: Remove capping when reporting load to the cpufreq governor
  sched: prevent race between disable window statistics and task grouping
  sched/hmp: Disable interrupts when resetting all task stats
  sched/hmp: Automatically add children threads to colocation group
  sched: Fix compilation issue with reset_hmp_stats
  sched/fair: Fix compilation issue
  sched: Set curr/prev_window_cpu pointers to NULL in sched_exit()
  sched: don't bias towards waker cluster when sched_boost is set
  sched/hmp: Fix range checking for target load
  sched/core_ctl: Move header file to global location
  core_ctl: Add refcounting to boost api
  sched/fair: Fix issue with trace flag not being set properly
  sched: Add multiple load reporting policies for cpu frequency
  sched: Optimize the next top task search logic upon task migration
  sched: Add the mechanics of top task tracking for frequency guidance
  sched: Enhance the scheduler migration load fixup feature
  sched: Add per CPU load tracking for each task
  sched: bucketize CPU c-state levels
  sched: use wakeup latency as c-state determinant
  sched/tune: Remove redundant checks for NULL css
  sched: Add cgroup attach functionality to the tune controller
  sched: Update the number of tune groups to 5
  sched/tune: add initial support for CGroups based boosting
  sched/tune: add sysctl interface to define a boost value
  sched: Fix integer overflow in sched_update_nr_prod()
  sched: Add a device tree property to specify the sched boost type
  sched: Add a stub function for init_clusters()
  sched: add a knob to prefer the waker CPU for sync wakeups
  sched: Fix a division by zero bug in scale_exec_time()
  sched: Fix CPU selection when all online CPUs are isolated
  sched: don't assume higher capacity means higher power in lb
  sched/core_ctl: Integrate core control with cpu isolation
  sched/core_ctl: Refactor cpu data
  trace: Move core control trace events to scheduler
  core_ctrl: Move core control into kernel
  sched/tick: Ensure timers does not get queued on isolated cpus
  perf: Add cpu isolation awareness
  smp: Do not wake up all idle CPUs
  pmqos: Enable cpu isolation awareness
  vmstat: Add cpu isolation awareness
  irq: Make irq affinity function cpu isolation aware
  drivers/base: cpu: Add node for cpu isolation
  sched/core: Add trace point for cpu isolation
  sched: add cpu isolation support
  watchdog: Add support for cpu isolation
  soc: qcom: watchdog_v2: Add support for cpu isolation
  cpumask: Add cpu isolation support
  timer: Do not require CPUSETS to be enabled for migration
  timer: Add function to migrate timers
  hrtimer.h: prevent pinned timer state from breaking inactive test
  hrtimer: make sure PINNED flag is cleared after removing hrtimer
  hrtimer: create hrtimer_quiesce_cpu() to isolate CPU from hrtimers
  hrtimer: update timer->state with 'pinned' information
  timer: create timer_quiesce_cpu() to isolate CPU from timers
  arm64: topology: Export arch_get_cpu_efficiency API
  arm64: topology: Allow specifying the CPU efficiency from device tree
  arm64: topology: Define arch_get_cpu_efficiency() API for scheduler
  arm64: topology: Tell the scheduler about the relative power of cores
  sched: Introduce the concept CPU clusters in the scheduler

Change-Id: I76be10a2bec8d445f918e2b5505f117810001740
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdmbat-pinctrl b/Documentation/devicetree/bindings/pinctrl/qcom,sdmbat-pinctrl
new file mode 100644
index 0000000..9616d9d
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdmbat-pinctrl
@@ -0,0 +1,146 @@
+Qualcomm Technologies, Inc. SDMBAT TLMM block
+
+This binding describes the Top Level Mode Multiplexer block found in the
+SDMBAT platform.
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: must be "qcom,sdmbat-pinctrl"
+
+- reg:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: the base address and size of the TLMM register space.
+
+- interrupts:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: should specify the TLMM summary IRQ.
+
+- interrupt-controller:
+	Usage: required
+	Value type: <none>
+	Definition: identifies this node as an interrupt controller
+
+- #interrupt-cells:
+	Usage: required
+	Value type: <u32>
+	Definition: must be 2, specifying the pin number and flags, as defined
+		    in <dt-bindings/interrupt-controller/irq.h>
+
+- gpio-controller:
+	Usage: required
+	Value type: <none>
+	Definition: identifies this node as a gpio controller
+
+- #gpio-cells:
+	Usage: required
+	Value type: <u32>
+	Definition: must be 2, specifying the pin number and flags, as defined
+		    in <dt-bindings/gpio/gpio.h>
+
+Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
+a general description of GPIO and interrupt bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, drive strength, etc.
+
+
+PIN CONFIGURATION NODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+	Usage: required
+	Value type: <string-array>
+	Definition: List of gpio pins affected by the properties specified in
+		    this subnode.
+
+		    Valid pins are:
+		      gpio0-gpio149
+		        Supports mux, bias and drive-strength
+
+		      sdc1_clk, sdc1_cmd, sdc1_data, sdc2_clk, sdc2_cmd,
+		      sdc2_data, sdc1_rclk
+		        Supports bias and drive-strength
+
+- function:
+	Usage: required
+	Value type: <string>
+	Definition: Specify the alternative function to be configured for the
+		    specified pins. Functions are only valid for gpio pins.
+		    Valid values are:
+
+		    qup0, qup9, qdss_cti, ddr_pxi0, ddr_bist, atest_tsens2,
+		    vsense_trigger, atest_usb1, qup_l4, GP_PDM1, qup_l5,
+		    mdp_vsync, qup_l6, wlan2_adc1, atest_usb11, ddr_pxi2,
+		    edp_lcd, dbg_out, wlan2_adc0, atest_usb10, m_voc,
+		    tsif1_sync, ddr_pxi3, cam_mclk, pll_bypassnl, qdss_gpio0,
+		    pll_reset, qdss_gpio1, qdss_gpio2, qdss_gpio3, cci_i2c,
+		    qup1, gpio
+
+- bias-disable:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins should be configured as pull down.
+
+- bias-pull-up:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins should be configured as pull up.
+
+- output-high:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins are configured in output mode, driven
+		    high.
+		    Not valid for sdc pins.
+
+- output-low:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins are configured in output mode, driven
+		    low.
+		    Not valid for sdc pins.
+
+- drive-strength:
+	Usage: optional
+	Value type: <u32>
+	Definition: Selects the drive strength for the specified pins, in mA.
+		    Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16
+
+Example:
+
+	tlmm: pinctrl@03800000 {
+		compatible = "qcom,sdmbat-pinctrl";
+		reg = <0x03800000 0xc00000>;
+		interrupts = <0 208 0>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
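+
+An illustrative pin configuration subnode for a client device (the node
+name, pins and function below are placeholders only):
+
+	&tlmm {
+		qup9_active: qup9_active {
+			mux {
+				pins = "gpio4", "gpio5";
+				function = "qup9";
+			};
+
+			config {
+				pins = "gpio4", "gpio5";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+	};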
diff --git a/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt b/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
new file mode 100644
index 0000000..90d8a35
--- /dev/null
+++ b/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
@@ -0,0 +1,81 @@
+ Qualcomm Technologies, Inc. SLIMBUS controller
+ Qualcomm Technologies, Inc. implements 2 types of slimbus controllers:
+1. "qcom,slim-msm": This controller is used if the applications processor
+	driver controls the slimbus master component. The driver is
+	responsible for communicating with slave HW directly using the
+	messaging interface and for doing data channel management. The
+	driver also communicates with the satellite component (a driver
+	implemented by another execution environment, such as the ADSP) to
+	get its data channel and bandwidth requirements.
+2. "qcom,slim-ngd": This controller is used if the applications processor
+	driver controls the slimbus satellite component (also known as the
+	Non-ported Generic Device, or NGD). This is a light-weight slimbus
+	controller responsible for communicating with slave HW directly
+	over the bus messaging interface, and for communicating with the
+	master component (a driver residing on another execution
+	environment, such as the ADSP) for bandwidth and data channel
+	management.
+
+Required properties:
+
+ - reg : Offset and length of the register region(s) for the device
+ - reg-names : Register region name(s) referenced in reg above
+	 Required register resource entries are:
+	 "slimbus_physical": Physical address of controller register blocks
+	 "slimbus_bam_physical": Physical address of Bus Access Module (BAM)
+				 for this controller
+ - compatible : should be "qcom,slim-msm" if this is master component driver
+ - compatible : should be "qcom,slim-ngd" if this is satellite component driver
+ - cell-index : SLIMBUS number used for this controller
+ - interrupts : Interrupt numbers used by this controller
+ - interrupt-names : Required interrupt resource entries are:
+	"slimbus_irq" : Interrupt for SLIMBUS core
+	"slimbus_bam_irq" : Interrupt for controller core's BAM
+
+Optional property:
+ - reg entry for slew rate : If slew rate control register is provided, this
+	 entry should be used.
+ - reg-name for slew rate: "slimbus_slew_reg"
+ - qcom,min-clk-gear : Minimum clock gear at which this controller can be run
+		 (range: 1-10)
+		 Default value will be 1 if this entry is not specified
+ - qcom,max-clk-gear: Maximum clock gear at which this controller can be run
+		 (range: 1-10)
+		 Default value will be 10 if this entry is not specified
+ - qcom,rxreg-access: This boolean indicates that slimbus RX should use direct
+		 register access to receive data. This flag is only needed if
+		 BAM pipe is not available to receive data from slimbus
+ - qcom,apps-ch-pipes: This value represents BAM pipe-mask used by application
+		 processor for data channels. If this property is not defined,
+		 default mask of 0x3F000000 is used indicating apps can use 6
+		 pipes from 24-29.
+ - qcom,ea-pc: This value represents product code (PC) field of enumeration
+		 address (EA) for the QTI slimbus controller hardware.
+		 This value is needed if data-channels originating from apps
+		 are to be used, so that application processor can query
+		 logical address of the ported generic device to be used.
+		 Other than PC, fields of EA are same across platforms.
+ - qcom,slim-mdm: This value provides the identifier of slimbus component on
+		 external mdm. This property enables the slimbus driver to
+		 register and receive subsystem restart notifications from the mdm
+		 and follow appropriate steps to ensure communication on the bus
+		 can be resumed after mdm-restart.
+ - qcom,subsys-name: This value provides the name of the subsystem where the
+		 slimbus master is present. This property enables the slimbus
+		 driver to register and receive subsystem restart notifications
+		 from that subsystem and follow appropriate steps to ensure
+		 communication on the bus can be resumed after a subsystem
+		 restart. By default the slimbus driver registers with the
+		 ADSP subsystem.
+Example:
+	slim@fe12f000 {
+		cell-index = <1>;
+		compatible = "qcom,slim-msm";
+		reg = <0xfe12f000 0x35000>,
+		      <0xfe104000 0x20000>;
+		reg-names = "slimbus_physical", "slimbus_bam_physical";
+		interrupts = <0 163 0 0 164 0>;
+		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+		qcom,min-clk-gear = <10>;
+		qcom,rxreg-access;
+		qcom,apps-ch-pipes = <0x60000000>;
+		qcom,ea-pc = <0x30>;
+	};
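+
+For the satellite (NGD) case, an illustrative node might look as follows
+(addresses and interrupt numbers are placeholders only):
+
+	slim@fe1af000 {
+		cell-index = <1>;
+		compatible = "qcom,slim-ngd";
+		reg = <0xfe1af000 0x35000>,
+		      <0xfe184000 0x20000>;
+		reg-names = "slimbus_physical", "slimbus_bam_physical";
+		interrupts = <0 163 0 0 164 0>;
+		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+	};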
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 01efdf478..fab57d3 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1067,6 +1067,8 @@
 #endif
 }
 
+void __init __weak init_random_pool(void) { }
+
 void __init setup_arch(char **cmdline_p)
 {
 	const struct machine_desc *mdesc;
@@ -1149,6 +1151,8 @@
 
 	if (mdesc->init_early)
 		mdesc->init_early();
+
+	init_random_pool();
 }
 
 
diff --git a/arch/arm64/boot/dts/qcom/msmskunk.dtsi b/arch/arm64/boot/dts/qcom/msmskunk.dtsi
index 822ebf1..44148cc 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk.dtsi
+++ b/arch/arm64/boot/dts/qcom/msmskunk.dtsi
@@ -1595,3 +1595,4 @@
 #include "msm-arm-smmu-skunk.dtsi"
 #include "msmskunk-ion.dtsi"
 #include "msmskunk-smp2p.dtsi"
+#include "msmskunk-camera.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdmbat-cdp.dtsi
index 1003478..77151c5 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdmbat-cdp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,4 +11,4 @@
  */
 
 #include "msmskunk-cdp.dtsi"
-
+#include "sdmbat-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdmbat-mtp.dtsi
index ad26a14..af7a194 100644
--- a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdmbat-mtp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,4 +11,4 @@
  */
 
 #include "msmskunk-mtp.dtsi"
-
+#include "sdmbat-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdmbat-pinctrl.dtsi
new file mode 100644
index 0000000..ead34a6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdmbat-pinctrl.dtsi
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	tlmm: pinctrl@03800000 {
+		compatible = "qcom,sdmbat-pinctrl";
+		reg = <0x03800000 0xc00000>;
+		interrupts = <0 208 0>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+};
diff --git a/arch/arm64/configs/msmskunk-perf_defconfig b/arch/arm64/configs/msmskunk-perf_defconfig
index 9076f75..aeaafd0 100644
--- a/arch/arm64/configs/msmskunk-perf_defconfig
+++ b/arch/arm64/configs/msmskunk-perf_defconfig
@@ -254,6 +254,8 @@
 CONFIG_SPI_QUP=y
 CONFIG_SPI_SPIDEV=y
 CONFIG_SPMI=y
+CONFIG_PINCTRL_MSMSKUNK=y
+CONFIG_PINCTRL_SDMBAT=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_RESET_QCOM=y
@@ -316,6 +318,7 @@
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
 CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_GSI=y
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_TEST=y
@@ -346,6 +349,7 @@
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
 CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_QCOM_EARLY_RANDOM=y
 CONFIG_MSM_SMEM=y
 CONFIG_MSM_GLINK=y
 CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
diff --git a/arch/arm64/configs/msmskunk_defconfig b/arch/arm64/configs/msmskunk_defconfig
index aaa114d..1bf4fcc 100644
--- a/arch/arm64/configs/msmskunk_defconfig
+++ b/arch/arm64/configs/msmskunk_defconfig
@@ -261,6 +261,8 @@
 CONFIG_SPI_QUP=y
 CONFIG_SPI_SPIDEV=y
 CONFIG_SPMI=y
+CONFIG_PINCTRL_MSMSKUNK=y
+CONFIG_PINCTRL_SDMBAT=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_RESET_QCOM=y
@@ -322,6 +324,7 @@
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
 CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_GSI=y
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_TEST=y
@@ -357,6 +360,7 @@
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
 CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_QCOM_EARLY_RANDOM=y
 CONFIG_MSM_SMEM=y
 CONFIG_MSM_GLINK=y
 CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 012b95c..9377bec 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -40,6 +40,10 @@
  *	the implementation assumes non-aliasing VIPT D-cache and (aliasing)
  *	VIPT or ASID-tagged VIVT I-cache.
  *
+ *	flush_cache_all()
+ *
+ *		Unconditionally clean and invalidate the entire cache.
+ *
  *	flush_cache_mm(mm)
  *
  *		Clean and invalidate all user space cache entries
@@ -65,6 +69,7 @@
  *		- kaddr  - page address
  *		- size   - region size
  */
+extern void flush_cache_all(void);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 14ad6e4..220633b7 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -28,8 +28,12 @@
 struct mm_struct;
 struct cpu_suspend_ctx;
 
+extern void cpu_cache_off(void);
 extern void cpu_do_idle(void);
 extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+void cpu_soft_restart(phys_addr_t cpu_reset,
+		unsigned long addr) __attribute__((noreturn));
 extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
 extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 6a9cb77..49f3ae0 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -242,6 +242,8 @@
 
 u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
+void __init __weak init_random_pool(void) { }
+
 void __init setup_arch(char **cmdline_p)
 {
 	pr_info("Boot CPU: AArch64 Processor [%08x]\n", read_cpuid_id());
@@ -326,6 +328,8 @@
 			"This indicates a broken bootloader or old kernel\n",
 			boot_args[1], boot_args[2], boot_args[3]);
 	}
+
+	init_random_pool();
 }
 
 static int __init topology_init(void)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 082d8d5..86774d3 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -815,6 +815,7 @@
 {
 	set_cpu_active(cpu, false);
 
+	flush_cache_all();
 	local_irq_disable();
 
 	while (1)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index e9a7b3a..db00fc9 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -25,6 +25,79 @@
 #include <asm/alternative.h>
 
 /*
+ *	__flush_dcache_all()
+ *
+ *	Flush the whole D-cache.
+ *
+ *	Corrupted registers: x0-x7, x9-x11
+ */
+__flush_dcache_all:
+	dmb	sy				// ensure ordering with previous memory accesses
+	mrs	x0, clidr_el1			// read clidr
+	and	x3, x0, #0x7000000		// extract loc from clidr
+	lsr	x3, x3, #23			// left align loc bit field
+	cbz	x3, finished			// if loc is 0, then no need to clean
+	mov	x10, #0				// start clean at cache level 0
+loop1:
+	add	x2, x10, x10, lsr #1		// work out 3x current cache level
+	lsr	x1, x0, x2			// extract cache type bits from clidr
+	and	x1, x1, #7			// mask of the bits for current cache only
+	cmp	x1, #2				// see what cache we have at this level
+	b.lt	skip				// skip if no cache, or just i-cache
+	save_and_disable_irqs x9		// make CSSELR and CCSIDR access atomic
+	msr	csselr_el1, x10			// select current cache level in csselr
+	isb					// isb to sync the new csselr & ccsidr
+	mrs	x1, ccsidr_el1			// read the new ccsidr
+	restore_irqs x9
+	and	x2, x1, #7			// extract the length of the cache lines
+	add	x2, x2, #4			// add 4 (line length offset)
+	mov	x4, #0x3ff
+	and	x4, x4, x1, lsr #3		// find maximum way number (ways - 1)
+	clz	w5, w4				// find bit position of way size increment
+	mov	x7, #0x7fff
+	and	x7, x7, x1, lsr #13		// extract maximum set (index) number
+loop2:
+	mov	x9, x4				// create working copy of max way size
+loop3:
+	lsl	x6, x9, x5
+	orr	x11, x10, x6			// factor way and cache number into x11
+	lsl	x6, x7, x2
+	orr	x11, x11, x6			// factor index number into x11
+	dc	cisw, x11			// clean & invalidate by set/way
+	subs	x9, x9, #1			// decrement the way
+	b.ge	loop3
+	subs	x7, x7, #1			// decrement the index
+	b.ge	loop2
+skip:
+	add	x10, x10, #2			// increment cache number
+	cmp	x3, x10
+	b.gt	loop1
+finished:
+	mov	x10, #0				// switch back to cache level 0
+	msr	csselr_el1, x10			// select current cache level in csselr
+	dsb	sy
+	isb
+	ret
+ENDPROC(__flush_dcache_all)
+
+/*
+ *	flush_cache_all()
+ *
+ *	Flush the entire cache system.  The data cache flush is now achieved
+ *	using atomic clean / invalidates working outwards from L1 cache. This
+ *	is done using Set/Way based cache maintenance instructions.  The
+ *	instruction cache can still be invalidated back to the point of
+ *	unification in a single instruction.
+ */
+ENTRY(flush_cache_all)
+	mov	x12, lr
+	bl	__flush_dcache_all
+	mov	x0, #0
+	ic	ialluis				// I+BTB cache invalidate
+	ret	x12
+ENDPROC(flush_cache_all)
+
+/*
  *	flush_icache_range(start,end)
  *
  *	Ensure that the I and D caches are coherent within specified region.
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 8377329..024db34 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -91,4 +91,5 @@
 /*
  * Additional functions defined in assembly.
  */
+EXPORT_SYMBOL(flush_cache_all);
 EXPORT_SYMBOL(flush_icache_range);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index c2adb0c..61330c9 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -44,6 +44,52 @@
 #define MAIR(attr, mt)	((attr) << ((mt) * 8))
 
 /*
+ *	cpu_cache_off()
+ *
+ *	Turn the CPU D-cache off.
+ */
+ENTRY(cpu_cache_off)
+	mrs	x0, sctlr_el1
+	bic	x0, x0, #1 << 2			// clear SCTLR.C
+	msr	sctlr_el1, x0
+	isb
+	ret
+ENDPROC(cpu_cache_off)
+
+/*
+ *	cpu_reset(loc)
+ *
+ *	Perform a soft reset of the system.  Put the CPU into the same state
+ *	as it would be if it had been reset, and branch to what would be the
+ *	reset vector. It must be executed with the flat identity mapping.
+ *
+ *	- loc   - location to jump to for soft reset
+ */
+	.align	5
+ENTRY(cpu_reset)
+	mrs	x1, sctlr_el1
+	bic	x1, x1, #1
+	msr	sctlr_el1, x1			// disable the MMU
+	isb
+	ret	x0
+ENDPROC(cpu_reset)
+
+ENTRY(cpu_soft_restart)
+	/* Save address of cpu_reset() and reset address */
+	mov	x19, x0
+	mov	x20, x1
+
+	/* Turn D-cache off */
+	bl	cpu_cache_off
+
+	/* Push out all dirty data, and ensure cache is empty */
+	bl	flush_cache_all
+
+	mov	x0, x20
+	ret	x19
+ENDPROC(cpu_soft_restart)
+
+/*
  *	cpu_do_idle()
  *
  *	Idle the processor (wait for interrupt).
diff --git a/drivers/Kconfig b/drivers/Kconfig
index cf03bd7..6266a37 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -58,6 +58,8 @@
 
 source "drivers/spi/Kconfig"
 
+source "drivers/slimbus/Kconfig"
+
 source "drivers/spmi/Kconfig"
 
 source "drivers/hsi/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 98dc622..e7ebee4 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -80,7 +80,7 @@
 obj-$(CONFIG_MTD)		+= mtd/
 obj-$(CONFIG_SPI)		+= spi/
 obj-$(CONFIG_SPMI)		+= spmi/
-obj-$(CONFIG_SOUNDWIRE)		+= soundwire/
+obj-$(CONFIG_SLIMBUS)		+= slimbus/
 obj-$(CONFIG_HSI)		+= hsi/
 obj-y				+= net/
 obj-$(CONFIG_ATM)		+= atm/
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index c8e317a..385cdd7 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -411,6 +411,15 @@
 	if (!f)
 		return -EINVAL;
 
+	/*
+	 * If CXO is not listed as a supported frequency in the frequency
+	 * table, the above API would return the lowest supported frequency
+	 * instead. This will lead to incorrect configuration of the RCG.
+	 * Check if the RCG rate is CXO and configure it accordingly.
+	 */
+	if (rate == cxo_f.freq)
+		f = &cxo_f;
+
 	clk_rcg2_set_force_enable(hw);
 	clk_rcg2_configure(rcg, f);
 	clk_rcg2_clear_force_enable(hw);
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index ebbf0a7..b7b8a15 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -594,6 +594,7 @@
 		return;
 
 	atomic_inc(&phy_enc->underrun_cnt);
+	SDE_EVT32(DRMID(drm_enc), atomic_read(&phy_enc->underrun_cnt));
 }
 
 void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 5b8ee63..5d317d0 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -220,6 +220,7 @@
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 			to_sde_encoder_phys_cmd(phys_enc);
+	int idx_lookup = 0;
 	int ret = 0;
 
 	if (!phys_enc) {
@@ -227,8 +228,10 @@
 		return -EINVAL;
 	}
 
+	idx_lookup = (intr_type == SDE_IRQ_TYPE_INTF_UNDER_RUN) ?
+			cmd_enc->intf_idx : phys_enc->hw_pp->idx;
 	cmd_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
-			intr_type, phys_enc->hw_pp->idx);
+			intr_type, idx_lookup);
 	if (cmd_enc->irq_idx[idx] < 0) {
 		SDE_ERROR_CMDENC(cmd_enc,
 			"failed to lookup IRQ index for %s with pp=%d\n",
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index ef06267..cb3daba 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -838,11 +838,13 @@
 	_sde_debugfs_destroy(sde_kms);
 	_sde_kms_mmu_destroy(sde_kms);
 
-	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
-		u32 vbif_idx = sde_kms->catalog->vbif[i].id;
+	if (sde_kms->catalog) {
+		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
+			u32 vbif_idx = sde_kms->catalog->vbif[i].id;
 
-		if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
-			sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
+			if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
+				sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
+		}
 	}
 
 	if (sde_kms->rm_init)
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 7ad94b8..2cb39ee 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -359,6 +359,13 @@
 	drawctxt->queued--;
 }
 
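+/* Apply the sparse object's bind/unbind list and retire its timestamp */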
+static void _retire_sparseobj(struct kgsl_drawobj_sparse *sparseobj,
+				struct adreno_context *drawctxt)
+{
+	kgsl_sparse_bind(drawctxt->base.proc_priv, sparseobj);
+	_retire_timestamp(DRAWOBJ(sparseobj));
+}
+
 static int _retire_markerobj(struct kgsl_drawobj_cmd *cmdobj,
 				struct adreno_context *drawctxt)
 {
@@ -436,6 +443,8 @@
 				return drawobj;
 		} else if (drawobj->type == SYNCOBJ_TYPE)
 			ret = _retire_syncobj(SYNCOBJ(drawobj), drawctxt);
+		else
+			return ERR_PTR(-EINVAL);
 
 		if (ret == -EAGAIN)
 			return ERR_PTR(-EAGAIN);
@@ -671,6 +680,76 @@
 	return 0;
 }
 
+/*
+ * Retires all sync objs from the sparse context
+ * queue and returns one of the following:
+ * a) the next sparseobj
+ * b) -EAGAIN for a syncobj with pending syncpoints
+ * c) -EINVAL for an unexpected drawobj
+ * d) NULL for no sparseobj
+ */
+static struct kgsl_drawobj_sparse *_get_next_sparseobj(
+				struct adreno_context *drawctxt)
+{
+	struct kgsl_drawobj *drawobj;
+	unsigned int i = drawctxt->drawqueue_head;
+	int ret = 0;
+
+	if (drawctxt->drawqueue_head == drawctxt->drawqueue_tail)
+		return NULL;
+
+	for (i = drawctxt->drawqueue_head; i != drawctxt->drawqueue_tail;
+			i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE)) {
+
+		drawobj = drawctxt->drawqueue[i];
+
+		if (drawobj == NULL)
+			return NULL;
+
+		if (drawobj->type == SYNCOBJ_TYPE)
+			ret = _retire_syncobj(SYNCOBJ(drawobj), drawctxt);
+		else if (drawobj->type == SPARSEOBJ_TYPE)
+			return SPARSEOBJ(drawobj);
+		else
+			return ERR_PTR(-EINVAL);
+
+		if (ret == -EAGAIN)
+			return ERR_PTR(-EAGAIN);
+	}
+
+	return NULL;
+}
+
+static int _process_drawqueue_sparse(
+		struct adreno_context *drawctxt)
+{
+	struct kgsl_drawobj_sparse *sparseobj;
+	int ret = 0;
+	unsigned int i;
+
+	for (i = 0; i < ADRENO_CONTEXT_DRAWQUEUE_SIZE; i++) {
+
+		spin_lock(&drawctxt->lock);
+		sparseobj = _get_next_sparseobj(drawctxt);
+		if (IS_ERR_OR_NULL(sparseobj)) {
+			if (IS_ERR(sparseobj))
+				ret = PTR_ERR(sparseobj);
+			spin_unlock(&drawctxt->lock);
+			return ret;
+		}
+
+		_pop_drawobj(drawctxt);
+		spin_unlock(&drawctxt->lock);
+
+		_retire_sparseobj(sparseobj, drawctxt);
+	}
+
+	return 0;
+}
+
 /**
  * dispatcher_context_sendcmds() - Send commands from a context to the GPU
  * @adreno_dev: Pointer to the adreno device struct
@@ -690,6 +769,9 @@
 	int inflight = _drawqueue_inflight(dispatch_q);
 	unsigned int timestamp;
 
+	if (drawctxt->base.flags & KGSL_CONTEXT_SPARSE)
+		return _process_drawqueue_sparse(drawctxt);
+
 	if (dispatch_q->inflight >= inflight) {
 		spin_lock(&drawctxt->lock);
 		_process_drawqueue_get_next_drawobj(drawctxt);
@@ -1125,6 +1207,31 @@
 	trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
 }
 
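+/*
+ * Returns 1 if the sparseobj can be retired immediately (fastpath when
+ * nothing else is queued on the context), 0 if it was queued, or a
+ * negative error code on failure.
+ */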
+static int _queue_sparseobj(struct adreno_device *adreno_dev,
+	struct adreno_context *drawctxt, struct kgsl_drawobj_sparse *sparseobj,
+	uint32_t *timestamp, unsigned int user_ts)
+{
+	struct kgsl_drawobj *drawobj = DRAWOBJ(sparseobj);
+	int ret;
+
+	ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts);
+	if (ret)
+		return ret;
+
+	/*
+	 * See if we can fastpath this thing - if nothing is
+	 * queued, bind/unbind without queueing the context
+	 */
+	if (!drawctxt->queued)
+		return 1;
+
+	drawctxt->queued_timestamp = *timestamp;
+	_queue_drawobj(drawctxt, drawobj);
+
+	return 0;
+}
+
 static int _queue_markerobj(struct adreno_device *adreno_dev,
 	struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *markerobj,
 	uint32_t *timestamp, unsigned int user_ts)
@@ -1142,7 +1249,6 @@
 	 */
 	if (!drawctxt->queued && kgsl_check_timestamp(drawobj->device,
 			drawobj->context, drawctxt->queued_timestamp)) {
-		trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
 		_retire_timestamp(drawobj);
 		return 1;
 	}
@@ -1213,7 +1319,7 @@
 }
 
 /**
- * adreno_dispactcher_queue_drawobj() - Queue a new draw object in the context
+ * adreno_dispatcher_queue_cmds() - Queue a new draw object in the context
  * @dev_priv: Pointer to the device private struct
  * @context: Pointer to the kgsl draw context
  * @drawobj: Pointer to the array of drawobj's being submitted
@@ -1235,6 +1341,9 @@
 	int ret;
 	unsigned int i, user_ts;
 
+	if (!count)
+		return -EINVAL;
+
 	ret = _check_context_state(&drawctxt->base);
 	if (ret)
 		return ret;
@@ -1284,6 +1393,20 @@
 			_queue_syncobj(drawctxt, SYNCOBJ(drawobj[i]),
 						timestamp);
 			break;
+		case SPARSEOBJ_TYPE:
+			ret = _queue_sparseobj(adreno_dev, drawctxt,
+					SPARSEOBJ(drawobj[i]),
+					timestamp, user_ts);
+			if (ret == 1) {
+				spin_unlock(&drawctxt->lock);
+				_retire_sparseobj(SPARSEOBJ(drawobj[i]),
+						drawctxt);
+				return 0;
+			} else if (ret) {
+				spin_unlock(&drawctxt->lock);
+				return ret;
+			}
+			break;
 		default:
 			spin_unlock(&drawctxt->lock);
 			return -EINVAL;
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 4da6763..cd7ffe7 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -349,7 +349,8 @@
 		KGSL_CONTEXT_IFH_NOP |
 		KGSL_CONTEXT_SECURE |
 		KGSL_CONTEXT_PREEMPT_STYLE_MASK |
-		KGSL_CONTEXT_NO_SNAPSHOT);
+		KGSL_CONTEXT_NO_SNAPSHOT |
+		KGSL_CONTEXT_SPARSE);
 
 	/* Check for errors before trying to initialize */
 
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index e50442a..56eae50 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1448,6 +1448,17 @@
 	return result;
 }
 
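+/*
+ * Regular draw object submissions must not target a sparse context or carry
+ * the sparse flag; sparse binds are accepted only through
+ * IOCTL_KGSL_GPU_SPARSE_COMMAND.
+ */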
+static inline bool _check_context_is_sparse(struct kgsl_context *context,
+			uint64_t flags)
+{
+	if ((context->flags & KGSL_CONTEXT_SPARSE) ||
+		(flags & KGSL_DRAWOBJ_SPARSE))
+		return true;
+
+	return false;
+}
+
 long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
 				      unsigned int cmd, void *data)
 {
@@ -1472,6 +1483,11 @@
 	if (context == NULL)
 		return -EINVAL;
 
+	if (_check_context_is_sparse(context, param->flags)) {
+		kgsl_context_put(context);
+		return -EINVAL;
+	}
+
 	cmdobj = kgsl_drawobj_cmd_create(device, context, param->flags,
 					CMDOBJ_TYPE);
 	if (IS_ERR(cmdobj)) {
@@ -1567,6 +1583,11 @@
 	if (context == NULL)
 		return -EINVAL;
 
+	if (_check_context_is_sparse(context, param->flags)) {
+		kgsl_context_put(context);
+		return -EINVAL;
+	}
+
 	if (type & SYNCOBJ_TYPE) {
 		struct kgsl_drawobj_sync *syncobj =
 				kgsl_drawobj_sync_create(device, context);
@@ -1641,6 +1662,11 @@
 	if (context == NULL)
 		return -EINVAL;
 
+	if (_check_context_is_sparse(context, param->flags)) {
+		kgsl_context_put(context);
+		return -EINVAL;
+	}
+
 	if (type & SYNCOBJ_TYPE) {
 		struct kgsl_drawobj_sync *syncobj =
 				kgsl_drawobj_sync_create(device, context);
@@ -3752,6 +3778,128 @@
 	return ret;
 }
 
+long kgsl_ioctl_gpu_sparse_command(struct kgsl_device_private *dev_priv,
+		unsigned int cmd, void *data)
+{
+	struct kgsl_gpu_sparse_command *param = data;
+	struct kgsl_device *device = dev_priv->device;
+	struct kgsl_context *context;
+	struct kgsl_drawobj *drawobj[2];
+	struct kgsl_drawobj_sparse *sparseobj;
+	long result;
+	unsigned int i = 0;
+
+	/* Make sure the sparse and syncpoint counts aren't too big */
+	if (param->numsparse > KGSL_MAX_SPARSE ||
+		param->numsyncs > KGSL_MAX_SYNCPOINTS)
+		return -EINVAL;
+
+	/* Make sure there is at least one sparse or sync */
+	if (param->numsparse == 0 && param->numsyncs == 0)
+		return -EINVAL;
+
+	/* Only Sparse commands are supported in this ioctl */
+	if (!(param->flags & KGSL_DRAWOBJ_SPARSE) || (param->flags &
+			(KGSL_DRAWOBJ_SUBMIT_IB_LIST | KGSL_DRAWOBJ_MARKER
+			| KGSL_DRAWOBJ_SYNC)))
+		return -EINVAL;
+
+	context = kgsl_context_get_owner(dev_priv, param->context_id);
+	if (context == NULL)
+		return -EINVAL;
+
+	/* Restrict bind commands to bind context */
+	if (!(context->flags & KGSL_CONTEXT_SPARSE)) {
+		kgsl_context_put(context);
+		return -EINVAL;
+	}
+
+	if (param->numsyncs) {
+		struct kgsl_drawobj_sync *syncobj = kgsl_drawobj_sync_create(
+				device, context);
+		if (IS_ERR(syncobj)) {
+			result = PTR_ERR(syncobj);
+			goto done;
+		}
+
+		drawobj[i++] = DRAWOBJ(syncobj);
+		result = kgsl_drawobj_sync_add_synclist(device, syncobj,
+				to_user_ptr(param->synclist),
+				param->syncsize, param->numsyncs);
+		if (result)
+			goto done;
+	}
+
+	if (param->numsparse) {
+		sparseobj = kgsl_drawobj_sparse_create(device, context,
+					param->flags);
+		if (IS_ERR(sparseobj)) {
+			result = PTR_ERR(sparseobj);
+			goto done;
+		}
+
+		sparseobj->id = param->id;
+		drawobj[i++] = DRAWOBJ(sparseobj);
+		result = kgsl_drawobj_sparse_add_sparselist(device, sparseobj,
+				param->id, to_user_ptr(param->sparselist),
+				param->sparsesize, param->numsparse);
+		if (result)
+			goto done;
+	}
+
+	result = dev_priv->device->ftbl->queue_cmds(dev_priv, context,
+					drawobj, i, &param->timestamp);
+
+done:
+	/*
+	 * -EPROTO is a "success" error - it just tells the user that the
+	 * context had previously faulted
+	 */
+	if (result && result != -EPROTO)
+		while (i--)
+			kgsl_drawobj_destroy(drawobj[i]);
+
+	kgsl_context_put(context);
+	return result;
+}
+
+void kgsl_sparse_bind(struct kgsl_process_private *private,
+		struct kgsl_drawobj_sparse *sparseobj)
+{
+	struct kgsl_sparseobj_node *sparse_node;
+	struct kgsl_mem_entry *virt_entry = NULL;
+	long ret = 0;
+	char *name;
+
+	virt_entry = kgsl_sharedmem_find_id_flags(private, sparseobj->id,
+			KGSL_MEMFLAGS_SPARSE_VIRT);
+	if (virt_entry == NULL)
+		return;
+
+	list_for_each_entry(sparse_node, &sparseobj->sparselist, node) {
+		if (sparse_node->obj.flags & KGSL_SPARSE_BIND) {
+			ret = sparse_bind_range(private, &sparse_node->obj,
+					virt_entry);
+			name = "bind";
+		} else {
+			ret = sparse_unbind_range(&sparse_node->obj,
+					virt_entry);
+			name = "unbind";
+		}
+
+		if (ret)
+			KGSL_CORE_ERR("kgsl: Unable to '%s' ret %ld virt_id %d, phys_id %d, virt_offset %16.16llX, phys_offset %16.16llX, size %16.16llX, flags %16.16llX\n",
+				name, ret, sparse_node->virt_id,
+				sparse_node->obj.id,
+				sparse_node->obj.virtoffset,
+				sparse_node->obj.physoffset,
+				sparse_node->obj.size, sparse_node->obj.flags);
+	}
+
+	kgsl_mem_entry_put(virt_entry);
+}
+EXPORT_SYMBOL(kgsl_sparse_bind);
+
 long kgsl_ioctl_gpuobj_info(struct kgsl_device_private *dev_priv,
 		unsigned int cmd, void *data)
 {
@@ -4607,7 +4755,7 @@
 		kgsl_driver.class = NULL;
 	}
 
-	kgsl_drawobj_exit();
+	kgsl_drawobjs_cache_exit();
 
 	kgsl_memfree_exit();
 	unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
@@ -4684,7 +4832,7 @@
 
 	kgsl_events_init();
 
-	result = kgsl_drawobj_init();
+	result = kgsl_drawobjs_cache_init();
 	if (result)
 		goto err;
 
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index c60a071..3f1c86e 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -99,6 +99,7 @@
 
 #define KGSL_MAX_NUMIBS 100000
 #define KGSL_MAX_SYNCPOINTS 32
+#define KGSL_MAX_SPARSE 1000
 
 struct kgsl_device;
 struct kgsl_context;
@@ -425,6 +426,8 @@
 					unsigned int cmd, void *data);
 long kgsl_ioctl_sparse_unbind(struct kgsl_device_private *dev_priv,
 					unsigned int cmd, void *data);
+long kgsl_ioctl_gpu_sparse_command(struct kgsl_device_private *dev_priv,
+					unsigned int cmd, void *data);
 
 void kgsl_mem_entry_destroy(struct kref *kref);
 
diff --git a/drivers/gpu/msm/kgsl_compat.c b/drivers/gpu/msm/kgsl_compat.c
index e0e6a2b..1c89ed5 100644
--- a/drivers/gpu/msm/kgsl_compat.c
+++ b/drivers/gpu/msm/kgsl_compat.c
@@ -364,6 +364,8 @@
 			kgsl_ioctl_sparse_virt_free),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_BIND,
 			kgsl_ioctl_sparse_bind),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPU_SPARSE_COMMAND,
+			kgsl_ioctl_gpu_sparse_command),
 };
 
 long kgsl_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index ed3f78a..ae164bc 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -207,6 +207,18 @@
 	unsigned long priv;
 };
 
+/**
+ * struct kgsl_sparseobj_node - Sparse object descriptor
+ * @node: Local list node for the sparse cmdbatch
+ * @virt_id: Virtual ID to bind/unbind
+ * @obj:  struct kgsl_sparse_binding_object
+ */
+struct kgsl_sparseobj_node {
+	struct list_head node;
+	unsigned int virt_id;
+	struct kgsl_sparse_binding_object obj;
+};
+
 struct kgsl_device {
 	struct device *dev;
 	const char *name;
@@ -644,6 +656,9 @@
 long kgsl_ioctl_copy_out(unsigned int kernel_cmd, unsigned int user_cmd,
 		unsigned long arg, unsigned char *ptr);
 
+void kgsl_sparse_bind(struct kgsl_process_private *private,
+		struct kgsl_drawobj_sparse *sparse);
+
 /**
  * kgsl_context_put() - Release context reference count
  * @context: Pointer to the KGSL context to be released
diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c
index 01c3a06..910f405 100644
--- a/drivers/gpu/msm/kgsl_drawobj.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -37,10 +37,12 @@
 #include "kgsl_compat.h"
 
 /*
- * Define an kmem cache for the memobj structures since we allocate and free
- * them so frequently
+ * Define a kmem cache for the memobj & sparseobj structures since we
+ * allocate and free them so frequently
  */
 static struct kmem_cache *memobjs_cache;
+static struct kmem_cache *sparseobjs_cache;
 
 static void drawobj_destroy_object(struct kref *kref)
 {
@@ -60,6 +62,9 @@
 	case MARKEROBJ_TYPE:
 		kfree(CMDOBJ(drawobj));
 		break;
+	case SPARSEOBJ_TYPE:
+		kfree(SPARSEOBJ(drawobj));
+		break;
 	}
 }
 
@@ -211,6 +216,18 @@
 	}
 }
 
+static void drawobj_destroy_sparse(struct kgsl_drawobj *drawobj)
+{
+	struct kgsl_sparseobj_node *mem, *tmpmem;
+	struct list_head *list = &SPARSEOBJ(drawobj)->sparselist;
+
+	/* Free the sparse mem here */
+	list_for_each_entry_safe(mem, tmpmem, list, node) {
+		list_del_init(&mem->node);
+		kmem_cache_free(sparseobjs_cache, mem);
+	}
+}
+
 static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
 {
 	struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
@@ -297,6 +314,8 @@
 		drawobj_destroy_sync(drawobj);
 	else if (drawobj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE))
 		drawobj_destroy_cmd(drawobj);
+	else if (drawobj->type == SPARSEOBJ_TYPE)
+		drawobj_destroy_sparse(drawobj);
 	else
 		return;
 
@@ -610,16 +629,26 @@
 	return 0;
 }
 
-static inline int drawobj_init(struct kgsl_device *device,
-	struct kgsl_context *context, struct kgsl_drawobj *drawobj,
+static void *_drawobj_create(struct kgsl_device *device,
+	struct kgsl_context *context, unsigned int size,
 	unsigned int type)
 {
+	void *obj = kzalloc(size, GFP_KERNEL);
+	struct kgsl_drawobj *drawobj;
+
+	if (obj == NULL)
+		return ERR_PTR(-ENOMEM);
+
 	/*
 	 * Increase the reference count on the context so it doesn't disappear
 	 * during the lifetime of this object
 	 */
-	if (!_kgsl_context_get(context))
-		return -ENOENT;
+	if (!_kgsl_context_get(context)) {
+		kfree(obj);
+		return ERR_PTR(-ENOENT);
+	}
+
+	drawobj = obj;
 
 	kref_init(&drawobj->refcount);
 
@@ -627,7 +656,28 @@
 	drawobj->context = context;
 	drawobj->type = type;
 
-	return 0;
+	return obj;
+}
+
+/**
+ * kgsl_drawobj_sparse_create() - Create a new sparse obj structure
+ * @device: Pointer to a KGSL device struct
+ * @context: Pointer to a KGSL context struct
+ * @flags: Flags for the sparse obj
+ *
+ * Allocate a new kgsl_drawobj_sparse structure
+ */
+struct kgsl_drawobj_sparse *kgsl_drawobj_sparse_create(
+		struct kgsl_device *device,
+		struct kgsl_context *context, unsigned int flags)
+{
+	struct kgsl_drawobj_sparse *sparseobj = _drawobj_create(device,
+		context, sizeof(*sparseobj), SPARSEOBJ_TYPE);
+
+	if (!IS_ERR(sparseobj))
+		INIT_LIST_HEAD(&sparseobj->sparselist);
+
+	return sparseobj;
 }
 
 /**
@@ -641,18 +691,13 @@
 struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device,
 		struct kgsl_context *context)
 {
-	struct kgsl_drawobj_sync *syncobj = kzalloc(sizeof(*syncobj),
-							GFP_KERNEL);
-	if (syncobj == NULL)
-		return ERR_PTR(-ENOMEM);
-
-	if (drawobj_init(device, context, DRAWOBJ(syncobj), SYNCOBJ_TYPE)) {
-		kfree(syncobj);
-		return ERR_PTR(-ENOENT);
-	}
+	struct kgsl_drawobj_sync *syncobj = _drawobj_create(device,
+		context, sizeof(*syncobj), SYNCOBJ_TYPE);
 
 	/* Add a timer to help debug sync deadlocks */
-	setup_timer(&syncobj->timer, syncobj_timer, (unsigned long) syncobj);
+	if (!IS_ERR(syncobj))
+		setup_timer(&syncobj->timer, syncobj_timer,
+				(unsigned long) syncobj);
 
 	return syncobj;
 }
@@ -671,27 +716,13 @@
 		struct kgsl_context *context, unsigned int flags,
 		unsigned int type)
 {
-	struct kgsl_drawobj_cmd *cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
-	struct kgsl_drawobj *drawobj;
+	struct kgsl_drawobj_cmd *cmdobj = _drawobj_create(device,
+		context, sizeof(*cmdobj),
+		(type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)));
 
-	if (cmdobj == NULL)
-		return ERR_PTR(-ENOMEM);
-
-	type &= CMDOBJ_TYPE | MARKEROBJ_TYPE;
-	if (type == 0) {
-		kfree(cmdobj);
-		return ERR_PTR(-EINVAL);
-	}
-
-	drawobj = DRAWOBJ(cmdobj);
-
-	if (drawobj_init(device, context, drawobj, type)) {
-		kfree(cmdobj);
-		return ERR_PTR(-ENOENT);
-	}
-
-	/* sanitize our flags for drawobj's */
-	drawobj->flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH
+	if (!IS_ERR(cmdobj)) {
+		/* sanitize our flags for drawobj's */
+		cmdobj->base.flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH
 				| KGSL_DRAWOBJ_MARKER
 				| KGSL_DRAWOBJ_END_OF_FRAME
 				| KGSL_DRAWOBJ_PWR_CONSTRAINT
@@ -699,8 +730,9 @@
 				| KGSL_DRAWOBJ_PROFILING
 				| KGSL_DRAWOBJ_PROFILING_KTIME);
 
-	INIT_LIST_HEAD(&cmdobj->cmdlist);
-	INIT_LIST_HEAD(&cmdobj->memlist);
+		INIT_LIST_HEAD(&cmdobj->cmdlist);
+		INIT_LIST_HEAD(&cmdobj->memlist);
+	}
 
 	return cmdobj;
 }
@@ -864,7 +896,7 @@
 	return 0;
 }
 
-static int drawobj_add_object(struct list_head *head,
+static int kgsl_drawobj_add_memobject(struct list_head *head,
 		struct kgsl_command_object *obj)
 {
 	struct kgsl_memobj_node *mem;
@@ -884,6 +916,62 @@
 	return 0;
 }
 
+static int kgsl_drawobj_add_sparseobject(struct list_head *head,
+		struct kgsl_sparse_binding_object *obj, unsigned int virt_id)
+{
+	struct kgsl_sparseobj_node *mem;
+
+	mem = kmem_cache_alloc(sparseobjs_cache, GFP_KERNEL);
+	if (mem == NULL)
+		return -ENOMEM;
+
+	mem->virt_id = virt_id;
+	mem->obj.id = obj->id;
+	mem->obj.virtoffset = obj->virtoffset;
+	mem->obj.physoffset = obj->physoffset;
+	mem->obj.size = obj->size;
+	mem->obj.flags = obj->flags;
+
+	list_add_tail(&mem->node, head);
+	return 0;
+}
+
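+/**
+ * kgsl_drawobj_sparse_add_sparselist() - Add the bind/unbind list to a
+ * sparse draw object
+ * @device: Pointer to the KGSL device struct
+ * @sparseobj: Sparse draw obj to add the list to
+ * @id: Virtual ID to bind/unbind
+ * @ptr: Pointer to the userspace list of binding objects
+ * @size: Size of each entry in the list
+ * @count: Number of entries in the list
+ */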
+int kgsl_drawobj_sparse_add_sparselist(struct kgsl_device *device,
+		struct kgsl_drawobj_sparse *sparseobj, unsigned int id,
+		void __user *ptr, unsigned int size, unsigned int count)
+{
+	struct kgsl_sparse_binding_object obj;
+	int i, ret = 0;
+
+	ret = _verify_input_list(count, ptr, size);
+	if (ret <= 0)
+		return ret;
+
+	for (i = 0; i < count; i++) {
+		memset(&obj, 0, sizeof(obj));
+
+		ret = _copy_from_user(&obj, ptr, sizeof(obj), size);
+		if (ret)
+			return ret;
+
+		if (!(obj.flags & (KGSL_SPARSE_BIND | KGSL_SPARSE_UNBIND)))
+			return -EINVAL;
+
+		ret = kgsl_drawobj_add_sparseobject(&sparseobj->sparselist,
+			&obj, id);
+		if (ret)
+			return ret;
+
+		ptr += sizeof(obj);
+	}
+
+	sparseobj->size = size;
+	sparseobj->count = count;
+
+	return 0;
+}
+
 #define CMDLIST_FLAGS \
 	(KGSL_CMDLIST_IB | \
 	 KGSL_CMDLIST_CTXTSWITCH_PREAMBLE | \
@@ -922,7 +1010,7 @@
 			return -EINVAL;
 		}
 
-		ret = drawobj_add_object(&cmdobj->cmdlist, &obj);
+		ret = kgsl_drawobj_add_memobject(&cmdobj->cmdlist, &obj);
 		if (ret)
 			return ret;
 
@@ -967,7 +1055,8 @@
 			add_profiling_buffer(device, cmdobj, obj.gpuaddr,
 				obj.size, obj.id, obj.offset);
 		else {
-			ret = drawobj_add_object(&cmdobj->memlist, &obj);
+			ret = kgsl_drawobj_add_memobject(&cmdobj->memlist,
+				&obj);
 			if (ret)
 				return ret;
 		}
@@ -1018,19 +1107,19 @@
 	return 0;
 }
 
-void kgsl_drawobj_exit(void)
+void kgsl_drawobjs_cache_exit(void)
 {
-	if (memobjs_cache != NULL)
-		kmem_cache_destroy(memobjs_cache);
+	kmem_cache_destroy(memobjs_cache);
+	kmem_cache_destroy(sparseobjs_cache);
 }
 
-int kgsl_drawobj_init(void)
+int kgsl_drawobjs_cache_init(void)
 {
 	memobjs_cache = KMEM_CACHE(kgsl_memobj_node, 0);
-	if (memobjs_cache == NULL) {
-		KGSL_CORE_ERR("failed to create memobjs_cache");
+	sparseobjs_cache = KMEM_CACHE(kgsl_sparseobj_node, 0);
+
+	if (!memobjs_cache || !sparseobjs_cache)
 		return -ENOMEM;
-	}
 
 	return 0;
 }
diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h
index 89ed944..fd9d2bc 100644
--- a/drivers/gpu/msm/kgsl_drawobj.h
+++ b/drivers/gpu/msm/kgsl_drawobj.h
@@ -18,10 +18,13 @@
 	container_of(obj, struct kgsl_drawobj_sync, base)
 #define CMDOBJ(obj) \
 	container_of(obj, struct kgsl_drawobj_cmd, base)
+#define SPARSEOBJ(obj) \
+	container_of(obj, struct kgsl_drawobj_sparse, base)
 
 #define CMDOBJ_TYPE     BIT(0)
 #define MARKEROBJ_TYPE  BIT(1)
 #define SYNCOBJ_TYPE    BIT(2)
+#define SPARSEOBJ_TYPE  BIT(3)
 
 /**
  * struct kgsl_drawobj - KGSL drawobj descriptor
@@ -45,7 +48,7 @@
  * struct kgsl_drawobj_cmd - KGSL command obj, This covers marker
  * cmds also since markers are special form of cmds that do not
  * need their cmds to be executed.
- * @base: Base kgsl_drawobj
+ * @base: Base kgsl_drawobj; this needs to be the first entry
  * @priv: Internal flags
  * @global_ts: The ringbuffer timestamp corresponding to this
  *    command obj
@@ -123,6 +126,22 @@
 	struct kgsl_device *device;
 };
 
+/**
+ * struct kgsl_drawobj_sparse - KGSL sparse obj descriptor
+ * @base: Base kgsl_drawobj; this needs to be the first entry
+ * @id: virtual id of the bind/unbind
+ * @sparselist: list of binds/unbinds
+ * @size: Size of kgsl_sparse_bind_object
+ * @count: Number of elements in list
+ */
+struct kgsl_drawobj_sparse {
+	struct kgsl_drawobj base;
+	unsigned int id;
+	struct list_head sparselist;
+	unsigned int size;
+	unsigned int count;
+};
+
 #define KGSL_DRAWOBJ_FLAGS \
 	{ KGSL_DRAWOBJ_MARKER, "MARKER" }, \
 	{ KGSL_DRAWOBJ_CTX_SWITCH, "CTX_SWITCH" }, \
@@ -172,9 +191,15 @@
 int kgsl_drawobj_sync_add_sync(struct kgsl_device *device,
 		struct kgsl_drawobj_sync *syncobj,
 		struct kgsl_cmd_syncpoint *sync);
+struct kgsl_drawobj_sparse *kgsl_drawobj_sparse_create(
+		struct kgsl_device *device,
+		struct kgsl_context *context, unsigned int flags);
+int kgsl_drawobj_sparse_add_sparselist(struct kgsl_device *device,
+		struct kgsl_drawobj_sparse *sparseobj, unsigned int id,
+		void __user *ptr, unsigned int size, unsigned int count);
 
-int kgsl_drawobj_init(void);
-void kgsl_drawobj_exit(void);
+int kgsl_drawobjs_cache_init(void);
+void kgsl_drawobjs_cache_exit(void);
 
 void kgsl_dump_syncpoints(struct kgsl_device *device,
 	struct kgsl_drawobj_sync *syncobj);
diff --git a/drivers/gpu/msm/kgsl_ioctl.c b/drivers/gpu/msm/kgsl_ioctl.c
index 2c57816..bfce4d4 100644
--- a/drivers/gpu/msm/kgsl_ioctl.c
+++ b/drivers/gpu/msm/kgsl_ioctl.c
@@ -94,6 +94,8 @@
 			kgsl_ioctl_sparse_virt_free),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_BIND,
 			kgsl_ioctl_sparse_bind),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPU_SPARSE_COMMAND,
+			kgsl_ioctl_gpu_sparse_command),
 };
 
 long kgsl_ioctl_copy_in(unsigned int kernel_cmd, unsigned int user_cmd,
diff --git a/drivers/net/ethernet/msm/ecm_ipa.c b/drivers/net/ethernet/msm/ecm_ipa.c
index ebc424b..b769cca 100644
--- a/drivers/net/ethernet/msm/ecm_ipa.c
+++ b/drivers/net/ethernet/msm/ecm_ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -9,7 +9,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-
 #include <linux/debugfs.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
@@ -105,10 +104,6 @@
  * struct ecm_ipa_dev - main driver context parameters
  * @net: network interface struct implemented by this driver
  * @directory: debugfs directory for various debuging switches
- * @tx_enable: flag that enable/disable Tx path to continue to IPA
- * @rx_enable: flag that enable/disable Rx path to continue to IPA
- * @rm_enable: flag that enable/disable Resource manager request prior to Tx
- * @dma_enable: flag that allow on-the-fly DMA mode for IPA
  * @eth_ipv4_hdr_hdl: saved handle for ipv4 header-insertion table
  * @eth_ipv6_hdr_hdl: saved handle for ipv6 header-insertion table
  * @usb_to_ipa_hdl: save handle for IPA pipe operations
@@ -128,10 +123,6 @@
  */
 struct ecm_ipa_dev {
 	struct net_device *net;
-	u32 tx_enable;
-	u32 rx_enable;
-	u32  rm_enable;
-	bool dma_enable;
 	struct dentry *directory;
 	u32 eth_ipv4_hdr_hdl;
 	u32 eth_ipv6_hdr_hdl;
@@ -167,26 +158,11 @@
 static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net);
 static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx);
 static void ecm_ipa_destroy_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx);
-static bool rx_filter(struct sk_buff *skb);
-static bool tx_filter(struct sk_buff *skb);
-static bool rm_enabled(struct ecm_ipa_dev *ecm_ipa_ctx);
 static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx);
 static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx);
 static netdev_tx_t ecm_ipa_start_xmit
 	(struct sk_buff *skb, struct net_device *net);
-static int ecm_ipa_debugfs_stall_open
-	(struct inode *inode, struct file *file);
-static ssize_t ecm_ipa_debugfs_stall_write
-	(struct file *file, const char __user *buf, size_t count,
-		loff_t *ppos);
 static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file);
-static ssize_t ecm_ipa_debugfs_enable_write_dma
-	(struct file *file, const char __user *buf, size_t count,
-		loff_t *ppos);
-static int ecm_ipa_debugfs_dma_open(struct inode *inode, struct file *file);
-static ssize_t ecm_ipa_debugfs_enable_write
-	(struct file *file, const char __user *buf, size_t count,
-		loff_t *ppos);
 static ssize_t ecm_ipa_debugfs_enable_read
 	(struct file *file, char __user *ubuf, size_t count, loff_t *ppos);
 static ssize_t ecm_ipa_debugfs_atomic_read
@@ -194,8 +170,6 @@
 static int ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx);
 static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx);
 static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl);
-static int ecm_ipa_ep_registers_dma_cfg
-	(u32 usb_to_ipa_hdl, enum ipa_client_type prod_client);
 static int ecm_ipa_set_device_ethernet_addr
 	(u8 *dev_ethaddr, u8 device_ethaddr[]);
 static enum ecm_ipa_state ecm_ipa_next_state
@@ -213,22 +187,11 @@
 	.ndo_get_stats = ecm_ipa_get_stats,
 };
 
-const struct file_operations ecm_ipa_debugfs_dma_ops = {
-	.open = ecm_ipa_debugfs_dma_open,
-	.read = ecm_ipa_debugfs_enable_read,
-	.write = ecm_ipa_debugfs_enable_write_dma,
-};
-
 const struct file_operations ecm_ipa_debugfs_atomic_ops = {
 	.open = ecm_ipa_debugfs_atomic_open,
 	.read = ecm_ipa_debugfs_atomic_read,
 };
 
-const struct file_operations ecm_ipa_debugfs_stall_ops = {
-	.open = ecm_ipa_debugfs_stall_open,
-	.write = ecm_ipa_debugfs_stall_write,
-};
-
 static void ecm_ipa_msg_free_cb(void *buff, u32 len, u32 type)
 {
 	kfree(buff);
@@ -294,9 +257,6 @@
 	ECM_IPA_DEBUG("ecm_ipa_ctx (private) = %p\n", ecm_ipa_ctx);
 
 	ecm_ipa_ctx->net = net;
-	ecm_ipa_ctx->tx_enable = true;
-	ecm_ipa_ctx->rx_enable = true;
-	ecm_ipa_ctx->rm_enable = true;
 	ecm_ipa_ctx->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
 	ecm_ipa_ctx->outstanding_low = DEFAULT_OUTSTANDING_LOW;
 	atomic_set(&ecm_ipa_ctx->outstanding_pkts, 0);
@@ -619,12 +579,6 @@
 		return NETDEV_TX_BUSY;
 	}
 
-	if (unlikely(tx_filter(skb))) {
-		dev_kfree_skb_any(skb);
-		ECM_IPA_DEBUG("packet got filtered out on Tx path\n");
-		status = NETDEV_TX_OK;
-		goto out;
-	}
 	ret = resource_request(ecm_ipa_ctx);
 	if (ret) {
 		ECM_IPA_DEBUG("Waiting to resource\n");
@@ -698,11 +652,6 @@
 
 	skb->dev = ecm_ipa_ctx->net;
 	skb->protocol = eth_type_trans(skb, ecm_ipa_ctx->net);
-	if (rx_filter(skb)) {
-		ECM_IPA_DEBUG("packet got filtered out on Rx path\n");
-		dev_kfree_skb_any(skb);
-		return;
-	}
 
 	result = netif_rx(skb);
 	if (result)
@@ -1174,44 +1123,15 @@
 	ECM_IPA_LOG_EXIT();
 }
 
-static bool rx_filter(struct sk_buff *skb)
-{
-	struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(skb->dev);
-
-	return !ecm_ipa_ctx->rx_enable;
-}
-
-static bool tx_filter(struct sk_buff *skb)
-{
-	struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(skb->dev);
-
-	return !ecm_ipa_ctx->tx_enable;
-}
-
-static bool rm_enabled(struct ecm_ipa_dev *ecm_ipa_ctx)
-{
-	return ecm_ipa_ctx->rm_enable;
-}
-
 static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx)
 {
-	int result = 0;
-
-	if (!rm_enabled(ecm_ipa_ctx))
-		goto out;
-	result = ipa_rm_inactivity_timer_request_resource(
-			IPA_RM_RESOURCE_STD_ECM_PROD);
-out:
-	return result;
+	return ipa_rm_inactivity_timer_request_resource(
+		IPA_RM_RESOURCE_STD_ECM_PROD);
 }
 
 static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx)
 {
-	if (!rm_enabled(ecm_ipa_ctx))
-		goto out;
 	ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_STD_ECM_PROD);
-out:
-	return;
 }
 
 /**
@@ -1289,45 +1209,6 @@
 	net->stats.tx_errors++;
 }
 
-static int ecm_ipa_debugfs_stall_open
-	(struct inode *inode, struct file *file)
-{
-	ECM_IPA_LOG_ENTRY();
-
-	ECM_IPA_LOG_EXIT();
-
-	return 0;
-}
-
-static ssize_t ecm_ipa_debugfs_stall_write
-	(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
-{
-	u32 cmdq_cfg_mmio_phy = 0xFD4E3038;
-	void *cmdq_cfg_mmio_virt;
-	int result;
-	bool val = 0;
-
-	ECM_IPA_LOG_ENTRY();
-
-	file->private_data = &val;
-	result = ecm_ipa_debugfs_enable_write(file, buf, count, ppos);
-
-	cmdq_cfg_mmio_virt = ioremap(cmdq_cfg_mmio_phy, sizeof(u32));
-	if (!cmdq_cfg_mmio_virt) {
-		ECM_IPA_ERROR
-			("fail on mmio for cmdq_cfg_mmio_phy=0x%x",
-			cmdq_cfg_mmio_phy);
-		return result;
-	}
-
-	iowrite32(val, cmdq_cfg_mmio_virt);
-	ECM_IPA_DEBUG("Value %d was written to cfgq", val);
-
-	ECM_IPA_LOG_EXIT();
-
-	return result;
-}
-
 static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file)
 {
 	struct ecm_ipa_dev *ecm_ipa_ctx = inode->i_private;
@@ -1338,84 +1219,6 @@
 	return 0;
 }
 
-static ssize_t ecm_ipa_debugfs_enable_write_dma
-	(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
-{
-	struct ecm_ipa_dev *ecm_ipa_ctx = file->private_data;
-	int result;
-
-	ECM_IPA_LOG_ENTRY();
-	file->private_data = &ecm_ipa_ctx->dma_enable;
-	result = ecm_ipa_debugfs_enable_write(file, buf, count, ppos);
-	if (ecm_ipa_ctx->dma_enable)
-		ecm_ipa_ep_registers_dma_cfg
-			(ecm_ipa_ctx->usb_to_ipa_hdl,
-			ecm_ipa_ctx->ipa_to_usb_client);
-	else
-		ecm_ipa_ep_registers_cfg
-			(ecm_ipa_ctx->usb_to_ipa_hdl,
-			ecm_ipa_ctx->usb_to_ipa_hdl);
-	ECM_IPA_LOG_EXIT();
-	return result;
-}
-
-static int ecm_ipa_debugfs_dma_open(struct inode *inode, struct file *file)
-{
-	struct ecm_ipa_dev *ecm_ipa_ctx = inode->i_private;
-
-	ECM_IPA_LOG_ENTRY();
-	file->private_data = ecm_ipa_ctx;
-	ECM_IPA_LOG_EXIT();
-	return 0;
-}
-
-static ssize_t ecm_ipa_debugfs_enable_write
-	(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
-{
-	unsigned long missing;
-	char input;
-	bool *enable = file->private_data;
-
-	if (count != sizeof(input) + 1) {
-		ECM_IPA_ERROR("wrong input length(%zd)\n", count);
-		return -EINVAL;
-	}
-	if (!buf) {
-		ECM_IPA_ERROR("Bad argument\n");
-		return -EINVAL;
-	}
-	missing = copy_from_user(&input, buf, 1);
-	if (missing)
-		return -EFAULT;
-	ECM_IPA_DEBUG("input received %c\n", input);
-	*enable = input - '0';
-	ECM_IPA_DEBUG("value was set to %d\n", *enable);
-	return count;
-}
-
-static ssize_t ecm_ipa_debugfs_enable_read
-	(struct file *file, char __user *ubuf, size_t count, loff_t *ppos)
-{
-	int nbytes;
-	int size = 0;
-	int ret;
-	loff_t pos;
-	u8 enable_str[sizeof(char) * 3] = {0};
-	bool *enable = file->private_data;
-
-	pos = *ppos;
-	nbytes = scnprintf(enable_str, sizeof(enable_str), "%d\n", *enable);
-	ret = simple_read_from_buffer(ubuf, count, ppos, enable_str, nbytes);
-	if (ret < 0) {
-		ECM_IPA_ERROR("simple_read_from_buffer problem\n");
-		return ret;
-	}
-	size += ret;
-	count -= nbytes;
-	*ppos = pos + size;
-	return size;
-}
-
 static ssize_t ecm_ipa_debugfs_atomic_read
 	(struct file *file, char __user *ubuf, size_t count, loff_t *ppos)
 {
@@ -1433,7 +1236,6 @@
 {
 	const mode_t flags_read_write = 0666;
 	const mode_t flags_read_only = 0444;
-	const mode_t flags_write_only = 0222;
 	struct dentry *file;
 
 	ECM_IPA_LOG_ENTRY();
@@ -1446,27 +1248,6 @@
 		ECM_IPA_ERROR("could not create debugfs directory entry\n");
 		goto fail_directory;
 	}
-	file = debugfs_create_bool
-		("tx_enable", flags_read_write,
-		ecm_ipa_ctx->directory, &ecm_ipa_ctx->tx_enable);
-	if (!file) {
-		ECM_IPA_ERROR("could not create debugfs tx file\n");
-		goto fail_file;
-	}
-	file = debugfs_create_bool
-		("rx_enable", flags_read_write,
-		ecm_ipa_ctx->directory, &ecm_ipa_ctx->rx_enable);
-	if (!file) {
-		ECM_IPA_ERROR("could not create debugfs rx file\n");
-		goto fail_file;
-	}
-	file = debugfs_create_bool
-		("rm_enable", flags_read_write,
-		ecm_ipa_ctx->directory, &ecm_ipa_ctx->rm_enable);
-	if (!file) {
-		ECM_IPA_ERROR("could not create debugfs rm file\n");
-		goto fail_file;
-	}
 	file = debugfs_create_u8
 		("outstanding_high", flags_read_write,
 		ecm_ipa_ctx->directory, &ecm_ipa_ctx->outstanding_high);
@@ -1482,14 +1263,6 @@
 		goto fail_file;
 	}
 	file = debugfs_create_file
-		("dma_enable", flags_read_write,
-		ecm_ipa_ctx->directory,
-		ecm_ipa_ctx, &ecm_ipa_debugfs_dma_ops);
-	if (!file) {
-		ECM_IPA_ERROR("could not create debugfs dma file\n");
-		goto fail_file;
-	}
-	file = debugfs_create_file
 		("outstanding", flags_read_only,
 		ecm_ipa_ctx->directory,
 		ecm_ipa_ctx, &ecm_ipa_debugfs_atomic_ops);
@@ -1498,15 +1271,7 @@
 		goto fail_file;
 	}
 
-	file = debugfs_create_file
-		("stall_ipa_rx_proc", flags_write_only,
-		ecm_ipa_ctx->directory,
-		ecm_ipa_ctx, &ecm_ipa_debugfs_stall_ops);
-	if (!file) {
-		ECM_IPA_ERROR("could not create stall_ipa_rx_proc file\n");
-		goto fail_file;
-	}
-
+	ECM_IPA_DEBUG("debugfs entries were created\n");
 	ECM_IPA_LOG_EXIT();
 
 	return 0;
@@ -1571,46 +1336,6 @@
 }
 
 /**
- * ecm_ipa_ep_registers_dma_cfg() - configure the USB endpoints for ECM
- *	DMA
- * @usb_to_ipa_hdl: handle received from ipa_connect
- *
- * This function will override the previous configuration
- * which is needed for cores that does not support blocks logic
- * Note that client handles are the actual pipe index
- */
-static int ecm_ipa_ep_registers_dma_cfg(u32 usb_to_ipa_hdl,
-					enum ipa_client_type prod_client)
-{
-	int result = 0;
-	struct ipa_ep_cfg_mode cfg_mode;
-	u32 apps_to_ipa_hdl = 2;
-
-	ECM_IPA_LOG_ENTRY();
-
-	memset(&cfg_mode, 0, sizeof(cfg_mode));
-	cfg_mode.mode = IPA_DMA;
-	cfg_mode.dst = prod_client;
-	result = ipa_cfg_ep_mode(apps_to_ipa_hdl, &cfg_mode);
-	if (result) {
-		ECM_IPA_ERROR("failed to configure Apps to IPA\n");
-		goto out;
-	}
-	memset(&cfg_mode, 0, sizeof(cfg_mode));
-	cfg_mode.mode = IPA_DMA;
-	cfg_mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
-	result = ipa_cfg_ep_mode(usb_to_ipa_hdl, &cfg_mode);
-	if (result) {
-		ECM_IPA_ERROR("failed to configure USB to IPA\n");
-		goto out;
-	}
-	ECM_IPA_DEBUG("end-point registers successfully configured\n");
-out:
-	ECM_IPA_LOG_EXIT();
-	return result;
-}
-
-/**
  * ecm_ipa_set_device_ethernet_addr() - set device ethernet address
  * @dev_ethaddr: device ethernet address
  *
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index ba7b034b..7810bad 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -99,6 +99,12 @@
 config OF_RESOLVE
 	bool
 
+config OF_SLIMBUS
+	def_tristate SLIMBUS
+	depends on SLIMBUS
+	help
+	  OpenFirmware (device tree) helpers used to parse and register
+	  SLIMbus slave devices described under a SLIMbus controller node.
+
 config OF_OVERLAY
 	bool "Device Tree overlays"
 	select OF_DYNAMIC
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index d7efd9d..4b8dabe 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -13,6 +13,7 @@
 obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
 obj-$(CONFIG_OF_RESOLVE)  += resolver.o
 obj-$(CONFIG_OF_OVERLAY) += overlay.o
+obj-$(CONFIG_OF_SLIMBUS) += of_slimbus.o
 obj-$(CONFIG_OF_NUMA) += of_numa.o
 
 obj-$(CONFIG_OF_UNITTEST) += unittest-data/
diff --git a/drivers/of/of_slimbus.c b/drivers/of/of_slimbus.c
new file mode 100644
index 0000000..2b3d240
--- /dev/null
+++ b/drivers/of/of_slimbus.c
@@ -0,0 +1,89 @@
+/* Copyright (c) 2012, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* OF helpers for SLIMbus */
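+/*
+ * A controller child node is expected to look roughly like this
+ * (illustrative values only; the 6-byte elemental-addr is the slave's
+ * enumeration address):
+ *
+ *	codec@1,0 {
+ *		compatible = "slim217,1a0";
+ *		elemental-addr = [00 01 a0 01 17 02];
+ *	};
+ */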
+#include <linux/slimbus/slimbus.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_slimbus.h>
+
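+/**
+ * of_register_slim_devices() - register SLIMbus slaves described in DT
+ * @ctrl: controller whose of_node children are scanned
+ *
+ * Every child node carrying a valid 6-byte "elemental-addr" property is
+ * turned into a slim_boardinfo entry, and the accumulated table is handed
+ * to slim_register_board_info(). Returns 0 on success or a negative errno.
+ */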
+int of_register_slim_devices(struct slim_controller *ctrl)
+{
+	struct device_node *node;
+	struct slim_boardinfo *binfo = NULL;
+	struct slim_boardinfo *temp;
+	int n = 0;
+	int ret = 0;
+
+	if (!ctrl->dev.of_node)
+		return -EINVAL;
+
+	for_each_child_of_node(ctrl->dev.of_node, node) {
+		struct property *prop;
+		struct slim_device *slim;
+		char *name;
+
+		prop = of_find_property(node, "elemental-addr", NULL);
+		if (!prop || prop->length != 6) {
+			dev_err(&ctrl->dev, "of_slim: invalid E-addr");
+			continue;
+		}
+		name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
+		if (!name) {
+			ret = -ENOMEM;
+			goto of_slim_err;
+		}
+		if (of_modalias_node(node, name, SLIMBUS_NAME_SIZE) < 0) {
+			dev_err(&ctrl->dev, "of_slim: modalias failure on %s\n",
+				node->full_name);
+			kfree(name);
+			continue;
+		}
+		slim = kzalloc(sizeof(struct slim_device), GFP_KERNEL);
+		if (!slim) {
+			ret = -ENOMEM;
+			kfree(name);
+			goto of_slim_err;
+		}
+		memcpy(slim->e_addr, prop->value, 6);
+
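+		/*
+		 * Grow the boardinfo table by one entry; on krealloc
+		 * failure the old table is left intact and released in
+		 * the error path below.
+		 */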
+		temp = krealloc(binfo, (n + 1) * sizeof(struct slim_boardinfo),
+					GFP_KERNEL);
+		if (!temp) {
+			dev_err(&ctrl->dev, "out of memory");
+			kfree(name);
+			kfree(slim);
+			ret = -ENOMEM;
+			goto of_slim_err;
+		}
+		binfo = temp;
+
+		slim->dev.of_node = of_node_get(node);
+		slim->name = (const char *)name;
+		binfo[n].bus_num = ctrl->nr;
+		binfo[n].slim_slave = slim;
+		n++;
+	}
+	ret = slim_register_board_info(binfo, n);
+	if (!ret)
+		goto of_slim_ret;
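+	/*
+	 * Registration failed: undo the per-child allocations. On success
+	 * only the boardinfo array itself is freed; the slim_device
+	 * objects now belong to the SLIMbus core.
+	 */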
+of_slim_err:
+	while (n-- > 0) {
+		kfree(binfo[n].slim_slave->name);
+		kfree(binfo[n].slim_slave);
+	}
+of_slim_ret:
+	kfree(binfo);
+	return ret;
+}
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 9ddaff9..1058e5e 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -88,6 +88,15 @@
 	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
 	  Technologies Inc MSMSKUNK platform.
 
+config PINCTRL_SDMBAT
+	tristate "Qualcomm Technologies Inc SDMBAT pin controller driver"
+	depends on GPIOLIB && OF
+	select PINCTRL_MSM
+	help
+	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
+	  Technologies Inc SDMBAT platform.
+
 
 config PINCTRL_MSM8996
 	tristate "Qualcomm MSM8996 pin controller driver"
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 6e14ef9..fd52c43 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -16,3 +16,4 @@
 obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
 obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
 obj-$(CONFIG_PINCTRL_MSMSKUNK) += pinctrl-msmskunk.o
+obj-$(CONFIG_PINCTRL_SDMBAT) += pinctrl-sdmbat.o
diff --git a/drivers/pinctrl/qcom/pinctrl-sdmbat.c b/drivers/pinctrl/qcom/pinctrl-sdmbat.c
new file mode 100644
index 0000000..3e4fdda
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sdmbat.c
@@ -0,0 +1,2283 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname)					\
+	[msm_mux_##fname] = {				\
+		.name = #fname,				\
+		.groups = fname##_groups,		\
+		.ngroups = ARRAY_SIZE(fname##_groups),	\
+	}
+
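+/*
+ * TLMM tile base offsets; each GPIO owns a REG_SIZE-sized register
+ * block at (tile base + REG_SIZE * id).
+ */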
+#define NORTH	0x00500000
+#define SOUTH	0x00900000
+#define WEST	0x00100000
+#define REG_SIZE 0x1000
+
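+/*
+ * PINGROUP() describes one TLMM GPIO: the control, I/O and interrupt
+ * register offsets are derived from the tile base, and the ten funcs[]
+ * entries map mux values 0-9 (value 0 selects plain GPIO).
+ */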
+#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
+	{						\
+		.name = "gpio" #id,			\
+		.pins = gpio##id##_pins,		\
+		.npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins),	\
+		.funcs = (int[]){			\
+			msm_mux_gpio, /* gpio mode */	\
+			msm_mux_##f1,			\
+			msm_mux_##f2,			\
+			msm_mux_##f3,			\
+			msm_mux_##f4,			\
+			msm_mux_##f5,			\
+			msm_mux_##f6,			\
+			msm_mux_##f7,			\
+			msm_mux_##f8,			\
+			msm_mux_##f9			\
+		},					\
+		.nfuncs = 10,				\
+		.ctl_reg = base + REG_SIZE * id,		\
+		.io_reg = base + 0x4 + REG_SIZE * id,		\
+		.intr_cfg_reg = base + 0x8 + REG_SIZE * id,	\
+		.intr_status_reg = base + 0xc + REG_SIZE * id,	\
+		.intr_target_reg = base + 0x8 + REG_SIZE * id,	\
+		.mux_bit = 2,			\
+		.pull_bit = 0,			\
+		.drv_bit = 6,			\
+		.oe_bit = 9,			\
+		.in_bit = 0,			\
+		.out_bit = 1,			\
+		.intr_enable_bit = 0,		\
+		.intr_status_bit = 0,		\
+		.intr_target_bit = 5,		\
+		.intr_target_kpss_val = 3,	\
+		.intr_raw_status_bit = 4,	\
+		.intr_polarity_bit = 1,		\
+		.intr_detection_bit = 2,	\
+		.intr_detection_width = 2,	\
+	}
+
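+/*
+ * SDC/QDSD pads expose only pull and drive-strength controls, so the
+ * mux, I/O and interrupt fields are marked unused (-1).
+ */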
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv)	\
+	{						\
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = ctl,				\
+		.io_reg = 0,				\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = pull,			\
+		.drv_bit = drv,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = -1,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+
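+/*
+ * The dedicated UFS_RESET pad has pull at bit 3 and drive strength at
+ * bit 0 of its control register; the output level lives in io_reg.
+ */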
+#define UFS_RESET(pg_name, offset)				\
+	{						\
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = offset,			\
+		.io_reg = offset + 0x4,			\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = 3,				\
+		.drv_bit = 0,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = 0,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
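+
+/*
+ * Physical pin table. The GPIO numbering is sparse: 58-64, 69-74 and
+ * 104 are absent, and 150-152 are the dedicated SDC2 pads.
+ */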
+static const struct pinctrl_pin_desc sdmbat_pins[] = {
+	PINCTRL_PIN(0, "GPIO_0"),
+	PINCTRL_PIN(1, "GPIO_1"),
+	PINCTRL_PIN(2, "GPIO_2"),
+	PINCTRL_PIN(3, "GPIO_3"),
+	PINCTRL_PIN(4, "GPIO_4"),
+	PINCTRL_PIN(5, "GPIO_5"),
+	PINCTRL_PIN(6, "GPIO_6"),
+	PINCTRL_PIN(7, "GPIO_7"),
+	PINCTRL_PIN(8, "GPIO_8"),
+	PINCTRL_PIN(9, "GPIO_9"),
+	PINCTRL_PIN(10, "GPIO_10"),
+	PINCTRL_PIN(11, "GPIO_11"),
+	PINCTRL_PIN(12, "GPIO_12"),
+	PINCTRL_PIN(13, "GPIO_13"),
+	PINCTRL_PIN(14, "GPIO_14"),
+	PINCTRL_PIN(15, "GPIO_15"),
+	PINCTRL_PIN(16, "GPIO_16"),
+	PINCTRL_PIN(17, "GPIO_17"),
+	PINCTRL_PIN(18, "GPIO_18"),
+	PINCTRL_PIN(19, "GPIO_19"),
+	PINCTRL_PIN(20, "GPIO_20"),
+	PINCTRL_PIN(21, "GPIO_21"),
+	PINCTRL_PIN(22, "GPIO_22"),
+	PINCTRL_PIN(23, "GPIO_23"),
+	PINCTRL_PIN(24, "GPIO_24"),
+	PINCTRL_PIN(25, "GPIO_25"),
+	PINCTRL_PIN(26, "GPIO_26"),
+	PINCTRL_PIN(27, "GPIO_27"),
+	PINCTRL_PIN(28, "GPIO_28"),
+	PINCTRL_PIN(29, "GPIO_29"),
+	PINCTRL_PIN(30, "GPIO_30"),
+	PINCTRL_PIN(31, "GPIO_31"),
+	PINCTRL_PIN(32, "GPIO_32"),
+	PINCTRL_PIN(33, "GPIO_33"),
+	PINCTRL_PIN(34, "GPIO_34"),
+	PINCTRL_PIN(35, "GPIO_35"),
+	PINCTRL_PIN(36, "GPIO_36"),
+	PINCTRL_PIN(37, "GPIO_37"),
+	PINCTRL_PIN(38, "GPIO_38"),
+	PINCTRL_PIN(39, "GPIO_39"),
+	PINCTRL_PIN(40, "GPIO_40"),
+	PINCTRL_PIN(41, "GPIO_41"),
+	PINCTRL_PIN(42, "GPIO_42"),
+	PINCTRL_PIN(43, "GPIO_43"),
+	PINCTRL_PIN(44, "GPIO_44"),
+	PINCTRL_PIN(45, "GPIO_45"),
+	PINCTRL_PIN(46, "GPIO_46"),
+	PINCTRL_PIN(47, "GPIO_47"),
+	PINCTRL_PIN(48, "GPIO_48"),
+	PINCTRL_PIN(49, "GPIO_49"),
+	PINCTRL_PIN(50, "GPIO_50"),
+	PINCTRL_PIN(51, "GPIO_51"),
+	PINCTRL_PIN(52, "GPIO_52"),
+	PINCTRL_PIN(53, "GPIO_53"),
+	PINCTRL_PIN(54, "GPIO_54"),
+	PINCTRL_PIN(55, "GPIO_55"),
+	PINCTRL_PIN(56, "GPIO_56"),
+	PINCTRL_PIN(57, "GPIO_57"),
+	PINCTRL_PIN(65, "GPIO_65"),
+	PINCTRL_PIN(66, "GPIO_66"),
+	PINCTRL_PIN(67, "GPIO_67"),
+	PINCTRL_PIN(68, "GPIO_68"),
+	PINCTRL_PIN(75, "GPIO_75"),
+	PINCTRL_PIN(76, "GPIO_76"),
+	PINCTRL_PIN(77, "GPIO_77"),
+	PINCTRL_PIN(78, "GPIO_78"),
+	PINCTRL_PIN(79, "GPIO_79"),
+	PINCTRL_PIN(80, "GPIO_80"),
+	PINCTRL_PIN(81, "GPIO_81"),
+	PINCTRL_PIN(82, "GPIO_82"),
+	PINCTRL_PIN(83, "GPIO_83"),
+	PINCTRL_PIN(84, "GPIO_84"),
+	PINCTRL_PIN(85, "GPIO_85"),
+	PINCTRL_PIN(86, "GPIO_86"),
+	PINCTRL_PIN(87, "GPIO_87"),
+	PINCTRL_PIN(88, "GPIO_88"),
+	PINCTRL_PIN(89, "GPIO_89"),
+	PINCTRL_PIN(90, "GPIO_90"),
+	PINCTRL_PIN(91, "GPIO_91"),
+	PINCTRL_PIN(92, "GPIO_92"),
+	PINCTRL_PIN(93, "GPIO_93"),
+	PINCTRL_PIN(94, "GPIO_94"),
+	PINCTRL_PIN(95, "GPIO_95"),
+	PINCTRL_PIN(96, "GPIO_96"),
+	PINCTRL_PIN(97, "GPIO_97"),
+	PINCTRL_PIN(98, "GPIO_98"),
+	PINCTRL_PIN(99, "GPIO_99"),
+	PINCTRL_PIN(100, "GPIO_100"),
+	PINCTRL_PIN(101, "GPIO_101"),
+	PINCTRL_PIN(102, "GPIO_102"),
+	PINCTRL_PIN(103, "GPIO_103"),
+	PINCTRL_PIN(105, "GPIO_105"),
+	PINCTRL_PIN(106, "GPIO_106"),
+	PINCTRL_PIN(107, "GPIO_107"),
+	PINCTRL_PIN(108, "GPIO_108"),
+	PINCTRL_PIN(109, "GPIO_109"),
+	PINCTRL_PIN(110, "GPIO_110"),
+	PINCTRL_PIN(111, "GPIO_111"),
+	PINCTRL_PIN(112, "GPIO_112"),
+	PINCTRL_PIN(113, "GPIO_113"),
+	PINCTRL_PIN(114, "GPIO_114"),
+	PINCTRL_PIN(115, "GPIO_115"),
+	PINCTRL_PIN(116, "GPIO_116"),
+	PINCTRL_PIN(117, "GPIO_117"),
+	PINCTRL_PIN(118, "GPIO_118"),
+	PINCTRL_PIN(119, "GPIO_119"),
+	PINCTRL_PIN(120, "GPIO_120"),
+	PINCTRL_PIN(121, "GPIO_121"),
+	PINCTRL_PIN(122, "GPIO_122"),
+	PINCTRL_PIN(123, "GPIO_123"),
+	PINCTRL_PIN(124, "GPIO_124"),
+	PINCTRL_PIN(125, "GPIO_125"),
+	PINCTRL_PIN(126, "GPIO_126"),
+	PINCTRL_PIN(127, "GPIO_127"),
+	PINCTRL_PIN(128, "GPIO_128"),
+	PINCTRL_PIN(129, "GPIO_129"),
+	PINCTRL_PIN(130, "GPIO_130"),
+	PINCTRL_PIN(131, "GPIO_131"),
+	PINCTRL_PIN(132, "GPIO_132"),
+	PINCTRL_PIN(133, "GPIO_133"),
+	PINCTRL_PIN(134, "GPIO_134"),
+	PINCTRL_PIN(135, "GPIO_135"),
+	PINCTRL_PIN(136, "GPIO_136"),
+	PINCTRL_PIN(137, "GPIO_137"),
+	PINCTRL_PIN(138, "GPIO_138"),
+	PINCTRL_PIN(139, "GPIO_139"),
+	PINCTRL_PIN(140, "GPIO_140"),
+	PINCTRL_PIN(141, "GPIO_141"),
+	PINCTRL_PIN(142, "GPIO_142"),
+	PINCTRL_PIN(143, "GPIO_143"),
+	PINCTRL_PIN(144, "GPIO_144"),
+	PINCTRL_PIN(145, "GPIO_145"),
+	PINCTRL_PIN(146, "GPIO_146"),
+	PINCTRL_PIN(147, "GPIO_147"),
+	PINCTRL_PIN(148, "GPIO_148"),
+	PINCTRL_PIN(149, "GPIO_149"),
+	PINCTRL_PIN(150, "SDC2_CLK"),
+	PINCTRL_PIN(151, "SDC2_CMD"),
+	PINCTRL_PIN(152, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+	static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+
+static const unsigned int sdc2_clk_pins[] = { 150 };
+static const unsigned int sdc2_cmd_pins[] = { 151 };
+static const unsigned int sdc2_data_pins[] = { 152 };
+
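+/*
+ * Mux function identifiers; each reservedN entry is a per-pin
+ * placeholder for a function that is reserved on this platform.
+ */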
+enum sdmbat_functions {
+	msm_mux_qup0,
+	msm_mux_gpio,
+	msm_mux_reserved0,
+	msm_mux_reserved1,
+	msm_mux_reserved2,
+	msm_mux_reserved3,
+	msm_mux_qup9,
+	msm_mux_qdss_cti,
+	msm_mux_reserved4,
+	msm_mux_reserved5,
+	msm_mux_ddr_pxi0,
+	msm_mux_reserved6,
+	msm_mux_ddr_bist,
+	msm_mux_atest_tsens2,
+	msm_mux_vsense_trigger,
+	msm_mux_atest_usb1,
+	msm_mux_reserved7,
+	msm_mux_qup_l4,
+	msm_mux_GP_PDM1,
+	msm_mux_reserved8,
+	msm_mux_qup_l5,
+	msm_mux_reserved9,
+	msm_mux_mdp_vsync,
+	msm_mux_qup_l6,
+	msm_mux_wlan2_adc1,
+	msm_mux_atest_usb11,
+	msm_mux_ddr_pxi2,
+	msm_mux_reserved10,
+	msm_mux_edp_lcd,
+	msm_mux_dbg_out,
+	msm_mux_wlan2_adc0,
+	msm_mux_atest_usb10,
+	msm_mux_reserved11,
+	msm_mux_m_voc,
+	msm_mux_tsif1_sync,
+	msm_mux_ddr_pxi3,
+	msm_mux_reserved12,
+	msm_mux_cam_mclk,
+	msm_mux_pll_bypassnl,
+	msm_mux_qdss_gpio0,
+	msm_mux_reserved13,
+	msm_mux_pll_reset,
+	msm_mux_qdss_gpio1,
+	msm_mux_reserved14,
+	msm_mux_qdss_gpio2,
+	msm_mux_reserved15,
+	msm_mux_qdss_gpio3,
+	msm_mux_reserved16,
+	msm_mux_cci_i2c,
+	msm_mux_qup1,
+	msm_mux_qdss_gpio4,
+	msm_mux_reserved17,
+	msm_mux_cci_timer1,
+	msm_mux_gcc_gp3,
+	msm_mux_qdss_gpio,
+	msm_mux_reserved22,
+	msm_mux_cci_timer2,
+	msm_mux_qdss_gpio9,
+	msm_mux_reserved23,
+	msm_mux_cci_timer3,
+	msm_mux_cci_async,
+	msm_mux_qdss_gpio10,
+	msm_mux_reserved24,
+	msm_mux_cci_timer4,
+	msm_mux_qdss_gpio11,
+	msm_mux_reserved25,
+	msm_mux_qdss_gpio5,
+	msm_mux_reserved18,
+	msm_mux_qdss_gpio6,
+	msm_mux_reserved19,
+	msm_mux_qdss_gpio7,
+	msm_mux_reserved20,
+	msm_mux_cci_timer0,
+	msm_mux_gcc_gp2,
+	msm_mux_qdss_gpio8,
+	msm_mux_reserved21,
+	msm_mux_qdss_gpio12,
+	msm_mux_JITTER_BIST,
+	msm_mux_reserved26,
+	msm_mux_qup2,
+	msm_mux_qdss_gpio13,
+	msm_mux_PLL_BIST,
+	msm_mux_reserved27,
+	msm_mux_qdss_gpio14,
+	msm_mux_AGERA_PLL,
+	msm_mux_reserved28,
+	msm_mux_phase_flag1,
+	msm_mux_qdss_gpio15,
+	msm_mux_atest_tsens,
+	msm_mux_reserved29,
+	msm_mux_phase_flag2,
+	msm_mux_reserved30,
+	msm_mux_qup11,
+	msm_mux_qup14,
+	msm_mux_reserved31,
+	msm_mux_reserved32,
+	msm_mux_reserved33,
+	msm_mux_reserved34,
+	msm_mux_pci_e0,
+	msm_mux_QUP_L4,
+	msm_mux_reserved35,
+	msm_mux_QUP_L5,
+	msm_mux_reserved36,
+	msm_mux_QUP_L6,
+	msm_mux_reserved37,
+	msm_mux_usb_phy,
+	msm_mux_reserved38,
+	msm_mux_lpass_slimbus,
+	msm_mux_reserved39,
+	msm_mux_sd_write,
+	msm_mux_tsif1_error,
+	msm_mux_reserved40,
+	msm_mux_qup3,
+	msm_mux_reserved41,
+	msm_mux_reserved42,
+	msm_mux_reserved43,
+	msm_mux_reserved44,
+	msm_mux_bt_reset,
+	msm_mux_qup6,
+	msm_mux_reserved45,
+	msm_mux_reserved46,
+	msm_mux_reserved47,
+	msm_mux_reserved124,
+	msm_mux_reserved125,
+	msm_mux_reserved126,
+	msm_mux_reserved127,
+	msm_mux_reserved128,
+	msm_mux_reserved129,
+	msm_mux_qlink_request,
+	msm_mux_reserved130,
+	msm_mux_qlink_enable,
+	msm_mux_reserved131,
+	msm_mux_reserved132,
+	msm_mux_reserved133,
+	msm_mux_reserved134,
+	msm_mux_pa_indicator,
+	msm_mux_reserved135,
+	msm_mux_reserved136,
+	msm_mux_phase_flag26,
+	msm_mux_reserved137,
+	msm_mux_phase_flag27,
+	msm_mux_reserved138,
+	msm_mux_phase_flag28,
+	msm_mux_reserved139,
+	msm_mux_phase_flag6,
+	msm_mux_reserved140,
+	msm_mux_phase_flag29,
+	msm_mux_reserved141,
+	msm_mux_phase_flag30,
+	msm_mux_reserved142,
+	msm_mux_phase_flag31,
+	msm_mux_reserved143,
+	msm_mux_mss_lte,
+	msm_mux_reserved144,
+	msm_mux_reserved145,
+	msm_mux_reserved146,
+	msm_mux_reserved147,
+	msm_mux_reserved148,
+	msm_mux_reserved149,
+	msm_mux_reserved48,
+	msm_mux_qup12,
+	msm_mux_reserved49,
+	msm_mux_reserved50,
+	msm_mux_reserved51,
+	msm_mux_phase_flag16,
+	msm_mux_reserved52,
+	msm_mux_qup10,
+	msm_mux_phase_flag11,
+	msm_mux_reserved53,
+	msm_mux_GP_PDM0,
+	msm_mux_phase_flag12,
+	msm_mux_wlan1_adc1,
+	msm_mux_atest_usb13,
+	msm_mux_ddr_pxi1,
+	msm_mux_reserved54,
+	msm_mux_phase_flag13,
+	msm_mux_wlan1_adc0,
+	msm_mux_atest_usb12,
+	msm_mux_reserved55,
+	msm_mux_phase_flag17,
+	msm_mux_reserved56,
+	msm_mux_qua_mi2s,
+	msm_mux_gcc_gp1,
+	msm_mux_phase_flag18,
+	msm_mux_reserved57,
+	msm_mux_pri_mi2s,
+	msm_mux_qup8,
+	msm_mux_wsa_clk,
+	msm_mux_reserved65,
+	msm_mux_pri_mi2s_ws,
+	msm_mux_wsa_data,
+	msm_mux_reserved66,
+	msm_mux_wsa_en,
+	msm_mux_atest_usb2,
+	msm_mux_reserved67,
+	msm_mux_atest_usb23,
+	msm_mux_reserved68,
+	msm_mux_ter_mi2s,
+	msm_mux_phase_flag8,
+	msm_mux_atest_usb22,
+	msm_mux_reserved75,
+	msm_mux_phase_flag9,
+	msm_mux_atest_usb21,
+	msm_mux_reserved76,
+	msm_mux_phase_flag4,
+	msm_mux_atest_usb20,
+	msm_mux_reserved77,
+	msm_mux_ssc_irq,
+	msm_mux_reserved78,
+	msm_mux_sec_mi2s,
+	msm_mux_GP_PDM2,
+	msm_mux_reserved79,
+	msm_mux_reserved80,
+	msm_mux_qup15,
+	msm_mux_reserved81,
+	msm_mux_reserved82,
+	msm_mux_reserved83,
+	msm_mux_reserved84,
+	msm_mux_qup5,
+	msm_mux_reserved85,
+	msm_mux_copy_gp,
+	msm_mux_reserved86,
+	msm_mux_reserved87,
+	msm_mux_reserved88,
+	msm_mux_tsif1_clk,
+	msm_mux_qup4,
+	msm_mux_tgu_ch3,
+	msm_mux_phase_flag10,
+	msm_mux_reserved89,
+	msm_mux_tsif1_en,
+	msm_mux_mdp_vsync0,
+	msm_mux_mdp_vsync1,
+	msm_mux_mdp_vsync2,
+	msm_mux_mdp_vsync3,
+	msm_mux_tgu_ch0,
+	msm_mux_phase_flag0,
+	msm_mux_reserved90,
+	msm_mux_tsif1_data,
+	msm_mux_sdc4_cmd,
+	msm_mux_tgu_ch1,
+	msm_mux_reserved91,
+	msm_mux_tsif2_error,
+	msm_mux_sdc43,
+	msm_mux_vfr_1,
+	msm_mux_tgu_ch2,
+	msm_mux_reserved92,
+	msm_mux_tsif2_clk,
+	msm_mux_sdc4_clk,
+	msm_mux_qup7,
+	msm_mux_reserved93,
+	msm_mux_tsif2_en,
+	msm_mux_sdc42,
+	msm_mux_reserved94,
+	msm_mux_tsif2_data,
+	msm_mux_sdc41,
+	msm_mux_reserved95,
+	msm_mux_tsif2_sync,
+	msm_mux_sdc40,
+	msm_mux_phase_flag3,
+	msm_mux_reserved96,
+	msm_mux_ldo_en,
+	msm_mux_reserved97,
+	msm_mux_ldo_update,
+	msm_mux_reserved98,
+	msm_mux_phase_flag14,
+	msm_mux_prng_rosc,
+	msm_mux_reserved99,
+	msm_mux_phase_flag15,
+	msm_mux_reserved100,
+	msm_mux_phase_flag5,
+	msm_mux_reserved101,
+	msm_mux_pci_e1,
+	msm_mux_reserved102,
+	msm_mux_COPY_PHASE,
+	msm_mux_reserved103,
+	msm_mux_uim2_data,
+	msm_mux_qup13,
+	msm_mux_reserved105,
+	msm_mux_uim2_clk,
+	msm_mux_reserved106,
+	msm_mux_uim2_reset,
+	msm_mux_reserved107,
+	msm_mux_uim2_present,
+	msm_mux_reserved108,
+	msm_mux_uim1_data,
+	msm_mux_reserved109,
+	msm_mux_uim1_clk,
+	msm_mux_reserved110,
+	msm_mux_uim1_reset,
+	msm_mux_reserved111,
+	msm_mux_uim1_present,
+	msm_mux_reserved112,
+	msm_mux_uim_batt,
+	msm_mux_edp_hot,
+	msm_mux_reserved113,
+	msm_mux_NAV_PPS,
+	msm_mux_GPS_TX,
+	msm_mux_reserved114,
+	msm_mux_reserved115,
+	msm_mux_reserved116,
+	msm_mux_atest_char,
+	msm_mux_reserved117,
+	msm_mux_adsp_ext,
+	msm_mux_atest_char3,
+	msm_mux_reserved118,
+	msm_mux_atest_char2,
+	msm_mux_reserved119,
+	msm_mux_atest_char1,
+	msm_mux_reserved120,
+	msm_mux_atest_char0,
+	msm_mux_reserved121,
+	msm_mux_reserved122,
+	msm_mux_reserved123,
+	msm_mux_NA,
+};
+
+static const char * const qup0_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const gpio_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+	"gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+	"gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+	"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+	"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+	"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+	"gpio43", "gpio44", "gpio46", "gpio47", "gpio48", "gpio49", "gpio50",
+	"gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", "gpio57",
+	"gpio65", "gpio66", "gpio75", "gpio76", "gpio77", "gpio81", "gpio82",
+	"gpio83", "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89",
+	"gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96",
+	"gpio97", "gpio98", "gpio99", "gpio100", "gpio101", "gpio102",
+	"gpio103", "gpio105", "gpio106", "gpio107", "gpio108", "gpio109",
+	"gpio110", "gpio111", "gpio112", "gpio113", "gpio114", "gpio115",
+	"gpio116", "gpio126", "gpio127", "gpio128", "gpio129", "gpio130",
+	"gpio131", "gpio132", "gpio133", "gpio134", "gpio135", "gpio136",
+	"gpio137", "gpio138", "gpio139", "gpio140", "gpio141", "gpio142",
+	"gpio143", "gpio144", "gpio145", "gpio146", "gpio147", "gpio148",
+	"gpio149",
+};
+static const char * const reserved0_groups[] = {
+	"gpio0",
+};
+static const char * const reserved1_groups[] = {
+	"gpio1",
+};
+static const char * const reserved2_groups[] = {
+	"gpio2",
+};
+static const char * const reserved3_groups[] = {
+	"gpio3",
+};
+static const char * const qup9_groups[] = {
+	"gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const qdss_cti_groups[] = {
+	"gpio4", "gpio5", "gpio51", "gpio52", "gpio90", "gpio91",
+};
+static const char * const reserved4_groups[] = {
+	"gpio4",
+};
+static const char * const reserved5_groups[] = {
+	"gpio5",
+};
+static const char * const ddr_pxi0_groups[] = {
+	"gpio6", "gpio7",
+};
+static const char * const reserved6_groups[] = {
+	"gpio6",
+};
+static const char * const ddr_bist_groups[] = {
+	"gpio7", "gpio8", "gpio9", "gpio10",
+};
+static const char * const atest_tsens2_groups[] = {
+	"gpio7",
+};
+static const char * const vsense_trigger_groups[] = {
+	"gpio7",
+};
+static const char * const atest_usb1_groups[] = {
+	"gpio7",
+};
+static const char * const reserved7_groups[] = {
+	"gpio7",
+};
+static const char * const qup_l4_groups[] = {
+	"gpio8", "gpio105", "gpio123",
+};
+static const char * const GP_PDM1_groups[] = {
+	"gpio8", "gpio66",
+};
+static const char * const reserved8_groups[] = {
+	"gpio8",
+};
+static const char * const qup_l5_groups[] = {
+	"gpio9", "gpio106", "gpio124",
+};
+static const char * const reserved9_groups[] = {
+	"gpio9",
+};
+static const char * const mdp_vsync_groups[] = {
+	"gpio10", "gpio11", "gpio12", "gpio97", "gpio98",
+};
+static const char * const qup_l6_groups[] = {
+	"gpio10", "gpio107", "gpio125",
+};
+static const char * const wlan2_adc1_groups[] = {
+	"gpio10",
+};
+static const char * const atest_usb11_groups[] = {
+	"gpio10",
+};
+static const char * const ddr_pxi2_groups[] = {
+	"gpio10", "gpio11",
+};
+static const char * const reserved10_groups[] = {
+	"gpio10",
+};
+static const char * const edp_lcd_groups[] = {
+	"gpio11",
+};
+static const char * const dbg_out_groups[] = {
+	"gpio11",
+};
+static const char * const wlan2_adc0_groups[] = {
+	"gpio11",
+};
+static const char * const atest_usb10_groups[] = {
+	"gpio11",
+};
+static const char * const reserved11_groups[] = {
+	"gpio11",
+};
+static const char * const m_voc_groups[] = {
+	"gpio12",
+};
+static const char * const tsif1_sync_groups[] = {
+	"gpio12",
+};
+static const char * const ddr_pxi3_groups[] = {
+	"gpio12", "gpio13",
+};
+static const char * const reserved12_groups[] = {
+	"gpio12",
+};
+static const char * const cam_mclk_groups[] = {
+	"gpio13", "gpio14", "gpio15", "gpio16",
+};
+static const char * const pll_bypassnl_groups[] = {
+	"gpio13",
+};
+static const char * const qdss_gpio0_groups[] = {
+	"gpio13", "gpio117",
+};
+static const char * const reserved13_groups[] = {
+	"gpio13",
+};
+static const char * const pll_reset_groups[] = {
+	"gpio14",
+};
+static const char * const qdss_gpio1_groups[] = {
+	"gpio14", "gpio118",
+};
+static const char * const reserved14_groups[] = {
+	"gpio14",
+};
+static const char * const qdss_gpio2_groups[] = {
+	"gpio15", "gpio119",
+};
+static const char * const reserved15_groups[] = {
+	"gpio15",
+};
+static const char * const qdss_gpio3_groups[] = {
+	"gpio16", "gpio120",
+};
+static const char * const reserved16_groups[] = {
+	"gpio16",
+};
+static const char * const cci_i2c_groups[] = {
+	"gpio17", "gpio18", "gpio19", "gpio20",
+};
+static const char * const qup1_groups[] = {
+	"gpio17", "gpio18", "gpio19", "gpio20",
+};
+static const char * const qdss_gpio4_groups[] = {
+	"gpio17", "gpio121",
+};
+static const char * const reserved17_groups[] = {
+	"gpio17",
+};
+static const char * const cci_timer1_groups[] = {
+	"gpio22",
+};
+static const char * const gcc_gp3_groups[] = {
+	"gpio22",
+};
+static const char * const qdss_gpio_groups[] = {
+	"gpio22", "gpio30", "gpio123", "gpio124",
+};
+static const char * const reserved22_groups[] = {
+	"gpio22",
+};
+static const char * const cci_timer2_groups[] = {
+	"gpio23",
+};
+static const char * const qdss_gpio9_groups[] = {
+	"gpio23", "gpio76",
+};
+static const char * const reserved23_groups[] = {
+	"gpio23",
+};
+static const char * const cci_timer3_groups[] = {
+	"gpio24",
+};
+static const char * const cci_async_groups[] = {
+	"gpio24", "gpio25", "gpio26",
+};
+static const char * const qdss_gpio10_groups[] = {
+	"gpio24", "gpio77",
+};
+static const char * const reserved24_groups[] = {
+	"gpio24",
+};
+static const char * const cci_timer4_groups[] = {
+	"gpio25",
+};
+static const char * const qdss_gpio11_groups[] = {
+	"gpio25", "gpio79",
+};
+static const char * const reserved25_groups[] = {
+	"gpio25",
+};
+static const char * const qdss_gpio5_groups[] = {
+	"gpio18", "gpio122",
+};
+static const char * const reserved18_groups[] = {
+	"gpio18",
+};
+static const char * const qdss_gpio6_groups[] = {
+	"gpio19", "gpio41",
+};
+static const char * const reserved19_groups[] = {
+	"gpio19",
+};
+static const char * const qdss_gpio7_groups[] = {
+	"gpio20", "gpio42",
+};
+static const char * const reserved20_groups[] = {
+	"gpio20",
+};
+static const char * const cci_timer0_groups[] = {
+	"gpio21",
+};
+static const char * const gcc_gp2_groups[] = {
+	"gpio21",
+};
+static const char * const qdss_gpio8_groups[] = {
+	"gpio21", "gpio75",
+};
+static const char * const reserved21_groups[] = {
+	"gpio21",
+};
+static const char * const qdss_gpio12_groups[] = {
+	"gpio26", "gpio80",
+};
+static const char * const JITTER_BIST_groups[] = {
+	"gpio26", "gpio35",
+};
+static const char * const reserved26_groups[] = {
+	"gpio26",
+};
+static const char * const qup2_groups[] = {
+	"gpio27", "gpio28", "gpio29", "gpio30",
+};
+static const char * const qdss_gpio13_groups[] = {
+	"gpio27", "gpio93",
+};
+static const char * const PLL_BIST_groups[] = {
+	"gpio27", "gpio36",
+};
+static const char * const reserved27_groups[] = {
+	"gpio27",
+};
+static const char * const qdss_gpio14_groups[] = {
+	"gpio28", "gpio43",
+};
+static const char * const AGERA_PLL_groups[] = {
+	"gpio28", "gpio37",
+};
+static const char * const reserved28_groups[] = {
+	"gpio28",
+};
+static const char * const phase_flag1_groups[] = {
+	"gpio29",
+};
+static const char * const qdss_gpio15_groups[] = {
+	"gpio29", "gpio44",
+};
+static const char * const atest_tsens_groups[] = {
+	"gpio29",
+};
+static const char * const reserved29_groups[] = {
+	"gpio29",
+};
+static const char * const phase_flag2_groups[] = {
+	"gpio30",
+};
+static const char * const reserved30_groups[] = {
+	"gpio30",
+};
+static const char * const qup11_groups[] = {
+	"gpio31", "gpio32", "gpio33", "gpio34",
+};
+static const char * const qup14_groups[] = {
+	"gpio31", "gpio32", "gpio33", "gpio34",
+};
+static const char * const reserved31_groups[] = {
+	"gpio31",
+};
+static const char * const reserved32_groups[] = {
+	"gpio32",
+};
+static const char * const reserved33_groups[] = {
+	"gpio33",
+};
+static const char * const reserved34_groups[] = {
+	"gpio34",
+};
+static const char * const pci_e0_groups[] = {
+	"gpio35", "gpio36",
+};
+static const char * const QUP_L4_groups[] = {
+	"gpio35", "gpio75",
+};
+static const char * const reserved35_groups[] = {
+	"gpio35",
+};
+static const char * const QUP_L5_groups[] = {
+	"gpio36", "gpio76",
+};
+static const char * const reserved36_groups[] = {
+	"gpio36",
+};
+static const char * const QUP_L6_groups[] = {
+	"gpio37", "gpio77",
+};
+static const char * const reserved37_groups[] = {
+	"gpio37",
+};
+static const char * const usb_phy_groups[] = {
+	"gpio38",
+};
+static const char * const reserved38_groups[] = {
+	"gpio38",
+};
+static const char * const lpass_slimbus_groups[] = {
+	"gpio39",
+};
+static const char * const reserved39_groups[] = {
+	"gpio39",
+};
+static const char * const sd_write_groups[] = {
+	"gpio40",
+};
+static const char * const tsif1_error_groups[] = {
+	"gpio40",
+};
+static const char * const reserved40_groups[] = {
+	"gpio40",
+};
+static const char * const qup3_groups[] = {
+	"gpio41", "gpio42", "gpio43", "gpio44",
+};
+static const char * const reserved41_groups[] = {
+	"gpio41",
+};
+static const char * const reserved42_groups[] = {
+	"gpio42",
+};
+static const char * const reserved43_groups[] = {
+	"gpio43",
+};
+static const char * const reserved44_groups[] = {
+	"gpio44",
+};
+static const char * const bt_reset_groups[] = {
+	"gpio45",
+};
+static const char * const qup6_groups[] = {
+	"gpio45", "gpio46", "gpio47", "gpio48",
+};
+static const char * const reserved45_groups[] = {
+	"gpio45",
+};
+static const char * const reserved46_groups[] = {
+	"gpio46",
+};
+static const char * const reserved47_groups[] = {
+	"gpio47",
+};
+static const char * const reserved124_groups[] = {
+	"gpio124",
+};
+static const char * const reserved125_groups[] = {
+	"gpio125",
+};
+static const char * const reserved126_groups[] = {
+	"gpio126",
+};
+static const char * const reserved127_groups[] = {
+	"gpio127",
+};
+static const char * const reserved128_groups[] = {
+	"gpio128",
+};
+static const char * const reserved129_groups[] = {
+	"gpio129",
+};
+static const char * const qlink_request_groups[] = {
+	"gpio130",
+};
+static const char * const reserved130_groups[] = {
+	"gpio130",
+};
+static const char * const qlink_enable_groups[] = {
+	"gpio131",
+};
+static const char * const reserved131_groups[] = {
+	"gpio131",
+};
+static const char * const reserved132_groups[] = {
+	"gpio132",
+};
+static const char * const reserved133_groups[] = {
+	"gpio133",
+};
+static const char * const reserved134_groups[] = {
+	"gpio134",
+};
+static const char * const pa_indicator_groups[] = {
+	"gpio135",
+};
+static const char * const reserved135_groups[] = {
+	"gpio135",
+};
+static const char * const reserved136_groups[] = {
+	"gpio136",
+};
+static const char * const phase_flag26_groups[] = {
+	"gpio137",
+};
+static const char * const reserved137_groups[] = {
+	"gpio137",
+};
+static const char * const phase_flag27_groups[] = {
+	"gpio138",
+};
+static const char * const reserved138_groups[] = {
+	"gpio138",
+};
+static const char * const phase_flag28_groups[] = {
+	"gpio139",
+};
+static const char * const reserved139_groups[] = {
+	"gpio139",
+};
+static const char * const phase_flag6_groups[] = {
+	"gpio140",
+};
+static const char * const reserved140_groups[] = {
+	"gpio140",
+};
+static const char * const phase_flag29_groups[] = {
+	"gpio141",
+};
+static const char * const reserved141_groups[] = {
+	"gpio141",
+};
+static const char * const phase_flag30_groups[] = {
+	"gpio142",
+};
+static const char * const reserved142_groups[] = {
+	"gpio142",
+};
+static const char * const phase_flag31_groups[] = {
+	"gpio143",
+};
+static const char * const reserved143_groups[] = {
+	"gpio143",
+};
+static const char * const mss_lte_groups[] = {
+	"gpio144", "gpio145",
+};
+static const char * const reserved144_groups[] = {
+	"gpio144",
+};
+static const char * const reserved145_groups[] = {
+	"gpio145",
+};
+static const char * const reserved146_groups[] = {
+	"gpio146",
+};
+static const char * const reserved147_groups[] = {
+	"gpio147",
+};
+static const char * const reserved148_groups[] = {
+	"gpio148",
+};
+static const char * const reserved149_groups[] = {
+	"gpio149", "gpio149",
+};
+static const char * const reserved48_groups[] = {
+	"gpio48",
+};
+static const char * const qup12_groups[] = {
+	"gpio49", "gpio50", "gpio51", "gpio52",
+};
+static const char * const reserved49_groups[] = {
+	"gpio49",
+};
+static const char * const reserved50_groups[] = {
+	"gpio50",
+};
+static const char * const reserved51_groups[] = {
+	"gpio51",
+};
+static const char * const phase_flag16_groups[] = {
+	"gpio52",
+};
+static const char * const reserved52_groups[] = {
+	"gpio52",
+};
+static const char * const qup10_groups[] = {
+	"gpio53", "gpio54", "gpio55", "gpio56",
+};
+static const char * const phase_flag11_groups[] = {
+	"gpio53",
+};
+static const char * const reserved53_groups[] = {
+	"gpio53",
+};
+static const char * const GP_PDM0_groups[] = {
+	"gpio54", "gpio95",
+};
+static const char * const phase_flag12_groups[] = {
+	"gpio54",
+};
+static const char * const wlan1_adc1_groups[] = {
+	"gpio54",
+};
+static const char * const atest_usb13_groups[] = {
+	"gpio54",
+};
+static const char * const ddr_pxi1_groups[] = {
+	"gpio54", "gpio55",
+};
+static const char * const reserved54_groups[] = {
+	"gpio54",
+};
+static const char * const phase_flag13_groups[] = {
+	"gpio55",
+};
+static const char * const wlan1_adc0_groups[] = {
+	"gpio55",
+};
+static const char * const atest_usb12_groups[] = {
+	"gpio55",
+};
+static const char * const reserved55_groups[] = {
+	"gpio55",
+};
+static const char * const phase_flag17_groups[] = {
+	"gpio56",
+};
+static const char * const reserved56_groups[] = {
+	"gpio56",
+};
+static const char * const qua_mi2s_groups[] = {
+	"gpio57",
+};
+static const char * const gcc_gp1_groups[] = {
+	"gpio57", "gpio78",
+};
+static const char * const phase_flag18_groups[] = {
+	"gpio57",
+};
+static const char * const reserved57_groups[] = {
+	"gpio57",
+};
+static const char * const pri_mi2s_groups[] = {
+	"gpio65", "gpio67", "gpio68",
+};
+static const char * const qup8_groups[] = {
+	"gpio65", "gpio66", "gpio67", "gpio68",
+};
+static const char * const wsa_clk_groups[] = {
+	"gpio65",
+};
+static const char * const reserved65_groups[] = {
+	"gpio65",
+};
+static const char * const pri_mi2s_ws_groups[] = {
+	"gpio66",
+};
+static const char * const wsa_data_groups[] = {
+	"gpio66",
+};
+static const char * const reserved66_groups[] = {
+	"gpio66",
+};
+static const char * const wsa_en_groups[] = {
+	"gpio67", "gpio68",
+};
+static const char * const atest_usb2_groups[] = {
+	"gpio67",
+};
+static const char * const reserved67_groups[] = {
+	"gpio67",
+};
+static const char * const atest_usb23_groups[] = {
+	"gpio68",
+};
+static const char * const reserved68_groups[] = {
+	"gpio68",
+};
+static const char * const ter_mi2s_groups[] = {
+	"gpio75", "gpio76", "gpio77", "gpio78",
+};
+static const char * const phase_flag8_groups[] = {
+	"gpio75",
+};
+static const char * const atest_usb22_groups[] = {
+	"gpio75",
+};
+static const char * const reserved75_groups[] = {
+	"gpio75",
+};
+static const char * const phase_flag9_groups[] = {
+	"gpio76",
+};
+static const char * const atest_usb21_groups[] = {
+	"gpio76",
+};
+static const char * const reserved76_groups[] = {
+	"gpio76",
+};
+static const char * const phase_flag4_groups[] = {
+	"gpio77",
+};
+static const char * const atest_usb20_groups[] = {
+	"gpio77",
+};
+static const char * const reserved77_groups[] = {
+	"gpio77",
+};
+static const char * const ssc_irq_groups[] = {
+	"gpio78", "gpio79", "gpio80", "gpio117", "gpio118", "gpio119",
+	"gpio120", "gpio121", "gpio122", "gpio123", "gpio124", "gpio125",
+};
+static const char * const reserved78_groups[] = {
+	"gpio78",
+};
+static const char * const sec_mi2s_groups[] = {
+	"gpio79", "gpio80", "gpio81", "gpio82", "gpio83",
+};
+static const char * const GP_PDM2_groups[] = {
+	"gpio79",
+};
+static const char * const reserved79_groups[] = {
+	"gpio79",
+};
+static const char * const reserved80_groups[] = {
+	"gpio80",
+};
+static const char * const qup15_groups[] = {
+	"gpio81", "gpio82", "gpio83", "gpio84",
+};
+static const char * const reserved81_groups[] = {
+	"gpio81",
+};
+static const char * const reserved82_groups[] = {
+	"gpio82",
+};
+static const char * const reserved83_groups[] = {
+	"gpio83",
+};
+static const char * const reserved84_groups[] = {
+	"gpio84",
+};
+static const char * const qup5_groups[] = {
+	"gpio85", "gpio86", "gpio87", "gpio88",
+};
+static const char * const reserved85_groups[] = {
+	"gpio85",
+};
+static const char * const copy_gp_groups[] = {
+	"gpio86",
+};
+static const char * const reserved86_groups[] = {
+	"gpio86",
+};
+static const char * const reserved87_groups[] = {
+	"gpio87",
+};
+static const char * const reserved88_groups[] = {
+	"gpio88",
+};
+static const char * const tsif1_clk_groups[] = {
+	"gpio89",
+};
+static const char * const qup4_groups[] = {
+	"gpio89", "gpio90", "gpio91", "gpio92",
+};
+static const char * const tgu_ch3_groups[] = {
+	"gpio89",
+};
+static const char * const phase_flag10_groups[] = {
+	"gpio89",
+};
+static const char * const reserved89_groups[] = {
+	"gpio89",
+};
+static const char * const tsif1_en_groups[] = {
+	"gpio90",
+};
+static const char * const mdp_vsync0_groups[] = {
+	"gpio90",
+};
+static const char * const mdp_vsync1_groups[] = {
+	"gpio90",
+};
+static const char * const mdp_vsync2_groups[] = {
+	"gpio90",
+};
+static const char * const mdp_vsync3_groups[] = {
+	"gpio90",
+};
+static const char * const tgu_ch0_groups[] = {
+	"gpio90",
+};
+static const char * const phase_flag0_groups[] = {
+	"gpio90",
+};
+static const char * const reserved90_groups[] = {
+	"gpio90",
+};
+static const char * const tsif1_data_groups[] = {
+	"gpio91",
+};
+static const char * const sdc4_cmd_groups[] = {
+	"gpio91",
+};
+static const char * const tgu_ch1_groups[] = {
+	"gpio91",
+};
+static const char * const reserved91_groups[] = {
+	"gpio91",
+};
+static const char * const tsif2_error_groups[] = {
+	"gpio92",
+};
+static const char * const sdc43_groups[] = {
+	"gpio92",
+};
+static const char * const vfr_1_groups[] = {
+	"gpio92",
+};
+static const char * const tgu_ch2_groups[] = {
+	"gpio92",
+};
+static const char * const reserved92_groups[] = {
+	"gpio92",
+};
+static const char * const tsif2_clk_groups[] = {
+	"gpio93",
+};
+static const char * const sdc4_clk_groups[] = {
+	"gpio93",
+};
+static const char * const qup7_groups[] = {
+	"gpio93", "gpio94", "gpio95", "gpio96",
+};
+static const char * const reserved93_groups[] = {
+	"gpio93",
+};
+static const char * const tsif2_en_groups[] = {
+	"gpio94",
+};
+static const char * const sdc42_groups[] = {
+	"gpio94",
+};
+static const char * const reserved94_groups[] = {
+	"gpio94",
+};
+static const char * const tsif2_data_groups[] = {
+	"gpio95",
+};
+static const char * const sdc41_groups[] = {
+	"gpio95",
+};
+static const char * const reserved95_groups[] = {
+	"gpio95",
+};
+static const char * const tsif2_sync_groups[] = {
+	"gpio96",
+};
+static const char * const sdc40_groups[] = {
+	"gpio96",
+};
+static const char * const phase_flag3_groups[] = {
+	"gpio96",
+};
+static const char * const reserved96_groups[] = {
+	"gpio96",
+};
+static const char * const ldo_en_groups[] = {
+	"gpio97",
+};
+static const char * const reserved97_groups[] = {
+	"gpio97",
+};
+static const char * const ldo_update_groups[] = {
+	"gpio98",
+};
+static const char * const reserved98_groups[] = {
+	"gpio98",
+};
+static const char * const phase_flag14_groups[] = {
+	"gpio99",
+};
+static const char * const prng_rosc_groups[] = {
+	"gpio99", "gpio102",
+};
+static const char * const reserved99_groups[] = {
+	"gpio99",
+};
+static const char * const phase_flag15_groups[] = {
+	"gpio100",
+};
+static const char * const reserved100_groups[] = {
+	"gpio100",
+};
+static const char * const phase_flag5_groups[] = {
+	"gpio101",
+};
+static const char * const reserved101_groups[] = {
+	"gpio101",
+};
+static const char * const pci_e1_groups[] = {
+	"gpio102", "gpio103",
+};
+static const char * const reserved102_groups[] = {
+	"gpio102",
+};
+static const char * const COPY_PHASE_groups[] = {
+	"gpio103",
+};
+static const char * const reserved103_groups[] = {
+	"gpio103",
+};
+static const char * const uim2_data_groups[] = {
+	"gpio105",
+};
+static const char * const qup13_groups[] = {
+	"gpio105", "gpio106", "gpio107", "gpio108",
+};
+static const char * const reserved105_groups[] = {
+	"gpio105",
+};
+static const char * const uim2_clk_groups[] = {
+	"gpio106",
+};
+static const char * const reserved106_groups[] = {
+	"gpio106",
+};
+static const char * const uim2_reset_groups[] = {
+	"gpio107",
+};
+static const char * const reserved107_groups[] = {
+	"gpio107",
+};
+static const char * const uim2_present_groups[] = {
+	"gpio108",
+};
+static const char * const reserved108_groups[] = {
+	"gpio108",
+};
+static const char * const uim1_data_groups[] = {
+	"gpio109",
+};
+static const char * const reserved109_groups[] = {
+	"gpio109",
+};
+static const char * const uim1_clk_groups[] = {
+	"gpio110",
+};
+static const char * const reserved110_groups[] = {
+	"gpio110",
+};
+static const char * const uim1_reset_groups[] = {
+	"gpio111",
+};
+static const char * const reserved111_groups[] = {
+	"gpio111",
+};
+static const char * const uim1_present_groups[] = {
+	"gpio112",
+};
+static const char * const reserved112_groups[] = {
+	"gpio112",
+};
+static const char * const uim_batt_groups[] = {
+	"gpio113",
+};
+static const char * const edp_hot_groups[] = {
+	"gpio113",
+};
+static const char * const reserved113_groups[] = {
+	"gpio113",
+};
+static const char * const NAV_PPS_groups[] = {
+	"gpio114", "gpio114", "gpio115", "gpio115", "gpio128", "gpio128",
+	"gpio129", "gpio129", "gpio143", "gpio143",
+};
+static const char * const GPS_TX_groups[] = {
+	"gpio114", "gpio115", "gpio128", "gpio129", "gpio143", "gpio145",
+};
+static const char * const reserved114_groups[] = {
+	"gpio114",
+};
+static const char * const reserved115_groups[] = {
+	"gpio115",
+};
+static const char * const reserved116_groups[] = {
+	"gpio116",
+};
+static const char * const atest_char_groups[] = {
+	"gpio117",
+};
+static const char * const reserved117_groups[] = {
+	"gpio117",
+};
+static const char * const adsp_ext_groups[] = {
+	"gpio118",
+};
+static const char * const atest_char3_groups[] = {
+	"gpio118",
+};
+static const char * const reserved118_groups[] = {
+	"gpio118",
+};
+static const char * const atest_char2_groups[] = {
+	"gpio119",
+};
+static const char * const reserved119_groups[] = {
+	"gpio119",
+};
+static const char * const atest_char1_groups[] = {
+	"gpio120",
+};
+static const char * const reserved120_groups[] = {
+	"gpio120",
+};
+static const char * const atest_char0_groups[] = {
+	"gpio121",
+};
+static const char * const reserved121_groups[] = {
+	"gpio121",
+};
+static const char * const reserved122_groups[] = {
+	"gpio122",
+};
+static const char * const reserved123_groups[] = {
+	"gpio123",
+};
+
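+/*
+ * FUNCTION() binds each msm_mux_* identifier to its *_groups list
+ * defined above.
+ */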
+static const struct msm_function sdmbat_functions[] = {
+	FUNCTION(qup0),
+	FUNCTION(gpio),
+	FUNCTION(reserved0),
+	FUNCTION(reserved1),
+	FUNCTION(reserved2),
+	FUNCTION(reserved3),
+	FUNCTION(qup9),
+	FUNCTION(qdss_cti),
+	FUNCTION(reserved4),
+	FUNCTION(reserved5),
+	FUNCTION(ddr_pxi0),
+	FUNCTION(reserved6),
+	FUNCTION(ddr_bist),
+	FUNCTION(atest_tsens2),
+	FUNCTION(vsense_trigger),
+	FUNCTION(atest_usb1),
+	FUNCTION(reserved7),
+	FUNCTION(qup_l4),
+	FUNCTION(GP_PDM1),
+	FUNCTION(reserved8),
+	FUNCTION(qup_l5),
+	FUNCTION(reserved9),
+	FUNCTION(mdp_vsync),
+	FUNCTION(qup_l6),
+	FUNCTION(wlan2_adc1),
+	FUNCTION(atest_usb11),
+	FUNCTION(ddr_pxi2),
+	FUNCTION(reserved10),
+	FUNCTION(edp_lcd),
+	FUNCTION(dbg_out),
+	FUNCTION(wlan2_adc0),
+	FUNCTION(atest_usb10),
+	FUNCTION(reserved11),
+	FUNCTION(m_voc),
+	FUNCTION(tsif1_sync),
+	FUNCTION(ddr_pxi3),
+	FUNCTION(reserved12),
+	FUNCTION(cam_mclk),
+	FUNCTION(pll_bypassnl),
+	FUNCTION(qdss_gpio0),
+	FUNCTION(reserved13),
+	FUNCTION(pll_reset),
+	FUNCTION(qdss_gpio1),
+	FUNCTION(reserved14),
+	FUNCTION(qdss_gpio2),
+	FUNCTION(reserved15),
+	FUNCTION(qdss_gpio3),
+	FUNCTION(reserved16),
+	FUNCTION(cci_i2c),
+	FUNCTION(qup1),
+	FUNCTION(qdss_gpio4),
+	FUNCTION(reserved17),
+	FUNCTION(cci_timer1),
+	FUNCTION(gcc_gp3),
+	FUNCTION(qdss_gpio),
+	FUNCTION(reserved22),
+	FUNCTION(cci_timer2),
+	FUNCTION(qdss_gpio9),
+	FUNCTION(reserved23),
+	FUNCTION(cci_timer3),
+	FUNCTION(cci_async),
+	FUNCTION(qdss_gpio10),
+	FUNCTION(reserved24),
+	FUNCTION(cci_timer4),
+	FUNCTION(qdss_gpio11),
+	FUNCTION(reserved25),
+	FUNCTION(qdss_gpio5),
+	FUNCTION(reserved18),
+	FUNCTION(qdss_gpio6),
+	FUNCTION(reserved19),
+	FUNCTION(qdss_gpio7),
+	FUNCTION(reserved20),
+	FUNCTION(cci_timer0),
+	FUNCTION(gcc_gp2),
+	FUNCTION(qdss_gpio8),
+	FUNCTION(reserved21),
+	FUNCTION(qdss_gpio12),
+	FUNCTION(JITTER_BIST),
+	FUNCTION(reserved26),
+	FUNCTION(qup2),
+	FUNCTION(qdss_gpio13),
+	FUNCTION(PLL_BIST),
+	FUNCTION(reserved27),
+	FUNCTION(qdss_gpio14),
+	FUNCTION(AGERA_PLL),
+	FUNCTION(reserved28),
+	FUNCTION(phase_flag1),
+	FUNCTION(qdss_gpio15),
+	FUNCTION(atest_tsens),
+	FUNCTION(reserved29),
+	FUNCTION(phase_flag2),
+	FUNCTION(reserved30),
+	FUNCTION(qup11),
+	FUNCTION(qup14),
+	FUNCTION(reserved31),
+	FUNCTION(reserved32),
+	FUNCTION(reserved33),
+	FUNCTION(reserved34),
+	FUNCTION(pci_e0),
+	FUNCTION(QUP_L4),
+	FUNCTION(reserved35),
+	FUNCTION(QUP_L5),
+	FUNCTION(reserved36),
+	FUNCTION(QUP_L6),
+	FUNCTION(reserved37),
+	FUNCTION(usb_phy),
+	FUNCTION(reserved38),
+	FUNCTION(lpass_slimbus),
+	FUNCTION(reserved39),
+	FUNCTION(sd_write),
+	FUNCTION(tsif1_error),
+	FUNCTION(reserved40),
+	FUNCTION(qup3),
+	FUNCTION(reserved41),
+	FUNCTION(reserved42),
+	FUNCTION(reserved43),
+	FUNCTION(reserved44),
+	FUNCTION(bt_reset),
+	FUNCTION(qup6),
+	FUNCTION(reserved45),
+	FUNCTION(reserved46),
+	FUNCTION(reserved47),
+	FUNCTION(reserved124),
+	FUNCTION(reserved125),
+	FUNCTION(reserved126),
+	FUNCTION(reserved127),
+	FUNCTION(reserved128),
+	FUNCTION(reserved129),
+	FUNCTION(qlink_request),
+	FUNCTION(reserved130),
+	FUNCTION(qlink_enable),
+	FUNCTION(reserved131),
+	FUNCTION(reserved132),
+	FUNCTION(reserved133),
+	FUNCTION(reserved134),
+	FUNCTION(pa_indicator),
+	FUNCTION(reserved135),
+	FUNCTION(reserved136),
+	FUNCTION(phase_flag26),
+	FUNCTION(reserved137),
+	FUNCTION(phase_flag27),
+	FUNCTION(reserved138),
+	FUNCTION(phase_flag28),
+	FUNCTION(reserved139),
+	FUNCTION(phase_flag6),
+	FUNCTION(reserved140),
+	FUNCTION(phase_flag29),
+	FUNCTION(reserved141),
+	FUNCTION(phase_flag30),
+	FUNCTION(reserved142),
+	FUNCTION(phase_flag31),
+	FUNCTION(reserved143),
+	FUNCTION(mss_lte),
+	FUNCTION(reserved144),
+	FUNCTION(reserved145),
+	FUNCTION(reserved146),
+	FUNCTION(reserved147),
+	FUNCTION(reserved148),
+	FUNCTION(reserved149),
+	FUNCTION(reserved48),
+	FUNCTION(qup12),
+	FUNCTION(reserved49),
+	FUNCTION(reserved50),
+	FUNCTION(reserved51),
+	FUNCTION(phase_flag16),
+	FUNCTION(reserved52),
+	FUNCTION(qup10),
+	FUNCTION(phase_flag11),
+	FUNCTION(reserved53),
+	FUNCTION(GP_PDM0),
+	FUNCTION(phase_flag12),
+	FUNCTION(wlan1_adc1),
+	FUNCTION(atest_usb13),
+	FUNCTION(ddr_pxi1),
+	FUNCTION(reserved54),
+	FUNCTION(phase_flag13),
+	FUNCTION(wlan1_adc0),
+	FUNCTION(atest_usb12),
+	FUNCTION(reserved55),
+	FUNCTION(phase_flag17),
+	FUNCTION(reserved56),
+	FUNCTION(qua_mi2s),
+	FUNCTION(gcc_gp1),
+	FUNCTION(phase_flag18),
+	FUNCTION(reserved57),
+	FUNCTION(pri_mi2s),
+	FUNCTION(qup8),
+	FUNCTION(wsa_clk),
+	FUNCTION(reserved65),
+	FUNCTION(pri_mi2s_ws),
+	FUNCTION(wsa_data),
+	FUNCTION(reserved66),
+	FUNCTION(wsa_en),
+	FUNCTION(atest_usb2),
+	FUNCTION(reserved67),
+	FUNCTION(atest_usb23),
+	FUNCTION(reserved68),
+	FUNCTION(ter_mi2s),
+	FUNCTION(phase_flag8),
+	FUNCTION(atest_usb22),
+	FUNCTION(reserved75),
+	FUNCTION(phase_flag9),
+	FUNCTION(atest_usb21),
+	FUNCTION(reserved76),
+	FUNCTION(phase_flag4),
+	FUNCTION(atest_usb20),
+	FUNCTION(reserved77),
+	FUNCTION(ssc_irq),
+	FUNCTION(reserved78),
+	FUNCTION(sec_mi2s),
+	FUNCTION(GP_PDM2),
+	FUNCTION(reserved79),
+	FUNCTION(reserved80),
+	FUNCTION(qup15),
+	FUNCTION(reserved81),
+	FUNCTION(reserved82),
+	FUNCTION(reserved83),
+	FUNCTION(reserved84),
+	FUNCTION(qup5),
+	FUNCTION(reserved85),
+	FUNCTION(copy_gp),
+	FUNCTION(reserved86),
+	FUNCTION(reserved87),
+	FUNCTION(reserved88),
+	FUNCTION(tsif1_clk),
+	FUNCTION(qup4),
+	FUNCTION(tgu_ch3),
+	FUNCTION(phase_flag10),
+	FUNCTION(reserved89),
+	FUNCTION(tsif1_en),
+	FUNCTION(mdp_vsync0),
+	FUNCTION(mdp_vsync1),
+	FUNCTION(mdp_vsync2),
+	FUNCTION(mdp_vsync3),
+	FUNCTION(tgu_ch0),
+	FUNCTION(phase_flag0),
+	FUNCTION(reserved90),
+	FUNCTION(tsif1_data),
+	FUNCTION(sdc4_cmd),
+	FUNCTION(tgu_ch1),
+	FUNCTION(reserved91),
+	FUNCTION(tsif2_error),
+	FUNCTION(sdc43),
+	FUNCTION(vfr_1),
+	FUNCTION(tgu_ch2),
+	FUNCTION(reserved92),
+	FUNCTION(tsif2_clk),
+	FUNCTION(sdc4_clk),
+	FUNCTION(qup7),
+	FUNCTION(reserved93),
+	FUNCTION(tsif2_en),
+	FUNCTION(sdc42),
+	FUNCTION(reserved94),
+	FUNCTION(tsif2_data),
+	FUNCTION(sdc41),
+	FUNCTION(reserved95),
+	FUNCTION(tsif2_sync),
+	FUNCTION(sdc40),
+	FUNCTION(phase_flag3),
+	FUNCTION(reserved96),
+	FUNCTION(ldo_en),
+	FUNCTION(reserved97),
+	FUNCTION(ldo_update),
+	FUNCTION(reserved98),
+	FUNCTION(phase_flag14),
+	FUNCTION(prng_rosc),
+	FUNCTION(reserved99),
+	FUNCTION(phase_flag15),
+	FUNCTION(reserved100),
+	FUNCTION(phase_flag5),
+	FUNCTION(reserved101),
+	FUNCTION(pci_e1),
+	FUNCTION(reserved102),
+	FUNCTION(COPY_PHASE),
+	FUNCTION(reserved103),
+	FUNCTION(uim2_data),
+	FUNCTION(qup13),
+	FUNCTION(reserved105),
+	FUNCTION(uim2_clk),
+	FUNCTION(reserved106),
+	FUNCTION(uim2_reset),
+	FUNCTION(reserved107),
+	FUNCTION(uim2_present),
+	FUNCTION(reserved108),
+	FUNCTION(uim1_data),
+	FUNCTION(reserved109),
+	FUNCTION(uim1_clk),
+	FUNCTION(reserved110),
+	FUNCTION(uim1_reset),
+	FUNCTION(reserved111),
+	FUNCTION(uim1_present),
+	FUNCTION(reserved112),
+	FUNCTION(uim_batt),
+	FUNCTION(edp_hot),
+	FUNCTION(reserved113),
+	FUNCTION(NAV_PPS),
+	FUNCTION(GPS_TX),
+	FUNCTION(reserved114),
+	FUNCTION(reserved115),
+	FUNCTION(reserved116),
+	FUNCTION(atest_char),
+	FUNCTION(reserved117),
+	FUNCTION(adsp_ext),
+	FUNCTION(atest_char3),
+	FUNCTION(reserved118),
+	FUNCTION(atest_char2),
+	FUNCTION(reserved119),
+	FUNCTION(atest_char1),
+	FUNCTION(reserved120),
+	FUNCTION(atest_char0),
+	FUNCTION(reserved121),
+	FUNCTION(reserved122),
+	FUNCTION(reserved123),
+};
+
+static const struct msm_pingroup sdmbat_groups[] = {
+	PINGROUP(0, SOUTH, qup0, NA, reserved0, NA, NA, NA, NA, NA, NA),
+	PINGROUP(1, SOUTH, qup0, NA, reserved1, NA, NA, NA, NA, NA, NA),
+	PINGROUP(2, SOUTH, qup0, NA, reserved2, NA, NA, NA, NA, NA, NA),
+	PINGROUP(3, SOUTH, qup0, NA, reserved3, NA, NA, NA, NA, NA, NA),
+	PINGROUP(4, NORTH, qup9, qdss_cti, reserved4, NA, NA, NA, NA, NA, NA),
+	PINGROUP(5, NORTH, qup9, qdss_cti, reserved5, NA, NA, NA, NA, NA, NA),
+	PINGROUP(6, NORTH, qup9, NA, ddr_pxi0, reserved6, NA, NA, NA, NA, NA),
+	PINGROUP(7, NORTH, qup9, ddr_bist, NA, atest_tsens2, vsense_trigger,
+		 atest_usb1, ddr_pxi0, reserved7, NA),
+	PINGROUP(8, WEST, qup_l4, GP_PDM1, ddr_bist, NA, reserved8, NA, NA, NA,
+		 NA),
+	PINGROUP(9, WEST, qup_l5, ddr_bist, reserved9, NA, NA, NA, NA, NA, NA),
+	PINGROUP(10, NORTH, mdp_vsync, qup_l6, ddr_bist, wlan2_adc1,
+		 atest_usb11, ddr_pxi2, reserved10, NA, NA),
+	PINGROUP(11, NORTH, mdp_vsync, edp_lcd, dbg_out, wlan2_adc0,
+		 atest_usb10, ddr_pxi2, reserved11, NA, NA),
+	PINGROUP(12, SOUTH, mdp_vsync, m_voc, tsif1_sync, ddr_pxi3, reserved12,
+		 NA, NA, NA, NA),
+	PINGROUP(13, WEST, cam_mclk, pll_bypassnl, qdss_gpio0, ddr_pxi3,
+		 reserved13, NA, NA, NA, NA),
+	PINGROUP(14, WEST, cam_mclk, pll_reset, qdss_gpio1, reserved14, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(15, WEST, cam_mclk, qdss_gpio2, reserved15, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(16, WEST, cam_mclk, qdss_gpio3, reserved16, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(17, WEST, cci_i2c, qup1, qdss_gpio4, reserved17, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(18, WEST, cci_i2c, qup1, NA, qdss_gpio5, reserved18, NA, NA,
+		 NA, NA),
+	PINGROUP(19, WEST, cci_i2c, qup1, NA, qdss_gpio6, reserved19, NA, NA,
+		 NA, NA),
+	PINGROUP(20, WEST, cci_i2c, qup1, NA, qdss_gpio7, reserved20, NA, NA,
+		 NA, NA),
+	PINGROUP(21, WEST, cci_timer0, gcc_gp2, qdss_gpio8, NA, reserved21, NA,
+		 NA, NA, NA),
+	PINGROUP(22, WEST, cci_timer1, gcc_gp3, qdss_gpio, NA, reserved22, NA,
+		 NA, NA, NA),
+	PINGROUP(23, WEST, cci_timer2, qdss_gpio9, NA, reserved23, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(24, WEST, cci_timer3, cci_async, qdss_gpio10, reserved24, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(25, WEST, cci_timer4, cci_async, qdss_gpio11, NA, reserved25,
+		 NA, NA, NA, NA),
+	PINGROUP(26, WEST, cci_async, qdss_gpio12, JITTER_BIST, NA, reserved26,
+		 NA, NA, NA, NA),
+	PINGROUP(27, WEST, qup2, qdss_gpio13, PLL_BIST, NA, reserved27, NA, NA,
+		 NA, NA),
+	PINGROUP(28, WEST, qup2, qdss_gpio14, AGERA_PLL, NA, reserved28, NA,
+		 NA, NA, NA),
+	PINGROUP(29, WEST, qup2, NA, phase_flag1, qdss_gpio15, atest_tsens,
+		 reserved29, NA, NA, NA),
+	PINGROUP(30, WEST, qup2, phase_flag2, qdss_gpio, reserved30, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(31, WEST, qup11, qup14, reserved31, NA, NA, NA, NA, NA, NA),
+	PINGROUP(32, WEST, qup11, qup14, NA, reserved32, NA, NA, NA, NA, NA),
+	PINGROUP(33, WEST, qup11, qup14, NA, reserved33, NA, NA, NA, NA, NA),
+	PINGROUP(34, WEST, qup11, qup14, NA, reserved34, NA, NA, NA, NA, NA),
+	PINGROUP(35, NORTH, pci_e0, QUP_L4, JITTER_BIST, NA, reserved35, NA,
+		 NA, NA, NA),
+	PINGROUP(36, NORTH, pci_e0, QUP_L5, PLL_BIST, NA, reserved36, NA, NA,
+		 NA, NA),
+	PINGROUP(37, NORTH, QUP_L6, AGERA_PLL, NA, reserved37, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(38, NORTH, usb_phy, NA, reserved38, NA, NA, NA, NA, NA, NA),
+	PINGROUP(39, NORTH, lpass_slimbus, NA, reserved39, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(40, NORTH, sd_write, tsif1_error, NA, reserved40, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(41, SOUTH, qup3, NA, qdss_gpio6, reserved41, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(42, SOUTH, qup3, NA, qdss_gpio7, reserved42, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(43, SOUTH, qup3, NA, qdss_gpio14, reserved43, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(44, SOUTH, qup3, NA, qdss_gpio15, reserved44, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(45, SOUTH, qup6, NA, reserved45, NA, NA, NA, NA, NA, NA),
+	PINGROUP(46, SOUTH, qup6, NA, reserved46, NA, NA, NA, NA, NA, NA),
+	PINGROUP(47, SOUTH, qup6, reserved47, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(48, SOUTH, qup6, reserved48, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(49, NORTH, qup12, reserved49, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(50, NORTH, qup12, reserved50, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(51, NORTH, qup12, qdss_cti, reserved51, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(52, NORTH, qup12, phase_flag16, qdss_cti, reserved52, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(53, NORTH, qup10, phase_flag11, reserved53, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(54, NORTH, qup10, GP_PDM0, phase_flag12, NA, wlan1_adc1,
+		 atest_usb13, ddr_pxi1, reserved54, NA),
+	PINGROUP(55, NORTH, qup10, phase_flag13, NA, wlan1_adc0, atest_usb12,
+		 ddr_pxi1, reserved55, NA, NA),
+	PINGROUP(56, NORTH, qup10, phase_flag17, reserved56, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(57, NORTH, qua_mi2s, gcc_gp1, phase_flag18, reserved57, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(65, NORTH, pri_mi2s, qup8, wsa_clk, NA, reserved65, NA, NA,
+		 NA, NA),
+	PINGROUP(66, NORTH, pri_mi2s_ws, qup8, wsa_data, GP_PDM1, NA,
+		 reserved66, NA, NA, NA),
+	PINGROUP(67, NORTH, pri_mi2s, qup8, NA, atest_usb2, reserved67, NA, NA,
+		 NA, NA),
+	PINGROUP(68, NORTH, pri_mi2s, qup8, NA, atest_usb23, reserved68, NA,
+		 NA, NA, NA),
+	PINGROUP(75, NORTH, ter_mi2s, phase_flag8, qdss_gpio8, atest_usb22,
+		 QUP_L4, reserved75, NA, NA, NA),
+	PINGROUP(76, NORTH, ter_mi2s, phase_flag9, qdss_gpio9, atest_usb21,
+		 QUP_L5, reserved76, NA, NA, NA),
+	PINGROUP(77, NORTH, ter_mi2s, phase_flag4, qdss_gpio10, atest_usb20,
+		 QUP_L6, reserved77, NA, NA, NA),
+	PINGROUP(78, NORTH, ter_mi2s, gcc_gp1, reserved78, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(79, NORTH, sec_mi2s, GP_PDM2, NA, qdss_gpio11, reserved79, NA,
+		 NA, NA, NA),
+	PINGROUP(80, NORTH, sec_mi2s, NA, qdss_gpio12, reserved80, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(81, NORTH, sec_mi2s, qup15, NA, reserved81, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(82, NORTH, sec_mi2s, qup15, NA, reserved82, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(83, NORTH, sec_mi2s, qup15, NA, reserved83, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(84, NORTH, qup15, NA, reserved84, NA, NA, NA, NA, NA, NA),
+	PINGROUP(85, SOUTH, qup5, NA, reserved85, NA, NA, NA, NA, NA, NA),
+	PINGROUP(86, SOUTH, qup5, copy_gp, NA, reserved86, NA, NA, NA, NA, NA),
+	PINGROUP(87, SOUTH, qup5, NA, reserved87, NA, NA, NA, NA, NA, NA),
+	PINGROUP(88, SOUTH, qup5, NA, reserved88, NA, NA, NA, NA, NA, NA),
+	PINGROUP(89, SOUTH, tsif1_clk, qup4, tgu_ch3, phase_flag10, reserved89,
+		 NA, NA, NA, NA),
+	PINGROUP(90, SOUTH, tsif1_en, mdp_vsync0, qup4, mdp_vsync1, mdp_vsync2,
+		 mdp_vsync3, tgu_ch0, phase_flag0, qdss_cti),
+	PINGROUP(91, SOUTH, tsif1_data, sdc4_cmd, qup4, tgu_ch1, NA, qdss_cti,
+		 reserved91, NA, NA),
+	PINGROUP(92, SOUTH, tsif2_error, sdc43, qup4, vfr_1, tgu_ch2, NA,
+		 reserved92, NA, NA),
+	PINGROUP(93, SOUTH, tsif2_clk, sdc4_clk, qup7, NA, qdss_gpio13,
+		 reserved93, NA, NA, NA),
+	PINGROUP(94, SOUTH, tsif2_en, sdc42, qup7, NA, reserved94, NA, NA, NA,
+		 NA),
+	PINGROUP(95, SOUTH, tsif2_data, sdc41, qup7, GP_PDM0, NA, reserved95,
+		 NA, NA, NA),
+	PINGROUP(96, SOUTH, tsif2_sync, sdc40, qup7, phase_flag3, reserved96,
+		 NA, NA, NA, NA),
+	PINGROUP(97, WEST, NA, NA, mdp_vsync, ldo_en, reserved97, NA, NA, NA,
+		 NA),
+	PINGROUP(98, WEST, NA, mdp_vsync, ldo_update, reserved98, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(99, NORTH, phase_flag14, prng_rosc, reserved99, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(100, WEST, phase_flag15, reserved100, NA, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(101, WEST, NA, phase_flag5, reserved101, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(102, WEST, pci_e1, prng_rosc, reserved102, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(103, WEST, pci_e1, COPY_PHASE, reserved103, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(105, NORTH, uim2_data, qup13, qup_l4, NA, reserved105, NA, NA,
+		 NA, NA),
+	PINGROUP(106, NORTH, uim2_clk, qup13, qup_l5, NA, reserved106, NA, NA,
+		 NA, NA),
+	PINGROUP(107, NORTH, uim2_reset, qup13, qup_l6, reserved107, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(108, NORTH, uim2_present, qup13, reserved108, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(109, NORTH, uim1_data, reserved109, NA, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(110, NORTH, uim1_clk, reserved110, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(111, NORTH, uim1_reset, reserved111, NA, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(112, NORTH, uim1_present, reserved112, NA, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(113, NORTH, uim_batt, edp_hot, reserved113, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(114, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, reserved114, NA,
+		 NA, NA),
+	PINGROUP(115, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, reserved115, NA,
+		 NA, NA),
+	PINGROUP(116, SOUTH, NA, reserved116, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(117, NORTH, NA, qdss_gpio0, atest_char, reserved117, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(118, NORTH, adsp_ext, NA, qdss_gpio1, atest_char3,
+		 reserved118, NA, NA, NA, NA),
+	PINGROUP(119, NORTH, NA, qdss_gpio2, atest_char2, reserved119, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(120, NORTH, NA, qdss_gpio3, atest_char1, reserved120, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(121, NORTH, NA, qdss_gpio4, atest_char0, reserved121, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(122, NORTH, NA, qdss_gpio5, reserved122, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(123, NORTH, qup_l4, NA, qdss_gpio, reserved123, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(124, NORTH, qup_l5, NA, qdss_gpio, reserved124, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(125, NORTH, qup_l6, NA, reserved125, NA, NA, NA, NA, NA, NA),
+	PINGROUP(126, NORTH, NA, reserved126, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(127, WEST, NA, NA, reserved127, NA, NA, NA, NA, NA, NA),
+	PINGROUP(128, WEST, NAV_PPS, NAV_PPS, GPS_TX, NA, reserved128, NA, NA,
+		 NA, NA),
+	PINGROUP(129, WEST, NAV_PPS, NAV_PPS, GPS_TX, NA, reserved129, NA, NA,
+		 NA, NA),
+	PINGROUP(130, WEST, qlink_request, NA, reserved130, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(131, WEST, qlink_enable, NA, reserved131, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(132, WEST, NA, NA, reserved132, NA, NA, NA, NA, NA, NA),
+	PINGROUP(133, NORTH, NA, reserved133, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(134, NORTH, NA, reserved134, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(135, WEST, NA, pa_indicator, NA, reserved135, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(136, WEST, NA, NA, reserved136, NA, NA, NA, NA, NA, NA),
+	PINGROUP(137, WEST, NA, NA, phase_flag26, reserved137, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(138, WEST, NA, NA, phase_flag27, reserved138, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(139, WEST, NA, phase_flag28, reserved139, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(140, WEST, NA, NA, phase_flag6, reserved140, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(141, WEST, NA, phase_flag29, reserved141, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(142, WEST, NA, phase_flag30, reserved142, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(143, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, phase_flag31,
+		 reserved143, NA, NA, NA),
+	PINGROUP(144, SOUTH, mss_lte, reserved144, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(145, SOUTH, mss_lte, GPS_TX, reserved145, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(146, WEST, NA, NA, reserved146, NA, NA, NA, NA, NA, NA),
+	PINGROUP(147, WEST, NA, NA, reserved147, NA, NA, NA, NA, NA, NA),
+	PINGROUP(148, WEST, NA, reserved148, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(149, WEST, NA, reserved149, NA, NA, NA, NA, NA, NA, NA),
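+	/*
+	 * SDC pin groups: the arguments after the group name are assumed to
+	 * be the ctl register offset and the bit offsets of the pull and
+	 * drive-strength fields within that register.
+	 */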
+	SDC_QDSD_PINGROUP(sdc2_clk, 0x59a000, 14, 6),
+	SDC_QDSD_PINGROUP(sdc2_cmd, 0x59a000, 11, 3),
+	SDC_QDSD_PINGROUP(sdc2_data, 0x59a000, 9, 0),
+};
+
+static const struct msm_pinctrl_soc_data sdmbat_pinctrl = {
+	.pins = sdmbat_pins,
+	.npins = ARRAY_SIZE(sdmbat_pins),
+	.functions = sdmbat_functions,
+	.nfunctions = ARRAY_SIZE(sdmbat_functions),
+	.groups = sdmbat_groups,
+	.ngroups = ARRAY_SIZE(sdmbat_groups),
+	.ngpios = 136,
+};
+
+static int sdmbat_pinctrl_probe(struct platform_device *pdev)
+{
+	return msm_pinctrl_probe(pdev, &sdmbat_pinctrl);
+}
+
+static const struct of_device_id sdmbat_pinctrl_of_match[] = {
+	{ .compatible = "qcom,sdmbat-pinctrl", },
+	{ },
+};
+
+static struct platform_driver sdmbat_pinctrl_driver = {
+	.driver = {
+		.name = "sdmbat-pinctrl",
+		.owner = THIS_MODULE,
+		.of_match_table = sdmbat_pinctrl_of_match,
+	},
+	.probe = sdmbat_pinctrl_probe,
+	.remove = msm_pinctrl_remove,
+};
+
+static int __init sdmbat_pinctrl_init(void)
+{
+	return platform_driver_register(&sdmbat_pinctrl_driver);
+}
+arch_initcall(sdmbat_pinctrl_init);
+
+static void __exit sdmbat_pinctrl_exit(void)
+{
+	platform_driver_unregister(&sdmbat_pinctrl_driver);
+}
+module_exit(sdmbat_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI sdmbat pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, sdmbat_pinctrl_of_match);
diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig
new file mode 100644
index 0000000..b7d040e
--- /dev/null
+++ b/drivers/slimbus/Kconfig
@@ -0,0 +1,32 @@
+#
+# SLIMBUS driver configuration
+#
+menuconfig SLIMBUS
+	bool "Slimbus support"
+	depends on HAS_IOMEM
+	help
+	  SLIMbus is a standard interface between the baseband and
+	  application processors and peripheral components in mobile
+	  terminals.
+
+if SLIMBUS
+config SLIMBUS_MSM_CTRL
+	tristate "QTI Slimbus Master Component"
+	default n
+	help
+	  Select driver for Qualcomm Technologies Inc. (QTI) Slimbus
+	  Master Component. This driver is responsible for configuring
+	  SLIMbus, administering the components on the bus, and performing
+	  dynamic channel allocation.
+
+config SLIMBUS_MSM_NGD
+	tristate "QTI Slimbus Satellite Component"
+	help
+	  Select driver for Qualcomm Technologies Inc. (QTI) Slimbus
+	  Satellite Component. This is a light-weight slimbus controller
+	  driver that communicates with slave HW directly over the bus
+	  using the messaging interface, and with the master component
+	  residing on the ADSP for bandwidth and data-channel
+	  management.
+
+endif
diff --git a/drivers/slimbus/Makefile b/drivers/slimbus/Makefile
new file mode 100644
index 0000000..45d6e6e
--- /dev/null
+++ b/drivers/slimbus/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for kernel slimbus framework.
+#
+obj-$(CONFIG_SLIMBUS)			+= slimbus.o
+obj-$(CONFIG_SLIMBUS_MSM_CTRL)		+= slim-msm.o slim-msm-ctrl.o
+obj-$(CONFIG_SLIMBUS_MSM_NGD)		+= slim-msm.o slim-msm-ngd.o
diff --git a/drivers/slimbus/slim-msm-ctrl.c b/drivers/slimbus/slim-msm-ctrl.c
new file mode 100644
index 0000000..3f99b2b
--- /dev/null
+++ b/drivers/slimbus/slim-msm-ctrl.c
@@ -0,0 +1,1687 @@
+/* Copyright (c) 2011-2015, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slimbus/slimbus.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_slimbus.h>
+#include <linux/msm-sps.h>
+#include <linux/qdsp6v2/apr.h>
+#include "slim-msm.h"
+
+#define MSM_SLIM_NAME	"msm_slim_ctrl"
+#define SLIM_ROOT_FREQ 24576000
+
+#define QC_MSM_DEVS	5
+
+/* Manager registers */
+enum mgr_reg {
+	MGR_CFG		= 0x200,
+	MGR_STATUS	= 0x204,
+	MGR_RX_MSGQ_CFG	= 0x208,
+	MGR_INT_EN	= 0x210,
+	MGR_INT_STAT	= 0x214,
+	MGR_INT_CLR	= 0x218,
+	MGR_TX_MSG	= 0x230,
+	MGR_RX_MSG	= 0x270,
+	MGR_IE_STAT	= 0x2F0,
+	MGR_VE_STAT	= 0x300,
+};
+
+enum msg_cfg {
+	MGR_CFG_ENABLE		= 1,
+	MGR_CFG_RX_MSGQ_EN	= 1 << 1,
+	MGR_CFG_TX_MSGQ_EN_HIGH	= 1 << 2,
+	MGR_CFG_TX_MSGQ_EN_LOW	= 1 << 3,
+};
+/* Message queue types */
+enum msm_slim_msgq_type {
+	MSGQ_RX		= 0,
+	MSGQ_TX_LOW	= 1,
+	MSGQ_TX_HIGH	= 2,
+};
+/* Framer registers */
+enum frm_reg {
+	FRM_CFG		= 0x400,
+	FRM_STAT	= 0x404,
+	FRM_INT_EN	= 0x410,
+	FRM_INT_STAT	= 0x414,
+	FRM_INT_CLR	= 0x418,
+	FRM_WAKEUP	= 0x41C,
+	FRM_CLKCTL_DONE	= 0x420,
+	FRM_IE_STAT	= 0x430,
+	FRM_VE_STAT	= 0x440,
+};
+
+/* Interface registers */
+enum intf_reg {
+	INTF_CFG	= 0x600,
+	INTF_STAT	= 0x604,
+	INTF_INT_EN	= 0x610,
+	INTF_INT_STAT	= 0x614,
+	INTF_INT_CLR	= 0x618,
+	INTF_IE_STAT	= 0x630,
+	INTF_VE_STAT	= 0x640,
+};
+
+enum mgr_intr {
+	MGR_INT_RECFG_DONE	= 1 << 24,
+	MGR_INT_TX_NACKED_2	= 1 << 25,
+	MGR_INT_MSG_BUF_CONTE	= 1 << 26,
+	MGR_INT_RX_MSG_RCVD	= 1 << 30,
+	MGR_INT_TX_MSG_SENT	= 1 << 31,
+};
+
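+/*
+ * Bit offsets of the fields within FRM_CFG; these values are used as
+ * shift amounts when the framer is programmed in msm_slim_probe().
+ */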
+enum frm_cfg {
+	FRM_ACTIVE	= 1,
+	CLK_GEAR	= 7,
+	ROOT_FREQ	= 11,
+	REF_CLK_GEAR	= 15,
+	INTR_WAKE	= 19,
+};
+
+static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev);
+
+static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
+{
+	struct msm_slim_ctrl *dev = sat->dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sat->lock, flags);
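+	/*
+	 * Circular message queue: one slot is kept empty so that a full
+	 * queue (tail + 1 == head) can be told apart from an empty one
+	 * (tail == head, see msm_sat_dequeue()).
+	 */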
+	if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
+		spin_unlock_irqrestore(&sat->lock, flags);
+		dev_err(dev->dev, "SAT QUEUE full!");
+		return -EXFULL;
+	}
+	memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
+	sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
+	spin_unlock_irqrestore(&sat->lock, flags);
+	return 0;
+}
+
+static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sat->lock, flags);
+	if (sat->stail == sat->shead) {
+		spin_unlock_irqrestore(&sat->lock, flags);
+		return -ENODATA;
+	}
+	memcpy(buf, sat->sat_msgs[sat->shead], 40);
+	sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
+	spin_unlock_irqrestore(&sat->lock, flags);
+	return 0;
+}
+
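+/*
+ * Unpack the 6-byte SLIMbus enumeration address from the two received
+ * message words: e_addr[0..3] come from buffer[1], most-significant
+ * byte first, and e_addr[4..5] from the top two bytes of buffer[0].
+ */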
+static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
+{
+	e_addr[0] = (buffer[1] >> 24) & 0xff;
+	e_addr[1] = (buffer[1] >> 16) & 0xff;
+	e_addr[2] = (buffer[1] >> 8) & 0xff;
+	e_addr[3] = buffer[1] & 0xff;
+	e_addr[4] = (buffer[0] >> 24) & 0xff;
+	e_addr[5] = (buffer[0] >> 16) & 0xff;
+}
+
+static bool msm_is_sat_dev(u8 *e_addr)
+{
+	if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
+		e_addr[2] != QC_CHIPID_SL &&
+		(e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
+		return true;
+	return false;
+}
+
+static struct msm_slim_sat *addr_to_sat(struct msm_slim_ctrl *dev, u8 laddr)
+{
+	struct msm_slim_sat *sat = NULL;
+	int i = 0;
+
+	while (!sat && i < dev->nsats) {
+		if (laddr == dev->satd[i]->satcl.laddr)
+			sat = dev->satd[i];
+		i++;
+	}
+	return sat;
+}
+
+static irqreturn_t msm_slim_interrupt(int irq, void *d)
+{
+	struct msm_slim_ctrl *dev = d;
+	u32 pstat;
+	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);
+
+	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
+		if (stat & MGR_INT_TX_MSG_SENT)
+			writel_relaxed(MGR_INT_TX_MSG_SENT,
+					dev->base + MGR_INT_CLR);
+		else {
+			u32 mgr_stat = readl_relaxed(dev->base + MGR_STATUS);
+			u32 mgr_ie_stat = readl_relaxed(dev->base +
+						MGR_IE_STAT);
+			u32 frm_stat = readl_relaxed(dev->base + FRM_STAT);
+			u32 frm_cfg = readl_relaxed(dev->base + FRM_CFG);
+			u32 frm_intr_stat = readl_relaxed(dev->base +
+						FRM_INT_STAT);
+			u32 frm_ie_stat = readl_relaxed(dev->base +
+						FRM_IE_STAT);
+			u32 intf_stat = readl_relaxed(dev->base + INTF_STAT);
+			u32 intf_intr_stat = readl_relaxed(dev->base +
+						INTF_INT_STAT);
+			u32 intf_ie_stat = readl_relaxed(dev->base +
+						INTF_IE_STAT);
+
+			writel_relaxed(MGR_INT_TX_NACKED_2,
+					dev->base + MGR_INT_CLR);
+			pr_err("TX Nack MGR dump:int_stat:0x%x, mgr_stat:0x%x",
+					stat, mgr_stat);
+			pr_err("TX Nack MGR dump:ie_stat:0x%x", mgr_ie_stat);
+			pr_err("TX Nack FRM dump:int_stat:0x%x, frm_stat:0x%x",
+					frm_intr_stat, frm_stat);
+			pr_err("TX Nack FRM dump:frm_cfg:0x%x, ie_stat:0x%x",
+					frm_cfg, frm_ie_stat);
+			pr_err("TX Nack INTF dump:intr_st:0x%x, intf_stat:0x%x",
+					intf_intr_stat, intf_stat);
+			pr_err("TX Nack INTF dump:ie_stat:0x%x", intf_ie_stat);
+
+			dev->err = -EIO;
+		}
+		/*
+		 * Guarantee that interrupt clear bit write goes through before
+		 * signalling completion/exiting ISR
+		 */
+		mb();
+		msm_slim_manage_tx_msgq(dev, false, NULL);
+	}
+	if (stat & MGR_INT_RX_MSG_RCVD) {
+		u32 rx_buf[10];
+		u32 mc, mt;
+		u8 len, i;
+
+		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
+		len = rx_buf[0] & 0x1F;
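+		/*
+		 * The low 5 bits of the first word give the message length
+		 * in bytes; read the remaining words of the message.
+		 */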
+		for (i = 1; i < ((len + 3) >> 2); i++) {
+			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
+						(4 * i));
+			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
+		}
+		mt = (rx_buf[0] >> 5) & 0x7;
+		mc = (rx_buf[0] >> 8) & 0xff;
+		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
+		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
+				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
+			u8 laddr = (u8)((rx_buf[0] >> 16) & 0xFF);
+			struct msm_slim_sat *sat = addr_to_sat(dev, laddr);
+
+			if (sat)
+				msm_sat_enqueue(sat, rx_buf, len);
+			else
+				dev_err(dev->dev, "unknown sat:%d message",
+						laddr);
+			writel_relaxed(MGR_INT_RX_MSG_RCVD,
+					dev->base + MGR_INT_CLR);
+			/*
+			 * Guarantee that CLR bit write goes through before
+			 * queuing work
+			 */
+			mb();
+			if (sat)
+				queue_work(sat->wq, &sat->wd);
+		} else if (mt == SLIM_MSG_MT_CORE &&
+			mc == SLIM_MSG_MC_REPORT_PRESENT) {
+			u8 e_addr[6];
+
+			msm_get_eaddr(e_addr, rx_buf);
+			msm_slim_rx_enqueue(dev, rx_buf, len);
+			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
+						MGR_INT_CLR);
+			/*
+			 * Guarantee that CLR bit write goes through
+			 * before signalling completion
+			 */
+			mb();
+			complete(&dev->rx_msgq_notify);
+		} else if (mt == SLIM_MSG_MT_CORE &&
+			mc == SLIM_MSG_MC_REPORT_ABSENT) {
+			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
+						MGR_INT_CLR);
+			/*
+			 * Guarantee that CLR bit write goes through
+			 * before signalling completion
+			 */
+			mb();
+			complete(&dev->rx_msgq_notify);
+
+		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
+				mc == SLIM_MSG_MC_REPLY_VALUE) {
+			msm_slim_rx_enqueue(dev, rx_buf, len);
+			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
+						MGR_INT_CLR);
+			/*
+			 * Guarantee that CLR bit write goes through
+			 * before signalling completion
+			 */
+			mb();
+			complete(&dev->rx_msgq_notify);
+		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
+			u8 *buf = (u8 *)rx_buf;
+			u8 l_addr = buf[2];
+			u16 ele = (u16)buf[4] << 4;
+
+			ele |= ((buf[3] & 0xf0) >> 4);
+			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
+					l_addr, ele);
+			for (i = 0; i < len - 5; i++)
+				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
+						i, buf[i+5]);
+			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
+						MGR_INT_CLR);
+			/*
+			 * Guarantee that CLR bit write goes through
+			 * before exiting
+			 */
+			mb();
+		} else {
+			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
+						mc, mt, len);
+			for (i = 0; i < ((len + 3) >> 2); i++)
+				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
+			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
+						MGR_INT_CLR);
+			/*
+			 * Guarantee that CLR bit write goes through
+			 * before exiting
+			 */
+			mb();
+		}
+	}
+	if (stat & MGR_INT_RECFG_DONE) {
+		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
+		/*
+		 * Guarantee that CLR bit write goes through
+		 * before exiting ISR
+		 */
+		mb();
+		complete(&dev->reconf);
+	}
+	pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
+	if (pstat != 0)
+		return msm_slim_port_irq_handler(dev, pstat);
+
+	return IRQ_HANDLED;
+}
+
+static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	u32 *pbuf;
+	u8 *puc;
+	int timeout;
+	int msgv = -1;
+	u8 la = txn->la;
+	u8 mc = (u8)(txn->mc & 0xFF);
+	/*
+	 * Voting for runtime PM: Slimbus has two possible use cases:
+	 * 1. messaging
+	 * 2. data channels
+	 * Messaging goes through the messaging slots while data channels
+	 * use their own slots.
+	 * This "get" votes for messaging bandwidth.
+	 */
+	if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
+		msgv = msm_slim_get_ctrl(dev);
+	if (msgv >= 0)
+		dev->state = MSM_CTRL_AWAKE;
+	mutex_lock(&dev->tx_lock);
+	if (dev->state == MSM_CTRL_ASLEEP ||
+		((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
+		dev->state == MSM_CTRL_IDLE)) {
+		dev_err(dev->dev, "runtime or system PM suspended state");
+		mutex_unlock(&dev->tx_lock);
+		if (msgv >= 0)
+			msm_slim_put_ctrl(dev);
+		return -EBUSY;
+	}
+	if (txn->mt == SLIM_MSG_MT_CORE &&
+		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
+		if (dev->reconf_busy) {
+			wait_for_completion(&dev->reconf);
+			dev->reconf_busy = false;
+		}
+		/* This "get" votes for data channels */
+		if (dev->ctrl.sched.usedslots != 0 &&
+			!dev->chan_active) {
+			int chv = msm_slim_get_ctrl(dev);
+
+			if (chv >= 0)
+				dev->chan_active = true;
+		}
+	}
+	txn->rl--;
+	pbuf = msm_get_msg_buf(dev, txn->rl, &done);
+	dev->err = 0;
+
+	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
+		mutex_unlock(&dev->tx_lock);
+		if (msgv >= 0)
+			msm_slim_put_ctrl(dev);
+		return -EPROTONOSUPPORT;
+	}
+	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
+		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
+		 mc == SLIM_MSG_MC_CONNECT_SINK ||
+		 mc == SLIM_MSG_MC_DISCONNECT_PORT))
+		la = dev->pgdla;
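+	/*
+	 * Assemble the first word of the message: remaining length,
+	 * message type and code, addressing mode and destination address.
+	 * puc then points at the first payload byte after that header.
+	 */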
+	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
+		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
+	else
+		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
+	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
+		puc = ((u8 *)pbuf) + 3;
+	else
+		puc = ((u8 *)pbuf) + 2;
+	if (txn->rbuf)
+		*(puc++) = txn->tid;
+	if ((txn->mt == SLIM_MSG_MT_CORE) &&
+		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
+		mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
+		(mc >= SLIM_MSG_MC_REQUEST_VALUE &&
+		 mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
+		*(puc++) = (txn->ec & 0xFF);
+		*(puc++) = (txn->ec >> 8)&0xFF;
+	}
+	if (txn->wbuf)
+		memcpy(puc, txn->wbuf, txn->len);
+	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
+		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
+		 mc == SLIM_MSG_MC_CONNECT_SINK ||
+		 mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
+		if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
+			dev->err = msm_slim_connect_pipe_port(dev, *puc);
+		else {
+			/*
+			 * Remove-channel already disconnects the master-side
+			 * ports from the channel; no need to send that again
+			 * on the bus, only disable the port here.
+			 */
+			writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn,
+					dev->pipes[*puc].port_b, dev->ver));
+			mutex_unlock(&dev->tx_lock);
+			if (msgv >= 0)
+				msm_slim_put_ctrl(dev);
+			return 0;
+		}
+		if (dev->err) {
+			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
+			mutex_unlock(&dev->tx_lock);
+			if (msgv >= 0)
+				msm_slim_put_ctrl(dev);
+			return dev->err;
+		}
+		*(puc) = (u8)dev->pipes[*puc].port_b;
+	}
+	if (txn->mt == SLIM_MSG_MT_CORE &&
+		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
+		dev->reconf_busy = true;
+	msm_send_msg_buf(dev, pbuf, txn->rl, MGR_TX_MSG);
+	timeout = wait_for_completion_timeout(&done, HZ);
+	if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
+		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
+					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
+				timeout) {
+			timeout = wait_for_completion_timeout(&dev->reconf, HZ);
+			dev->reconf_busy = false;
+			if (timeout) {
+				clk_disable_unprepare(dev->rclk);
+				disable_irq(dev->irq);
+			}
+		}
+		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
+					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
+				!timeout) {
+			dev->reconf_busy = false;
+			dev_err(dev->dev, "clock pause failed");
+			mutex_unlock(&dev->tx_lock);
+			return -ETIMEDOUT;
+		}
+		if (txn->mt == SLIM_MSG_MT_CORE &&
+			txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
+			if (dev->ctrl.sched.usedslots == 0 &&
+					dev->chan_active) {
+				dev->chan_active = false;
+				msm_slim_put_ctrl(dev);
+			}
+		}
+	}
+	mutex_unlock(&dev->tx_lock);
+	if (msgv >= 0)
+		msm_slim_put_ctrl(dev);
+
+	if (!timeout)
+		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
+					txn->mt);
+
+	return timeout ? dev->err : -ETIMEDOUT;
+}
+
+static void msm_slim_wait_retry(struct msm_slim_ctrl *dev)
+{
+	int msec_per_frm = 0;
+	int sfr_per_sec;
+	/* Wait for one superframe, or DEF_RETRY_MS if longer, then retry */
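+	/*
+	 * Example: at the maximum clock gear the superframe lasts 250 usecs
+	 * (see the comment in msm_clk_pause_wakeup()), i.e. ~4000
+	 * superframes per second, so msec_per_frm rounds down to 0 and
+	 * DEF_RETRY_MS is used instead; each gear below the maximum doubles
+	 * the superframe duration.
+	 */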
+	sfr_per_sec = dev->framer.superfreq /
+			(1 << (SLIM_MAX_CLK_GEAR - dev->ctrl.clkgear));
+	if (sfr_per_sec)
+		msec_per_frm = MSEC_PER_SEC / sfr_per_sec;
+	if (msec_per_frm < DEF_RETRY_MS)
+		msec_per_frm = DEF_RETRY_MS;
+	msleep(msec_per_frm);
+}
+static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
+				u8 elen, u8 laddr)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	struct completion done;
+	int timeout, ret, retries = 0;
+	u32 *buf;
+retry_laddr:
+	init_completion(&done);
+	mutex_lock(&dev->tx_lock);
+	buf = msm_get_msg_buf(dev, 9, &done);
+	if (buf == NULL) {
+		mutex_unlock(&dev->tx_lock);
+		return -ENOMEM;
+	}
+	buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
+					SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
+					SLIM_MSG_DEST_LOGICALADDR,
+					ea[5] | ea[4] << 8);
+	buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
+	buf[2] = laddr;
+
+	ret = msm_send_msg_buf(dev, buf, 9, MGR_TX_MSG);
+	timeout = wait_for_completion_timeout(&done, HZ);
+	if (!timeout)
+		dev->err = -ETIMEDOUT;
+	if (dev->err) {
+		ret = dev->err;
+		dev->err = 0;
+	}
+	mutex_unlock(&dev->tx_lock);
+	if (ret) {
+		pr_err("set LADDR:0x%x failed:ret:%d, retrying", laddr, ret);
+		if (retries < INIT_MX_RETRIES) {
+			msm_slim_wait_retry(dev);
+			retries++;
+			goto retry_laddr;
+		} else {
+			pr_err("set LADDR failed after retrying:ret:%d", ret);
+		}
+	}
+	return ret;
+}
+
+static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+
+	enable_irq(dev->irq);
+	clk_prepare_enable(dev->rclk);
+	writel_relaxed(1, dev->base + FRM_WAKEUP);
+	/* Make sure framer wakeup write goes through before exiting function */
+	mb();
+	/*
+	 * Workaround: Currently, the slave reports lost-sync messages
+	 * after slimbus comes out of clock pause. Transactions with the
+	 * slave fail before the slave sends that report, so give it some
+	 * time to arrive.
+	 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each
+	 * superframe being 250 usecs, we wait for 20 superframes (5 ms)
+	 * here to ensure we get the message.
+	 */
+	usleep_range(4950, 5000);
+	return 0;
+}
+
+static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
+{
+	struct msm_slim_ctrl *dev = sat->dev;
+	enum slim_ch_control oper;
+	int i;
+	int ret = 0;
+
+	if (mc == SLIM_USR_MC_CHAN_CTRL) {
+		for (i = 0; i < sat->nsatch; i++) {
+			if (buf[5] == sat->satch[i].chan)
+				break;
+		}
+		if (i >= sat->nsatch)
+			return -ENOTCONN;
+		oper = ((buf[3] & 0xC0) >> 6);
+		/* part of a group; activating/removing one handles the rest */
+		ret = slim_control_ch(&sat->satcl, sat->satch[i].chanh, oper,
+					false);
+		if (!ret) {
+			for (i = 5; i < len; i++) {
+				int j;
+
+				for (j = 0; j < sat->nsatch; j++) {
+					if (buf[i] != sat->satch[j].chan)
+						continue;
+
+					if (oper == SLIM_CH_REMOVE)
+						sat->satch[j].req_rem++;
+					else
+						sat->satch[j].req_def++;
+					break;
+				}
+			}
+		}
+	} else {
+		u16 chh[40];
+		struct slim_ch prop;
+		u32 exp;
+		u16 *grph = NULL;
+		u8 coeff, cc;
+		u8 prrate = buf[6];
+
+		if (len <= 8)
+			return -EINVAL;
+		for (i = 8; i < len; i++) {
+			int j = 0;
+
+			for (j = 0; j < sat->nsatch; j++) {
+				if (sat->satch[j].chan == buf[i]) {
+					chh[i - 8] = sat->satch[j].chanh;
+					break;
+				}
+			}
+			if (j < sat->nsatch) {
+				u16 dummy;
+
+				ret = slim_query_ch(&sat->satcl, buf[i],
+							&dummy);
+				if (ret)
+					return ret;
+				if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
+					sat->satch[j].req_def++;
+				/* First channel in group from satellite */
+				if (i == 8)
+					grph = &sat->satch[j].chanh;
+				continue;
+			}
+			if (sat->nsatch >= MSM_MAX_SATCH)
+				return -EXFULL;
+			ret = slim_query_ch(&sat->satcl, buf[i], &chh[i - 8]);
+			if (ret)
+				return ret;
+			sat->satch[j].chan = buf[i];
+			sat->satch[j].chanh = chh[i - 8];
+			if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
+				sat->satch[j].req_def++;
+			if (i == 8)
+				grph = &sat->satch[j].chanh;
+			sat->nsatch++;
+		}
+		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
+		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
+		if (prrate & 0x8)
+			prop.baser = SLIM_RATE_11025HZ;
+		else
+			prop.baser = SLIM_RATE_4000HZ;
+		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
+		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
+		exp = (u32)((buf[5] & 0xF0) >> 4);
+		coeff = (buf[4] & 0x20) >> 5;
+		cc = (coeff ? 3 : 1);
+		prop.ratem = cc * (1 << exp);
+		if (i > 9)
+			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
+					true, &chh[0]);
+		else
+			ret = slim_define_ch(&sat->satcl, &prop,
+					chh, 1, true, &chh[0]);
+		dev_dbg(dev->dev, "define sat grp returned:%d", ret);
+		if (ret)
+			return ret;
+		else if (grph)
+			*grph = chh[0];
+
+		/* part of a group, so activating one takes care of the rest */
+		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
+			ret = slim_control_ch(&sat->satcl,
+					chh[0],
+					SLIM_CH_ACTIVATE, false);
+	}
+	return ret;
+}
+
+static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
+{
+	u8 buf[40];
+	u8 mc, mt, len;
+	int i, ret;
+
+	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
+		len = buf[0] & 0x1F;
+		mt = (buf[0] >> 5) & 0x7;
+		mc = buf[1];
+		if (mt == SLIM_MSG_MT_CORE &&
+			mc == SLIM_MSG_MC_REPORT_PRESENT) {
+			u8 laddr;
+			u8 e_addr[6];
+
+			for (i = 0; i < 6; i++)
+				e_addr[i] = buf[7-i];
+
+			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr,
+						false);
+			/* Is this a QTI ported generic device? */
+			if (!ret && e_addr[5] == QC_MFGID_LSB &&
+				e_addr[4] == QC_MFGID_MSB &&
+				e_addr[1] == QC_DEVID_PGD &&
+				e_addr[2] != QC_CHIPID_SL)
+				dev->pgdla = laddr;
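+			/*
+			 * Enable runtime PM only once the last of the
+			 * QC_MSM_DEVS expected QTI devices has been
+			 * assigned its logical address.
+			 */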
+			if (!ret && !pm_runtime_enabled(dev->dev) &&
+				laddr == (QC_MSM_DEVS - 1))
+				pm_runtime_enable(dev->dev);
+
+			if (!ret && msm_is_sat_dev(e_addr)) {
+				struct msm_slim_sat *sat = addr_to_sat(dev,
+								laddr);
+				if (!sat)
+					sat = msm_slim_alloc_sat(dev);
+				if (!sat)
+					return;
+
+				sat->satcl.laddr = laddr;
+				msm_sat_enqueue(sat, (u32 *)buf, len);
+				queue_work(sat->wq, &sat->wd);
+			}
+			if (ret)
+				pr_err("assign laddr failed, error:%d", ret);
+		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
+				mc == SLIM_MSG_MC_REPLY_VALUE) {
+			u8 tid = buf[3];
+
+			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
+			slim_msg_response(&dev->ctrl, &buf[4], tid,
+						len - 4);
+			pm_runtime_mark_last_busy(dev->dev);
+		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
+			u8 l_addr = buf[2];
+			u16 ele = (u16)buf[4] << 4;
+
+			ele |= ((buf[3] & 0xf0) >> 4);
+			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
+					l_addr, ele);
+			for (i = 0; i < len - 5; i++)
+				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
+						i, buf[i+5]);
+		} else {
+			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
+					mc, mt);
+			for (i = 0; i < len; i++)
+				dev_err(dev->dev, "error msg: %x", buf[i]);
+
+		}
+	} else
+		dev_err(dev->dev, "rxwq called and no dequeue");
+}
+
+static void slim_sat_rxprocess(struct work_struct *work)
+{
+	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
+	struct msm_slim_ctrl *dev = sat->dev;
+	u8 buf[40];
+
+	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
+		struct slim_msg_txn txn;
+		u8 len, mc, mt;
+		u32 bw_sl;
+		int ret = 0;
+		int satv = -1;
+		bool gen_ack = false;
+		u8 tid;
+		u8 wbuf[8];
+		int i, retries = 0;
+
+		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
+		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+		txn.ec = 0;
+		txn.rbuf = NULL;
+		txn.la = sat->satcl.laddr;
+		/* satellite handling */
+		len = buf[0] & 0x1F;
+		mc = buf[1];
+		mt = (buf[0] >> 5) & 0x7;
+
+		if (mt == SLIM_MSG_MT_CORE &&
+			mc == SLIM_MSG_MC_REPORT_PRESENT) {
+			u8 e_addr[6];
+
+			for (i = 0; i < 6; i++)
+				e_addr[i] = buf[7-i];
+
+			if (pm_runtime_enabled(dev->dev)) {
+				satv = msm_slim_get_ctrl(dev);
+				if (satv >= 0)
+					sat->pending_capability = true;
+			}
+			/*
+			 * Since the capability message was already sent, a
+			 * new present message indicates that the subsystem
+			 * hosting this satellite has restarted. Remove all
+			 * active channels of this satellite when this is
+			 * detected.
+			 */
+			if (sat->sent_capability) {
+				for (i = 0; i < sat->nsatch; i++) {
+					if (sat->satch[i].reconf) {
+						pr_err("SSR, sat:%d, rm ch:%d",
+							sat->satcl.laddr,
+							sat->satch[i].chan);
+						slim_control_ch(&sat->satcl,
+							sat->satch[i].chanh,
+							SLIM_CH_REMOVE, true);
+						slim_dealloc_ch(&sat->satcl,
+							sat->satch[i].chanh);
+						sat->satch[i].reconf = false;
+					}
+				}
+			}
+		} else if (mt != SLIM_MSG_MT_CORE &&
+				mc != SLIM_MSG_MC_REPORT_PRESENT) {
+			satv = msm_slim_get_ctrl(dev);
+		}
+		switch (mc) {
+		case SLIM_MSG_MC_REPORT_PRESENT:
+			/* Remove runtime_pm vote once satellite acks */
+			if (mt != SLIM_MSG_MT_CORE) {
+				if (pm_runtime_enabled(dev->dev) &&
+					sat->pending_capability) {
+					msm_slim_put_ctrl(dev);
+					sat->pending_capability = false;
+				}
+				continue;
+			}
+			/* send a Manager capability msg */
+			if (sat->sent_capability) {
+				if (mt == SLIM_MSG_MT_CORE)
+					goto send_capability;
+				else
+					continue;
+			}
+			ret = slim_add_device(&dev->ctrl, &sat->satcl);
+			if (ret) {
+				dev_err(dev->dev,
+					"Satellite-init failed");
+				continue;
+			}
+			/* Satellite-channels */
+			sat->satch = kzalloc(MSM_MAX_SATCH *
+					sizeof(struct msm_sat_chan),
+					GFP_KERNEL);
+			if (!sat->satch)
+				continue;
+send_capability:
+			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
+			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
+			txn.la = sat->satcl.laddr;
+			txn.rl = 8;
+			wbuf[0] = SAT_MAGIC_LSB;
+			wbuf[1] = SAT_MAGIC_MSB;
+			wbuf[2] = SAT_MSG_VER;
+			wbuf[3] = SAT_MSG_PROT;
+			txn.wbuf = wbuf;
+			txn.len = 4;
+			ret = msm_xfer_msg(&dev->ctrl, &txn);
+			if (ret) {
+				pr_err("capability for:0x%x fail:%d, retry:%d",
+						sat->satcl.laddr, ret, retries);
+				if (retries < INIT_MX_RETRIES) {
+					msm_slim_wait_retry(dev);
+					retries++;
+					goto send_capability;
+				} else {
+					pr_err("failed after all retries:%d",
+							ret);
+				}
+			} else {
+				sat->sent_capability = true;
+			}
+			break;
+		case SLIM_USR_MC_ADDR_QUERY:
+			memcpy(&wbuf[1], &buf[4], 6);
+			ret = slim_get_logical_addr(&sat->satcl,
+					&wbuf[1], 6, &wbuf[7]);
+			if (ret)
+				memset(&wbuf[1], 0, 6);
+			wbuf[0] = buf[3];
+			txn.mc = SLIM_USR_MC_ADDR_REPLY;
+			txn.rl = 12;
+			txn.len = 8;
+			txn.wbuf = wbuf;
+			msm_xfer_msg(&dev->ctrl, &txn);
+			break;
+		case SLIM_USR_MC_DEFINE_CHAN:
+		case SLIM_USR_MC_DEF_ACT_CHAN:
+		case SLIM_USR_MC_CHAN_CTRL:
+			if (mc != SLIM_USR_MC_CHAN_CTRL)
+				tid = buf[7];
+			else
+				tid = buf[4];
+			gen_ack = true;
+			ret = msm_sat_define_ch(sat, buf, len, mc);
+			if (ret) {
+				dev_err(dev->dev,
+					"SAT define_ch returned:%d",
+					ret);
+			}
+			if (!sat->pending_reconf) {
+				int chv = msm_slim_get_ctrl(dev);
+
+				if (chv >= 0)
+					sat->pending_reconf = true;
+			}
+			break;
+		case SLIM_USR_MC_RECONFIG_NOW:
+			tid = buf[3];
+			gen_ack = true;
+			ret = slim_reconfigure_now(&sat->satcl);
+			for (i = 0; i < sat->nsatch; i++) {
+				struct msm_sat_chan *sch = &sat->satch[i];
+
+				if (sch->req_rem && sch->reconf) {
+					if (!ret) {
+						slim_dealloc_ch(&sat->satcl,
+								sch->chanh);
+						sch->reconf = false;
+					}
+					sch->req_rem--;
+				} else if (sch->req_def) {
+					if (ret)
+						slim_dealloc_ch(&sat->satcl,
+								sch->chanh);
+					else
+						sch->reconf = true;
+					sch->req_def--;
+				}
+			}
+			if (sat->pending_reconf) {
+				msm_slim_put_ctrl(dev);
+				sat->pending_reconf = false;
+			}
+			break;
+		case SLIM_USR_MC_REQ_BW:
+			/* what we get is in SLOTS */
+			bw_sl = (u32)buf[4] << 3 |
+						((buf[3] & 0xE0) >> 5);
+			sat->satcl.pending_msgsl = bw_sl;
+			tid = buf[5];
+			gen_ack = true;
+			break;
+		case SLIM_USR_MC_CONNECT_SRC:
+		case SLIM_USR_MC_CONNECT_SINK:
+			if (mc == SLIM_USR_MC_CONNECT_SRC)
+				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
+			else
+				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
+			wbuf[0] = buf[4] & 0x1F;
+			wbuf[1] = buf[5];
+			tid = buf[6];
+			txn.la = buf[3];
+			txn.mt = SLIM_MSG_MT_CORE;
+			txn.rl = 6;
+			txn.len = 2;
+			txn.wbuf = wbuf;
+			gen_ack = true;
+			ret = msm_xfer_msg(&dev->ctrl, &txn);
+			break;
+		case SLIM_USR_MC_DISCONNECT_PORT:
+			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
+			wbuf[0] = buf[4] & 0x1F;
+			tid = buf[5];
+			txn.la = buf[3];
+			txn.rl = 5;
+			txn.len = 1;
+			txn.mt = SLIM_MSG_MT_CORE;
+			txn.wbuf = wbuf;
+			gen_ack = true;
+			ret = msm_xfer_msg(&dev->ctrl, &txn);
+			break;
+		case SLIM_MSG_MC_REPORT_ABSENT:
+			dev_info(dev->dev, "Received Report Absent Message\n");
+			break;
+		default:
+			break;
+		}
+		if (!gen_ack) {
+			if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0)
+				msm_slim_put_ctrl(dev);
+			continue;
+		}
+
+		wbuf[0] = tid;
+		if (!ret)
+			wbuf[1] = MSM_SAT_SUCCSS;
+		else
+			wbuf[1] = 0;
+		txn.mc = SLIM_USR_MC_GENERIC_ACK;
+		txn.la = sat->satcl.laddr;
+		txn.rl = 6;
+		txn.len = 2;
+		txn.wbuf = wbuf;
+		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
+		msm_xfer_msg(&dev->ctrl, &txn);
+		if (satv >= 0)
+			msm_slim_put_ctrl(dev);
+	}
+}
+
+static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev)
+{
+	struct msm_slim_sat *sat;
+	char *name;
+
+	if (dev->nsats >= MSM_MAX_NSATS)
+		return NULL;
+
+	sat = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
+	if (!sat)
+		return NULL;
+	name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
+	if (!name) {
+		kfree(sat);
+		return NULL;
+	}
+	dev->satd[dev->nsats] = sat;
+	sat->dev = dev;
+	snprintf(name, SLIMBUS_NAME_SIZE, "msm_sat%d", dev->nsats);
+	sat->satcl.name = name;
+	spin_lock_init(&sat->lock);
+	INIT_WORK(&sat->wd, slim_sat_rxprocess);
+	sat->wq = create_singlethread_workqueue(sat->satcl.name);
+	if (!sat->wq) {
+		kfree(name);
+		kfree(sat);
+		return NULL;
+	}
+	/*
+	 * Both sats will be allocated from RX thread and RX thread will
+	 * process messages sequentially. No synchronization necessary
+	 */
+	dev->nsats++;
+	return sat;
+}
+
+static int msm_slim_rx_msgq_thread(void *data)
+{
+	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
+	struct completion *notify = &dev->rx_msgq_notify;
+	struct msm_slim_sat *sat = NULL;
+	u32 mc = 0;
+	u32 mt = 0;
+	u32 buffer[10];
+	int index = 0;
+	u8 msg_len = 0;
+	int ret;
+
+	dev_dbg(dev->dev, "rx thread started");
+
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		ret = wait_for_completion_interruptible(notify);
+
+		if (ret)
+			dev_err(dev->dev, "rx thread wait error:%d", ret);
+
+		/* 1 irq notification per message */
+		if (dev->use_rx_msgqs != MSM_MSGQ_ENABLED) {
+			msm_slim_rxwq(dev);
+			continue;
+		}
+
+		ret = msm_slim_rx_msgq_get(dev, buffer, index);
+		if (ret) {
+			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
+			continue;
+		}
+
+		pr_debug("message[%d] = 0x%x\n", index, *buffer);
+
+		/* Decide if we use generic RX or satellite RX */
+		if (index++ == 0) {
+			msg_len = *buffer & 0x1F;
+			pr_debug("Start of new message, len = %d\n", msg_len);
+			mt = (buffer[0] >> 5) & 0x7;
+			mc = (buffer[0] >> 8) & 0xff;
+			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
+			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
+				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
+				u8 laddr;
+
+				laddr = (u8)((buffer[0] >> 16) & 0xff);
+				sat = addr_to_sat(dev, laddr);
+			}
+		}
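+		/*
+		 * The RX message queue delivers one 32-bit word per
+		 * notification; once index words cover msg_len bytes the
+		 * message is complete and can be dispatched.
+		 */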
+		if ((index * 4) >= msg_len) {
+			index = 0;
+			if (sat) {
+				msm_sat_enqueue(sat, buffer, msg_len);
+				queue_work(sat->wq, &sat->wd);
+				sat = NULL;
+			} else {
+				msm_slim_rx_enqueue(dev, buffer, msg_len);
+				msm_slim_rxwq(dev);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void msm_slim_prg_slew(struct platform_device *pdev,
+				struct msm_slim_ctrl *dev)
+{
+	struct resource *slew_io;
+	void __iomem *slew_reg;
+	/* SLEW RATE register for this slimbus */
+	dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"slimbus_slew_reg");
+	if (!dev->slew_mem) {
+		dev_dbg(&pdev->dev, "no slimbus slew resource\n");
+		return;
+	}
+	slew_io = request_mem_region(dev->slew_mem->start,
+				resource_size(dev->slew_mem), pdev->name);
+	if (!slew_io) {
+		dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
+		dev->slew_mem = NULL;
+		return;
+	}
+
+	slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
+	if (!slew_reg) {
+		dev_dbg(dev->dev, "slew register mapping failed");
+		release_mem_region(dev->slew_mem->start,
+					resource_size(dev->slew_mem));
+		dev->slew_mem = NULL;
+		return;
+	}
+	writel_relaxed(1, slew_reg);
+	/* Make sure slimbus-slew rate enabling goes through */
+	wmb();
+	iounmap(slew_reg);
+}
+
+static int msm_slim_probe(struct platform_device *pdev)
+{
+	struct msm_slim_ctrl *dev;
+	int ret;
+	enum apr_subsys_state q6_state;
+	struct resource		*bam_mem, *bam_io;
+	struct resource		*slim_mem, *slim_io;
+	struct resource		*irq, *bam_irq;
+	bool			rxreg_access = false;
+
+	q6_state = apr_get_q6_state();
+	if (q6_state == APR_SUBSYS_DOWN) {
+		dev_dbg(&pdev->dev, "deferring %s, adsp_state %d\n", __func__,
+			q6_state);
+		return -EPROBE_DEFER;
+	}
+	dev_dbg(&pdev->dev, "adsp is ready\n");
+
+	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"slimbus_physical");
+	if (!slim_mem) {
+		dev_err(&pdev->dev, "no slimbus physical memory resource\n");
+		return -ENODEV;
+	}
+	slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
+					pdev->name);
+	if (!slim_io) {
+		dev_err(&pdev->dev, "slimbus memory already claimed\n");
+		return -EBUSY;
+	}
+
+	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"slimbus_bam_physical");
+	if (!bam_mem) {
+		dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
+		ret = -ENODEV;
+		goto err_get_res_bam_failed;
+	}
+	bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
+					pdev->name);
+	if (!bam_io) {
+		release_mem_region(slim_mem->start, resource_size(slim_mem));
+		dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
+		ret = -EBUSY;
+		goto err_get_res_bam_failed;
+	}
+	irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+						"slimbus_irq");
+	if (!irq) {
+		dev_err(&pdev->dev, "no slimbus IRQ resource\n");
+		ret = -ENODEV;
+		goto err_get_res_failed;
+	}
+	bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+						"slimbus_bam_irq");
+	if (!bam_irq) {
+		dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
+		ret = -ENODEV;
+		goto err_get_res_failed;
+	}
+
+	dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
+	if (!dev) {
+		ret = -ENOMEM;
+		goto err_get_res_failed;
+	}
+	dev->wr_comp = kzalloc(sizeof(struct completion *) * MSM_TX_BUFS,
+				GFP_KERNEL);
+	if (!dev->wr_comp) {
+		ret = -ENOMEM;
+		kfree(dev);
+		goto err_get_res_failed;
+	}
+	dev->dev = &pdev->dev;
+	platform_set_drvdata(pdev, dev);
+	slim_set_ctrldata(&dev->ctrl, dev);
+	dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
+	if (!dev->base) {
+		dev_err(&pdev->dev, "IOremap failed\n");
+		ret = -ENOMEM;
+		goto err_ioremap_failed;
+	}
+	dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
+	if (!dev->bam.base) {
+		dev_err(&pdev->dev, "BAM IOremap failed\n");
+		ret = -ENOMEM;
+		goto err_ioremap_bam_failed;
+	}
+	if (pdev->dev.of_node) {
+		ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
+					&dev->ctrl.nr);
+		if (ret) {
+			dev_err(&pdev->dev, "Cell index not specified:%d", ret);
+			goto err_of_init_failed;
+		}
+		rxreg_access = of_property_read_bool(pdev->dev.of_node,
+					"qcom,rxreg-access");
+		/* Optional properties */
+		ret = of_property_read_u32(pdev->dev.of_node,
+					"qcom,min-clk-gear", &dev->ctrl.min_cg);
+		ret = of_property_read_u32(pdev->dev.of_node,
+					"qcom,max-clk-gear", &dev->ctrl.max_cg);
+		pr_debug("min_cg:%d, max_cg:%d, rxreg: %d", dev->ctrl.min_cg,
+					dev->ctrl.max_cg, rxreg_access);
+	} else {
+		dev->ctrl.nr = pdev->id;
+	}
+	dev->ctrl.nchans = MSM_SLIM_NCHANS;
+	dev->ctrl.nports = MSM_SLIM_NPORTS;
+	dev->ctrl.set_laddr = msm_set_laddr;
+	dev->ctrl.xfer_msg = msm_xfer_msg;
+	dev->ctrl.wakeup =  msm_clk_pause_wakeup;
+	dev->ctrl.alloc_port = msm_alloc_port;
+	dev->ctrl.dealloc_port = msm_dealloc_port;
+	dev->ctrl.port_xfer = msm_slim_port_xfer;
+	dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
+	/* Reserve some messaging BW for satellite-apps driver communication */
+	dev->ctrl.sched.pending_msgsl = 30;
+
+	init_completion(&dev->reconf);
+	mutex_init(&dev->tx_lock);
+	spin_lock_init(&dev->rx_lock);
+	dev->ee = 1;
+	if (rxreg_access)
+		dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
+	else
+		dev->use_rx_msgqs = MSM_MSGQ_RESET;
+
+	dev->irq = irq->start;
+	dev->bam.irq = bam_irq->start;
+
+	dev->hclk = clk_get(dev->dev, "iface_clk");
+	if (IS_ERR(dev->hclk))
+		dev->hclk = NULL;
+	else
+		clk_prepare_enable(dev->hclk);
+
+	ret = msm_slim_sps_init(dev, bam_mem, MGR_STATUS, false);
+	if (ret != 0) {
+		dev_err(dev->dev, "error SPS init\n");
+		goto err_sps_init_failed;
+	}
+
+	/* Fire up the Rx message queue thread */
+	dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
+					MSM_SLIM_NAME "_rx_msgq_thread");
+	if (IS_ERR(dev->rx_msgq_thread)) {
+		ret = PTR_ERR(dev->rx_msgq_thread);
+		dev_err(dev->dev, "Failed to start Rx message queue thread\n");
+		goto err_thread_create_failed;
+	}
+
+	dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
+	dev->framer.superfreq =
+		dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
+	dev->ctrl.a_framer = &dev->framer;
+	dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
+	dev->ctrl.dev.parent = &pdev->dev;
+	dev->ctrl.dev.of_node = pdev->dev.of_node;
+
+	ret = request_threaded_irq(dev->irq, NULL, msm_slim_interrupt,
+				IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+				"msm_slim_irq", dev);
+	if (ret) {
+		dev_err(&pdev->dev, "request IRQ failed\n");
+		goto err_request_irq_failed;
+	}
+
+	msm_slim_prg_slew(pdev, dev);
+
+	/* Register with framework before enabling frame, clock */
+	ret = slim_add_numbered_controller(&dev->ctrl);
+	if (ret) {
+		dev_err(dev->dev, "error adding controller\n");
+		goto err_ctrl_failed;
+	}
+
+
+	dev->rclk = clk_get(dev->dev, "core_clk");
+	if (IS_ERR(dev->rclk)) {
+		dev_err(dev->dev, "slimbus clock not found");
+		ret = PTR_ERR(dev->rclk);
+		goto err_clk_get_failed;
+	}
+	clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
+	clk_prepare_enable(dev->rclk);
+
+	dev->ver = readl_relaxed(dev->base);
+	/* Version info in 16 MSbits */
+	dev->ver >>= 16;
+	/* Component register initialization */
+	writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
+	writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
+				dev->base + CFG_PORT(COMP_TRUST_CFG, dev->ver));
+
+	/*
+	 * Manager register initialization
+	 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
+	 */
+	if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
+		writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
+			MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
+			MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
+	else
+		writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
+			MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
+			MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
+	writel_relaxed(1, dev->base + MGR_CFG);
+	/*
+	 * Framer registers are beyond 1K memory region after Manager and/or
+	 * component registers. Make sure those writes are ordered
+	 * before framer register writes
+	 */
+	wmb();
+
+	/* Framer register initialization */
+	writel_relaxed((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
+		(0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
+		dev->base + FRM_CFG);
+	/*
+	 * Make sure that framer wake-up and enabling writes go through
+	 * before any other component is enabled. Framer is responsible for
+	 * clocking the bus and enabling framer first will ensure that other
+	 * devices can report presence when they are enabled
+	 */
+	mb();
+
+	/* Enable RX msg Q */
+	if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
+		writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
+					dev->base + MGR_CFG);
+	else
+		writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
+	/*
+	 * Make sure that manager-enable is written through before interface
+	 * device is enabled
+	 */
+	mb();
+	writel_relaxed(1, dev->base + INTF_CFG);
+	/*
+	 * Make sure that interface-enable is written through before enabling
+	 * ported generic device inside MSM manager
+	 */
+	mb();
+	writel_relaxed(1, dev->base + CFG_PORT(PGD_CFG, dev->ver));
+	writel_relaxed(0x3F<<17, dev->base + CFG_PORT(PGD_OWN_EEn, dev->ver) +
+				(4 * dev->ee));
+	/*
+	 * Make sure that ported generic device is enabled and port-EE settings
+	 * are written through before finally enabling the component
+	 */
+	mb();
+
+	writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
+	/*
+	 * Make sure that all writes have gone through before exiting this
+	 * function
+	 */
+	mb();
+
+	/* Add devices registered with board-info now that controller is up */
+	slim_ctrl_add_boarddevs(&dev->ctrl);
+
+	if (pdev->dev.of_node)
+		of_register_slim_devices(&dev->ctrl);
+
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
+	pm_runtime_set_active(&pdev->dev);
+
+	dev_dbg(dev->dev, "MSM SB controller is up!\n");
+	return 0;
+
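+/*
+ * Error unwind: the labels below run in reverse order of setup, so a
+ * failure at any step releases only the resources acquired before it.
+ */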
+err_ctrl_failed:
+	writel_relaxed(0, dev->base + CFG_PORT(COMP_CFG, dev->ver));
+err_clk_get_failed:
+	kfree(dev->satd);
+	free_irq(dev->irq, dev);
+err_request_irq_failed:
+	kthread_stop(dev->rx_msgq_thread);
+err_thread_create_failed:
+	msm_slim_sps_exit(dev, true);
+	msm_slim_deinit_ep(dev, &dev->rx_msgq,
+				&dev->use_rx_msgqs);
+	msm_slim_deinit_ep(dev, &dev->tx_msgq,
+				&dev->use_tx_msgqs);
+err_sps_init_failed:
+	if (dev->hclk) {
+		clk_disable_unprepare(dev->hclk);
+		clk_put(dev->hclk);
+	}
+err_of_init_failed:
+	iounmap(dev->bam.base);
+err_ioremap_bam_failed:
+	iounmap(dev->base);
+err_ioremap_failed:
+	kfree(dev->wr_comp);
+	kfree(dev);
+err_get_res_failed:
+	release_mem_region(bam_mem->start, resource_size(bam_mem));
+err_get_res_bam_failed:
+	release_mem_region(slim_mem->start, resource_size(slim_mem));
+	return ret;
+}
+
+static int msm_slim_remove(struct platform_device *pdev)
+{
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+	struct resource *bam_mem;
+	struct resource *slim_mem;
+	struct resource *slew_mem = dev->slew_mem;
+	int i;
+
+	for (i = 0; i < dev->nsats; i++) {
+		struct msm_slim_sat *sat = dev->satd[i];
+		int j;
+
+		for (j = 0; j < sat->nsatch; j++)
+			slim_dealloc_ch(&sat->satcl, sat->satch[j].chanh);
+		slim_remove_device(&sat->satcl);
+		kfree(sat->satch);
+		destroy_workqueue(sat->wq);
+		kfree(sat->satcl.name);
+		kfree(sat);
+	}
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+	free_irq(dev->irq, dev);
+	slim_del_controller(&dev->ctrl);
+	clk_put(dev->rclk);
+	if (dev->hclk)
+		clk_put(dev->hclk);
+	msm_slim_sps_exit(dev, true);
+	msm_slim_deinit_ep(dev, &dev->rx_msgq,
+				&dev->use_rx_msgqs);
+	msm_slim_deinit_ep(dev, &dev->tx_msgq,
+				&dev->use_tx_msgqs);
+
+	kthread_stop(dev->rx_msgq_thread);
+	iounmap(dev->bam.base);
+	iounmap(dev->base);
+	kfree(dev->wr_comp);
+	kfree(dev);
+	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"slimbus_bam_physical");
+	if (bam_mem)
+		release_mem_region(bam_mem->start, resource_size(bam_mem));
+	if (slew_mem)
+		release_mem_region(slew_mem->start, resource_size(slew_mem));
+	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"slimbus_physical");
+	if (slim_mem)
+		release_mem_region(slim_mem->start, resource_size(slim_mem));
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int msm_slim_runtime_idle(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+
+	if (dev->state == MSM_CTRL_AWAKE)
+		dev->state = MSM_CTRL_IDLE;
+	dev_dbg(device, "pm_runtime: idle...\n");
+	pm_request_autosuspend(device);
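+	/* -EAGAIN defers suspend to the autosuspend request queued above */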
+	return -EAGAIN;
+}
+#endif
+
+/*
+ * If runtime PM is not enabled, these two functions act as helpers
+ * called from system suspend/resume, which is why they are guarded by
+ * CONFIG_PM rather than a runtime-PM-only conditional
+ */
+#ifdef CONFIG_PM
+static int msm_slim_runtime_suspend(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+	int ret;
+
+	dev_dbg(device, "pm_runtime: suspending...\n");
+	ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
+	if (ret) {
+		dev_err(device, "clk pause not entered:%d", ret);
+		dev->state = MSM_CTRL_AWAKE;
+	} else {
+		dev->state = MSM_CTRL_ASLEEP;
+	}
+	return ret;
+}
+
+static int msm_slim_runtime_resume(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+	int ret = 0;
+
+	dev_dbg(device, "pm_runtime: resuming...\n");
+	if (dev->state == MSM_CTRL_ASLEEP)
+		ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
+	if (ret) {
+		dev_err(device, "clk pause not exited:%d", ret);
+		dev->state = MSM_CTRL_ASLEEP;
+	} else {
+		dev->state = MSM_CTRL_AWAKE;
+	}
+	return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int msm_slim_suspend(struct device *dev)
+{
+	int ret = -EBUSY;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+
+	if (!pm_runtime_enabled(dev) ||
+		(!pm_runtime_suspended(dev) &&
+			cdev->state == MSM_CTRL_IDLE)) {
+		dev_dbg(dev, "system suspend");
+		ret = msm_slim_runtime_suspend(dev);
+		if (!ret) {
+			if (cdev->hclk)
+				clk_disable_unprepare(cdev->hclk);
+		}
+	}
+	if (ret == -EBUSY) {
+		/*
+		 * If the clock pause failed due to active channels, an audio
+		 * stream may still be active during suspend. Don't return a
+		 * suspend failure in that case, so that the display and other
+		 * components can still suspend. Any other error should be
+		 * passed on to system-level suspend
+		 */
+		ret = 0;
+	}
+	return ret;
+}
+
+static int msm_slim_resume(struct device *dev)
+{
+	/* If runtime_pm is enabled, this resume shouldn't do anything */
+	if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
+		struct platform_device *pdev = to_platform_device(dev);
+		struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+		int ret;
+
+		dev_dbg(dev, "system resume");
+		if (cdev->hclk)
+			clk_prepare_enable(cdev->hclk);
+		ret = msm_slim_runtime_resume(dev);
+		if (!ret) {
+			pm_runtime_mark_last_busy(dev);
+			pm_request_autosuspend(dev);
+		}
+		return ret;
+
+	}
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops msm_slim_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(
+		msm_slim_suspend,
+		msm_slim_resume
+	)
+	SET_RUNTIME_PM_OPS(
+		msm_slim_runtime_suspend,
+		msm_slim_runtime_resume,
+		msm_slim_runtime_idle
+	)
+};
+
+static const struct of_device_id msm_slim_dt_match[] = {
+	{
+		.compatible = "qcom,slim-msm",
+	},
+	{}
+};
+
+static struct platform_driver msm_slim_driver = {
+	.probe = msm_slim_probe,
+	.remove = msm_slim_remove,
+	.driver	= {
+		.name = MSM_SLIM_NAME,
+		.owner = THIS_MODULE,
+		.pm = &msm_slim_dev_pm_ops,
+		.of_match_table = msm_slim_dt_match,
+	},
+};
+
+static int msm_slim_init(void)
+{
+	return platform_driver_register(&msm_slim_driver);
+}
+subsys_initcall(msm_slim_init);
+
+static void msm_slim_exit(void)
+{
+	platform_driver_unregister(&msm_slim_driver);
+}
+module_exit(msm_slim_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM Slimbus controller");
+MODULE_ALIAS("platform:msm-slim");
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
new file mode 100644
index 0000000..f7f0269
--- /dev/null
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -0,0 +1,2110 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slimbus/slimbus.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_slimbus.h>
+#include <linux/timer.h>
+#include <linux/msm-sps.h>
+#include <soc/qcom/service-locator.h>
+#include <soc/qcom/service-notifier.h>
+#include <soc/qcom/subsystem_notif.h>
+#include "slim-msm.h"
+
+#define NGD_SLIM_NAME	"ngd_msm_ctrl"
+#define SLIM_LA_MGR	0xFF
+#define SLIM_ROOT_FREQ	24576000
+#define LADDR_RETRY	5
+
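+/*
+ * Each NGD instance has its own register window within the controller
+ * address space; the window base depends on the controller version
+ * (v1 vs v2) and on whether the instance number is odd or even.
+ */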
+#define NGD_BASE_V1(r)	(((r) % 2) ? 0x800 : 0xA00)
+#define NGD_BASE_V2(r)	(((r) % 2) ? 0x1000 : 0x2000)
+#define NGD_BASE(r, v) ((v) ? NGD_BASE_V2(r) : NGD_BASE_V1(r))
+/* NGD (Non-ported Generic Device) registers */
+enum ngd_reg {
+	NGD_CFG		= 0x0,
+	NGD_STATUS	= 0x4,
+	NGD_RX_MSGQ_CFG	= 0x8,
+	NGD_INT_EN	= 0x10,
+	NGD_INT_STAT	= 0x14,
+	NGD_INT_CLR	= 0x18,
+	NGD_TX_MSG	= 0x30,
+	NGD_RX_MSG	= 0x70,
+	NGD_IE_STAT	= 0xF0,
+	NGD_VE_STAT	= 0x100,
+};
+
+enum ngd_msg_cfg {
+	NGD_CFG_ENABLE		= 1,
+	NGD_CFG_RX_MSGQ_EN	= 1 << 1,
+	NGD_CFG_TX_MSGQ_EN	= 1 << 2,
+};
+
+enum ngd_intr {
+	NGD_INT_RECFG_DONE	= 1 << 24,
+	NGD_INT_TX_NACKED_2	= 1 << 25,
+	NGD_INT_MSG_BUF_CONTE	= 1 << 26,
+	NGD_INT_MSG_TX_INVAL	= 1 << 27,
+	NGD_INT_IE_VE_CHG	= 1 << 28,
+	NGD_INT_DEV_ERR		= 1 << 29,
+	NGD_INT_RX_MSG_RCVD	= 1 << 30,
+	NGD_INT_TX_MSG_SENT	= 1 << 31,
+};
+
+enum ngd_offsets {
+	NGD_NACKED_MC		= 0x7F00000,
+	NGD_ACKED_MC		= 0xFE000,
+	NGD_ERROR		= 0x1800,
+	NGD_MSGQ_SUPPORT	= 0x400,
+	NGD_RX_MSGQ_TIME_OUT	= 0x16,
+	NGD_ENUMERATED		= 0x1,
+	NGD_TX_BUSY		= 0x0,
+};
+
+enum ngd_status {
+	NGD_LADDR		= 1 << 1,
+};
+
+static void ngd_slim_rx(struct msm_slim_ctrl *dev, u8 *buf);
+static int ngd_slim_runtime_resume(struct device *device);
+static int ngd_slim_power_up(struct msm_slim_ctrl *dev, bool mdm_restart);
+static void ngd_dom_down(struct msm_slim_ctrl *dev);
+static int dsp_domr_notify_cb(struct notifier_block *n, unsigned long code,
+				void *_cmd);
+
+static irqreturn_t ngd_slim_interrupt(int irq, void *d)
+{
+	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)d;
+	void __iomem *ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
+	u32 stat = readl_relaxed(ngd + NGD_INT_STAT);
+	u32 pstat;
+
+	if ((stat & NGD_INT_MSG_BUF_CONTE) ||
+		(stat & NGD_INT_MSG_TX_INVAL) || (stat & NGD_INT_DEV_ERR) ||
+		(stat & NGD_INT_TX_NACKED_2)) {
+		writel_relaxed(stat, ngd + NGD_INT_CLR);
+		if (stat & NGD_INT_MSG_TX_INVAL)
+			dev->err = -EINVAL;
+		else
+			dev->err = -EIO;
+
+		SLIM_WARN(dev, "NGD interrupt error:0x%x, err:%d\n", stat,
+								dev->err);
+		/* Guarantee that error interrupts are cleared */
+		mb();
+		msm_slim_manage_tx_msgq(dev, false, NULL, dev->err);
+
+	} else if (stat & NGD_INT_TX_MSG_SENT) {
+		writel_relaxed(NGD_INT_TX_MSG_SENT, ngd + NGD_INT_CLR);
+		/* Make sure interrupt is cleared */
+		mb();
+		msm_slim_manage_tx_msgq(dev, false, NULL, 0);
+	}
+	if (stat & NGD_INT_RX_MSG_RCVD) {
+		u32 rx_buf[10];
+		u8 len, i;
+
+		rx_buf[0] = readl_relaxed(ngd + NGD_RX_MSG);
+		len = rx_buf[0] & 0x1F;
+		for (i = 1; i < ((len + 3) >> 2); i++) {
+			rx_buf[i] = readl_relaxed(ngd + NGD_RX_MSG +
+						(4 * i));
+			SLIM_DBG(dev, "REG-RX data: %x\n", rx_buf[i]);
+		}
+		writel_relaxed(NGD_INT_RX_MSG_RCVD,
+				ngd + NGD_INT_CLR);
+		/*
+		 * Guarantee that CLR bit write goes through before
+		 * queuing work
+		 */
+		mb();
+		ngd_slim_rx(dev, (u8 *)rx_buf);
+	}
+	if (stat & NGD_INT_RECFG_DONE) {
+		writel_relaxed(NGD_INT_RECFG_DONE, ngd + NGD_INT_CLR);
+		/* Guarantee RECONFIG DONE interrupt is cleared */
+		mb();
+		/* In satellite mode, just log the reconfig done IRQ */
+		SLIM_DBG(dev, "reconfig done IRQ for NGD\n");
+	}
+	if (stat & NGD_INT_IE_VE_CHG) {
+		writel_relaxed(NGD_INT_IE_VE_CHG, ngd + NGD_INT_CLR);
+		/* Guarantee IE VE change interrupt is cleared */
+		mb();
+		SLIM_DBG(dev, "NGD IE VE change\n");
+	}
+
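+	/* Delegate any pending port interrupts to the port IRQ handler */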
+	pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
+	if (pstat != 0)
+		return msm_slim_port_irq_handler(dev, pstat);
+	return IRQ_HANDLED;
+}
+
+static int ngd_qmi_available(struct notifier_block *n, unsigned long code,
+				void *_cmd)
+{
+	struct msm_slim_qmi *qmi = container_of(n, struct msm_slim_qmi, nb);
+	struct msm_slim_ctrl *dev =
+		container_of(qmi, struct msm_slim_ctrl, qmi);
+	SLIM_INFO(dev, "Slimbus QMI NGD CB received event:%ld\n", code);
+	switch (code) {
+	case QMI_SERVER_ARRIVE:
+		atomic_set(&dev->ssr_in_progress, 0);
+		schedule_work(&dev->dsp.dom_up);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static void ngd_reg_ssr(struct msm_slim_ctrl *dev)
+{
+	int ret;
+	const char *subsys_name = NULL;
+
+	dev->dsp.dom_t = MSM_SLIM_DOM_NONE;
+	ret = of_property_read_string(dev->dev->of_node,
+				"qcom,subsys-name", &subsys_name);
+	if (ret)
+		subsys_name = "adsp";
+
+	dev->dsp.nb.notifier_call = dsp_domr_notify_cb;
+	dev->dsp.domr = subsys_notif_register_notifier(subsys_name,
+							&dev->dsp.nb);
+	if (IS_ERR_OR_NULL(dev->dsp.domr)) {
+		dev_err(dev->dev,
+			"subsys_notif_register_notifier failed %ld",
+			PTR_ERR(dev->dsp.domr));
+		return;
+	}
+	dev->dsp.dom_t = MSM_SLIM_DOM_SS;
+	SLIM_INFO(dev, "reg-SSR with:%s, PDR not available\n",
+			subsys_name);
+}
+
+static int dsp_domr_notify_cb(struct notifier_block *n, unsigned long code,
+				void *_cmd)
+{
+	int cur = -1;
+	struct msm_slim_ss *dsp = container_of(n, struct msm_slim_ss, nb);
+	struct msm_slim_ctrl *dev = container_of(dsp, struct msm_slim_ctrl,
+						dsp);
+	struct pd_qmi_client_data *reg;
+
+	SLIM_INFO(dev, "SLIM DSP SSR/PDR notify cb:0x%lx, type:%d\n",
+			code, dsp->dom_t);
+	switch (code) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+	case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01:
+		SLIM_INFO(dev, "SLIM DSP SSR notify cb:%lu\n", code);
+		atomic_set(&dev->ssr_in_progress, 1);
+		/* wait for current transaction */
+		mutex_lock(&dev->tx_lock);
+		/* make sure autosuspend is not called until ADSP comes up */
+		pm_runtime_get_noresume(dev->dev);
+		dev->state = MSM_CTRL_DOWN;
+		msm_slim_sps_exit(dev, false);
+		ngd_dom_down(dev);
+		mutex_unlock(&dev->tx_lock);
+		break;
+	case LOCATOR_UP:
+		reg = _cmd;
+		if (!reg || reg->total_domains != 1) {
+			SLIM_WARN(dev, "error locating audio-PD\n");
+			if (reg)
+				SLIM_WARN(dev, "audio-PDs matched:%d\n",
+						reg->total_domains);
+
+			/* Fall back to SSR */
+			ngd_reg_ssr(dev);
+			return NOTIFY_DONE;
+		}
+		dev->dsp.domr = service_notif_register_notifier(
+				reg->domain_list->name,
+				reg->domain_list->instance_id,
+				&dev->dsp.nb,
+				&cur);
+		SLIM_INFO(dev, "reg-PD client:%s with service:%s\n",
+				reg->client_name, reg->service_name);
+		SLIM_INFO(dev, "reg-PD dom:%s instance:%d, cur:%d\n",
+				reg->domain_list->name,
+				reg->domain_list->instance_id, cur);
+		if (IS_ERR_OR_NULL(dev->dsp.domr))
+			ngd_reg_ssr(dev);
+		else
+			dev->dsp.dom_t = MSM_SLIM_DOM_PD;
+		break;
+	case LOCATOR_DOWN:
+		ngd_reg_ssr(dev);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
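+/*
+ * Prefer protection-domain restart (PDR) notifications for the audio
+ * service; if the service locator is unavailable, fall back to full
+ * subsystem restart (SSR) notifications via ngd_reg_ssr().
+ */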
+static void ngd_dom_init(struct msm_slim_ctrl *dev)
+{
+	struct pd_qmi_client_data reg;
+	int ret;
+
+	memset(&reg, 0, sizeof(struct pd_qmi_client_data));
+	dev->dsp.nb.priority = 4;
+	dev->dsp.nb.notifier_call = dsp_domr_notify_cb;
+	scnprintf(reg.client_name, QMI_SERVREG_LOC_NAME_LENGTH_V01, "appsngd%d",
+		 dev->ctrl.nr);
+	scnprintf(reg.service_name, QMI_SERVREG_LOC_NAME_LENGTH_V01,
+		 "avs/audio");
+	ret = get_service_location(reg.client_name, reg.service_name,
+				   &dev->dsp.nb);
+	if (ret)
+		ngd_reg_ssr(dev);
+}
+
+static int mdm_ssr_notify_cb(struct notifier_block *n, unsigned long code,
+				void *_cmd)
+{
+	void __iomem *ngd;
+	struct msm_slim_ss *ext_mdm = container_of(n, struct msm_slim_ss, nb);
+	struct msm_slim_ctrl *dev = container_of(ext_mdm, struct msm_slim_ctrl,
+						ext_mdm);
+	struct slim_controller *ctrl = &dev->ctrl;
+	u32 laddr;
+	struct slim_device *sbdev;
+
+	switch (code) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+		SLIM_INFO(dev, "SLIM %lu external_modem SSR notify cb\n", code);
+		/* vote for runtime-pm so that ADSP doesn't go down */
+		msm_slim_get_ctrl(dev);
+		/*
+		 * Checking the framer here will wake up the ADSP and may
+		 * avoid a framer handover later
+		 */
+		msm_slim_qmi_check_framer_request(dev);
+		dev->ext_mdm.state = MSM_CTRL_DOWN;
+		msm_slim_put_ctrl(dev);
+		break;
+	case SUBSYS_AFTER_POWERUP:
+		if (dev->ext_mdm.state != MSM_CTRL_DOWN)
+			return NOTIFY_DONE;
+		SLIM_INFO(dev,
+			"SLIM %lu external_modem SSR notify cb\n", code);
+		/* vote for runtime-pm so that ADSP doesn't go down */
+		msm_slim_get_ctrl(dev);
+		msm_slim_qmi_check_framer_request(dev);
+		/* If NGD enumeration is lost, we will need to power it up again */
+		ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
+		laddr = readl_relaxed(ngd + NGD_STATUS);
+		if (!(laddr & NGD_LADDR)) {
+			mutex_lock(&dev->tx_lock);
+			/* runtime-pm state should be consistent with HW */
+			pm_runtime_disable(dev->dev);
+			pm_runtime_set_suspended(dev->dev);
+			dev->state = MSM_CTRL_DOWN;
+			mutex_unlock(&dev->tx_lock);
+			SLIM_INFO(dev,
+				"SLIM MDM SSR (active framer on MDM) dev-down\n");
+			list_for_each_entry(sbdev, &ctrl->devs, dev_list)
+				slim_report_absent(sbdev);
+			ngd_slim_runtime_resume(dev->dev);
+			pm_runtime_set_active(dev->dev);
+			pm_runtime_enable(dev->dev);
+		}
+		dev->ext_mdm.state = MSM_CTRL_AWAKE;
+		msm_slim_put_ctrl(dev);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
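+/*
+ * Allocate a transaction ID (TID): IDs 0-255 are handed out
+ * sequentially on first use; after that, any slot whose transaction
+ * has completed (txnt[] entry cleared) is reused. The txn is parked
+ * in txnt[] so the rx path can match the remote acknowledgment to it.
+ */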
+static int ngd_get_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn,
+				u8 *tid, struct completion *done)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl->txn_lock, flags);
+	if (ctrl->last_tid <= 255) {
+		dev->msg_cnt = ctrl->last_tid;
+		ctrl->last_tid++;
+	} else {
+		int i;
+
+		for (i = 0; i < 256; i++) {
+			dev->msg_cnt = ((dev->msg_cnt + 1) & 0xFF);
+			if (ctrl->txnt[dev->msg_cnt] == NULL)
+				break;
+		}
+		if (i >= 256) {
+			dev_err(&ctrl->dev, "out of TID");
+			spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+			return -ENOMEM;
+		}
+	}
+	ctrl->txnt[dev->msg_cnt] = txn;
+	txn->tid = dev->msg_cnt;
+	txn->comp = done;
+	*tid = dev->msg_cnt;
+	spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+	return 0;
+}
+
+static void slim_reinit_tx_msgq(struct msm_slim_ctrl *dev)
+{
+	/*
+	 * Disconnect/reconnect the pipe so that subsequent transactions
+	 * don't time out due to unavailable descriptors
+	 */
+	if (dev->state != MSM_CTRL_DOWN) {
+		msm_slim_disconnect_endp(dev, &dev->tx_msgq,
+					&dev->use_tx_msgqs);
+		msm_slim_connect_endp(dev, &dev->tx_msgq);
+	}
+}
+
+static int ngd_check_hw_status(struct msm_slim_ctrl *dev)
+{
+	void __iomem *ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
+	u32 laddr = readl_relaxed(ngd + NGD_STATUS);
+	int ret = 0;
+
+	/* Lost logical addr due to noise */
+	if (!(laddr & NGD_LADDR)) {
+		SLIM_WARN(dev, "NGD lost LADDR: status:0x%x\n", laddr);
+		ret = ngd_slim_power_up(dev, false);
+
+		if (ret) {
+			SLIM_WARN(dev, "slim resume ret:%d, state:%d\n",
+					ret, dev->state);
+			ret = -EREMOTEIO;
+		}
+	}
+	return ret;
+}
+
+static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	DECLARE_COMPLETION_ONSTACK(tx_sent);
+
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	u32 *pbuf;
+	u8 *puc;
+	int ret = 0;
+	u8 la = txn->la;
+	u8 txn_mt;
+	u16 txn_mc = txn->mc;
+	u8 wbuf[SLIM_MSGQ_BUF_LEN];
+	bool report_sat = false;
+	bool sync_wr = true;
+
+	if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
+		return -EPROTONOSUPPORT;
+
+	if (txn->mt == SLIM_MSG_MT_CORE &&
+		(txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
+		 txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
+		return 0;
+
+	if (txn->mc == SLIM_USR_MC_REPORT_SATELLITE &&
+		txn->mt == SLIM_MSG_MT_SRC_REFERRED_USER)
+		report_sat = true;
+	else
+		mutex_lock(&dev->tx_lock);
+
+	if (!report_sat && !pm_runtime_enabled(dev->dev) &&
+			dev->state == MSM_CTRL_ASLEEP) {
+		/*
+		 * Counterpart of system suspend when runtime PM is not
+		 * enabled: resume can be left empty, and the device is put
+		 * into active mode only when a client requests something on
+		 * the bus. If the state was DOWN, the SSR-up notification
+		 * will take care of putting the device in the active state.
+		 */
+		mutex_unlock(&dev->tx_lock);
+		ret = ngd_slim_runtime_resume(dev->dev);
+
+		if (ret) {
+			SLIM_ERR(dev, "slim resume failed ret:%d, state:%d",
+					ret, dev->state);
+			return -EREMOTEIO;
+		}
+		mutex_lock(&dev->tx_lock);
+	}
+
+	/* If txn is tried when controller is down, wait for ADSP to boot */
+	if (!report_sat) {
+		if (dev->state == MSM_CTRL_DOWN) {
+			u8 mc = (u8)txn->mc;
+			int timeout;
+
+			mutex_unlock(&dev->tx_lock);
+			SLIM_INFO(dev, "ADSP slimbus not up yet\n");
+			/*
+			 * Messages related to data channel management can't
+			 * wait, since they are sent while holding the
+			 * reconfiguration lock; clk_pause in resume (which
+			 * can change state back to MSM_CTRL_AWAKE) needs that
+			 * lock. Port disconnection and channel removal calls
+			 * should pass through, since there is no activity on
+			 * the bus and those calls are triggered by clients
+			 * via the device_down callback in that situation.
+			 * Returning 0 on disconnections and removals keeps
+			 * the channel/port state consistent with the HW.
+			 * Remote requests to remove a channel/port are
+			 * returned from the path where they wait on an
+			 * acknowledgment from the ADSP
+			 */
+			if ((txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER) &&
+				((mc == SLIM_USR_MC_CHAN_CTRL ||
+				mc == SLIM_USR_MC_DISCONNECT_PORT ||
+				mc == SLIM_USR_MC_RECONFIG_NOW)))
+				return -EREMOTEIO;
+			if ((txn->mt == SLIM_MSG_MT_CORE) &&
+				((mc == SLIM_MSG_MC_DISCONNECT_PORT ||
+				mc == SLIM_MSG_MC_NEXT_REMOVE_CHANNEL ||
+				mc == SLIM_USR_MC_RECONFIG_NOW)))
+				return 0;
+			if ((txn->mt == SLIM_MSG_MT_CORE) &&
+				((mc >= SLIM_MSG_MC_CONNECT_SOURCE &&
+				mc <= SLIM_MSG_MC_CHANGE_CONTENT) ||
+				(mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
+				mc <= SLIM_MSG_MC_RECONFIGURE_NOW)))
+				return -EREMOTEIO;
+			if ((txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER) &&
+				((mc >= SLIM_USR_MC_DEFINE_CHAN &&
+				mc < SLIM_USR_MC_DISCONNECT_PORT)))
+				return -EREMOTEIO;
+			timeout = wait_for_completion_timeout(&dev->ctrl_up,
+							HZ);
+			if (!timeout)
+				return -ETIMEDOUT;
+			mutex_lock(&dev->tx_lock);
+		}
+
+		mutex_unlock(&dev->tx_lock);
+		ret = msm_slim_get_ctrl(dev);
+		mutex_lock(&dev->tx_lock);
+		/*
+		 * Runtime PM callbacks are not invoked until the runtime PM
+		 * error status is cleared. Setting the runtime status to
+		 * suspended clears that error, and also keeps the HW status
+		 * consistent with what SW tracks here
+		 */
+		if ((pm_runtime_enabled(dev->dev) && ret < 0) ||
+				dev->state >= MSM_CTRL_ASLEEP) {
+			SLIM_ERR(dev, "slim ctrl vote failed ret:%d, state:%d",
+					ret, dev->state);
+			pm_runtime_set_suspended(dev->dev);
+			mutex_unlock(&dev->tx_lock);
+			msm_slim_put_ctrl(dev);
+			return -EREMOTEIO;
+		}
+		ret = ngd_check_hw_status(dev);
+		if (ret) {
+			mutex_unlock(&dev->tx_lock);
+			msm_slim_put_ctrl(dev);
+			return ret;
+		}
+	}
+
+	if (txn->mt == SLIM_MSG_MT_CORE &&
+		(txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
+		txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
+		txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
+		int i = 0;
+
+		if (txn->mc != SLIM_MSG_MC_DISCONNECT_PORT)
+			SLIM_INFO(dev,
+				"Connect port: laddr 0x%x  port_num %d chan_num %d\n",
+					txn->la, txn->wbuf[0], txn->wbuf[1]);
+		else
+			SLIM_INFO(dev,
+				"Disconnect port: laddr 0x%x  port_num %d\n",
+					txn->la, txn->wbuf[0]);
+		txn->mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+		if (txn->mc == SLIM_MSG_MC_CONNECT_SOURCE)
+			txn->mc = SLIM_USR_MC_CONNECT_SRC;
+		else if (txn->mc == SLIM_MSG_MC_CONNECT_SINK)
+			txn->mc = SLIM_USR_MC_CONNECT_SINK;
+		else if (txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)
+			txn->mc = SLIM_USR_MC_DISCONNECT_PORT;
+		if (txn->la == SLIM_LA_MGR) {
+			if (dev->pgdla == SLIM_LA_MGR) {
+				u8 ea[] = {0, QC_DEVID_PGD, 0, 0, QC_MFGID_MSB,
+						QC_MFGID_LSB};
+				ea[2] = (u8)(dev->pdata.eapc & 0xFF);
+				ea[3] = (u8)((dev->pdata.eapc & 0xFF00) >> 8);
+				mutex_unlock(&dev->tx_lock);
+				ret = dev->ctrl.get_laddr(&dev->ctrl, ea, 6,
+						&dev->pgdla);
+				SLIM_DBG(dev, "SLIM PGD LA:0x%x, ret:%d\n",
+					dev->pgdla, ret);
+				if (ret) {
+					SLIM_ERR(dev,
+						"Incorrect SLIM-PGD EAPC:0x%x\n",
+							dev->pdata.eapc);
+					return ret;
+				}
+				mutex_lock(&dev->tx_lock);
+			}
+			txn->la = dev->pgdla;
+		}
+		wbuf[i++] = txn->la;
+		la = SLIM_LA_MGR;
+		wbuf[i++] = txn->wbuf[0];
+		if (txn->mc != SLIM_USR_MC_DISCONNECT_PORT)
+			wbuf[i++] = txn->wbuf[1];
+		ret = ngd_get_tid(ctrl, txn, &wbuf[i++], &done);
+		if (ret) {
+			SLIM_ERR(dev, "TID for connect/disconnect fail:%d\n",
+					ret);
+			goto ngd_xfer_err;
+		}
+		txn->len = i;
+		txn->wbuf = wbuf;
+		txn->rl = txn->len + 4;
+	}
+	txn->rl--;
+
+	if (txn->len > SLIM_MSGQ_BUF_LEN || txn->rl > SLIM_MSGQ_BUF_LEN) {
+		SLIM_WARN(dev, "msg exeeds HW lim:%d, rl:%d, mc:0x%x, mt:0x%x",
+					txn->len, txn->rl, txn->mc, txn->mt);
+		ret = -EDQUOT;
+		goto ngd_xfer_err;
+	}
+
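+	/*
+	 * Request-class messages expect a TID-keyed reply, so their TX is
+	 * completed synchronously via tx_sent below; other messages that
+	 * carry a completion ride the BAM TX message queue asynchronously
+	 */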
+	if (txn->mt == SLIM_MSG_MT_CORE && txn->comp &&
+		dev->use_tx_msgqs == MSM_MSGQ_ENABLED &&
+		(txn_mc != SLIM_MSG_MC_REQUEST_INFORMATION &&
+		 txn_mc != SLIM_MSG_MC_REQUEST_VALUE &&
+		 txn_mc != SLIM_MSG_MC_REQUEST_CHANGE_VALUE &&
+		 txn_mc != SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION)) {
+		sync_wr = false;
+		pbuf = msm_get_msg_buf(dev, txn->rl, txn->comp);
+	} else if (txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER &&
+			dev->use_tx_msgqs == MSM_MSGQ_ENABLED &&
+			txn->mc == SLIM_USR_MC_REPEAT_CHANGE_VALUE &&
+			txn->comp) {
+		sync_wr = false;
+		pbuf = msm_get_msg_buf(dev, txn->rl, txn->comp);
+	} else {
+		pbuf = msm_get_msg_buf(dev, txn->rl, &tx_sent);
+	}
+
+	if (!pbuf) {
+		SLIM_ERR(dev, "Message buffer unavailable\n");
+		ret = -ENOMEM;
+		goto ngd_xfer_err;
+	}
+	dev->err = 0;
+
+	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
+		ret = -EPROTONOSUPPORT;
+		goto ngd_xfer_err;
+	}
+	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
+		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 0,
+				la);
+	else
+		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 1,
+				la);
+	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
+		puc = ((u8 *)pbuf) + 3;
+	else
+		puc = ((u8 *)pbuf) + 2;
+	if (txn->rbuf)
+		*(puc++) = txn->tid;
+	if (((txn->mt == SLIM_MSG_MT_CORE) &&
+		((txn->mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
+		txn->mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
+		(txn->mc >= SLIM_MSG_MC_REQUEST_VALUE &&
+		 txn->mc <= SLIM_MSG_MC_CHANGE_VALUE))) ||
+		(txn->mc == SLIM_USR_MC_REPEAT_CHANGE_VALUE &&
+		txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER)) {
+		*(puc++) = (txn->ec & 0xFF);
+		*(puc++) = (txn->ec >> 8)&0xFF;
+	}
+	if (txn->wbuf)
+		memcpy(puc, txn->wbuf, txn->len);
+	if (txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER &&
+		(txn->mc == SLIM_USR_MC_CONNECT_SRC ||
+		 txn->mc == SLIM_USR_MC_CONNECT_SINK ||
+		 txn->mc == SLIM_USR_MC_DISCONNECT_PORT) && txn->wbuf &&
+		wbuf[0] == dev->pgdla) {
+		if (txn->mc != SLIM_USR_MC_DISCONNECT_PORT)
+			dev->err = msm_slim_connect_pipe_port(dev, wbuf[1]);
+		else
+			writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn,
+					(dev->pipes[wbuf[1]].port_b),
+						dev->ver));
+		if (dev->err) {
+			SLIM_ERR(dev, "pipe-port connect err:%d\n", dev->err);
+			goto ngd_xfer_err;
+		}
+		/* Add port-base to port number if this is manager side port */
+		puc[1] = (u8)dev->pipes[wbuf[1]].port_b;
+	}
+	dev->err = 0;
+	/*
+	 * If this is a read txn, it may be freed by the receive thread (once
+	 * a response arrives) before the end of this function is reached.
+	 * mc and mt may also have been changed above to convert a standard
+	 * slimbus code/type into a satellite user-defined message, so
+	 * snapshot them again here
+	 */
+	txn_mc = txn->mc;
+	txn_mt = txn->mt;
+	ret = msm_send_msg_buf(dev, pbuf, txn->rl,
+			NGD_BASE(dev->ctrl.nr, dev->ver) + NGD_TX_MSG);
+	if (!ret && sync_wr) {
+		int i;
+		int timeout = wait_for_completion_timeout(&tx_sent, HZ);
+
+		if (!timeout && dev->use_tx_msgqs == MSM_MSGQ_ENABLED) {
+			struct msm_slim_endp *endpoint = &dev->tx_msgq;
+			struct sps_mem_buffer *mem = &endpoint->buf;
+			u32 idx = (u32) (((u8 *)pbuf - (u8 *)mem->base) /
+						SLIM_MSGQ_BUF_LEN);
+			phys_addr_t addr = mem->phys_base +
+						(idx * SLIM_MSGQ_BUF_LEN);
+			ret = -ETIMEDOUT;
+			SLIM_WARN(dev, "timeout, BAM desc_idx:%d, phys:%llx",
+					idx, (u64)addr);
+			for (i = 0; i < (SLIM_MSGQ_BUF_LEN >> 2) ; i++)
+				SLIM_WARN(dev, "timeout:bam-desc[%d]:0x%x",
+							i, *(pbuf + i));
+			if (idx < MSM_TX_BUFS)
+				dev->wr_comp[idx] = NULL;
+			slim_reinit_tx_msgq(dev);
+		} else if (!timeout) {
+			ret = -ETIMEDOUT;
+			SLIM_WARN(dev, "timeout non-BAM TX,len:%d", txn->rl);
+			for (i = 0; i < (SLIM_MSGQ_BUF_LEN >> 2) ; i++)
+				SLIM_WARN(dev, "timeout:txbuf[%d]:0x%x", i,
+						dev->tx_buf[i]);
+		} else {
+			ret = dev->err;
+		}
+	}
+	if (ret) {
+		u32 conf, stat, rx_msgq, int_stat, int_en, int_clr;
+		void __iomem *ngd = dev->base + NGD_BASE(dev->ctrl.nr,
+							dev->ver);
+		SLIM_WARN(dev, "TX failed :MC:0x%x,mt:0x%x, ret:%d, ver:%d\n",
+				txn_mc, txn_mt, ret, dev->ver);
+		conf = readl_relaxed(ngd);
+		stat = readl_relaxed(ngd + NGD_STATUS);
+		rx_msgq = readl_relaxed(ngd + NGD_RX_MSGQ_CFG);
+		int_stat = readl_relaxed(ngd + NGD_INT_STAT);
+		int_en = readl_relaxed(ngd + NGD_INT_EN);
+		int_clr = readl_relaxed(ngd + NGD_INT_CLR);
+
+		SLIM_WARN(dev, "conf:0x%x,stat:0x%x,rxmsgq:0x%x\n",
+				conf, stat, rx_msgq);
+		SLIM_ERR(dev, "int_stat:0x%x,int_en:0x%x,int_cll:0x%x\n",
+				int_stat, int_en, int_clr);
+	}
+
+	if (txn_mt == SLIM_MSG_MT_DEST_REFERRED_USER &&
+		(txn_mc == SLIM_USR_MC_CONNECT_SRC ||
+		 txn_mc == SLIM_USR_MC_CONNECT_SINK ||
+		 txn_mc == SLIM_USR_MC_DISCONNECT_PORT)) {
+		int timeout;
+		unsigned long flags;
+
+		mutex_unlock(&dev->tx_lock);
+		msm_slim_put_ctrl(dev);
+		if (!ret) {
+			timeout = wait_for_completion_timeout(txn->comp, HZ);
+			/* remote side did not acknowledge */
+			if (!timeout)
+				ret = -EREMOTEIO;
+			else
+				ret = txn->ec;
+		}
+		if (ret) {
+			SLIM_ERR(dev,
+				"connect/disconnect:0x%x,tid:%d err:%d\n",
+					txn->mc, txn->tid, ret);
+			spin_lock_irqsave(&ctrl->txn_lock, flags);
+			ctrl->txnt[txn->tid] = NULL;
+			spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+		}
+		return ret ? ret : dev->err;
+	}
+ngd_xfer_err:
+	if (!report_sat) {
+		mutex_unlock(&dev->tx_lock);
+		msm_slim_put_ctrl(dev);
+	}
+	return ret ? ret : dev->err;
+}
+
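+/*
+ * Build the 16-bit element code (EC) for a value-element access: the
+ * low bits carry the slice-size code (lengths 1-4 directly, 6/8 and
+ * 12/16 via coarser codes), bit 3 marks a value element, and the
+ * 12-bit start offset is packed into the upper nibbles.
+ */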
+static int ngd_get_ec(u16 start_offset, u8 len, u16 *ec)
+{
+	if (len > SLIM_MAX_VE_SLC_BYTES ||
+		start_offset > MSM_SLIM_VE_MAX_MAP_ADDR)
+		return -EINVAL;
+	if (len <= 4) {
+		*ec = len - 1;
+	} else if (len <= 8) {
+		if (len & 0x1)
+			return -EINVAL;
+		*ec = ((len >> 1) + 1);
+	} else {
+		if (len & 0x3)
+			return -EINVAL;
+		*ec = ((len >> 2) + 3);
+	}
+	*ec |= (0x8 | ((start_offset & 0xF) << 4));
+	*ec |= ((start_offset & 0xFF0) << 4);
+	return 0;
+}
+
+static int ngd_user_msg(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc,
+				struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+	int ret;
+	struct slim_msg_txn txn;
+
+	if (mt != SLIM_MSG_MT_DEST_REFERRED_USER ||
+		mc != SLIM_USR_MC_REPEAT_CHANGE_VALUE) {
+		return -EPROTONOSUPPORT;
+	}
+
+	ret = ngd_get_ec(msg->start_offset, len, &txn.ec);
+	if (ret)
+		return ret;
+	txn.la = la;
+	txn.mt = mt;
+	txn.mc = mc;
+	txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+	txn.len = len;
+	txn.rl = len + 6;
+	txn.wbuf = buf;
+	txn.rbuf = NULL;
+	txn.comp = msg->comp;
+	return ngd_xfer_msg(ctrl, &txn);
+}
+
+static int ngd_bulk_cb(void *ctx, int err)
+{
+	if (ctx)
+		complete(ctx);
+	return err;
+}
+
+static int ngd_bulk_wr(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc,
+			struct slim_val_inf msgs[], int n,
+			int (*comp_cb)(void *ctx, int err), void *ctx)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	int i, ret;
+	struct msm_slim_endp *endpoint = &dev->tx_msgq;
+	u32 *header;
+	DECLARE_COMPLETION_ONSTACK(done);
+
+	ret = msm_slim_get_ctrl(dev);
+	mutex_lock(&dev->tx_lock);
+
+	if ((pm_runtime_enabled(dev->dev) && ret < 0) ||
+			dev->state >= MSM_CTRL_ASLEEP) {
+		SLIM_WARN(dev, "vote failed/SSR in-progress ret:%d, state:%d",
+				ret, dev->state);
+		pm_runtime_set_suspended(dev->dev);
+		mutex_unlock(&dev->tx_lock);
+		msm_slim_put_ctrl(dev);
+		return -EREMOTEIO;
+	}
+	if (!pm_runtime_enabled(dev->dev) && dev->state == MSM_CTRL_ASLEEP) {
+		mutex_unlock(&dev->tx_lock);
+		ret = ngd_slim_runtime_resume(dev->dev);
+
+		if (ret) {
+			SLIM_ERR(dev, "slim resume failed ret:%d, state:%d",
+					ret, dev->state);
+			return -EREMOTEIO;
+		}
+		mutex_lock(&dev->tx_lock);
+	}
+
+	ret = ngd_check_hw_status(dev);
+	if (ret) {
+		mutex_unlock(&dev->tx_lock);
+		msm_slim_put_ctrl(dev);
+		return ret;
+	}
+
+	if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
+		SLIM_WARN(dev, "bulk wr not supported");
+		ret = -EPROTONOSUPPORT;
+		goto retpath;
+	}
+	if (dev->bulk.in_progress) {
+		SLIM_WARN(dev, "bulk wr in progress:");
+		ret = -EAGAIN;
+		goto retpath;
+	}
+	dev->bulk.in_progress = true;
+	/* every txn has 5 bytes of overhead: la, mc, mt, ec, len */
+	dev->bulk.size = n * 5;
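+	/*
+	 * Round each message's record length (num_bytes + 5) up to a
+	 * 32-bit boundary; when it is already aligned this reserves one
+	 * extra word, which is harmless for sizing
+	 */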
+	for (i = 0; i < n; i++) {
+		dev->bulk.size += msgs[i].num_bytes;
+		dev->bulk.size += (4 - ((msgs[i].num_bytes + 1) & 0x3));
+	}
+
+	if (dev->bulk.size > 0xffff) {
+		SLIM_WARN(dev, "len exceeds limit, split bulk and retry");
+		ret = -EDQUOT;
+		goto retpath;
+	}
+	if (dev->bulk.size > dev->bulk.buf_sz) {
+		void *temp = krealloc(dev->bulk.base, dev->bulk.size,
+				      GFP_KERNEL | GFP_DMA);
+		if (!temp) {
+			ret = -ENOMEM;
+			goto retpath;
+		}
+		dev->bulk.base = temp;
+		dev->bulk.buf_sz = dev->bulk.size;
+	}
+
+	header = dev->bulk.base;
+	for (i = 0; i < n; i++) {
+		u8 *buf = (u8 *)header;
+		int rl = msgs[i].num_bytes + 5;
+		u16 ec;
+
+		*header = SLIM_MSG_ASM_FIRST_WORD(rl, mt, mc, 0, la);
+		buf += 3;
+		ret = ngd_get_ec(msgs[i].start_offset, msgs[i].num_bytes, &ec);
+		if (ret)
+			goto retpath;
+		*(buf++) = (ec & 0xFF);
+		*(buf++) = (ec >> 8) & 0xFF;
+		memcpy(buf, msgs[i].wbuf, msgs[i].num_bytes);
+		buf += msgs[i].num_bytes;
+		header += (rl >> 2);
+		if (rl & 3) {
+			header++;
+			memset(buf, 0, ((u8 *)header - buf));
+		}
+	}
+	header = dev->bulk.base;
+	if (comp_cb) {
+		dev->bulk.cb = comp_cb;
+		dev->bulk.ctx = ctx;
+	} else {
+		dev->bulk.cb = ngd_bulk_cb;
+		dev->bulk.ctx = &done;
+	}
+	dev->bulk.wr_dma = dma_map_single(dev->dev, dev->bulk.base,
+					  dev->bulk.size, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev->dev, dev->bulk.wr_dma)) {
+		ret = -ENOMEM;
+		goto retpath;
+	}
+
+	ret = sps_transfer_one(endpoint->sps, dev->bulk.wr_dma, dev->bulk.size,
+						NULL, SPS_IOVEC_FLAG_EOT);
+	if (ret) {
+		SLIM_WARN(dev, "sps transfer one returned error:%d", ret);
+		goto retpath;
+	}
+	if (dev->bulk.cb == ngd_bulk_cb) {
+		int timeout = wait_for_completion_timeout(&done, HZ);
+
+		if (!timeout) {
+			SLIM_WARN(dev, "timeout for bulk wr");
+			dma_unmap_single(dev->dev, dev->bulk.wr_dma,
+					 dev->bulk.size, DMA_TO_DEVICE);
+			ret = -ETIMEDOUT;
+		}
+	}
+retpath:
+	if (ret) {
+		dev->bulk.in_progress = false;
+		dev->bulk.ctx = NULL;
+		dev->bulk.wr_dma = 0;
+		slim_reinit_tx_msgq(dev);
+	}
+	mutex_unlock(&dev->tx_lock);
+	msm_slim_put_ctrl(dev);
+	return ret;
+}
+
+static int ngd_xferandwait_ack(struct slim_controller *ctrl,
+				struct slim_msg_txn *txn)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	unsigned long flags;
+	int ret;
+
+	if (dev->state == MSM_CTRL_DOWN) {
+		/*
+		 * no need to send anything to the bus due to SSR
+		 * transactions related to channel removal marked as success
+		 * since HW is down
+		 */
+		if ((txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER) &&
+			((txn->mc >= SLIM_USR_MC_CHAN_CTRL &&
+			  txn->mc <= SLIM_USR_MC_REQ_BW) ||
+			txn->mc == SLIM_USR_MC_DISCONNECT_PORT)) {
+			spin_lock_irqsave(&ctrl->txn_lock, flags);
+			ctrl->txnt[txn->tid] = NULL;
+			spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+			return 0;
+		}
+	}
+
+	ret = ngd_xfer_msg(ctrl, txn);
+	if (!ret) {
+		int timeout;
+
+		timeout = wait_for_completion_timeout(txn->comp, HZ);
+		if (!timeout)
+			ret = -ETIMEDOUT;
+		else
+			ret = txn->ec;
+	}
+
+	if (ret) {
+		if (ret != -EREMOTEIO || txn->mc != SLIM_USR_MC_CHAN_CTRL)
+			SLIM_ERR(dev, "master msg:0x%x,tid:%d ret:%d\n",
+				txn->mc, txn->tid, ret);
+		spin_lock_irqsave(&ctrl->txn_lock, flags);
+		ctrl->txnt[txn->tid] = NULL;
+		spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+	}
+
+	return ret;
+}
+
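+/*
+ * Bandwidth allocation is proxied to the ADSP master: reserve message
+ * bandwidth if the requirement changed, then batch channel
+ * define/activate and removal requests, each committed with a
+ * RECONFIGURE_NOW.
+ */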
+static int ngd_allocbw(struct slim_device *sb, int *subfrmc, int *clkgear)
+{
+	int ret = 0, num_chan = 0;
+	struct slim_pending_ch *pch;
+	struct slim_msg_txn txn;
+	struct slim_controller *ctrl = sb->ctrl;
+	DECLARE_COMPLETION_ONSTACK(done);
+	u8 wbuf[SLIM_MSGQ_BUF_LEN];
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+
+	*clkgear = ctrl->clkgear;
+	*subfrmc = 0;
+	txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+	txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+	txn.la = SLIM_LA_MGR;
+	txn.len = 0;
+	txn.ec = 0;
+	txn.wbuf = wbuf;
+	txn.rbuf = NULL;
+
+	if (ctrl->sched.msgsl != ctrl->sched.pending_msgsl) {
+		SLIM_DBG(dev, "slim reserve BW for messaging: req: %d\n",
+				ctrl->sched.pending_msgsl);
+		txn.mc = SLIM_USR_MC_REQ_BW;
+		wbuf[txn.len++] = ((sb->laddr & 0x1f) |
+				((u8)(ctrl->sched.pending_msgsl & 0x7) << 5));
+		wbuf[txn.len++] = (u8)(ctrl->sched.pending_msgsl >> 3);
+		ret = ngd_get_tid(ctrl, &txn, &wbuf[txn.len++], &done);
+		if (ret)
+			return ret;
+		txn.rl = txn.len + 4;
+		ret = ngd_xferandwait_ack(ctrl, &txn);
+		if (ret)
+			return ret;
+
+		txn.mc = SLIM_USR_MC_RECONFIG_NOW;
+		txn.len = 2;
+		wbuf[1] = sb->laddr;
+		txn.rl = txn.len + 4;
+		ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
+		if (ret)
+			return ret;
+		ret = ngd_xferandwait_ack(ctrl, &txn);
+		if (ret)
+			return ret;
+
+		txn.len = 0;
+	}
+	list_for_each_entry(pch, &sb->mark_define, pending) {
+		struct slim_ich *slc;
+
+		slc = &ctrl->chans[pch->chan];
+		if (!slc) {
+			SLIM_WARN(dev, "no channel in define?\n");
+			return -ENXIO;
+		}
+		if (txn.len == 0) {
+			/* Per protocol, only last 5 bits for client no. */
+			wbuf[txn.len++] = (u8) (slc->prop.dataf << 5) |
+					(sb->laddr & 0x1f);
+			wbuf[txn.len] = slc->prop.sampleszbits >> 2;
+			if (slc->srch && slc->prop.prot == SLIM_PUSH)
+				slc->prop.prot = SLIM_PULL;
+			if (slc->coeff == SLIM_COEFF_3)
+				wbuf[txn.len] |= 1 << 5;
+			wbuf[txn.len++] |= slc->prop.auxf << 6;
+			wbuf[txn.len++] = slc->rootexp << 4 | slc->prop.prot;
+			wbuf[txn.len++] = slc->prrate;
+			ret = ngd_get_tid(ctrl, &txn, &wbuf[txn.len++], &done);
+			if (ret) {
+				SLIM_WARN(dev, "no tid for channel define?\n");
+				return -ENXIO;
+			}
+		}
+		num_chan++;
+		wbuf[txn.len++] = slc->chan;
+		SLIM_INFO(dev, "slim activate chan:%d, laddr: 0x%x\n",
+				slc->chan, sb->laddr);
+	}
+	if (txn.len) {
+		txn.mc = SLIM_USR_MC_DEF_ACT_CHAN;
+		txn.rl = txn.len + 4;
+		ret = ngd_xferandwait_ack(ctrl, &txn);
+		if (ret)
+			return ret;
+
+		txn.mc = SLIM_USR_MC_RECONFIG_NOW;
+		txn.len = 2;
+		wbuf[1] = sb->laddr;
+		txn.rl = txn.len + 4;
+		ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
+		if (ret)
+			return ret;
+		ret = ngd_xferandwait_ack(ctrl, &txn);
+		if (ret)
+			return ret;
+	}
+	txn.len = 0;
+	list_for_each_entry(pch, &sb->mark_removal, pending) {
+		struct slim_ich *slc;
+
+		slc = &ctrl->chans[pch->chan];
+		if (!slc) {
+			SLIM_WARN(dev, "no channel in removal?\n");
+			return -ENXIO;
+		}
+		if (txn.len == 0) {
+			/* Per protocol, only last 5 bits for client no. */
+			wbuf[txn.len++] = (u8) (SLIM_CH_REMOVE << 6) |
+					(sb->laddr & 0x1f);
+			ret = ngd_get_tid(ctrl, &txn, &wbuf[txn.len++], &done);
+			if (ret) {
+				SLIM_WARN(dev, "no tid for channel define?\n");
+				return -ENXIO;
+			}
+		}
+		wbuf[txn.len++] = slc->chan;
+		SLIM_INFO(dev, "slim remove chan:%d, laddr: 0x%x\n",
+			   slc->chan, sb->laddr);
+	}
+	if (txn.len) {
+		txn.mc = SLIM_USR_MC_CHAN_CTRL;
+		txn.rl = txn.len + 4;
+		ret = ngd_xferandwait_ack(ctrl, &txn);
+		/* HW restarting, channel removal should succeed */
+		if (ret == -EREMOTEIO)
+			return 0;
+		else if (ret)
+			return ret;
+
+		txn.mc = SLIM_USR_MC_RECONFIG_NOW;
+		txn.len = 2;
+		wbuf[1] = sb->laddr;
+		txn.rl = txn.len + 4;
+		ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
+		if (ret)
+			return ret;
+		ret = ngd_xferandwait_ack(ctrl, &txn);
+		if (ret)
+			return ret;
+		txn.len = 0;
+	}
+	return 0;
+}
+
+static int ngd_set_laddr(struct slim_controller *ctrl, const u8 *ea,
+				u8 elen, u8 laddr)
+{
+	return 0;
+}
+
+static int ngd_get_laddr(struct slim_controller *ctrl, const u8 *ea,
+				u8 elen, u8 *laddr)
+{
+	int ret;
+	u8 wbuf[10];
+	struct slim_msg_txn txn;
+	DECLARE_COMPLETION_ONSTACK(done);
+
+	txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+	txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+	txn.la = SLIM_LA_MGR;
+	txn.ec = 0;
+	ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
+	if (ret)
+		return ret;
+	memcpy(&wbuf[1], ea, elen);
+	txn.mc = SLIM_USR_MC_ADDR_QUERY;
+	txn.rl = 11;
+	txn.len = 7;
+	txn.wbuf = wbuf;
+	txn.rbuf = NULL;
+	ret = ngd_xferandwait_ack(ctrl, &txn);
+	if (!ret && txn.la == 0xFF)
+		ret = -ENXIO;
+	else if (!ret)
+		*laddr = txn.la;
+	return ret;
+}
+
+static void ngd_slim_setup(struct msm_slim_ctrl *dev)
+{
+	u32 new_cfg = NGD_CFG_ENABLE;
+	u32 cfg = readl_relaxed(dev->base +
+				 NGD_BASE(dev->ctrl.nr, dev->ver));
+	if (dev->state == MSM_CTRL_DOWN) {
+		/* if called after SSR, cleanup and re-assign */
+		if (dev->use_tx_msgqs != MSM_MSGQ_RESET)
+			msm_slim_deinit_ep(dev, &dev->tx_msgq,
+					   &dev->use_tx_msgqs);
+
+		if (dev->use_rx_msgqs != MSM_MSGQ_RESET)
+			msm_slim_deinit_ep(dev, &dev->rx_msgq,
+					   &dev->use_rx_msgqs);
+
+		msm_slim_sps_init(dev, dev->bam_mem,
+			NGD_BASE(dev->ctrl.nr,
+			dev->ver) + NGD_STATUS, true);
+	} else {
+		if (dev->use_rx_msgqs == MSM_MSGQ_DISABLED)
+			goto setup_tx_msg_path;
+
+		if ((dev->use_rx_msgqs == MSM_MSGQ_ENABLED) &&
+			(cfg & NGD_CFG_RX_MSGQ_EN))
+			goto setup_tx_msg_path;
+
+		if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
+			msm_slim_disconnect_endp(dev, &dev->rx_msgq,
+						 &dev->use_rx_msgqs);
+		msm_slim_connect_endp(dev, &dev->rx_msgq);
+
+setup_tx_msg_path:
+		if (dev->use_tx_msgqs == MSM_MSGQ_DISABLED)
+			goto ngd_enable;
+		if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED &&
+			cfg & NGD_CFG_TX_MSGQ_EN)
+			goto ngd_enable;
+
+		if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED)
+			msm_slim_disconnect_endp(dev, &dev->tx_msgq,
+						 &dev->use_tx_msgqs);
+		msm_slim_connect_endp(dev, &dev->tx_msgq);
+	}
+ngd_enable:
+
+	if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
+		new_cfg |= NGD_CFG_RX_MSGQ_EN;
+	if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED)
+		new_cfg |= NGD_CFG_TX_MSGQ_EN;
+
+	/* Enable NGD, and program MSGQs if not already */
+	if (cfg == new_cfg)
+		return;
+
+	writel_relaxed(new_cfg, dev->base + NGD_BASE(dev->ctrl.nr, dev->ver));
+	/* make sure NGD MSG-Q config goes through */
+	mb();
+}
+
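+/*
+ * Dispatch a received message: master-capability reports wake the rx
+ * thread, REPLY_INFORMATION/VALUE complete pending value transactions,
+ * and ADDR_REPLY/GENERIC_ACK complete the user transactions parked in
+ * txnt[] by ngd_get_tid().
+ */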
+static void ngd_slim_rx(struct msm_slim_ctrl *dev, u8 *buf)
+{
+	unsigned long flags;
+	u8 mc, mt, len;
+
+	len = buf[0] & 0x1F;
+	mt = (buf[0] >> 5) & 0x7;
+	mc = buf[1];
+	if (mc == SLIM_USR_MC_MASTER_CAPABILITY &&
+		mt == SLIM_MSG_MT_SRC_REFERRED_USER)
+		complete(&dev->rx_msgq_notify);
+
+	if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
+			mc == SLIM_MSG_MC_REPLY_VALUE) {
+		u8 tid = buf[3];
+
+		dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len);
+		slim_msg_response(&dev->ctrl, &buf[4], tid,
+					len - 4);
+		pm_runtime_mark_last_busy(dev->dev);
+	}
+	if (mc == SLIM_USR_MC_ADDR_REPLY &&
+		mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
+		struct slim_msg_txn *txn;
+		u8 failed_ea[6] = {0, 0, 0, 0, 0, 0};
+
+		spin_lock_irqsave(&dev->ctrl.txn_lock, flags);
+		txn = dev->ctrl.txnt[buf[3]];
+		if (!txn) {
+			spin_unlock_irqrestore(&dev->ctrl.txn_lock, flags);
+			SLIM_WARN(dev,
+				"LADDR response after timeout, tid:0x%x\n",
+					buf[3]);
+			return;
+		}
+		if (memcmp(&buf[4], failed_ea, 6))
+			txn->la = buf[10];
+		dev->ctrl.txnt[buf[3]] = NULL;
+		complete(txn->comp);
+		spin_unlock_irqrestore(&dev->ctrl.txn_lock, flags);
+	}
+	if (mc == SLIM_USR_MC_GENERIC_ACK &&
+		mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
+		struct slim_msg_txn *txn;
+
+		spin_lock_irqsave(&dev->ctrl.txn_lock, flags);
+		txn = dev->ctrl.txnt[buf[3]];
+		if (!txn) {
+			spin_unlock_irqrestore(&dev->ctrl.txn_lock, flags);
+			SLIM_WARN(dev, "ACK received after timeout, tid:0x%x\n",
+				buf[3]);
+			return;
+		}
+		dev_dbg(dev->dev, "got response:tid:%d, response:0x%x",
+				(int)buf[3], buf[4]);
+		if (!(buf[4] & MSM_SAT_SUCCSS)) {
+			SLIM_WARN(dev, "TID:%d, NACK code:0x%x\n", (int)buf[3],
+						buf[4]);
+			txn->ec = -EIO;
+		}
+		dev->ctrl.txnt[buf[3]] = NULL;
+		complete(txn->comp);
+		spin_unlock_irqrestore(&dev->ctrl.txn_lock, flags);
+	}
+}
+
+static int ngd_slim_power_up(struct msm_slim_ctrl *dev, bool mdm_restart)
+{
+	void __iomem *ngd;
+	int timeout, retries = 0, ret = 0;
+	enum msm_ctrl_state cur_state = dev->state;
+	u32 laddr;
+	u32 rx_msgq;
+	u32 ngd_int = (NGD_INT_TX_NACKED_2 |
+			NGD_INT_MSG_BUF_CONTE | NGD_INT_MSG_TX_INVAL |
+			NGD_INT_IE_VE_CHG | NGD_INT_DEV_ERR |
+			NGD_INT_TX_MSG_SENT);
+
+	if (!mdm_restart && cur_state == MSM_CTRL_DOWN) {
+		int timeout = wait_for_completion_timeout(&dev->qmi.qmi_comp,
+						HZ);
+		if (!timeout) {
+			SLIM_ERR(dev, "slimbus QMI init timed out\n");
+			return -EREMOTEIO;
+		}
+	}
+
+hw_init_retry:
+	/* No need to vote if the controller is not in low power mode */
+	if (!mdm_restart &&
+		(cur_state == MSM_CTRL_DOWN || cur_state == MSM_CTRL_ASLEEP)) {
+		ret = msm_slim_qmi_power_request(dev, true);
+		if (ret) {
+			SLIM_WARN(dev, "SLIM power req failed:%d, retry:%d\n",
+					ret, retries);
+			if (!atomic_read(&dev->ssr_in_progress))
+				msm_slim_qmi_power_request(dev, false);
+			if (retries < INIT_MX_RETRIES &&
+				!atomic_read(&dev->ssr_in_progress)) {
+				retries++;
+				goto hw_init_retry;
+			}
+			return ret;
+		}
+	}
+	retries = 0;
+
+	if (!dev->ver) {
+		dev->ver = readl_relaxed(dev->base);
+		/* Version info in 16 MSbits */
+		dev->ver >>= 16;
+	}
+	ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
+	laddr = readl_relaxed(ngd + NGD_STATUS);
+	if (laddr & NGD_LADDR) {
+		u32 int_en = readl_relaxed(ngd + NGD_INT_EN);
+
+		/*
+		 * External MDM restart case where the ADSP itself was the
+		 * active framer, e.g. the modem restarted while playback
+		 * was active
+		 */
+		if (cur_state == MSM_CTRL_AWAKE) {
+			SLIM_INFO(dev, "Subsys restart: ADSP active framer\n");
+			return 0;
+		}
+		/*
+		 * ADSP power collapse case, where HW wasn't reset.
+		 */
+		if (int_en != 0)
+			return 0;
+
+		/* Retention */
+		if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
+			msm_slim_disconnect_endp(dev, &dev->rx_msgq,
+						 &dev->use_rx_msgqs);
+		if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED)
+			msm_slim_disconnect_endp(dev, &dev->tx_msgq,
+						 &dev->use_tx_msgqs);
+
+		writel_relaxed(ngd_int, (dev->base + NGD_INT_EN +
+					NGD_BASE(dev->ctrl.nr, dev->ver)));
+
+		rx_msgq = readl_relaxed(ngd + NGD_RX_MSGQ_CFG);
+		/*
+		 * Program with the minimum value so that the signal gets
+		 * triggered immediately after a message is received
+		 */
+		writel_relaxed((rx_msgq | SLIM_RX_MSGQ_TIMEOUT_VAL),
+						(ngd + NGD_RX_MSGQ_CFG));
+		/* reconnect BAM pipes if needed and enable NGD */
+		ngd_slim_setup(dev);
+		return 0;
+	}
+
+	if (mdm_restart) {
+		/*
+		 * External MDM SSR while the MDM is the active framer.
+		 * The ADSP will reset the slimbus HW, so disconnect the BAM
+		 * pipes; they can be reconnected after the capability
+		 * message is received. Treat the current state as DOWN so
+		 * it stays in sync with the HW
+		 */
+		cur_state = MSM_CTRL_DOWN;
+		SLIM_INFO(dev,
+			"SLIM MDM restart: MDM active framer: reinit HW\n");
+		/* disconnect BAM pipes */
+		msm_slim_sps_exit(dev, false);
+		dev->state = MSM_CTRL_DOWN;
+	}
+
+capability_retry:
+	/*
+	 * ADSP power collapse (or SSR) case, where the HW was reset.
+	 * BAM programming happens once the capability message is received
+	 */
+	writel_relaxed(ngd_int, dev->base + NGD_INT_EN +
+				NGD_BASE(dev->ctrl.nr, dev->ver));
+
+	rx_msgq = readl_relaxed(ngd + NGD_RX_MSGQ_CFG);
+	/*
+	 * Program with the minimum value so that the signal gets
+	 * triggered immediately after a message is received
+	 */
+	writel_relaxed(rx_msgq|SLIM_RX_MSGQ_TIMEOUT_VAL,
+					ngd + NGD_RX_MSGQ_CFG);
+	/* make sure register got updated */
+	mb();
+
+	/* reconnect BAM pipes if needed and enable NGD */
+	ngd_slim_setup(dev);
+
+	timeout = wait_for_completion_timeout(&dev->reconf, HZ);
+	if (!timeout) {
+		u32 cfg = readl_relaxed(dev->base +
+					 NGD_BASE(dev->ctrl.nr, dev->ver));
+		laddr = readl_relaxed(ngd + NGD_STATUS);
+		SLIM_WARN(dev,
+			  "slim capability time-out:%d, stat:0x%x,cfg:0x%x\n",
+				retries, laddr, cfg);
+		if ((retries < INIT_MX_RETRIES) &&
+				!atomic_read(&dev->ssr_in_progress)) {
+			retries++;
+			goto capability_retry;
+		}
+		return -ETIMEDOUT;
+	}
+	/* multiple transactions waiting on slimbus to power up? */
+	if (cur_state == MSM_CTRL_DOWN)
+		complete_all(&dev->ctrl_up);
+	/* Resetting the log level */
+	SLIM_RST_LOGLVL(dev);
+	return 0;
+}
+
+static int ngd_slim_enable(struct msm_slim_ctrl *dev, bool enable)
+{
+	int ret = 0;
+
+	if (enable) {
+		ret = msm_slim_qmi_init(dev, false);
+		/* controller state should be in sync with framework state */
+		if (!ret) {
+			complete(&dev->qmi.qmi_comp);
+			if (!pm_runtime_enabled(dev->dev) ||
+					!pm_runtime_suspended(dev->dev))
+				ngd_slim_runtime_resume(dev->dev);
+			else
+				pm_runtime_resume(dev->dev);
+			pm_runtime_mark_last_busy(dev->dev);
+			pm_runtime_put(dev->dev);
+		} else
+			SLIM_ERR(dev, "qmi init fail, ret:%d, state:%d\n",
+					ret, dev->state);
+	} else {
+		msm_slim_qmi_exit(dev);
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int ngd_slim_power_down(struct msm_slim_ctrl *dev)
+{
+	unsigned long flags;
+	int i;
+	struct slim_controller *ctrl = &dev->ctrl;
+
+	spin_lock_irqsave(&ctrl->txn_lock, flags);
+	/* Pending response for a message */
+	for (i = 0; i < ctrl->last_tid; i++) {
+		if (ctrl->txnt[i]) {
+			spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+			SLIM_INFO(dev, "NGD down:txn-rsp for %d pending", i);
+			return -EBUSY;
+		}
+	}
+	spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+	return msm_slim_qmi_power_request(dev, false);
+}
+#endif
+
+static int ngd_slim_rx_msgq_thread(void *data)
+{
+	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
+	struct completion *notify = &dev->rx_msgq_notify;
+	int ret = 0;
+
+	while (!kthread_should_stop()) {
+		struct slim_msg_txn txn;
+		int retries = 0;
+		u8 wbuf[8];
+
+		wait_for_completion_interruptible(notify);
+
+		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+		txn.ec = 0;
+		txn.rbuf = NULL;
+		txn.mc = SLIM_USR_MC_REPORT_SATELLITE;
+		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
+		txn.la = SLIM_LA_MGR;
+		wbuf[0] = SAT_MAGIC_LSB;
+		wbuf[1] = SAT_MAGIC_MSB;
+		wbuf[2] = SAT_MSG_VER;
+		wbuf[3] = SAT_MSG_PROT;
+		txn.wbuf = wbuf;
+		txn.len = 4;
+		SLIM_INFO(dev, "SLIM SAT: Rcvd master capability\n");
+capability_retry:
+		txn.rl = 8;
+		ret = ngd_xfer_msg(&dev->ctrl, &txn);
+		if (!ret) {
+			enum msm_ctrl_state prev_state = dev->state;
+
+			SLIM_INFO(dev,
+				"SLIM SAT: capability exchange successful\n");
+			if (prev_state < MSM_CTRL_ASLEEP)
+				SLIM_WARN(dev,
+					"capability due to noise, state:%d\n",
+						prev_state);
+			complete(&dev->reconf);
+			/* ADSP SSR, send device_up notifications */
+			if (prev_state == MSM_CTRL_DOWN)
+				complete(&dev->qmi.slave_notify);
+		} else if (ret == -EIO) {
+			SLIM_WARN(dev, "capability message NACKed, retrying\n");
+			if (retries < INIT_MX_RETRIES) {
+				msleep(DEF_RETRY_MS);
+				retries++;
+				goto capability_retry;
+			}
+		} else {
+			SLIM_WARN(dev, "SLIM: capability TX failed:%d\n", ret);
+		}
+	}
+	return 0;
+}
+
+static int ngd_notify_slaves(void *data)
+{
+	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
+	struct slim_controller *ctrl = &dev->ctrl;
+	struct slim_device *sbdev;
+	struct list_head *pos, *next;
+	int ret, i = 0;
+
+	ret = qmi_svc_event_notifier_register(SLIMBUS_QMI_SVC_ID,
+				SLIMBUS_QMI_SVC_V1,
+				SLIMBUS_QMI_INS_ID, &dev->qmi.nb);
+	if (ret) {
+		pr_err("Slimbus QMI service registration failed:%d", ret);
+		return ret;
+	}
+
+	while (!kthread_should_stop()) {
+		wait_for_completion_interruptible(&dev->qmi.slave_notify);
+		/* Probe devices for first notification */
+		if (!i) {
+			i++;
+			dev->err = 0;
+			if (dev->dev->of_node)
+				of_register_slim_devices(&dev->ctrl);
+
+			/*
+			 * Add devices registered with board-info now that
+			 * controller is up
+			 */
+			slim_ctrl_add_boarddevs(&dev->ctrl);
+			ngd_dom_init(dev);
+		} else {
+			slim_framer_booted(ctrl);
+		}
+		mutex_lock(&ctrl->m_ctrl);
+		list_for_each_safe(pos, next, &ctrl->devs) {
+			int j;
+
+			sbdev = list_entry(pos, struct slim_device, dev_list);
+			mutex_unlock(&ctrl->m_ctrl);
+			for (j = 0; j < LADDR_RETRY; j++) {
+				ret = slim_get_logical_addr(sbdev,
+						sbdev->e_addr,
+						6, &sbdev->laddr);
+				if (!ret)
+					break;
+				/* time for ADSP to assign LA */
+				msleep(20);
+			}
+			mutex_lock(&ctrl->m_ctrl);
+		}
+		mutex_unlock(&ctrl->m_ctrl);
+	}
+	return 0;
+}
+
+static void ngd_dom_down(struct msm_slim_ctrl *dev)
+{
+	struct slim_controller *ctrl = &dev->ctrl;
+	struct slim_device *sbdev;
+
+	mutex_lock(&dev->ssr_lock);
+	ngd_slim_enable(dev, false);
+	/* device up should be called again after SSR */
+	list_for_each_entry(sbdev, &ctrl->devs, dev_list)
+		slim_report_absent(sbdev);
+	SLIM_INFO(dev, "SLIM ADSP SSR (DOWN) done\n");
+	mutex_unlock(&dev->ssr_lock);
+}
+
+static void ngd_dom_up(struct work_struct *work)
+{
+	struct msm_slim_ss *dsp =
+		container_of(work, struct msm_slim_ss, dom_up);
+	struct msm_slim_ctrl *dev =
+		container_of(dsp, struct msm_slim_ctrl, dsp);
+	mutex_lock(&dev->ssr_lock);
+	ngd_slim_enable(dev, true);
+	mutex_unlock(&dev->ssr_lock);
+}
+
+static ssize_t show_mask(struct device *device, struct device_attribute *attr,
+			char *buf)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", dev->ipc_log_mask);
+}
+
+static ssize_t set_mask(struct device *device, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+
+	dev->ipc_log_mask = buf[0] - '0';
+	if (dev->ipc_log_mask > DBG_LEV)
+		dev->ipc_log_mask = DBG_LEV;
+	return count;
+}
+
+static DEVICE_ATTR(debug_mask, 0644, show_mask, set_mask);
+
+static int ngd_slim_probe(struct platform_device *pdev)
+{
+	struct msm_slim_ctrl *dev;
+	int ret;
+	struct resource		*bam_mem;
+	struct resource		*slim_mem;
+	struct resource		*irq, *bam_irq;
+	bool			rxreg_access = false;
+	bool			slim_mdm = false;
+	const char		*ext_modem_id = NULL;
+
+	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"slimbus_physical");
+	if (!slim_mem) {
+		dev_err(&pdev->dev, "no slimbus physical memory resource\n");
+		return -ENODEV;
+	}
+	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"slimbus_bam_physical");
+	if (!bam_mem) {
+		dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
+		return -ENODEV;
+	}
+	irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+						"slimbus_irq");
+	if (!irq) {
+		dev_err(&pdev->dev, "no slimbus IRQ resource\n");
+		return -ENODEV;
+	}
+	bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+						"slimbus_bam_irq");
+	if (!bam_irq) {
+		dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
+		return -ENODEV;
+	}
+
+	dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+	dev->wr_comp = kzalloc(sizeof(struct completion *) * MSM_TX_BUFS,
+				GFP_KERNEL);
+	if (!dev->wr_comp) {
+		ret = -ENOMEM;
+		goto err_nobulk;
+	}
+
+	/* typical txn numbers and size used in bulk operation */
+	dev->bulk.buf_sz = SLIM_MAX_TXNS * 8;
+	dev->bulk.base = kzalloc(dev->bulk.buf_sz, GFP_KERNEL | GFP_DMA);
+	if (!dev->bulk.base) {
+		ret = -ENOMEM;
+		goto err_nobulk;
+	}
+
+	dev->dev = &pdev->dev;
+	platform_set_drvdata(pdev, dev);
+	slim_set_ctrldata(&dev->ctrl, dev);
+
+	/* Create IPC log context */
+	dev->ipc_slimbus_log = ipc_log_context_create(IPC_SLIMBUS_LOG_PAGES,
+						dev_name(dev->dev), 0);
+	if (!dev->ipc_slimbus_log)
+		dev_err(&pdev->dev, "error creating ipc_logging context\n");
+	else {
+		/* Initialize the log mask */
+		dev->ipc_log_mask = INFO_LEV;
+		dev->default_ipc_log_mask = INFO_LEV;
+		SLIM_INFO(dev, "start logging for slim dev %s\n",
+				dev_name(dev->dev));
+	}
+	ret = sysfs_create_file(&dev->dev->kobj, &dev_attr_debug_mask.attr);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to create dev. attr\n");
+		dev->sysfs_created = false;
+	} else {
+		dev->sysfs_created = true;
+	}
+
+	dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
+	if (!dev->base) {
+		dev_err(&pdev->dev, "IOremap failed\n");
+		ret = -ENOMEM;
+		goto err_ioremap_failed;
+	}
+	dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
+	if (!dev->bam.base) {
+		dev_err(&pdev->dev, "BAM IOremap failed\n");
+		ret = -ENOMEM;
+		goto err_ioremap_bam_failed;
+	}
+	if (pdev->dev.of_node) {
+
+		ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
+					&dev->ctrl.nr);
+		if (ret) {
+			dev_err(&pdev->dev, "Cell index not specified:%d", ret);
+			goto err_ctrl_failed;
+		}
+		rxreg_access = of_property_read_bool(pdev->dev.of_node,
+					"qcom,rxreg-access");
+		of_property_read_u32(pdev->dev.of_node, "qcom,apps-ch-pipes",
+					&dev->pdata.apps_pipes);
+		of_property_read_u32(pdev->dev.of_node, "qcom,ea-pc",
+					&dev->pdata.eapc);
+		ret = of_property_read_string(pdev->dev.of_node,
+					"qcom,slim-mdm", &ext_modem_id);
+		if (!ret)
+			slim_mdm = true;
+	} else {
+		dev->ctrl.nr = pdev->id;
+	}
+	/*
+	 * Keep PGD's logical address as manager's. Query it when first data
+	 * channel request comes in
+	 */
+	dev->pgdla = SLIM_LA_MGR;
+	dev->ctrl.nchans = MSM_SLIM_NCHANS;
+	dev->ctrl.nports = MSM_SLIM_NPORTS;
+	dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
+	dev->framer.superfreq =
+		dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
+	dev->ctrl.a_framer = &dev->framer;
+	dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
+	dev->ctrl.set_laddr = ngd_set_laddr;
+	dev->ctrl.get_laddr = ngd_get_laddr;
+	dev->ctrl.allocbw = ngd_allocbw;
+	dev->ctrl.xfer_msg = ngd_xfer_msg;
+	dev->ctrl.xfer_user_msg = ngd_user_msg;
+	dev->ctrl.xfer_bulk_wr = ngd_bulk_wr;
+	dev->ctrl.wakeup = NULL;
+	dev->ctrl.alloc_port = msm_alloc_port;
+	dev->ctrl.dealloc_port = msm_dealloc_port;
+	dev->ctrl.port_xfer = msm_slim_port_xfer;
+	dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
+	dev->bam_mem = bam_mem;
+	dev->rx_slim = ngd_slim_rx;
+
+	init_completion(&dev->reconf);
+	init_completion(&dev->ctrl_up);
+	mutex_init(&dev->tx_lock);
+	mutex_init(&dev->ssr_lock);
+	spin_lock_init(&dev->tx_buf_lock);
+	spin_lock_init(&dev->rx_lock);
+	dev->ee = 1;
+	dev->irq = irq->start;
+	dev->bam.irq = bam_irq->start;
+	atomic_set(&dev->ssr_in_progress, 0);
+
+	if (rxreg_access)
+		dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
+	else
+		dev->use_rx_msgqs = MSM_MSGQ_RESET;
+
+	/* Enable TX message queues by default as recommended by HW */
+	dev->use_tx_msgqs = MSM_MSGQ_RESET;
+
+	init_completion(&dev->rx_msgq_notify);
+	init_completion(&dev->qmi.slave_notify);
+
+	/* Register with framework */
+	ret = slim_add_numbered_controller(&dev->ctrl);
+	if (ret) {
+		dev_err(dev->dev, "error adding controller\n");
+		goto err_ctrl_failed;
+	}
+
+	dev->ctrl.dev.parent = &pdev->dev;
+	dev->ctrl.dev.of_node = pdev->dev.of_node;
+	dev->state = MSM_CTRL_DOWN;
+
+	/*
+	 * The handler performs no expensive operations, so it is safe to
+	 * run in hard interrupt context; avoiding a threaded handler
+	 * saves a context switch per interrupt and improves performance.
+	 */
+	ret = request_irq(dev->irq,
+			ngd_slim_interrupt,
+			IRQF_TRIGGER_HIGH,
+			"ngd_slim_irq", dev);
+
+	if (ret) {
+		dev_err(&pdev->dev, "request IRQ failed\n");
+		goto err_request_irq_failed;
+	}
+
+	init_completion(&dev->qmi.qmi_comp);
+	dev->err = -EPROBE_DEFER;
+	pm_runtime_use_autosuspend(dev->dev);
+	pm_runtime_set_autosuspend_delay(dev->dev, MSM_SLIM_AUTOSUSPEND);
+	pm_runtime_set_suspended(dev->dev);
+	pm_runtime_enable(dev->dev);
+
+	if (slim_mdm) {
+		dev->ext_mdm.nb.notifier_call = mdm_ssr_notify_cb;
+		dev->ext_mdm.domr = subsys_notif_register_notifier(ext_modem_id,
+							&dev->ext_mdm.nb);
+		if (IS_ERR_OR_NULL(dev->ext_mdm.domr))
+			dev_err(dev->dev,
+				"subsys_notif_register_notifier failed %p",
+				dev->ext_mdm.domr);
+	}
+
+	INIT_WORK(&dev->dsp.dom_up, ngd_dom_up);
+	dev->qmi.nb.notifier_call = ngd_qmi_available;
+	pm_runtime_get_noresume(dev->dev);
+
+	/* Fire up the Rx message queue thread */
+	dev->rx_msgq_thread = kthread_run(ngd_slim_rx_msgq_thread, dev,
+					"ngd_rx_thread%d", dev->ctrl.nr);
+	if (IS_ERR(dev->rx_msgq_thread)) {
+		ret = PTR_ERR(dev->rx_msgq_thread);
+		dev_err(dev->dev, "Failed to start Rx thread:%d\n", ret);
+		goto err_rx_thread_create_failed;
+	}
+
+	/* Start thread to probe, and notify slaves */
+	dev->qmi.slave_thread = kthread_run(ngd_notify_slaves, dev,
+					"ngd_notify_sl%d", dev->ctrl.nr);
+	if (IS_ERR(dev->qmi.slave_thread)) {
+		ret = PTR_ERR(dev->qmi.slave_thread);
+		dev_err(dev->dev, "Failed to start notifier thread:%d\n", ret);
+		goto err_notify_thread_create_failed;
+	}
+	SLIM_INFO(dev, "NGD SB controller is up!\n");
+	return 0;
+
+err_notify_thread_create_failed:
+	kthread_stop(dev->rx_msgq_thread);
+err_rx_thread_create_failed:
+	free_irq(dev->irq, dev);
+err_request_irq_failed:
+err_ctrl_failed:
+	iounmap(dev->bam.base);
+err_ioremap_bam_failed:
+	iounmap(dev->base);
+err_ioremap_failed:
+	if (dev->sysfs_created)
+		sysfs_remove_file(&dev->dev->kobj,
+				&dev_attr_debug_mask.attr);
+	kfree(dev->bulk.base);
+err_nobulk:
+	kfree(dev->wr_comp);
+	kfree(dev);
+	return ret;
+}
+
+static int ngd_slim_remove(struct platform_device *pdev)
+{
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+
+	ngd_slim_enable(dev, false);
+	if (dev->sysfs_created)
+		sysfs_remove_file(&dev->dev->kobj,
+				&dev_attr_debug_mask.attr);
+	qmi_svc_event_notifier_unregister(SLIMBUS_QMI_SVC_ID,
+				SLIMBUS_QMI_SVC_V1,
+				SLIMBUS_QMI_INS_ID, &dev->qmi.nb);
+	pm_runtime_disable(&pdev->dev);
+	if (dev->dsp.dom_t == MSM_SLIM_DOM_SS)
+		subsys_notif_unregister_notifier(dev->dsp.domr,
+						&dev->dsp.nb);
+	if (dev->dsp.dom_t == MSM_SLIM_DOM_PD)
+		service_notif_unregister_notifier(dev->dsp.domr,
+						&dev->dsp.nb);
+	if (!IS_ERR_OR_NULL(dev->ext_mdm.domr))
+		subsys_notif_unregister_notifier(dev->ext_mdm.domr,
+						&dev->ext_mdm.nb);
+	kfree(dev->bulk.base);
+	free_irq(dev->irq, dev);
+	slim_del_controller(&dev->ctrl);
+	kthread_stop(dev->rx_msgq_thread);
+	iounmap(dev->bam.base);
+	iounmap(dev->base);
+	kfree(dev->wr_comp);
+	kfree(dev);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int ngd_slim_runtime_idle(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+
+	mutex_lock(&dev->tx_lock);
+	if (dev->state == MSM_CTRL_AWAKE)
+		dev->state = MSM_CTRL_IDLE;
+	mutex_unlock(&dev->tx_lock);
+	dev_dbg(device, "pm_runtime: idle...\n");
+	pm_request_autosuspend(device);
+	return -EAGAIN;
+}
+#endif
+
+/*
+ * If runtime PM is not enabled, these functions serve as helpers
+ * called from system suspend/resume, so they are not guarded by
+ * #ifdef CONFIG_PM.
+ */
+static int ngd_slim_runtime_resume(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+	int ret = 0;
+
+	mutex_lock(&dev->tx_lock);
+	if (dev->state >= MSM_CTRL_ASLEEP)
+		ret = ngd_slim_power_up(dev, false);
+	if (ret) {
+		/* Did SSR cause this power-up failure? */
+		if (dev->state != MSM_CTRL_DOWN)
+			dev->state = MSM_CTRL_ASLEEP;
+		else
+			SLIM_WARN(dev, "HW wakeup attempt during SSR\n");
+	} else {
+		dev->state = MSM_CTRL_AWAKE;
+	}
+	mutex_unlock(&dev->tx_lock);
+	SLIM_INFO(dev, "Slim runtime resume: ret %d\n", ret);
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int ngd_slim_runtime_suspend(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+	int ret = 0;
+
+	mutex_lock(&dev->tx_lock);
+	ret = ngd_slim_power_down(dev);
+	if (ret && ret != -EBUSY)
+		SLIM_INFO(dev, "slim resource not idle:%d\n", ret);
+	if (!ret || ret == -ETIMEDOUT)
+		dev->state = MSM_CTRL_ASLEEP;
+	mutex_unlock(&dev->tx_lock);
+	SLIM_INFO(dev, "Slim runtime suspend: ret %d\n", ret);
+	return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int ngd_slim_suspend(struct device *dev)
+{
+	int ret = -EBUSY;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+
+	if (!pm_runtime_enabled(dev) ||
+		(!pm_runtime_suspended(dev) &&
+			cdev->state == MSM_CTRL_IDLE)) {
+		ret = ngd_slim_runtime_suspend(dev);
+		/*
+		 * If runtime PM still thinks the device is active, bring
+		 * its status back in sync with the HW state. The QMI call
+		 * made during this suspend holds a wakelock, which fails
+		 * the first suspend attempt; subsequent attempts must not
+		 * trigger the low-power transition again since the HW is
+		 * already suspended.
+		 */
+		if (!ret) {
+			pm_runtime_disable(dev);
+			pm_runtime_set_suspended(dev);
+			pm_runtime_enable(dev);
+		}
+	}
+	if (ret == -EBUSY) {
+		/*
+		 * An audio stream may still be active during suspend. Do not
+		 * return a suspend failure in that case, so that the display
+		 * and other components can still suspend. Any other error is
+		 * passed on to system-level suspend.
+		 */
+		ret = 0;
+	}
+	SLIM_INFO(cdev, "system suspend\n");
+	return ret;
+}
+
+static int ngd_slim_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+	/*
+	 * Rely on runtime-PM to call resume in case it is enabled.
+	 * Even if it's not enabled, rely on 1st client transaction to do
+	 * clock/power on
+	 */
+	SLIM_INFO(cdev, "system resume\n");
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops ngd_slim_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(
+		ngd_slim_suspend,
+		ngd_slim_resume
+	)
+	SET_RUNTIME_PM_OPS(
+		ngd_slim_runtime_suspend,
+		ngd_slim_runtime_resume,
+		ngd_slim_runtime_idle
+	)
+};
+
+static const struct of_device_id ngd_slim_dt_match[] = {
+	{
+		.compatible = "qcom,slim-ngd",
+	},
+	{}
+};
+
+static struct platform_driver ngd_slim_driver = {
+	.probe = ngd_slim_probe,
+	.remove = ngd_slim_remove,
+	.driver	= {
+		.name = NGD_SLIM_NAME,
+		.owner = THIS_MODULE,
+		.pm = &ngd_slim_dev_pm_ops,
+		.of_match_table = ngd_slim_dt_match,
+	},
+};
+
+static int ngd_slim_init(void)
+{
+	return platform_driver_register(&ngd_slim_driver);
+}
+late_initcall(ngd_slim_init);
+
+static void ngd_slim_exit(void)
+{
+	platform_driver_unregister(&ngd_slim_driver);
+}
+module_exit(ngd_slim_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM Slimbus controller");
+MODULE_ALIAS("platform:msm-slim-ngd");
diff --git a/drivers/slimbus/slim-msm.c b/drivers/slimbus/slim-msm.c
new file mode 100644
index 0000000..b426a2c
--- /dev/null
+++ b/drivers/slimbus/slim-msm.c
@@ -0,0 +1,1634 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/slimbus/slimbus.h>
+#include <linux/msm-sps.h>
+#include <linux/gcd.h>
+#include "slim-msm.h"
+
+int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
+{
+	spin_lock(&dev->rx_lock);
+	if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
+		spin_unlock(&dev->rx_lock);
+		dev_err(dev->dev, "RX QUEUE full!");
+		return -EXFULL;
+	}
+	memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
+	dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
+	spin_unlock(&dev->rx_lock);
+	return 0;
+}
+
+int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->rx_lock, flags);
+	if (dev->tail == dev->head) {
+		spin_unlock_irqrestore(&dev->rx_lock, flags);
+		return -ENODATA;
+	}
+	memcpy(buf, (u8 *)dev->rx_msgs[dev->head], SLIM_MSGQ_BUF_LEN);
+	dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
+	spin_unlock_irqrestore(&dev->rx_lock, flags);
+	return 0;
+}
+
+int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
+{
+#ifdef CONFIG_PM
+	int ref = 0;
+	int ret = pm_runtime_get_sync(dev->dev);
+
+	if (ret >= 0) {
+		ref = atomic_read(&dev->dev->power.usage_count);
+		if (ref <= 0) {
+			SLIM_WARN(dev, "reference count -ve:%d", ref);
+			ret = -ENODEV;
+		}
+	}
+	return ret;
+#else
+	return -ENODEV;
+#endif
+}
+void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
+{
+#ifdef CONFIG_PM
+	int ref;
+
+	pm_runtime_mark_last_busy(dev->dev);
+	ref = atomic_read(&dev->dev->power.usage_count);
+	if (ref <= 0)
+		SLIM_WARN(dev, "reference count mismatch:%d", ref);
+	else
+		pm_runtime_put_sync(dev->dev);
+#endif
+}
+
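+/*
+ * Port interrupt handler: latch overflow/underflow errors per port,
+ * then mask and clear the offending interrupts until the client
+ * supplies more buffers.
+ */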
+irqreturn_t msm_slim_port_irq_handler(struct msm_slim_ctrl *dev, u32 pstat)
+{
+	int i;
+	u32 int_en = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
+							dev->ver));
+	/*
+	 * Ignore port interrupts that we did not enable. This can happen
+	 * when overflow/underflow is reported for a port whose interrupt
+	 * was disabled because the client had no buffers available.
+	 */
+	if ((pstat & int_en) == 0)
+		return IRQ_HANDLED;
+	for (i = 0; i < dev->port_nums; i++) {
+		struct msm_slim_endp *endpoint = &dev->pipes[i];
+
+		if (pstat & (1 << endpoint->port_b)) {
+			u32 val = readl_relaxed(PGD_PORT(PGD_PORT_STATn,
+					endpoint->port_b, dev->ver));
+			if (val & MSM_PORT_OVERFLOW) {
+				dev->ctrl.ports[i].err =
+						SLIM_P_OVERFLOW;
+			} else if (val & MSM_PORT_UNDERFLOW) {
+				dev->ctrl.ports[i].err =
+					SLIM_P_UNDERFLOW;
+			}
+		}
+	}
+	/*
+	 * Disable port interrupt here. Re-enable when more
+	 * buffers are provided for this port.
+	 */
+	writel_relaxed((int_en & (~pstat)),
+			PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
+					dev->ver));
+	/* clear port interrupts */
+	writel_relaxed(pstat, PGD_THIS_EE(PGD_PORT_INT_CL_EEn,
+							dev->ver));
+	SLIM_INFO(dev, "disabled overflow/underflow for port 0x%x", pstat);
+
+	/*
+	 * Guarantee that port interrupt bit(s) clearing writes go
+	 * through before exiting ISR
+	 */
+	mb();
+	return IRQ_HANDLED;
+}
+
+int msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
+{
+	int ret;
+	struct sps_pipe *endpoint;
+	struct sps_connect *config = &ep->config;
+
+	/* Allocate the endpoint */
+	endpoint = sps_alloc_endpoint();
+	if (!endpoint) {
+		dev_err(dev->dev, "sps_alloc_endpoint failed\n");
+		return -ENOMEM;
+	}
+
+	/* Get default connection configuration for an endpoint */
+	ret = sps_get_config(endpoint, config);
+	if (ret) {
+		dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
+		goto sps_config_failed;
+	}
+
+	ep->sps = endpoint;
+	return 0;
+
+sps_config_failed:
+	sps_free_endpoint(endpoint);
+	return ret;
+}
+
+void msm_slim_free_endpoint(struct msm_slim_endp *ep)
+{
+	sps_free_endpoint(ep->sps);
+	ep->sps = NULL;
+}
+
+int msm_slim_sps_mem_alloc(
+		struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
+{
+	dma_addr_t phys;
+
+	mem->size = len;
+	mem->min_size = 0;
+	mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
+
+	if (!mem->base) {
+		dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
+		return -ENOMEM;
+	}
+
+	mem->phys_base = phys;
+	memset(mem->base, 0x00, mem->size);
+	return 0;
+}
+
+void
+msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
+{
+	if (mem->base && mem->phys_base)
+		dma_free_coherent(dev->dev, mem->size, mem->base,
+							mem->phys_base);
+	else
+		dev_err(dev->dev, "cant dma free. they are NULL\n");
+	mem->size = 0;
+	mem->base = NULL;
+	mem->phys_base = 0;
+}
+
+void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pipenum, u8 portnum)
+{
+	struct slim_controller *ctrl;
+	struct slim_ch *chan;
+	struct msm_slim_pshpull_parm *parm;
+	u32 set_cfg = 0;
+	struct slim_port_cfg cfg;
+
+	if (!dev) {
+		pr_err("%s:Dev node is null\n", __func__);
+		return;
+	}
+	if (portnum >= dev->port_nums) {
+		pr_err("%s:Invalid port\n", __func__);
+		return;
+	}
+	/* dereference dev only after the NULL and range checks above */
+	cfg = dev->ctrl.ports[portnum].cfg;
+	ctrl = &dev->ctrl;
+	chan = ctrl->ports[portnum].ch;
+	parm = &dev->pipes[portnum].psh_pull;
+
+	if (cfg.watermark)
+		set_cfg = (cfg.watermark << 1);
+	else
+		set_cfg = DEF_WATERMARK;
+
+	if (cfg.port_opts & SLIM_OPT_NO_PACK)
+		set_cfg |= DEF_NO_PACK;
+	else
+		set_cfg |= DEF_PACK;
+
+	if (cfg.port_opts & SLIM_OPT_ALIGN_MSB)
+		set_cfg |= DEF_ALIGN_MSB;
+	else
+		set_cfg |= DEF_ALIGN_LSB;
+
+	set_cfg |= ENABLE_PORT;
+
+	writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pipenum, dev->ver));
+	writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pipenum, dev->ver));
+	writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pipenum, dev->ver));
+
+	if (chan->prot == SLIM_PUSH || chan->prot == SLIM_PULL) {
+		set_cfg = 0;
+		set_cfg |= ((0xFFFF & parm->num_samples)<<16);
+		set_cfg |= (0xFFFF & parm->rpt_period);
+		writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_PSHPLLn,
+							pipenum, dev->ver));
+	}
+	/* Make sure that port registers are updated before returning */
+	mb();
+}
+
+static void msm_slim_disconn_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
+{
+	struct msm_slim_endp *endpoint = &dev->pipes[pn];
+	struct sps_register_event sps_event;
+	u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
+					dev->ver));
+	writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn, (endpoint->port_b),
+					dev->ver));
+	writel_relaxed((int_port & ~(1 << endpoint->port_b)),
+		PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver));
+	/* Make sure port register is updated */
+	mb();
+	memset(&sps_event, 0, sizeof(sps_event));
+	sps_register_event(endpoint->sps, &sps_event);
+	sps_disconnect(endpoint->sps);
+	dev->pipes[pn].connected = false;
+}
+
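+/*
+ * Derive push/pull presence-rate parameters: over one report period of
+ * rpt_period superframes, num_samples samples are transported; both
+ * values are reduced by the GCD of the rounded-up superframe multiple
+ * and the channel frequency. For example, assuming a framer running at
+ * 4000 superframes/sec, a 44100 Hz channel gives round_off = 12 and
+ * gcd(48000, 44100) = 300, i.e. 147 samples every 160 superframes.
+ */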
+static void msm_slim_calc_pshpull_parm(struct msm_slim_ctrl *dev,
+					u8 pn, struct slim_ch *prop)
+{
+	struct msm_slim_endp *endpoint = &dev->pipes[pn];
+	struct msm_slim_pshpull_parm *parm = &endpoint->psh_pull;
+	int	chan_freq, round_off, divisor, super_freq;
+
+	super_freq = dev->ctrl.a_framer->superfreq;
+
+	if (prop->baser == SLIM_RATE_4000HZ)
+		chan_freq = 4000 * prop->ratem;
+	else if (prop->baser == SLIM_RATE_11025HZ)
+		chan_freq = 11025 * prop->ratem;
+	else
+		chan_freq = prop->baser * prop->ratem;
+
+	/*
+	 * If channel frequency is multiple of super frame frequency
+	 * ISO protocol is suggested
+	 */
+	if (!(chan_freq % super_freq)) {
+		prop->prot = SLIM_HARD_ISO;
+		return;
+	}
+	round_off = DIV_ROUND_UP(chan_freq, super_freq);
+	divisor = gcd(round_off * super_freq, chan_freq);
+	parm->num_samples = chan_freq/divisor;
+	parm->rpt_period = (round_off * super_freq)/divisor;
+}
+
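+/*
+ * Connect a BAM pipe to a SLIMbus port: the pipe index comes from the
+ * port status register, the transfer direction follows the port flow
+ * (SLIM_SRC moves data from memory toward the bus, SLIM_SINK the
+ * reverse), and a descriptor FIFO is allocated before sps_connect().
+ */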
+int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
+{
+	struct msm_slim_endp *endpoint;
+	struct sps_connect *cfg;
+	struct slim_ch *prop;
+	u32 stat;
+	int ret;
+
+	if (!dev || pn >= dev->port_nums)
+		return -ENODEV;
+	endpoint = &dev->pipes[pn];
+	cfg = &endpoint->config;
+	prop = dev->ctrl.ports[pn].ch;
+
+	ret = sps_get_config(dev->pipes[pn].sps, cfg);
+	if (ret) {
+		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
+		return ret;
+	}
+	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
+				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
+
+	if (prop->prot == SLIM_PUSH || prop->prot ==  SLIM_PULL)
+		msm_slim_calc_pshpull_parm(dev, pn, prop);
+
+	if (dev->pipes[pn].connected &&
+			dev->ctrl.ports[pn].state == SLIM_P_CFG) {
+		return -EISCONN;
+	} else if (dev->pipes[pn].connected) {
+		writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn,
+			(endpoint->port_b), dev->ver));
+		/* Make sure port disabling goes through */
+		mb();
+		/* Is pipe already connected in desired direction */
+		if ((dev->ctrl.ports[pn].flow == SLIM_SRC &&
+			cfg->mode == SPS_MODE_DEST) ||
+			(dev->ctrl.ports[pn].flow == SLIM_SINK &&
+			 cfg->mode == SPS_MODE_SRC)) {
+			msm_hw_set_port(dev, endpoint->port_b, pn);
+			return 0;
+		}
+		msm_slim_disconn_pipe_port(dev, pn);
+	}
+
+	stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, endpoint->port_b,
+					dev->ver));
+	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
+		cfg->destination = dev->bam.hdl;
+		cfg->source = SPS_DEV_HANDLE_MEM;
+		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
+		cfg->src_pipe_index = 0;
+		dev_dbg(dev->dev, "flow src:pipe num:%d",
+					cfg->dest_pipe_index);
+		cfg->mode = SPS_MODE_DEST;
+	} else {
+		cfg->source = dev->bam.hdl;
+		cfg->destination = SPS_DEV_HANDLE_MEM;
+		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
+		cfg->dest_pipe_index = 0;
+		dev_dbg(dev->dev, "flow dest:pipe num:%d",
+					cfg->src_pipe_index);
+		cfg->mode = SPS_MODE_SRC;
+	}
+	/* Space for descriptor FIFOs */
+	ret = msm_slim_sps_mem_alloc(dev, &cfg->desc,
+				MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
+	if (ret)
+		pr_err("mem alloc for descr failed:%d", ret);
+	else
+		ret = sps_connect(dev->pipes[pn].sps, cfg);
+
+	if (!ret) {
+		dev->pipes[pn].connected = true;
+		msm_hw_set_port(dev, endpoint->port_b, pn);
+	}
+	return ret;
+}
+
+int msm_alloc_port(struct slim_controller *ctrl, u8 pn)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	struct msm_slim_endp *endpoint;
+	int ret = 0;
+
+	if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
+		ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
+		return -EPROTONOSUPPORT;
+	if (pn >= dev->port_nums)
+		return -ENODEV;
+
+	endpoint = &dev->pipes[pn];
+	ret = msm_slim_init_endpoint(dev, endpoint);
+	if (ret)
+		dev_err(dev->dev, "endpoint init failed:%x\n", ret);
+}
+
+void msm_dealloc_port(struct slim_controller *ctrl, u8 pn)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	struct msm_slim_endp *endpoint;
+
+	if (pn >= dev->port_nums)
+		return;
+	endpoint = &dev->pipes[pn];
+	if (dev->pipes[pn].connected) {
+		struct sps_connect *config = &endpoint->config;
+
+		msm_slim_disconn_pipe_port(dev, pn);
+		msm_slim_sps_mem_free(dev, &config->desc);
+	}
+	if (endpoint->sps)
+		msm_slim_free_endpoint(endpoint);
+}
+
+enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
+				u8 pn, phys_addr_t *done_buf, u32 *done_len)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
+	struct sps_iovec sio;
+	int ret;
+
+	if (done_len)
+		*done_len = 0;
+	if (done_buf)
+		*done_buf = 0;
+	if (!dev->pipes[pn].connected)
+		return SLIM_P_DISCONNECT;
+	ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
+	if (!ret) {
+		if (done_len)
+			*done_len = sio.size;
+		if (done_buf)
+			*done_buf = (phys_addr_t)sio.addr;
+	}
+	dev_dbg(dev->dev, "get iovec returned %d\n", ret);
+	return SLIM_P_INPROGRESS;
+}
+
+static void msm_slim_port_cb(struct sps_event_notify *ev)
+{
+
+	struct completion *comp = ev->data.transfer.user;
+	struct sps_iovec *iovec = &ev->data.transfer.iovec;
+
+	if (ev->event_id == SPS_EVENT_DESC_DONE) {
+
+		pr_debug("desc done iovec = (0x%x 0x%x 0x%x)\n",
+			iovec->addr, iovec->size, iovec->flags);
+
+	} else {
+		pr_err("%s: ERR event %d\n",
+					__func__, ev->event_id);
+	}
+	if (comp)
+		complete(comp);
+}
+
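+/*
+ * Queue a single DMA transfer on a port pipe and (re)enable that
+ * port's interrupt so completion and error events are delivered.
+ */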
+int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, phys_addr_t iobuf,
+			u32 len, struct completion *comp)
+{
+	struct sps_register_event sreg;
+	int ret;
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+
+	if (pn >= dev->port_nums)
+		return -ENODEV;
+
+	if (!dev->pipes[pn].connected)
+		return -ENOTCONN;
+
+	sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
+	sreg.mode = SPS_TRIGGER_WAIT;
+	sreg.xfer_done = NULL;
+	sreg.callback = msm_slim_port_cb;
+	sreg.user = NULL;
+	ret = sps_register_event(dev->pipes[pn].sps, &sreg);
+	if (ret) {
+		dev_dbg(dev->dev, "sps register event error:%x\n", ret);
+		return ret;
+	}
+	ret = sps_transfer_one(dev->pipes[pn].sps, iobuf, len, comp,
+				SPS_IOVEC_FLAG_INT);
+	dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
+	if (!ret) {
+		/* Enable port interrupts */
+		u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
+						dev->ver));
+		if (!(int_port & (1 << (dev->pipes[pn].port_b))))
+			writel_relaxed((int_port |
+				(1 << dev->pipes[pn].port_b)),
+				PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver));
+		/* Make sure that port registers are updated before returning */
+		mb();
+	}
+
+	return ret;
+}
+
+/* Queue up Tx message buffer */
+static int msm_slim_post_tx_msgq(struct msm_slim_ctrl *dev, u8 *buf, int len)
+{
+	int ret;
+	struct msm_slim_endp *endpoint = &dev->tx_msgq;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+	struct sps_pipe *pipe = endpoint->sps;
+	int ix = (buf - (u8 *)mem->base);
+
+	phys_addr_t phys_addr = mem->phys_base + ix;
+
+	for (ret = 0; ret < ((len + 3) >> 2); ret++)
+		pr_debug("BAM TX buf[%d]:0x%x", ret, ((u32 *)buf)[ret]);
+
+	ret = sps_transfer_one(pipe, phys_addr, ((len + 3) & 0xFC), NULL,
+				SPS_IOVEC_FLAG_EOT);
+	if (ret)
+		dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
+
+	return ret;
+}
+
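+/*
+ * Reclaim completed TX descriptors: complete the waiter recorded for
+ * each returned buffer, route bulk-write completions to the bulk
+ * callback, and advance tx_head; out-of-order completions are only
+ * warned about.
+ */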
+void msm_slim_tx_msg_return(struct msm_slim_ctrl *dev, int err)
+{
+	struct msm_slim_endp *endpoint = &dev->tx_msgq;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+	struct sps_pipe *pipe = endpoint->sps;
+	struct sps_iovec iovec;
+	int idx, ret = 0;
+	phys_addr_t addr;
+
+	if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
+		/* use 1 buffer, non-blocking writes are not possible */
+		if (dev->wr_comp[0]) {
+			struct completion *comp = dev->wr_comp[0];
+
+			dev->wr_comp[0] = NULL;
+			complete(comp);
+		}
+		return;
+	}
+	while (!ret) {
+		memset(&iovec, 0, sizeof(iovec));
+		ret = sps_get_iovec(pipe, &iovec);
+		addr = DESC_FULL_ADDR(iovec.flags, iovec.addr);
+		if (ret || addr == 0) {
+			if (ret)
+				pr_err("SLIM TX get IOVEC failed:%d", ret);
+			return;
+		}
+		if (addr == dev->bulk.wr_dma) {
+			dma_unmap_single(dev->dev, dev->bulk.wr_dma,
+					 dev->bulk.size, DMA_TO_DEVICE);
+			if (!dev->bulk.cb)
+				SLIM_WARN(dev, "no callback for bulk WR?");
+			else
+				dev->bulk.cb(dev->bulk.ctx, err);
+			dev->bulk.in_progress = false;
+			pm_runtime_mark_last_busy(dev->dev);
+			return;
+		} else if (addr < mem->phys_base ||
+			   (addr > (mem->phys_base +
+				    (MSM_TX_BUFS * SLIM_MSGQ_BUF_LEN)))) {
+			SLIM_WARN(dev, "BUF out of bounds:base:0x%pa, io:0x%pa",
+					&mem->phys_base, &addr);
+			continue;
+		}
+		idx = (int) ((addr - mem->phys_base)
+			/ SLIM_MSGQ_BUF_LEN);
+		if (dev->wr_comp[idx]) {
+			struct completion *comp = dev->wr_comp[idx];
+
+			dev->wr_comp[idx] = NULL;
+			complete(comp);
+		}
+		if (err) {
+			int i;
+			u32 *addr = (u32 *)mem->base +
+					(idx * (SLIM_MSGQ_BUF_LEN >> 2));
+			/* print the descriptor that resulted in error */
+			for (i = 0; i < (SLIM_MSGQ_BUF_LEN >> 2); i++)
+				SLIM_WARN(dev, "err desc[%d]:0x%x", i, addr[i]);
+		}
+		/* reclaim all packets that were delivered out of order */
+		if (idx != dev->tx_head)
+			SLIM_WARN(dev, "SLIM OUT OF ORDER TX:idx:%d, head:%d",
+				idx, dev->tx_head);
+		dev->tx_head = (dev->tx_head + 1) % MSM_TX_BUFS;
+	}
+}
+
+static u32 *msm_slim_modify_tx_buf(struct msm_slim_ctrl *dev,
+					struct completion *comp)
+{
+	struct msm_slim_endp *endpoint = &dev->tx_msgq;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+	u32 *retbuf = NULL;
+
+	if ((dev->tx_tail + 1) % MSM_TX_BUFS == dev->tx_head)
+		return NULL;
+
+	retbuf = (u32 *)((u8 *)mem->base +
+				(dev->tx_tail * SLIM_MSGQ_BUF_LEN));
+	dev->wr_comp[dev->tx_tail] = comp;
+	dev->tx_tail = (dev->tx_tail + 1) % MSM_TX_BUFS;
+	return retbuf;
+}
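+
+/*
+ * Get a free TX message-queue buffer. The buffers form a ring indexed
+ * by tx_head/tx_tail over MSM_TX_BUFS entries; when the ring is full,
+ * completed descriptors are reclaimed and the attempt is retried with
+ * 50us delays before giving up.
+ */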
+u32 *msm_slim_manage_tx_msgq(struct msm_slim_ctrl *dev, bool getbuf,
+					struct completion *comp, int err)
+{
+	int ret = 0;
+	int retries = 0;
+	u32 *retbuf = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->tx_buf_lock, flags);
+	if (!getbuf) {
+		msm_slim_tx_msg_return(dev, err);
+		spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
+		return NULL;
+	}
+
+	retbuf = msm_slim_modify_tx_buf(dev, comp);
+	if (retbuf) {
+		spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
+		return retbuf;
+	}
+
+	do {
+		msm_slim_tx_msg_return(dev, err);
+		retbuf = msm_slim_modify_tx_buf(dev, comp);
+		if (!retbuf)
+			ret = -EAGAIN;
+		else {
+			if (retries > 0)
+				SLIM_INFO(dev, "SLIM TX retrieved:%d retries",
+							retries);
+			spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
+			return retbuf;
+		}
+
+		/*
+		 * Superframe duration varies with the clock gear, and
+		 * healthy HW consumes at least one message per superframe.
+		 * With INIT_MX_RETRIES 50us delays, wait roughly two
+		 * superframes before deciding the HW cannot process
+		 * descriptors.
+		 */
+		udelay(50);
+		retries++;
+	} while (ret && (retries < INIT_MX_RETRIES));
+
+	spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
+	return NULL;
+}
+
+int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg)
+{
+	if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
+		int i;
+
+		for (i = 0; i < (len + 3) >> 2; i++) {
+			dev_dbg(dev->dev, "AHB TX data:0x%x\n", buf[i]);
+			writel_relaxed(buf[i], dev->base + tx_reg + (i * 4));
+		}
+		/* Guarantee that message is sent before returning */
+		mb();
+		return 0;
+	}
+	return msm_slim_post_tx_msgq(dev, (u8 *)buf, len);
+}
+
+u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len,
+			struct completion *comp)
+{
+	/*
+	 * Without message queues, block until the current transaction
+	 * completes and reuse the single TX buffer. Message queues allow
+	 * multiple outstanding transactions.
+	 */
+	if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
+		dev->wr_comp[0] = comp;
+		return dev->tx_buf;
+	}
+
+	return msm_slim_manage_tx_msgq(dev, true, comp, 0);
+}
+
+static void
+msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
+{
+	if (ev->event_id == SPS_EVENT_DESC_DONE)
+		complete(&dev->rx_msgq_notify);
+	else
+		dev_err(dev->dev, "%s: unknown event %d\n",
+					__func__, ev->event_id);
+}
+
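+/*
+ * Reassemble a SLIMbus message from 4-byte RX FIFO words: the first
+ * word of a message carries its 5-bit length in bytes, 3-bit message
+ * type (MT) and 8-bit message code (MC); once the full length has
+ * been collected, the message is handed to rx_slim().
+ */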
+static void
+msm_slim_handle_rx(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
+{
+	int ret = 0;
+	u32 mc = 0;
+	u32 mt = 0;
+	u8 msg_len = 0;
+
+	if (ev->event_id != SPS_EVENT_EOT) {
+		dev_err(dev->dev, "%s: unknown event %d\n",
+					__func__, ev->event_id);
+		return;
+	}
+
+	do {
+		ret = msm_slim_rx_msgq_get(dev, dev->current_rx_buf,
+					   dev->current_count);
+		if (ret == -ENODATA) {
+			return;
+		} else if (ret) {
+			SLIM_ERR(dev, "rx_msgq_get() failed 0x%x\n",
+								ret);
+			return;
+		}
+
+		/* Parse the first word of a new message for length, MT and MC */
+		if (dev->current_count++ == 0) {
+			msg_len = *(dev->current_rx_buf) & 0x1F;
+			mt = (*(dev->current_rx_buf) >> 5) & 0x7;
+			mc = (*(dev->current_rx_buf) >> 8) & 0xff;
+			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
+		}
+
+		msg_len = (msg_len < 4) ? 0 : (msg_len - 4);
+
+		if (!msg_len) {
+			dev->rx_slim(dev, (u8 *)dev->current_rx_buf);
+			dev->current_count = 0;
+		}
+
+	} while (1);
+}
+
+static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
+{
+	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
+	/* is this manager controller or NGD controller? */
+	if (dev->ctrl.wakeup)
+		msm_slim_rx_msgq_event(dev, notify);
+	else
+		msm_slim_handle_rx(dev, notify);
+}
+
+/* Queue up Rx message buffer */
+static int msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
+{
+	int ret;
+	struct msm_slim_endp *endpoint = &dev->rx_msgq;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+	struct sps_pipe *pipe = endpoint->sps;
+
+	/* Rx message queue buffers are 4 bytes in length */
+	u8 *virt_addr = mem->base + (4 * ix);
+	phys_addr_t phys_addr = mem->phys_base + (4 * ix);
+
+	ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, 0);
+	if (ret)
+		dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
+
+	return ret;
+}
+
+int msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
+{
+	struct msm_slim_endp *endpoint = &dev->rx_msgq;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+	struct sps_pipe *pipe = endpoint->sps;
+	struct sps_iovec iovec;
+	phys_addr_t addr;
+	int index;
+	int ret;
+
+	ret = sps_get_iovec(pipe, &iovec);
+	if (ret) {
+		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
+		goto err_exit;
+	}
+
+	addr = DESC_FULL_ADDR(iovec.flags, iovec.addr);
+	pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
+		iovec.addr, iovec.size, iovec.flags);
+
+	/* no more descriptors */
+	if (!ret && (iovec.addr == 0) && (iovec.size == 0)) {
+		ret = -ENODATA;
+		goto err_exit;
+	}
+
+	/* Calculate buffer index */
+	index = (addr - mem->phys_base) / 4;
+	*(data + offset) = *((u32 *)mem->base + index);
+
+	pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);
+
+	/* Add buffer back to the queue */
+	(void)msm_slim_post_rx_msgq(dev, index);
+
+err_exit:
+	return ret;
+}
+
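+/*
+ * Connect a message-queue endpoint and arm its SPS events: the RX
+ * queue registers an EOT callback and pre-posts its 4-byte buffers,
+ * the TX queue resets its ring indices; both register an error
+ * callback.
+ */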
+int msm_slim_connect_endp(struct msm_slim_ctrl *dev,
+				struct msm_slim_endp *endpoint)
+{
+	int i, ret;
+	struct sps_register_event sps_error_event; /* SPS_ERROR */
+	struct sps_register_event sps_descr_event; /* DESCR_DONE */
+	struct sps_connect *config = &endpoint->config;
+	unsigned long flags;
+
+	ret = sps_connect(endpoint->sps, config);
+	if (ret) {
+		dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
+		return ret;
+	}
+
+	memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
+
+	if (endpoint == &dev->rx_msgq) {
+		sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
+		sps_descr_event.options = SPS_O_EOT;
+		sps_descr_event.user = (void *)dev;
+		sps_descr_event.callback = msm_slim_rx_msgq_cb;
+		sps_descr_event.xfer_done = NULL;
+
+		ret = sps_register_event(endpoint->sps, &sps_descr_event);
+		if (ret) {
+			dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
+			goto sps_reg_event_failed;
+		}
+	}
+
+	/* Register callback for errors */
+	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
+	sps_error_event.mode = SPS_TRIGGER_CALLBACK;
+	sps_error_event.options = SPS_O_ERROR;
+	sps_error_event.user = (void *)dev;
+	sps_error_event.callback = msm_slim_rx_msgq_cb;
+
+	ret = sps_register_event(endpoint->sps, &sps_error_event);
+	if (ret) {
+		dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
+		goto sps_reg_event_failed;
+	}
+
+	/*
+	 * Call transfer_one for each 4-byte buffer.
+	 * Use (buf->size / 4) - 1 as the number of buffers to post.
+	 */
+
+	if (endpoint == &dev->rx_msgq) {
+		/* Setup the transfer */
+		for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
+			ret = msm_slim_post_rx_msgq(dev, i);
+			if (ret) {
+				dev_err(dev->dev,
+					"post_rx_msgq() failed 0x%x\n", ret);
+				goto sps_transfer_failed;
+			}
+		}
+		dev->use_rx_msgqs = MSM_MSGQ_ENABLED;
+	} else {
+		spin_lock_irqsave(&dev->tx_buf_lock, flags);
+		dev->tx_tail = 0;
+		dev->tx_head = 0;
+		for (i = 0; i < MSM_TX_BUFS; i++)
+			dev->wr_comp[i] = NULL;
+		spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
+		dev->use_tx_msgqs = MSM_MSGQ_ENABLED;
+	}
+
+	return 0;
+sps_transfer_failed:
+	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
+	sps_register_event(endpoint->sps, &sps_error_event);
+sps_reg_event_failed:
+	sps_disconnect(endpoint->sps);
+	return ret;
+}
+
+static int msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
+{
+	int ret;
+	u32 pipe_offset;
+	struct msm_slim_endp *endpoint = &dev->rx_msgq;
+	struct sps_connect *config = &endpoint->config;
+	struct sps_mem_buffer *descr = &config->desc;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+
+	if (dev->use_rx_msgqs == MSM_MSGQ_DISABLED)
+		return 0;
+
+	/* Allocate the endpoint */
+	ret = msm_slim_init_endpoint(dev, endpoint);
+	if (ret) {
+		dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
+		goto sps_init_endpoint_failed;
+	}
+
+	/* Get the pipe indices for the message queues */
+	pipe_offset = (readl_relaxed(dev->base + pipe_reg) & 0xfc) >> 2;
+	dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
+
+	config->mode = SPS_MODE_SRC;
+	config->source = dev->bam.hdl;
+	config->destination = SPS_DEV_HANDLE_MEM;
+	config->src_pipe_index = pipe_offset;
+	config->options = SPS_O_EOT | SPS_O_ERROR |
+				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
+
+	/* Allocate memory for the FIFO descriptors */
+	ret = msm_slim_sps_mem_alloc(dev, descr,
+				MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
+	if (ret) {
+		dev_err(dev->dev, "unable to allocate SPS descriptors\n");
+		goto alloc_descr_failed;
+	}
+
+	/* Allocate memory for the message buffers: N descriptors, 4-byte msgs */
+	ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
+	if (ret) {
+		dev_err(dev->dev, "dma_alloc_coherent failed\n");
+		goto alloc_buffer_failed;
+	}
+
+	ret = msm_slim_connect_endp(dev, endpoint);
+
+	if (!ret)
+		return 0;
+
+	msm_slim_sps_mem_free(dev, mem);
+alloc_buffer_failed:
+	msm_slim_sps_mem_free(dev, descr);
+alloc_descr_failed:
+	msm_slim_free_endpoint(endpoint);
+sps_init_endpoint_failed:
+	dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
+	return ret;
+}
+
+static int msm_slim_init_tx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
+{
+	int ret;
+	u32 pipe_offset;
+	struct msm_slim_endp *endpoint = &dev->tx_msgq;
+	struct sps_connect *config = &endpoint->config;
+	struct sps_mem_buffer *descr = &config->desc;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+
+	if (dev->use_tx_msgqs == MSM_MSGQ_DISABLED)
+		return 0;
+
+	/* Allocate the endpoint */
+	ret = msm_slim_init_endpoint(dev, endpoint);
+	if (ret) {
+		dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
+		goto sps_init_endpoint_failed;
+	}
+
+	/* Get the pipe indices for the message queues */
+	pipe_offset = (readl_relaxed(dev->base + pipe_reg) & 0xfc) >> 2;
+	pipe_offset += 1;
+	dev_dbg(dev->dev, "TX Message queue pipe offset %d\n", pipe_offset);
+
+	config->mode = SPS_MODE_DEST;
+	config->source = SPS_DEV_HANDLE_MEM;
+	config->destination = dev->bam.hdl;
+	config->dest_pipe_index = pipe_offset;
+	config->src_pipe_index = 0;
+	config->options = SPS_O_ERROR | SPS_O_NO_Q |
+				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
+
+	/* Desc and TX buf are circular queues */
+	/* Allocate memory for the FIFO descriptors */
+	ret = msm_slim_sps_mem_alloc(dev, descr,
+				(MSM_TX_BUFS + 1) * sizeof(struct sps_iovec));
+	if (ret) {
+		dev_err(dev->dev, "unable to allocate SPS descriptors\n");
+		goto alloc_descr_failed;
+	}
+
+	/* Allocate TX buffer from which descriptors are created */
+	ret = msm_slim_sps_mem_alloc(dev, mem, ((MSM_TX_BUFS + 1) *
+					SLIM_MSGQ_BUF_LEN));
+	if (ret) {
+		dev_err(dev->dev, "dma_alloc_coherent failed\n");
+		goto alloc_buffer_failed;
+	}
+	ret = msm_slim_connect_endp(dev, endpoint);
+
+	if (!ret)
+		return 0;
+
+	msm_slim_sps_mem_free(dev, mem);
+alloc_buffer_failed:
+	msm_slim_sps_mem_free(dev, descr);
+alloc_descr_failed:
+	msm_slim_free_endpoint(endpoint);
+sps_init_endpoint_failed:
+	dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
+	return ret;
+}
+
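+/*
+ * Count (and, once dev->pipes is allocated, assign) the data ports
+ * owned by apps: pipes 0-6 carry message queues, so data port N maps
+ * to BAM pipe N + 7.
+ */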
+static int msm_slim_data_port_assign(struct msm_slim_ctrl *dev)
+{
+	int i, data_ports = 0;
+	/* The first 7 pipes are reserved for message queues */
+	for (i = 7; i < 32; i++) {
+		/* Check what pipes are owned by Apps. */
+		if ((dev->pdata.apps_pipes >> i) & 0x1) {
+			if (dev->pipes)
+				dev->pipes[data_ports].port_b = i - 7;
+			data_ports++;
+		}
+	}
+	return data_ports;
+}
+/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
+int msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem,
+			u32 pipe_reg, bool remote)
+{
+	int ret;
+	unsigned long bam_handle;
+	struct sps_bam_props bam_props = {0};
+
+	static struct sps_bam_sec_config_props sec_props = {
+		.ees = {
+			[0] = {		/* LPASS */
+				.vmid = 0,
+				.pipe_mask = 0xFFFF98,
+			},
+			[1] = {		/* Krait Apps */
+				.vmid = 1,
+				.pipe_mask = 0x3F000007,
+			},
+			[2] = {		/* Modem */
+				.vmid = 2,
+				.pipe_mask = 0x00000060,
+			},
+		},
+	};
+
+	if (dev->bam.hdl) {
+		bam_handle = dev->bam.hdl;
+		goto init_pipes;
+	}
+	bam_props.ee = dev->ee;
+	bam_props.virt_addr = dev->bam.base;
+	bam_props.phys_addr = bam_mem->start;
+	bam_props.irq = dev->bam.irq;
+	if (!remote) {
+		bam_props.manage = SPS_BAM_MGR_LOCAL;
+		bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
+	} else {
+		bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE |
+					SPS_BAM_MGR_MULTI_EE;
+		bam_props.sec_config = SPS_BAM_SEC_DO_NOT_CONFIG;
+	}
+	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;
+
+	bam_props.p_sec_config_props = &sec_props;
+
+	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
+				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
+
+	/* override apps channel pipes if specified in platform-data or DT */
+	if (dev->pdata.apps_pipes)
+		sec_props.ees[dev->ee].pipe_mask = dev->pdata.apps_pipes;
+
+	/* Register the BAM device with the SPS driver */
+	ret = sps_register_bam_device(&bam_props, &bam_handle);
+	if (ret) {
+		dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
+		dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
+		dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
+		return ret;
+	}
+	dev->bam.hdl = bam_handle;
+	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%lx\n", bam_handle);
+
+init_pipes:
+	if (dev->port_nums)
+		goto init_msgq;
+
+	/* get the # of ports first */
+	dev->port_nums = msm_slim_data_port_assign(dev);
+	if (dev->port_nums && !dev->pipes) {
+		dev->pipes = kcalloc(dev->port_nums,
+					sizeof(struct msm_slim_endp),
+					GFP_KERNEL);
+		if (!dev->pipes) {
+			sps_deregister_bam_device(bam_handle);
+			return -ENOMEM;
+		}
+		/* assign the ports now */
+		msm_slim_data_port_assign(dev);
+	}
+
+init_msgq:
+	ret = msm_slim_init_rx_msgq(dev, pipe_reg);
+	if (ret)
+		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
+	if (ret && bam_handle)
+		dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
+
+	ret = msm_slim_init_tx_msgq(dev, pipe_reg);
+	if (ret)
+		dev_err(dev->dev, "msm_slim_init_tx_msgq failed 0x%x\n", ret);
+	if (ret && bam_handle)
+		dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
+
+	/*
+	 * If the BAM command interface fails to come up, the register
+	 * interface is used for commands instead. Other BAM use cases
+	 * (e.g. apps channels) may still need BAM; since BAM itself
+	 * initialized successfully, continue using it for those
+	 * non-command use cases and return success here.
+	 */
+
+	return 0;
+}
+
+void msm_slim_disconnect_endp(struct msm_slim_ctrl *dev,
+					struct msm_slim_endp *endpoint,
+					enum msm_slim_msgq *msgq_flag)
+{
+	if (*msgq_flag >= MSM_MSGQ_ENABLED) {
+		sps_disconnect(endpoint->sps);
+		*msgq_flag = MSM_MSGQ_RESET;
+	}
+}
+
+static int msm_slim_discard_rx_data(struct msm_slim_ctrl *dev,
+					struct msm_slim_endp *endpoint)
+{
+	struct sps_iovec sio;
+	int desc_num = 0, ret = 0;
+
+	ret = sps_get_unused_desc_num(endpoint->sps, &desc_num);
+	if (ret) {
+		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
+		return ret;
+	}
+	while (desc_num--)
+		sps_get_iovec(endpoint->sps, &sio);
+	return ret;
+}
+
+static void msm_slim_remove_ep(struct msm_slim_ctrl *dev,
+					struct msm_slim_endp *endpoint,
+					enum msm_slim_msgq *msgq_flag)
+{
+	struct sps_connect *config = &endpoint->config;
+	struct sps_mem_buffer *descr = &config->desc;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+
+	msm_slim_sps_mem_free(dev, mem);
+	msm_slim_sps_mem_free(dev, descr);
+	msm_slim_free_endpoint(endpoint);
+}
+
+void msm_slim_deinit_ep(struct msm_slim_ctrl *dev,
+				struct msm_slim_endp *endpoint,
+				enum msm_slim_msgq *msgq_flag)
+{
+	int ret = 0;
+	struct sps_connect *config = &endpoint->config;
+
+	if (*msgq_flag == MSM_MSGQ_ENABLED) {
+		if (config->mode == SPS_MODE_SRC) {
+			ret = msm_slim_discard_rx_data(dev, endpoint);
+			if (ret)
+				SLIM_WARN(dev, "discarding Rx data failed\n");
+		}
+		msm_slim_disconnect_endp(dev, endpoint, msgq_flag);
+		msm_slim_remove_ep(dev, endpoint, msgq_flag);
+	}
+}
+
+static void msm_slim_sps_unreg_event(struct sps_pipe *sps)
+{
+	struct sps_register_event sps_event;
+
+	memset(&sps_event, 0x00, sizeof(sps_event));
+	/* Disable interrupt and signal notification for Rx/Tx pipe */
+	sps_register_event(sps, &sps_event);
+}
+
+void msm_slim_sps_exit(struct msm_slim_ctrl *dev, bool dereg)
+{
+	int i;
+
+	if (dev->use_rx_msgqs >= MSM_MSGQ_ENABLED)
+		msm_slim_sps_unreg_event(dev->rx_msgq.sps);
+	if (dev->use_tx_msgqs >= MSM_MSGQ_ENABLED)
+		msm_slim_sps_unreg_event(dev->tx_msgq.sps);
+
+	for (i = 0; i < dev->port_nums; i++) {
+		if (dev->pipes[i].connected)
+			msm_slim_disconn_pipe_port(dev, i);
+	}
+	if (dereg) {
+		for (i = 0; i < dev->port_nums; i++) {
+			if (dev->pipes[i].connected)
+				msm_dealloc_port(&dev->ctrl, i);
+		}
+		sps_deregister_bam_device(dev->bam.hdl);
+		dev->bam.hdl = 0L;
+		kfree(dev->pipes);
+		dev->pipes = NULL;
+	}
+	dev->port_nums = 0;
+}
+
+/* Slimbus QMI Messaging */
+#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01 0x0020
+#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01 0x0020
+#define SLIMBUS_QMI_POWER_REQ_V01 0x0021
+#define SLIMBUS_QMI_POWER_RESP_V01 0x0021
+#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ 0x0022
+#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP 0x0022
+
+#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN 14
+#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN 7
+
+enum slimbus_mode_enum_type_v01 {
+	/* To force a 32-bit signed enum. Do not change or use */
+	SLIMBUS_MODE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+	SLIMBUS_MODE_SATELLITE_V01 = 1,
+	SLIMBUS_MODE_MASTER_V01 = 2,
+	SLIMBUS_MODE_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+enum slimbus_pm_enum_type_v01 {
+	/* To force a 32-bit signed enum. Do not change or use */
+	SLIMBUS_PM_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+	SLIMBUS_PM_INACTIVE_V01 = 1,
+	SLIMBUS_PM_ACTIVE_V01 = 2,
+	SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+struct slimbus_select_inst_req_msg_v01 {
+	/* Mandatory */
+	/* Hardware Instance Selection */
+	uint32_t instance;
+
+	/* Optional */
+	/* Optional Mode Request Operation */
+	/* Must be set to true if mode is being passed */
+	uint8_t mode_valid;
+	enum slimbus_mode_enum_type_v01 mode;
+};
+
+struct slimbus_select_inst_resp_msg_v01 {
+	/* Mandatory */
+	/* Result Code */
+	struct qmi_response_type_v01 resp;
+};
+
+struct slimbus_power_req_msg_v01 {
+	/* Mandatory */
+	/* Power Request Operation */
+	enum slimbus_pm_enum_type_v01 pm_req;
+};
+
+struct slimbus_power_resp_msg_v01 {
+	/* Mandatory */
+	/* Result Code */
+	struct qmi_response_type_v01 resp;
+};
+
+struct slimbus_chkfrm_resp_msg {
+	/* Mandatory */
+	/* Result Code */
+	struct qmi_response_type_v01 resp;
+};
+
+
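+/*
+ * QMI element-info tables: each entry describes one TLV element on
+ * the wire (data type, length, tlv_type tag) and the offset of the
+ * decoded field in the request/response struct; QMI_EOTI terminates
+ * each table.
+ */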
+static struct elem_info slimbus_select_inst_req_msg_v01_ei[] = {
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint32_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x01,
+		.offset    = offsetof(struct slimbus_select_inst_req_msg_v01,
+				      instance),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(struct slimbus_select_inst_req_msg_v01,
+				      mode_valid),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len  = 1,
+		.elem_size = sizeof(enum slimbus_mode_enum_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(struct slimbus_select_inst_req_msg_v01,
+				      mode),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_EOTI,
+		.elem_len  = 0,
+		.elem_size = 0,
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x00,
+		.offset    = 0,
+		.ei_array  = NULL,
+	},
+};
+
+static struct elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(struct slimbus_select_inst_resp_msg_v01,
+				      resp),
+		.ei_array  = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type = QMI_EOTI,
+		.elem_len  = 0,
+		.elem_size = 0,
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x00,
+		.offset    = 0,
+		.ei_array  = NULL,
+	},
+};
+
+static struct elem_info slimbus_power_req_msg_v01_ei[] = {
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len  = 1,
+		.elem_size = sizeof(enum slimbus_pm_enum_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x01,
+		.offset    = offsetof(struct slimbus_power_req_msg_v01, pm_req),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_EOTI,
+		.elem_len  = 0,
+		.elem_size = 0,
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x00,
+		.offset    = 0,
+		.ei_array  = NULL,
+	},
+};
+
+static struct elem_info slimbus_power_resp_msg_v01_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(struct slimbus_power_resp_msg_v01, resp),
+		.ei_array  = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type = QMI_EOTI,
+		.elem_len  = 0,
+		.elem_size = 0,
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x00,
+		.offset    = 0,
+		.ei_array  = NULL,
+	},
+};
+
+static struct elem_info slimbus_chkfrm_resp_msg_v01_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(struct slimbus_chkfrm_resp_msg, resp),
+		.ei_array  = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type = QMI_EOTI,
+		.elem_len  = 0,
+		.elem_size = 0,
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x00,
+		.offset    = 0,
+		.ei_array  = NULL,
+	},
+};
+
+static void msm_slim_qmi_recv_msg(struct kthread_work *work)
+{
+	int rc;
+	struct msm_slim_qmi *qmi =
+			container_of(work, struct msm_slim_qmi, kwork);
+
+	/* Drain all packets received */
+	do {
+		rc = qmi_recv_msg(qmi->handle);
+	} while (rc == 0);
+	if (rc != -ENOMSG)
+		pr_err("%s: Error receiving QMI message:%d\n", __func__, rc);
+}
+
+static void msm_slim_qmi_notify(struct qmi_handle *handle,
+				enum qmi_event_type event, void *notify_priv)
+{
+	struct msm_slim_ctrl *dev = notify_priv;
+	struct msm_slim_qmi *qmi = &dev->qmi;
+
+	switch (event) {
+	case QMI_RECV_MSG:
+		kthread_queue_work(&qmi->kworker, &qmi->kwork);
+		break;
+	default:
+		break;
+	}
+}
+
+static const char *get_qmi_error(struct qmi_response_type_v01 *r)
+{
+	if (r->result == QMI_RESULT_SUCCESS_V01 || r->error == QMI_ERR_NONE_V01)
+		return "No Error";
+	else if (r->error == QMI_ERR_NO_MEMORY_V01)
+		return "Out of Memory";
+	else if (r->error == QMI_ERR_INTERNAL_V01)
+		return "Unexpected error occurred";
+	else if (r->error == QMI_ERR_INCOMPATIBLE_STATE_V01)
+		return "Slimbus s/w already configured to a different mode";
+	else if (r->error == QMI_ERR_INVALID_ID_V01)
+		return "Slimbus hardware instance is not valid";
+	else
+		return "Unknown error";
+}
+
+static int msm_slim_qmi_send_select_inst_req(struct msm_slim_ctrl *dev,
+				struct slimbus_select_inst_req_msg_v01 *req)
+{
+	struct slimbus_select_inst_resp_msg_v01 resp = { { 0, 0 } };
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01;
+	req_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN;
+	req_desc.ei_array = slimbus_select_inst_req_msg_v01_ei;
+
+	resp_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01;
+	resp_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN;
+	resp_desc.ei_array = slimbus_select_inst_resp_msg_v01_ei;
+
+	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
+			&resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
+	if (rc < 0) {
+		SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
+		return rc;
+	}
+
+	/* Check the response */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n", __func__,
+				resp.resp.result, get_qmi_error(&resp.resp));
+		return -EREMOTEIO;
+	}
+
+	return 0;
+}
+
+static int msm_slim_qmi_send_power_request(struct msm_slim_ctrl *dev,
+				struct slimbus_power_req_msg_v01 *req)
+{
+	struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.msg_id = SLIMBUS_QMI_POWER_REQ_V01;
+	req_desc.max_msg_len = SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN;
+	req_desc.ei_array = slimbus_power_req_msg_v01_ei;
+
+	resp_desc.msg_id = SLIMBUS_QMI_POWER_RESP_V01;
+	resp_desc.max_msg_len = SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN;
+	resp_desc.ei_array = slimbus_power_resp_msg_v01_ei;
+
+	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
+			&resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
+	if (rc < 0) {
+		SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
+		return rc;
+	}
+
+	/* Check the response */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n", __func__,
+				resp.resp.result, get_qmi_error(&resp.resp));
+		return -EREMOTEIO;
+	}
+
+	return 0;
+}
+
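+/*
+ * Bring up the QMI client: spawn a kthread worker to drain incoming
+ * messages, connect to the SLIMbus QMI service, and select the HW
+ * instance. The mode field tells the ADSP its role: satellite when
+ * apps is the bus master, master otherwise.
+ */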
+int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master)
+{
+	int rc = 0;
+	struct qmi_handle *handle;
+	struct slimbus_select_inst_req_msg_v01 req;
+
+	kthread_init_worker(&dev->qmi.kworker);
+
+	dev->qmi.task = kthread_run(kthread_worker_fn,
+			&dev->qmi.kworker, "msm_slim_qmi_clnt%d", dev->ctrl.nr);
+
+	if (IS_ERR(dev->qmi.task)) {
+		pr_err("%s: Failed to create QMI client kthread\n", __func__);
+		return -ENOMEM;
+	}
+
+	kthread_init_work(&dev->qmi.kwork, msm_slim_qmi_recv_msg);
+
+	handle = qmi_handle_create(msm_slim_qmi_notify, dev);
+	if (!handle) {
+		rc = -ENOMEM;
+		pr_err("%s: QMI client handle alloc failed\n", __func__);
+		goto qmi_handle_create_failed;
+	}
+
+	rc = qmi_connect_to_service(handle, SLIMBUS_QMI_SVC_ID,
+						SLIMBUS_QMI_SVC_V1,
+						SLIMBUS_QMI_INS_ID);
+	if (rc < 0) {
+		SLIM_ERR(dev, "%s: QMI server not found\n", __func__);
+		goto qmi_connect_to_service_failed;
+	}
+
+	/* Instance is 0 based */
+	req.instance = (dev->ctrl.nr >> 1);
+	req.mode_valid = 1;
+
+	/* Mode indicates the role of the ADSP */
+	if (apps_is_master)
+		req.mode = SLIMBUS_MODE_SATELLITE_V01;
+	else
+		req.mode = SLIMBUS_MODE_MASTER_V01;
+
+	dev->qmi.handle = handle;
+
+	rc = msm_slim_qmi_send_select_inst_req(dev, &req);
+	if (rc) {
+		pr_err("%s: failed to select h/w instance\n", __func__);
+		goto qmi_select_instance_failed;
+	}
+
+	return 0;
+
+qmi_select_instance_failed:
+	dev->qmi.handle = NULL;
+qmi_connect_to_service_failed:
+	qmi_handle_destroy(handle);
+qmi_handle_create_failed:
+	kthread_flush_worker(&dev->qmi.kworker);
+	kthread_stop(dev->qmi.task);
+	dev->qmi.task = NULL;
+	return rc;
+}
+
+void msm_slim_qmi_exit(struct msm_slim_ctrl *dev)
+{
+	if (!dev->qmi.handle || !dev->qmi.task)
+		return;
+	qmi_handle_destroy(dev->qmi.handle);
+	kthread_flush_worker(&dev->qmi.kworker);
+	kthread_stop(dev->qmi.task);
+	dev->qmi.task = NULL;
+	dev->qmi.handle = NULL;
+}
+
+int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active)
+{
+	struct slimbus_power_req_msg_v01 req;
+
+	if (active)
+		req.pm_req = SLIMBUS_PM_ACTIVE_V01;
+	else
+		req.pm_req = SLIMBUS_PM_INACTIVE_V01;
+
+	return msm_slim_qmi_send_power_request(dev, &req);
+}
+
+int msm_slim_qmi_check_framer_request(struct msm_slim_ctrl *dev)
+{
+	struct slimbus_chkfrm_resp_msg resp = { { 0, 0 } };
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.msg_id = SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ;
+	req_desc.max_msg_len = 0;
+	req_desc.ei_array = NULL;
+
+	resp_desc.msg_id = SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP;
+	resp_desc.max_msg_len = SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN;
+	resp_desc.ei_array = slimbus_chkfrm_resp_msg_v01_ei;
+
+	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, NULL, 0,
+		&resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
+	if (rc < 0) {
+		SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
+		return rc;
+	}
+	/* Check the response */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n",
+			__func__, resp.resp.result, get_qmi_error(&resp.resp));
+		return -EREMOTEIO;
+	}
+	return 0;
+}
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
new file mode 100644
index 0000000..65b9fae
--- /dev/null
+++ b/drivers/slimbus/slim-msm.h
@@ -0,0 +1,440 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SLIM_MSM_H
+#define _SLIM_MSM_H
+
+#include <linux/irq.h>
+#include <linux/kthread.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <linux/ipc_logging.h>
+
+/* Per spec, max 40 bytes per received message */
+#define SLIM_MSGQ_BUF_LEN	40
+
+#define MSM_TX_BUFS		32
+
+#define SLIM_USR_MC_GENERIC_ACK		0x25
+#define SLIM_USR_MC_MASTER_CAPABILITY	0x0
+#define SLIM_USR_MC_REPORT_SATELLITE	0x1
+#define SLIM_USR_MC_ADDR_QUERY		0xD
+#define SLIM_USR_MC_ADDR_REPLY		0xE
+#define SLIM_USR_MC_DEFINE_CHAN		0x20
+#define SLIM_USR_MC_DEF_ACT_CHAN	0x21
+#define SLIM_USR_MC_CHAN_CTRL		0x23
+#define SLIM_USR_MC_RECONFIG_NOW	0x24
+#define SLIM_USR_MC_REQ_BW		0x28
+#define SLIM_USR_MC_CONNECT_SRC		0x2C
+#define SLIM_USR_MC_CONNECT_SINK	0x2D
+#define SLIM_USR_MC_DISCONNECT_PORT	0x2E
+
+#define SLIM_USR_MC_REPEAT_CHANGE_VALUE	0x0
+#define MSM_SLIM_VE_MAX_MAP_ADDR	0xFFF
+#define SLIM_MAX_VE_SLC_BYTES		16
+
+#define MSM_SLIM_AUTOSUSPEND		MSEC_PER_SEC
+
+#define SLIM_RX_MSGQ_TIMEOUT_VAL	0x10000
+/*
+ * Messages that can be received simultaneously:
+ * client reads, LPASS master responses, announcement messages.
+ * Receive up to MSM_SLIM_DESC_NUM messages simultaneously.
+ */
+#define MSM_SLIM_DESC_NUM		32
+
+/* MSM Slimbus peripheral settings */
+#define MSM_SLIM_PERF_SUMM_THRESHOLD	0x8000
+#define MSM_SLIM_NPORTS			24
+#define MSM_SLIM_NCHANS			32
+
+#define QC_MFGID_LSB	0x2
+#define QC_MFGID_MSB	0x17
+#define QC_CHIPID_SL	0x10
+#define QC_DEVID_SAT1	0x3
+#define QC_DEVID_SAT2	0x4
+#define QC_DEVID_PGD	0x5
+
+#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
+		((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
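+
+/*
+ * Note on the packing above (derived directly from the shifts): RL lands
+ * at bit 0, the message type at bit 5, the message code at bit 8, the
+ * destination type at bit 15 and the destination address from bit 16 up.
+ */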
+
+#define INIT_MX_RETRIES 3
+#define DEF_RETRY_MS	10
+#define MSM_CONCUR_MSG	8
+#define SAT_CONCUR_MSG	8
+
+#define DEF_WATERMARK	(8 << 1)
+#define DEF_ALIGN_LSB	0
+#define DEF_ALIGN_MSB	(1 << 7)
+#define DEF_PACK	(1 << 6)
+#define DEF_NO_PACK	0
+#define ENABLE_PORT	1
+
+#define DEF_BLKSZ	0
+#define DEF_TRANSZ	0
+
+#define SAT_MAGIC_LSB	0xD9
+#define SAT_MAGIC_MSB	0xC5
+#define SAT_MSG_VER	0x1
+#define SAT_MSG_PROT	0x1
+#define MSM_SAT_SUCCSS	0x20
+#define MSM_MAX_NSATS	2
+#define MSM_MAX_SATCH	32
+
+/* Slimbus QMI service */
+#define SLIMBUS_QMI_SVC_ID 0x0301
+#define SLIMBUS_QMI_SVC_V1 1
+#define SLIMBUS_QMI_INS_ID 0
+
+/* QMI response timeout of 1000ms */
+#define SLIM_QMI_RESP_TOUT 1000
+
+#define PGD_THIS_EE(r, v) ((v) ? PGD_THIS_EE_V2(r) : PGD_THIS_EE_V1(r))
+#define PGD_PORT(r, p, v) ((v) ? PGD_PORT_V2(r, p) : PGD_PORT_V1(r, p))
+#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))
+
+#define PGD_THIS_EE_V2(r) (dev->base + (r ## _V2) + (dev->ee * 0x1000))
+#define PGD_PORT_V2(r, p) (dev->base + (r ## _V2) + ((p) * 0x1000))
+#define CFG_PORT_V2(r) ((r ## _V2))
+/* Component registers */
+enum comp_reg_v2 {
+	COMP_CFG_V2		= 4,
+	COMP_TRUST_CFG_V2	= 0x3000,
+};
+
+/* Manager PGD registers */
+enum pgd_reg_v2 {
+	PGD_CFG_V2		= 0x800,
+	PGD_STAT_V2		= 0x804,
+	PGD_INT_EN_V2		= 0x810,
+	PGD_INT_STAT_V2		= 0x814,
+	PGD_INT_CLR_V2		= 0x818,
+	PGD_OWN_EEn_V2		= 0x300C,
+	PGD_PORT_INT_EN_EEn_V2	= 0x5000,
+	PGD_PORT_INT_ST_EEn_V2	= 0x5004,
+	PGD_PORT_INT_CL_EEn_V2	= 0x5008,
+	PGD_PORT_CFGn_V2	= 0x14000,
+	PGD_PORT_STATn_V2	= 0x14004,
+	PGD_PORT_PARAMn_V2	= 0x14008,
+	PGD_PORT_BLKn_V2	= 0x1400C,
+	PGD_PORT_TRANn_V2	= 0x14010,
+	PGD_PORT_MCHANn_V2	= 0x14014,
+	PGD_PORT_PSHPLLn_V2	= 0x14018,
+	PGD_PORT_PC_CFGn_V2	= 0x8000,
+	PGD_PORT_PC_VALn_V2	= 0x8004,
+	PGD_PORT_PC_VFR_TSn_V2	= 0x8008,
+	PGD_PORT_PC_VFR_STn_V2	= 0x800C,
+	PGD_PORT_PC_VFR_CLn_V2	= 0x8010,
+	PGD_IE_STAT_V2		= 0x820,
+	PGD_VE_STAT_V2		= 0x830,
+};
+
+#define PGD_THIS_EE_V1(r) (dev->base + (r ## _V1) + (dev->ee * 16))
+#define PGD_PORT_V1(r, p) (dev->base + (r ## _V1) + ((p) * 32))
+#define CFG_PORT_V1(r) ((r ## _V1))
+/* Component registers */
+enum comp_reg_v1 {
+	COMP_CFG_V1		= 0,
+	COMP_TRUST_CFG_V1	= 0x14,
+};
+
+/* Manager PGD registers */
+enum pgd_reg_v1 {
+	PGD_CFG_V1		= 0x1000,
+	PGD_STAT_V1		= 0x1004,
+	PGD_INT_EN_V1		= 0x1010,
+	PGD_INT_STAT_V1		= 0x1014,
+	PGD_INT_CLR_V1		= 0x1018,
+	PGD_OWN_EEn_V1		= 0x1020,
+	PGD_PORT_INT_EN_EEn_V1	= 0x1030,
+	PGD_PORT_INT_ST_EEn_V1	= 0x1034,
+	PGD_PORT_INT_CL_EEn_V1	= 0x1038,
+	PGD_PORT_CFGn_V1	= 0x1080,
+	PGD_PORT_STATn_V1	= 0x1084,
+	PGD_PORT_PARAMn_V1	= 0x1088,
+	PGD_PORT_BLKn_V1	= 0x108C,
+	PGD_PORT_TRANn_V1	= 0x1090,
+	PGD_PORT_MCHANn_V1	= 0x1094,
+	PGD_PORT_PSHPLLn_V1	= 0x1098,
+	PGD_PORT_PC_CFGn_V1	= 0x1600,
+	PGD_PORT_PC_VALn_V1	= 0x1604,
+	PGD_PORT_PC_VFR_TSn_V1	= 0x1608,
+	PGD_PORT_PC_VFR_STn_V1	= 0x160C,
+	PGD_PORT_PC_VFR_CLn_V1	= 0x1610,
+	PGD_IE_STAT_V1		= 0x1700,
+	PGD_VE_STAT_V1		= 0x1710,
+};
+
+enum msm_slim_port_status {
+	MSM_PORT_OVERFLOW	= 1 << 2,
+	MSM_PORT_UNDERFLOW	= 1 << 3,
+	MSM_PORT_DISCONNECT	= 1 << 19,
+};
+
+enum msm_ctrl_state {
+	MSM_CTRL_AWAKE,
+	MSM_CTRL_IDLE,
+	MSM_CTRL_ASLEEP,
+	MSM_CTRL_DOWN,
+};
+
+enum msm_slim_msgq {
+	MSM_MSGQ_DISABLED,
+	MSM_MSGQ_RESET,
+	MSM_MSGQ_ENABLED,
+	MSM_MSGQ_DOWN,
+};
+
+struct msm_slim_sps_bam {
+	unsigned long		hdl;
+	void __iomem		*base;
+	int			irq;
+};
+
+/*
+ * struct msm_slim_pshpull_parm: Structure to store push-pull protocol
+ * parameters
+ * @num_samples: Number of samples in a period
+ * @rpt_period: Repeat period value
+ */
+struct msm_slim_pshpull_parm {
+	int		num_samples;
+	int		rpt_period;
+};
+
+struct msm_slim_endp {
+	struct sps_pipe			*sps;
+	struct sps_connect		config;
+	struct sps_register_event	event;
+	struct sps_mem_buffer		buf;
+	bool				connected;
+	int				port_b;
+	struct msm_slim_pshpull_parm	psh_pull;
+};
+
+struct msm_slim_qmi {
+	struct qmi_handle		*handle;
+	struct task_struct		*task;
+	struct task_struct		*slave_thread;
+	struct completion		slave_notify;
+	struct kthread_work		kwork;
+	struct kthread_worker		kworker;
+	struct completion		qmi_comp;
+	struct notifier_block		nb;
+};
+
+enum msm_slim_dom {
+	MSM_SLIM_DOM_NONE,
+	MSM_SLIM_DOM_PD,
+	MSM_SLIM_DOM_SS,
+};
+
+struct msm_slim_ss {
+	struct notifier_block nb;
+	void *domr;
+	enum msm_ctrl_state state;
+	struct work_struct dom_up;
+	enum msm_slim_dom dom_t;
+};
+
+struct msm_slim_pdata {
+	u32 apps_pipes;
+	u32 eapc;
+};
+
+struct msm_slim_bulk_wr {
+	dma_addr_t	wr_dma;
+	void		*base;
+	int		size;
+	int		buf_sz;
+	int		(*cb)(void *ctx, int err);
+	void		*ctx;
+	bool		in_progress;
+};
+
+struct msm_slim_ctrl {
+	struct slim_controller  ctrl;
+	struct slim_framer	framer;
+	struct device		*dev;
+	void __iomem		*base;
+	struct resource		*slew_mem;
+	struct resource		*bam_mem;
+	u32			curr_bw;
+	u8			msg_cnt;
+	u32			tx_buf[10];
+	u8			rx_msgs[MSM_CONCUR_MSG][SLIM_MSGQ_BUF_LEN];
+	int			tx_tail;
+	int			tx_head;
+	spinlock_t		rx_lock;
+	int			head;
+	int			tail;
+	int			irq;
+	int			err;
+	int			ee;
+	struct completion	**wr_comp;
+	struct msm_slim_sat	*satd[MSM_MAX_NSATS];
+	struct msm_slim_endp	*pipes;
+	struct msm_slim_sps_bam	bam;
+	struct msm_slim_endp	tx_msgq;
+	struct msm_slim_endp	rx_msgq;
+	struct completion	rx_msgq_notify;
+	struct task_struct	*rx_msgq_thread;
+	struct clk		*rclk;
+	struct clk		*hclk;
+	struct mutex		tx_lock;
+	struct mutex		ssr_lock;
+	spinlock_t		tx_buf_lock;
+	u8			pgdla;
+	enum msm_slim_msgq	use_rx_msgqs;
+	enum msm_slim_msgq	use_tx_msgqs;
+	int			port_nums;
+	struct completion	reconf;
+	bool			reconf_busy;
+	bool			chan_active;
+	enum msm_ctrl_state	state;
+	struct completion	ctrl_up;
+	int			nsats;
+	u32			ver;
+	struct msm_slim_qmi	qmi;
+	struct msm_slim_pdata	pdata;
+	struct msm_slim_ss	ext_mdm;
+	struct msm_slim_ss	dsp;
+	struct msm_slim_bulk_wr	bulk;
+	int			default_ipc_log_mask;
+	int			ipc_log_mask;
+	bool			sysfs_created;
+	void			*ipc_slimbus_log;
+	void (*rx_slim)(struct msm_slim_ctrl *dev, u8 *buf);
+	u32			current_rx_buf[10];
+	int			current_count;
+	atomic_t		ssr_in_progress;
+};
+
+struct msm_sat_chan {
+	u8 chan;
+	u16 chanh;
+	int req_rem;
+	int req_def;
+	bool reconf;
+};
+
+struct msm_slim_sat {
+	struct slim_device	satcl;
+	struct msm_slim_ctrl	*dev;
+	struct workqueue_struct *wq;
+	struct work_struct	wd;
+	u8			sat_msgs[SAT_CONCUR_MSG][40];
+	struct msm_sat_chan	*satch;
+	u8			nsatch;
+	bool			sent_capability;
+	bool			pending_reconf;
+	bool			pending_capability;
+	int			shead;
+	int			stail;
+	spinlock_t lock;
+};
+
+enum rsc_grp {
+	EE_MGR_RSC_GRP	= 1 << 10,
+	EE_NGD_2	= 2 << 6,
+	EE_NGD_1	= 0,
+};
+
+
+/* IPC logging stuff */
+#define IPC_SLIMBUS_LOG_PAGES 5
+
+/* Log levels */
+enum {
+	FATAL_LEV = 0U,
+	ERR_LEV = 1U,
+	WARN_LEV = 2U,
+	INFO_LEV = 3U,
+	DBG_LEV = 4U,
+};
+
+/* Default IPC log level INFO */
+#define SLIM_DBG(dev, x...) do { \
+	pr_debug(x); \
+	if (dev->ipc_slimbus_log && dev->ipc_log_mask >= DBG_LEV) { \
+		ipc_log_string(dev->ipc_slimbus_log, x); \
+	} \
+} while (0)
+
+#define SLIM_INFO(dev, x...) do { \
+	pr_debug(x); \
+	if (dev->ipc_slimbus_log && dev->ipc_log_mask >= INFO_LEV) {\
+		ipc_log_string(dev->ipc_slimbus_log, x); \
+	} \
+} while (0)
+
+/* Warnings and errors always show up on the console */
+#define SLIM_WARN(dev, x...) do { \
+	pr_warn(x); \
+	if (dev->ipc_slimbus_log && dev->ipc_log_mask >= WARN_LEV) \
+		ipc_log_string(dev->ipc_slimbus_log, x); \
+} while (0)
+
+/*
+ * An ERROR condition in the driver saves the current IPC log mask and
+ * drops it to FATAL_LEV, so that subsequent lower-priority messages do
+ * not push the error out of the IPC log. Further errors continue to log
+ * on the console; SLIM_RST_LOGLVL() restores the saved level.
+ */
+#define SLIM_ERR(dev, x...) do { \
+	pr_err(x); \
+	if (dev->ipc_slimbus_log && dev->ipc_log_mask >= ERR_LEV) { \
+		ipc_log_string(dev->ipc_slimbus_log, x); \
+		dev->default_ipc_log_mask = dev->ipc_log_mask; \
+		dev->ipc_log_mask = FATAL_LEV; \
+	} \
+} while (0)
+
+#define SLIM_RST_LOGLVL(dev) do { \
+	dev->ipc_log_mask = dev->default_ipc_log_mask; \
+} while (0)
+
+int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len);
+int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf);
+int msm_slim_get_ctrl(struct msm_slim_ctrl *dev);
+void msm_slim_put_ctrl(struct msm_slim_ctrl *dev);
+irqreturn_t msm_slim_port_irq_handler(struct msm_slim_ctrl *dev, u32 pstat);
+int msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep);
+void msm_slim_free_endpoint(struct msm_slim_endp *ep);
+void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pipenum, u8 portnum);
+int msm_alloc_port(struct slim_controller *ctrl, u8 pn);
+void msm_dealloc_port(struct slim_controller *ctrl, u8 pn);
+int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn);
+enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
+				u8 pn, phys_addr_t *done_buf, u32 *done_len);
+int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, phys_addr_t iobuf,
+			u32 len, struct completion *comp);
+int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg);
+u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len,
+			struct completion *comp);
+u32 *msm_slim_manage_tx_msgq(struct msm_slim_ctrl *dev, bool getbuf,
+			struct completion *comp, int err);
+int msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset);
+int msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem,
+			u32 pipe_reg, bool remote);
+void msm_slim_sps_exit(struct msm_slim_ctrl *dev, bool dereg);
+
+int msm_slim_connect_endp(struct msm_slim_ctrl *dev,
+				struct msm_slim_endp *endpoint);
+void msm_slim_disconnect_endp(struct msm_slim_ctrl *dev,
+					struct msm_slim_endp *endpoint,
+					enum msm_slim_msgq *msgq_flag);
+void msm_slim_deinit_ep(struct msm_slim_ctrl *dev,
+				struct msm_slim_endp *endpoint,
+				enum msm_slim_msgq *msgq_flag);
+
+void msm_slim_qmi_exit(struct msm_slim_ctrl *dev);
+int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master);
+int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active);
+int msm_slim_qmi_check_framer_request(struct msm_slim_ctrl *dev);
+#endif
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
new file mode 100644
index 0000000..aa7ff12
--- /dev/null
+++ b/drivers/slimbus/slimbus.c
@@ -0,0 +1,3439 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/idr.h>
+#include <linux/pm_runtime.h>
+#include <linux/slimbus/slimbus.h>
+
+#define SLIM_PORT_HDL(la, f, p) ((la)<<24 | (f) << 16 | (p))
+
+#define SLIM_HDL_TO_LA(hdl)	((u32)((hdl) & 0xFF000000) >> 24)
+#define SLIM_HDL_TO_FLOW(hdl)	(((u32)(hdl) & 0xFF0000) >> 16)
+#define SLIM_HDL_TO_PORT(hdl)	((u32)(hdl) & 0xFF)
+
+#define SLIM_HDL_TO_CHIDX(hdl)	((u16)(hdl) & 0xFF)
+#define SLIM_GRP_TO_NCHAN(hdl)	((u16)(hdl >> 8) & 0xFF)
+
+#define SLIM_SLAVE_PORT(p, la)	(((la)<<16) | (p))
+#define SLIM_MGR_PORT(p)	((0xFF << 16) | (p))
+#define SLIM_LA_MANAGER		0xFF
+
+#define SLIM_START_GRP		(1 << 8)
+#define SLIM_END_GRP		(1 << 9)
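+
+/*
+ * Handle layout implied by the macros above: SLIM_PORT_HDL() packs the
+ * logical address into bits [31:24], the flow into [23:16] and the port
+ * into the low byte; channel-group handles keep the channel index in the
+ * low byte with the group's channel count in bits [15:8].
+ */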
+
+#define SLIM_MAX_INTR_COEFF_3	(SLIM_SL_PER_SUPERFRAME/3)
+#define SLIM_MAX_INTR_COEFF_1	SLIM_SL_PER_SUPERFRAME
+
+static DEFINE_MUTEX(slim_lock);
+static DEFINE_IDR(ctrl_idr);
+static struct device_type slim_dev_type;
+static struct device_type slim_ctrl_type;
+
+#define DEFINE_SLIM_LDEST_TXN(name, mc, len, rl, rbuf, wbuf, la) \
+	struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_LOGICALADDR, 0,\
+					len, 0, la, false, rbuf, wbuf, NULL, }
+
+#define DEFINE_SLIM_BCAST_TXN(name, mc, len, rl, rbuf, wbuf, la) \
+	struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_BROADCAST, 0,\
+					len, 0, la, false, rbuf, wbuf, NULL, }
+
+static const struct slim_device_id *slim_match(const struct slim_device_id *id,
+					const struct slim_device *slim_dev)
+{
+	while (id->name[0]) {
+		if (strcmp(slim_dev->name, id->name) == 0)
+			return id;
+		id++;
+	}
+	return NULL;
+}
+
+const struct slim_device_id *slim_get_device_id(const struct slim_device *sdev)
+{
+	const struct slim_driver *sdrv = to_slim_driver(sdev->dev.driver);
+
+	return slim_match(sdrv->id_table, sdev);
+}
+EXPORT_SYMBOL(slim_get_device_id);
+
+static int slim_device_match(struct device *dev, struct device_driver *driver)
+{
+	struct slim_device *slim_dev;
+	struct slim_driver *drv = to_slim_driver(driver);
+
+	if (dev->type == &slim_dev_type)
+		slim_dev = to_slim_device(dev);
+	else
+		return 0;
+	if (drv->id_table)
+		return slim_match(drv->id_table, slim_dev) != NULL;
+
+	if (driver->name)
+		return strcmp(slim_dev->name, driver->name) == 0;
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int slim_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+	struct slim_device *slim_dev = NULL;
+	struct slim_driver *driver;
+
+	if (dev->type == &slim_dev_type)
+		slim_dev = to_slim_device(dev);
+
+	if (!slim_dev || !dev->driver)
+		return 0;
+
+	driver = to_slim_driver(dev->driver);
+	if (!driver->suspend)
+		return 0;
+
+	return driver->suspend(slim_dev, mesg);
+}
+
+static int slim_legacy_resume(struct device *dev)
+{
+	struct slim_device *slim_dev = NULL;
+	struct slim_driver *driver;
+
+	if (dev->type == &slim_dev_type)
+		slim_dev = to_slim_device(dev);
+
+	if (!slim_dev || !dev->driver)
+		return 0;
+
+	driver = to_slim_driver(dev->driver);
+	if (!driver->resume)
+		return 0;
+
+	return driver->resume(slim_dev);
+}
+
+static int slim_pm_suspend(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (pm)
+		return pm_generic_suspend(dev);
+	else
+		return slim_legacy_suspend(dev, PMSG_SUSPEND);
+}
+
+static int slim_pm_resume(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (pm)
+		return pm_generic_resume(dev);
+	else
+		return slim_legacy_resume(dev);
+}
+
+#else
+#define slim_pm_suspend		NULL
+#define slim_pm_resume		NULL
+#endif
+
+static const struct dev_pm_ops slimbus_pm = {
+	.suspend = slim_pm_suspend,
+	.resume = slim_pm_resume,
+	SET_RUNTIME_PM_OPS(
+		pm_generic_suspend,
+		pm_generic_resume,
+		NULL
+		)
+};
+struct bus_type slimbus_type = {
+	.name		= "slimbus",
+	.match		= slim_device_match,
+	.pm		= &slimbus_pm,
+};
+EXPORT_SYMBOL(slimbus_type);
+
+struct device slimbus_dev = {
+	.init_name = "slimbus",
+};
+
+static void __exit slimbus_exit(void)
+{
+	device_unregister(&slimbus_dev);
+	bus_unregister(&slimbus_type);
+}
+
+static int __init slimbus_init(void)
+{
+	int retval;
+
+	retval = bus_register(&slimbus_type);
+	if (!retval)
+		retval = device_register(&slimbus_dev);
+
+	if (retval)
+		bus_unregister(&slimbus_type);
+
+	return retval;
+}
+postcore_initcall(slimbus_init);
+module_exit(slimbus_exit);
+
+static int slim_drv_probe(struct device *dev)
+{
+	const struct slim_driver *sdrv = to_slim_driver(dev->driver);
+	struct slim_device *sbdev = to_slim_device(dev);
+	struct slim_controller *ctrl = sbdev->ctrl;
+
+	if (sdrv->probe) {
+		int ret;
+
+		ret = sdrv->probe(sbdev);
+		if (ret)
+			return ret;
+		if (sdrv->device_up)
+			queue_work(ctrl->wq, &sbdev->wd);
+		return 0;
+	}
+	return -ENODEV;
+}
+
+static int slim_drv_remove(struct device *dev)
+{
+	const struct slim_driver *sdrv = to_slim_driver(dev->driver);
+	struct slim_device *sbdev = to_slim_device(dev);
+
+	sbdev->notified = false;
+	if (sdrv->remove)
+		return sdrv->remove(to_slim_device(dev));
+	return -ENODEV;
+}
+
+static void slim_drv_shutdown(struct device *dev)
+{
+	const struct slim_driver *sdrv = to_slim_driver(dev->driver);
+
+	if (sdrv->shutdown)
+		sdrv->shutdown(to_slim_device(dev));
+}
+
+/*
+ * slim_driver_register: Client driver registration with slimbus
+ * @drv: Client driver to be associated with client-device.
+ * This API registers the client driver with the slimbus framework.
+ * It is called from the driver's module-init function.
+ */
+int slim_driver_register(struct slim_driver *drv)
+{
+	drv->driver.bus = &slimbus_type;
+	if (drv->probe)
+		drv->driver.probe = slim_drv_probe;
+
+	if (drv->remove)
+		drv->driver.remove = slim_drv_remove;
+
+	if (drv->shutdown)
+		drv->driver.shutdown = slim_drv_shutdown;
+
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(slim_driver_register);
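+
+/*
+ * Illustrative registration sketch (the driver name and callbacks are
+ * hypothetical, not part of this file):
+ *
+ *	static struct slim_driver my_slim_drv = {
+ *		.probe  = my_probe,
+ *		.remove = my_remove,
+ *		.driver = { .name = "my-slim-dev", .owner = THIS_MODULE },
+ *	};
+ *
+ *	ret = slim_driver_register(&my_slim_drv);
+ */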
+
+/*
+ * slim_driver_unregister: Undo effects of slim_driver_register
+ * @drv: Client driver to be unregistered
+ */
+void slim_driver_unregister(struct slim_driver *drv)
+{
+	if (drv)
+		driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(slim_driver_unregister);
+
+#define slim_ctrl_attr_gr NULL
+
+static void slim_ctrl_release(struct device *dev)
+{
+	struct slim_controller *ctrl = to_slim_controller(dev);
+
+	complete(&ctrl->dev_released);
+}
+
+static struct device_type slim_ctrl_type = {
+	.groups		= slim_ctrl_attr_gr,
+	.release	= slim_ctrl_release,
+};
+
+static struct slim_controller *slim_ctrl_get(struct slim_controller *ctrl)
+{
+	if (!ctrl || !get_device(&ctrl->dev))
+		return NULL;
+
+	return ctrl;
+}
+
+static void slim_ctrl_put(struct slim_controller *ctrl)
+{
+	if (ctrl)
+		put_device(&ctrl->dev);
+}
+
+#define slim_device_attr_gr NULL
+#define slim_device_uevent NULL
+static void slim_dev_release(struct device *dev)
+{
+	struct slim_device *sbdev = to_slim_device(dev);
+
+	slim_ctrl_put(sbdev->ctrl);
+}
+
+static struct device_type slim_dev_type = {
+	.groups		= slim_device_attr_gr,
+	.uevent		= slim_device_uevent,
+	.release	= slim_dev_release,
+};
+
+static void slim_report(struct work_struct *work)
+{
+	struct slim_driver *sbdrv;
+	struct slim_device *sbdev =
+			container_of(work, struct slim_device, wd);
+	if (!sbdev->dev.driver)
+		return;
+	/* check if device-up or down needs to be called */
+	if ((!sbdev->reported && !sbdev->notified) ||
+			(sbdev->reported && sbdev->notified))
+		return;
+
+	sbdrv = to_slim_driver(sbdev->dev.driver);
+	/*
+	 * address no longer valid, means device reported absent, whereas
+	 * address valid, means device reported present
+	 */
+	if (sbdev->notified && !sbdev->reported) {
+		sbdev->notified = false;
+		if (sbdrv->device_down)
+			sbdrv->device_down(sbdev);
+	} else if (!sbdev->notified && sbdev->reported) {
+		sbdev->notified = true;
+		if (sbdrv->device_up)
+			sbdrv->device_up(sbdev);
+	}
+}
+
+/*
+ * slim_add_device: Add a new device without registering board info.
+ * @ctrl: Controller to which this device is to be added.
+ * @sbdev: Device to be added.
+ * Called when device doesn't have an explicit client-driver to be probed, or
+ * the client-driver is a module installed dynamically.
+ */
+int slim_add_device(struct slim_controller *ctrl, struct slim_device *sbdev)
+{
+	sbdev->dev.bus = &slimbus_type;
+	sbdev->dev.parent = ctrl->dev.parent;
+	sbdev->dev.type = &slim_dev_type;
+	sbdev->dev.driver = NULL;
+	sbdev->ctrl = ctrl;
+	slim_ctrl_get(ctrl);
+	dev_set_name(&sbdev->dev, "%s", sbdev->name);
+	mutex_init(&sbdev->sldev_reconf);
+	INIT_LIST_HEAD(&sbdev->mark_define);
+	INIT_LIST_HEAD(&sbdev->mark_suspend);
+	INIT_LIST_HEAD(&sbdev->mark_removal);
+	INIT_WORK(&sbdev->wd, slim_report);
+	mutex_lock(&ctrl->m_ctrl);
+	list_add_tail(&sbdev->dev_list, &ctrl->devs);
+	mutex_unlock(&ctrl->m_ctrl);
+	/* probe slave on this controller */
+	return device_register(&sbdev->dev);
+}
+EXPORT_SYMBOL(slim_add_device);
+
+struct sbi_boardinfo {
+	struct list_head	list;
+	struct slim_boardinfo	board_info;
+};
+
+static LIST_HEAD(board_list);
+static LIST_HEAD(slim_ctrl_list);
+static DEFINE_MUTEX(board_lock);
+
+/* If controller is not present, only add to boards list */
+static void slim_match_ctrl_to_boardinfo(struct slim_controller *ctrl,
+				struct slim_boardinfo *bi)
+{
+	int ret;
+
+	if (ctrl->nr != bi->bus_num)
+		return;
+
+	ret = slim_add_device(ctrl, bi->slim_slave);
+	if (ret != 0)
+		dev_err(ctrl->dev.parent, "can't create new device for %s\n",
+			bi->slim_slave->name);
+}
+
+/*
+ * slim_register_board_info: Board-initialization routine.
+ * @info: List of all devices on all controllers present on the board.
+ * @n: number of entries.
+ * This API enumerates the respective devices on their corresponding
+ * controllers. It is called from the board-init function.
+ */
+int slim_register_board_info(struct slim_boardinfo const *info, unsigned int n)
+{
+	struct sbi_boardinfo *bi;
+	int i;
+
+	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
+	if (!bi)
+		return -ENOMEM;
+
+	for (i = 0; i < n; i++, bi++, info++) {
+		struct slim_controller *ctrl;
+
+		memcpy(&bi->board_info, info, sizeof(*info));
+		mutex_lock(&board_lock);
+		list_add_tail(&bi->list, &board_list);
+		list_for_each_entry(ctrl, &slim_ctrl_list, list)
+			slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info);
+		mutex_unlock(&board_lock);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(slim_register_board_info);
+
+/*
+ * slim_ctrl_add_boarddevs: Add devices registered by board-info
+ * @ctrl: Controller to which these devices are to be added.
+ * This API is called by the controller when it is up and running.
+ * If devices on a controller were registered before the controller itself,
+ * this makes sure that they get probed once the controller is up.
+ */
+void slim_ctrl_add_boarddevs(struct slim_controller *ctrl)
+{
+	struct sbi_boardinfo *bi;
+
+	mutex_lock(&board_lock);
+	list_add_tail(&ctrl->list, &slim_ctrl_list);
+	list_for_each_entry(bi, &board_list, list)
+		slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info);
+	mutex_unlock(&board_lock);
+}
+EXPORT_SYMBOL(slim_ctrl_add_boarddevs);
+
+/*
+ * slim_busnum_to_ctrl: Map bus number to controller
+ * @bus_num: Bus number
+ * Returns the controller representing this bus number
+ */
+struct slim_controller *slim_busnum_to_ctrl(u32 bus_num)
+{
+	struct slim_controller *ctrl;
+
+	mutex_lock(&board_lock);
+	list_for_each_entry(ctrl, &slim_ctrl_list, list)
+		if (bus_num == ctrl->nr) {
+			mutex_unlock(&board_lock);
+			return ctrl;
+		}
+	mutex_unlock(&board_lock);
+	return NULL;
+}
+EXPORT_SYMBOL(slim_busnum_to_ctrl);
+
+static int slim_register_controller(struct slim_controller *ctrl)
+{
+	int ret = 0;
+
+	/* Can't register until after driver model init */
+	if (WARN_ON(!slimbus_type.p)) {
+		ret = -EPROBE_DEFER;
+		goto out_list;
+	}
+
+	dev_set_name(&ctrl->dev, "sb-%d", ctrl->nr);
+	ctrl->dev.bus = &slimbus_type;
+	ctrl->dev.type = &slim_ctrl_type;
+	ctrl->num_dev = 0;
+	if (!ctrl->min_cg)
+		ctrl->min_cg = SLIM_MIN_CLK_GEAR;
+	if (!ctrl->max_cg)
+		ctrl->max_cg = SLIM_MAX_CLK_GEAR;
+	spin_lock_init(&ctrl->txn_lock);
+	mutex_init(&ctrl->m_ctrl);
+	mutex_init(&ctrl->sched.m_reconf);
+	ret = device_register(&ctrl->dev);
+	if (ret)
+		goto out_list;
+
+	dev_dbg(&ctrl->dev, "Bus [%s] registered:dev:%p\n", ctrl->name,
+							&ctrl->dev);
+
+	if (ctrl->nports) {
+		ctrl->ports = kcalloc(ctrl->nports, sizeof(struct slim_port),
+					GFP_KERNEL);
+		if (!ctrl->ports) {
+			ret = -ENOMEM;
+			goto err_port_failed;
+		}
+	}
+	if (ctrl->nchans) {
+		ctrl->chans = kcalloc(ctrl->nchans, sizeof(struct slim_ich),
+					GFP_KERNEL);
+		if (!ctrl->chans) {
+			ret = -ENOMEM;
+			goto err_chan_failed;
+		}
+
+		ctrl->sched.chc1 = kcalloc(ctrl->nchans,
+					sizeof(struct slim_ich *), GFP_KERNEL);
+		if (!ctrl->sched.chc1) {
+			kfree(ctrl->chans);
+			ret = -ENOMEM;
+			goto err_chan_failed;
+		}
+		ctrl->sched.chc3 = kcalloc(ctrl->nchans,
+					sizeof(struct slim_ich *), GFP_KERNEL);
+		if (!ctrl->sched.chc3) {
+			kfree(ctrl->sched.chc1);
+			kfree(ctrl->chans);
+			ret = -ENOMEM;
+			goto err_chan_failed;
+		}
+	}
+#ifdef DEBUG
+	ctrl->sched.slots = kzalloc(SLIM_SL_PER_SUPERFRAME, GFP_KERNEL);
+#endif
+	init_completion(&ctrl->pause_comp);
+
+	INIT_LIST_HEAD(&ctrl->devs);
+	ctrl->wq = create_singlethread_workqueue(dev_name(&ctrl->dev));
+	if (!ctrl->wq) {
+		ret = -ENOMEM;
+		goto err_workq_failed;
+	}
+
+	return 0;
+
+err_workq_failed:
+	kfree(ctrl->sched.chc3);
+	kfree(ctrl->sched.chc1);
+	kfree(ctrl->chans);
+err_chan_failed:
+	kfree(ctrl->ports);
+err_port_failed:
+	device_unregister(&ctrl->dev);
+out_list:
+	mutex_lock(&slim_lock);
+	idr_remove(&ctrl_idr, ctrl->nr);
+	mutex_unlock(&slim_lock);
+	return ret;
+}
+
+/* slim_remove_device: Remove the effect of slim_add_device() */
+void slim_remove_device(struct slim_device *sbdev)
+{
+	struct slim_controller *ctrl = sbdev->ctrl;
+
+	mutex_lock(&ctrl->m_ctrl);
+	list_del_init(&sbdev->dev_list);
+	mutex_unlock(&ctrl->m_ctrl);
+	device_unregister(&sbdev->dev);
+}
+EXPORT_SYMBOL(slim_remove_device);
+
+static void slim_ctrl_remove_device(struct slim_controller *ctrl,
+				struct slim_boardinfo *bi)
+{
+	if (ctrl->nr == bi->bus_num)
+		slim_remove_device(bi->slim_slave);
+}
+
+/*
+ * slim_del_controller: Controller tear-down.
+ * A controller registered with the framework is torn down using this API.
+ */
+int slim_del_controller(struct slim_controller *ctrl)
+{
+	struct slim_controller *found;
+	struct sbi_boardinfo *bi;
+
+	/* First make sure that this bus was added */
+	mutex_lock(&slim_lock);
+	found = idr_find(&ctrl_idr, ctrl->nr);
+	mutex_unlock(&slim_lock);
+	if (found != ctrl)
+		return -EINVAL;
+
+	/* Remove all clients */
+	mutex_lock(&board_lock);
+	list_for_each_entry(bi, &board_list, list)
+		slim_ctrl_remove_device(ctrl, &bi->board_info);
+	mutex_unlock(&board_lock);
+
+	init_completion(&ctrl->dev_released);
+	device_unregister(&ctrl->dev);
+
+	wait_for_completion(&ctrl->dev_released);
+	list_del(&ctrl->list);
+	destroy_workqueue(ctrl->wq);
+	/* free bus id */
+	mutex_lock(&slim_lock);
+	idr_remove(&ctrl_idr, ctrl->nr);
+	mutex_unlock(&slim_lock);
+
+	kfree(ctrl->sched.chc1);
+	kfree(ctrl->sched.chc3);
+#ifdef DEBUG
+	kfree(ctrl->sched.slots);
+#endif
+	kfree(ctrl->chans);
+	kfree(ctrl->ports);
+
+	return 0;
+}
+EXPORT_SYMBOL(slim_del_controller);
+
+/*
+ * slim_add_numbered_controller: Controller bring-up.
+ * @ctrl: Controller to be registered.
+ * A controller is registered with the framework using this API. ctrl->nr is the
+ * desired number with which slimbus framework registers the controller.
+ * Function returns a negative error code if the number is already in use.
+ */
+int slim_add_numbered_controller(struct slim_controller *ctrl)
+{
+	int	id;
+
+	mutex_lock(&slim_lock);
+	id = idr_alloc(&ctrl_idr, ctrl, ctrl->nr, ctrl->nr + 1, GFP_KERNEL);
+	mutex_unlock(&slim_lock);
+
+	if (id < 0)
+		return id;
+
+	ctrl->nr = id;
+	return slim_register_controller(ctrl);
+}
+EXPORT_SYMBOL(slim_add_numbered_controller);
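+
+/*
+ * Illustrative bring-up sketch (hypothetical controller driver); the
+ * driver picks a fixed bus number before registering:
+ *
+ *	ctrl->nr = 0;
+ *	ctrl->nports = nports_supported;
+ *	ret = slim_add_numbered_controller(ctrl);
+ *	if (!ret)
+ *		slim_ctrl_add_boarddevs(ctrl);
+ */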
+
+/*
+ * slim_report_absent: Controller calls this function when a device
+ *	reports absent, OR when the device cannot be communicated with
+ * @sbdev: Device that cannot be reached, or that sent a report-absent message
+ */
+void slim_report_absent(struct slim_device *sbdev)
+{
+	struct slim_controller *ctrl;
+	int i;
+
+	if (!sbdev)
+		return;
+	ctrl = sbdev->ctrl;
+	if (!ctrl)
+		return;
+	/* invalidate logical addresses */
+	mutex_lock(&ctrl->m_ctrl);
+	for (i = 0; i < ctrl->num_dev; i++) {
+		if (sbdev->laddr == ctrl->addrt[i].laddr)
+			ctrl->addrt[i].valid = false;
+	}
+	mutex_unlock(&ctrl->m_ctrl);
+	sbdev->reported = false;
+	queue_work(ctrl->wq, &sbdev->wd);
+}
+EXPORT_SYMBOL(slim_report_absent);
+
+static int slim_remove_ch(struct slim_controller *ctrl, struct slim_ich *slc);
+/*
+ * slim_framer_booted: This function is called by controller after the active
+ * framer has booted (using Bus Reset sequence, or after it has shutdown and has
+ * come back up). Components and devices on the bus may be in an undefined
+ * state, and this function triggers their drivers to take the steps needed
+ * to bring them back to the Reset state, so that they can acquire sync,
+ * report present and be operational again.
+ */
+void slim_framer_booted(struct slim_controller *ctrl)
+{
+	struct slim_device *sbdev;
+	struct list_head *pos, *next;
+	int i;
+
+	if (!ctrl)
+		return;
+
+	/* Since framer has rebooted, reset all data channels */
+	mutex_lock(&ctrl->sched.m_reconf);
+	for (i = 0; i < ctrl->nchans; i++) {
+		struct slim_ich *slc = &ctrl->chans[i];
+
+		if (slc->state > SLIM_CH_DEFINED)
+			slim_remove_ch(ctrl, slc);
+	}
+	mutex_unlock(&ctrl->sched.m_reconf);
+	mutex_lock(&ctrl->m_ctrl);
+	list_for_each_safe(pos, next, &ctrl->devs) {
+		struct slim_driver *sbdrv;
+
+		sbdev = list_entry(pos, struct slim_device, dev_list);
+		mutex_unlock(&ctrl->m_ctrl);
+		if (sbdev && sbdev->dev.driver) {
+			sbdrv = to_slim_driver(sbdev->dev.driver);
+			if (sbdrv->reset_device)
+				sbdrv->reset_device(sbdev);
+		}
+		mutex_lock(&ctrl->m_ctrl);
+	}
+	mutex_unlock(&ctrl->m_ctrl);
+}
+EXPORT_SYMBOL(slim_framer_booted);
+
+/*
+ * slim_msg_response: Deliver Message response received from a device to the
+ *	framework.
+ * @ctrl: Controller handle
+ * @reply: Reply received from the device
+ * @len: Length of the reply
+ * @tid: Transaction ID received with which framework can associate reply.
+ * Called by controller to inform framework about the response received.
+ * This keeps the API asynchronous: the controller driver does not need to
+ * manage another table beyond the TID-to-buffer mapping that the framework
+ * already maintains.
+ */
+void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len)
+{
+	int i;
+	unsigned long flags;
+	bool async;
+	struct slim_msg_txn *txn;
+
+	spin_lock_irqsave(&ctrl->txn_lock, flags);
+	txn = ctrl->txnt[tid];
+	if (txn == NULL || txn->rbuf == NULL) {
+		spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+		if (txn == NULL)
+			dev_err(&ctrl->dev, "Got response to invalid TID:%d, len:%d",
+				tid, len);
+		else
+			dev_err(&ctrl->dev, "Invalid client buffer passed\n");
+		return;
+	}
+	async = txn->async;
+	for (i = 0; i < len; i++)
+		txn->rbuf[i] = reply[i];
+	if (txn->comp)
+		complete(txn->comp);
+	ctrl->txnt[tid] = NULL;
+	spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+	if (async)
+		kfree(txn);
+}
+EXPORT_SYMBOL(slim_msg_response);
+
+static int slim_processtxn(struct slim_controller *ctrl,
+				struct slim_msg_txn *txn, bool need_tid)
+{
+	u8 i = 0;
+	int ret = 0;
+	unsigned long flags;
+
+	if (need_tid) {
+		spin_lock_irqsave(&ctrl->txn_lock, flags);
+		for (i = 0; i < ctrl->last_tid; i++) {
+			if (ctrl->txnt[i] == NULL)
+				break;
+		}
+		if (i >= ctrl->last_tid) {
+			if (ctrl->last_tid == 255) {
+				spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+				return -ENOMEM;
+			}
+			ctrl->last_tid++;
+		}
+		ctrl->txnt[i] = txn;
+		txn->tid = i;
+		spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+	}
+
+	ret = ctrl->xfer_msg(ctrl, txn);
+	return ret;
+}
+
+static int ctrl_getlogical_addr(struct slim_controller *ctrl, const u8 *eaddr,
+				u8 e_len, u8 *entry)
+{
+	u8 i;
+
+	for (i = 0; i < ctrl->num_dev; i++) {
+		if (ctrl->addrt[i].valid &&
+			memcmp(ctrl->addrt[i].eaddr, eaddr, e_len) == 0) {
+			*entry = i;
+			return 0;
+		}
+	}
+	return -ENXIO;
+}
+
+/*
+ * slim_assign_laddr: Assign logical address to a device enumerated.
+ * @ctrl: Controller with which device is enumerated.
+ * @e_addr: 6-byte elemental address of the device.
+ * @e_len: buffer length for e_addr
+ * @laddr: Return logical address (if valid flag is false)
+ * @valid: true if laddr holds a valid address that controller wants to
+ *	set for this enumeration address. Otherwise framework sets index into
+ *	address table as logical address.
+ * Called by controller in response to REPORT_PRESENT. Framework will assign
+ * a logical address to this enumeration address.
+ * Function returns -EXFULL to indicate that all logical addresses are already
+ * taken.
+ */
+int slim_assign_laddr(struct slim_controller *ctrl, const u8 *e_addr,
+				u8 e_len, u8 *laddr, bool valid)
+{
+	int ret;
+	u8 i = 0;
+	bool exists = false;
+	struct slim_device *sbdev;
+	struct list_head *pos, *next;
+	void *new_addrt = NULL;
+
+	mutex_lock(&ctrl->m_ctrl);
+	/* already assigned */
+	if (ctrl_getlogical_addr(ctrl, e_addr, e_len, &i) == 0) {
+		*laddr = ctrl->addrt[i].laddr;
+		exists = true;
+	} else {
+		if (ctrl->num_dev >= 254) {
+			ret = -EXFULL;
+			goto ret_assigned_laddr;
+		}
+		for (i = 0; i < ctrl->num_dev; i++) {
+			if (ctrl->addrt[i].valid == false)
+				break;
+		}
+		if (i == ctrl->num_dev) {
+			new_addrt = krealloc(ctrl->addrt,
+					(ctrl->num_dev + 1) *
+					sizeof(struct slim_addrt),
+					GFP_KERNEL);
+			if (!new_addrt) {
+				ret = -ENOMEM;
+				goto ret_assigned_laddr;
+			}
+			ctrl->addrt = new_addrt;
+			ctrl->num_dev++;
+		}
+		memcpy(ctrl->addrt[i].eaddr, e_addr, e_len);
+		ctrl->addrt[i].valid = true;
+		/* Preferred address is index into table */
+		if (!valid)
+			*laddr = i;
+	}
+
+	ret = ctrl->set_laddr(ctrl, (const u8 *)&ctrl->addrt[i].eaddr, 6,
+				*laddr);
+	if (ret) {
+		ctrl->addrt[i].valid = false;
+		goto ret_assigned_laddr;
+	}
+	ctrl->addrt[i].laddr = *laddr;
+
+	dev_dbg(&ctrl->dev, "setting slimbus l-addr:%x\n", *laddr);
+ret_assigned_laddr:
+	mutex_unlock(&ctrl->m_ctrl);
+	if (exists || ret)
+		return ret;
+
+	pr_info("slimbus:%d laddr:0x%x, EAPC:0x%x:0x%x", ctrl->nr, *laddr,
+				e_addr[1], e_addr[2]);
+	mutex_lock(&ctrl->m_ctrl);
+	list_for_each_safe(pos, next, &ctrl->devs) {
+		sbdev = list_entry(pos, struct slim_device, dev_list);
+		if (memcmp(sbdev->e_addr, e_addr, 6) == 0) {
+			struct slim_driver *sbdrv;
+
+			sbdev->laddr = *laddr;
+			sbdev->reported = true;
+			if (sbdev->dev.driver) {
+				sbdrv = to_slim_driver(sbdev->dev.driver);
+				if (sbdrv->device_up)
+					queue_work(ctrl->wq, &sbdev->wd);
+			}
+			break;
+		}
+	}
+	mutex_unlock(&ctrl->m_ctrl);
+	return 0;
+}
+EXPORT_SYMBOL(slim_assign_laddr);
+
+/*
+ * slim_get_logical_addr: Return the logical address of a slimbus device.
+ * @sb: client handle requesting the address.
+ * @e_addr: Elemental address of the device.
+ * @e_len: Length of e_addr
+ * @laddr: output buffer to store the address
+ * context: can sleep
+ * -EINVAL is returned in case of invalid parameters, and -ENXIO is returned if
+ *  the device with this elemental address is not found.
+ */
+int slim_get_logical_addr(struct slim_device *sb, const u8 *e_addr,
+				u8 e_len, u8 *laddr)
+{
+	int ret = 0;
+	u8 entry;
+	struct slim_controller *ctrl = sb->ctrl;
+
+	if (!ctrl || !laddr || !e_addr || e_len != 6)
+		return -EINVAL;
+	mutex_lock(&ctrl->m_ctrl);
+	ret = ctrl_getlogical_addr(ctrl, e_addr, e_len, &entry);
+	if (!ret)
+		*laddr = ctrl->addrt[entry].laddr;
+	mutex_unlock(&ctrl->m_ctrl);
+	if (ret == -ENXIO && ctrl->get_laddr) {
+		ret = ctrl->get_laddr(ctrl, e_addr, e_len, laddr);
+		if (!ret)
+			ret = slim_assign_laddr(ctrl, e_addr, e_len, laddr,
+						true);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(slim_get_logical_addr);
+
+static int slim_ele_access_sanity(struct slim_ele_access *msg, int oper,
+				u8 *rbuf, const u8 *wbuf, u8 len)
+{
+	if (!msg || msg->num_bytes > 16 || msg->start_offset + len > 0xC00)
+		return -EINVAL;
+	switch (oper) {
+	case SLIM_MSG_MC_REQUEST_VALUE:
+	case SLIM_MSG_MC_REQUEST_INFORMATION:
+		if (rbuf == NULL)
+			return -EINVAL;
+		return 0;
+	case SLIM_MSG_MC_CHANGE_VALUE:
+	case SLIM_MSG_MC_CLEAR_INFORMATION:
+		if (wbuf == NULL)
+			return -EINVAL;
+		return 0;
+	case SLIM_MSG_MC_REQUEST_CHANGE_VALUE:
+	case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
+		if (rbuf == NULL || wbuf == NULL)
+			return -EINVAL;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static u16 slim_slicecodefromsize(u32 req)
+{
+	static const u8 codetosize[8] = {1, 2, 3, 4, 6, 8, 12, 16};
+
+	if (req >= 8)
+		return 0;
+	else
+		return codetosize[req];
+}
+
+static u16 slim_slicesize(u32 code)
+{
+	static const u8 sizetocode[16] = {
+		0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7
+	};
+
+	if (code == 0)
+		code = 1;
+	if (code > 16)
+		code = 16;
+	return sizetocode[code - 1];
+}
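+
+/*
+ * Worked example for the two helpers above: a 12-byte transfer maps to
+ * slice code 6 (sizetocode[11]), and slim_slicecodefromsize(6) maps that
+ * code back to a 12-byte slice (codetosize[6]).
+ */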
+
+
+/* Message APIs Unicast message APIs used by slimbus slave drivers */
+
+/*
+ * Message API access routines.
+ * @sb: client handle requesting elemental message reads, writes.
+ * @msg: Input structure for start-offset, number of bytes to read.
+ * @rbuf: data buffer to be filled with values read.
+ * @len: data buffer size
+ * @wbuf: data buffer containing value/information to be written
+ * context: can sleep
+ * Returns:
+ * -EINVAL: Invalid parameters
+ * -ETIMEDOUT: If the controller could not complete the request. This may
+ *  happen if the bus lines are not clocked, the controller is not powered
+ *  on, or the slave with the given address is not enumerated/responding.
+ */
+int slim_request_val_element(struct slim_device *sb,
+				struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_VALUE, buf,
+			NULL, len);
+}
+EXPORT_SYMBOL(slim_request_val_element);
+
+int slim_request_inf_element(struct slim_device *sb,
+				struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_INFORMATION,
+			buf, NULL, len);
+}
+EXPORT_SYMBOL(slim_request_inf_element);
+
+int slim_change_val_element(struct slim_device *sb, struct slim_ele_access *msg,
+				const u8 *buf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CHANGE_VALUE, NULL, buf,
+					len);
+}
+EXPORT_SYMBOL(slim_change_val_element);
+
+int slim_clear_inf_element(struct slim_device *sb, struct slim_ele_access *msg,
+				u8 *buf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CLEAR_INFORMATION, NULL,
+					buf, len);
+}
+EXPORT_SYMBOL(slim_clear_inf_element);
+
+int slim_request_change_val_element(struct slim_device *sb,
+					struct slim_ele_access *msg, u8 *rbuf,
+					const u8 *wbuf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_CHANGE_VALUE,
+					rbuf, wbuf, len);
+}
+EXPORT_SYMBOL(slim_request_change_val_element);
+
+int slim_request_clear_inf_element(struct slim_device *sb,
+					struct slim_ele_access *msg, u8 *rbuf,
+					const u8 *wbuf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg,
+					SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION,
+					rbuf, wbuf, len);
+}
+EXPORT_SYMBOL(slim_request_clear_inf_element);
+
+/*
+ * Broadcast message API:
+ * call this API directly with sbdev = NULL.
+ * For broadcast reads, make sure that buffers are big enough to hold
+ * replies from all logical addresses.
+ * Not all controllers support broadcast.
+ */
+int slim_xfer_msg(struct slim_controller *ctrl, struct slim_device *sbdev,
+			struct slim_ele_access *msg, u16 mc, u8 *rbuf,
+			const u8 *wbuf, u8 len)
+{
+	DECLARE_COMPLETION_ONSTACK(complete);
+	DEFINE_SLIM_LDEST_TXN(txn_stack, mc, len, 6, rbuf, wbuf, sbdev->laddr);
+	struct slim_msg_txn *txn;
+	int ret;
+	u16 sl, cur;
+
+	if (msg->comp && rbuf) {
+		txn = kmalloc(sizeof(*txn), GFP_KERNEL);
+		/* kmalloc() returns NULL on failure, never an ERR_PTR */
+		if (!txn)
+			return -ENOMEM;
+		*txn = txn_stack;
+		txn->async = true;
+		txn->comp = msg->comp;
+	} else {
+		txn = &txn_stack;
+		if (rbuf)
+			txn->comp = &complete;
+	}
+
+	ret = slim_ele_access_sanity(msg, mc, rbuf, wbuf, len);
+	if (ret)
+		goto xfer_err;
+
+	sl = slim_slicesize(len);
+	dev_dbg(&ctrl->dev, "SB xfer msg:os:%x, len:%d, MC:%x, sl:%x\n",
+				msg->start_offset, len, mc, sl);
+
+	cur = slim_slicecodefromsize(sl);
+	txn->ec = ((sl | (1 << 3)) | ((msg->start_offset & 0xFFF) << 4));
+
+	if (wbuf)
+		txn->rl += len;
+	if (rbuf) {
+		unsigned long flags;
+
+		txn->rl++;
+		ret = slim_processtxn(ctrl, txn, true);
+
+		/* sync read */
+		if (!ret && !msg->comp) {
+			ret = wait_for_completion_timeout(&complete, HZ);
+			if (!ret) {
+				dev_err(&ctrl->dev, "slimbus Read timed out");
+				spin_lock_irqsave(&ctrl->txn_lock, flags);
+				/* Invalidate the transaction */
+				ctrl->txnt[txn->tid] = NULL;
+				spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+				ret = -ETIMEDOUT;
+			} else
+				ret = 0;
+		} else if (ret < 0 && !msg->comp) {
+			dev_err(&ctrl->dev, "slimbus Read error");
+			spin_lock_irqsave(&ctrl->txn_lock, flags);
+			/* Invalidate the transaction */
+			ctrl->txnt[txn->tid] = NULL;
+			spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+		}
+
+	} else
+		ret = slim_processtxn(ctrl, txn, false);
+xfer_err:
+	return ret;
+}
+EXPORT_SYMBOL(slim_xfer_msg);
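+
+/*
+ * Illustrative sketch of a blocking value-element read through the
+ * wrappers above (the offset and size are hypothetical). A NULL
+ * completion selects the synchronous path in slim_xfer_msg():
+ *
+ *	struct slim_ele_access msg = {
+ *		.start_offset = 0x400,
+ *		.num_bytes = 2,
+ *		.comp = NULL,
+ *	};
+ *	u8 val[2];
+ *	int ret = slim_request_val_element(sb, &msg, val, 2);
+ */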
+
+/*
+ * User message:
+ * slim_user_msg: Send user message that is interpreted by destination device
+ * @sb: Client handle sending the message
+ * @la: Destination device for this user message
+ * @mt: Message Type (Source-referred, or Destination-referred)
+ * @mc: Message Code
+ * @msg: Message structure (start offset, number of bytes) to be sent
+ * @buf: data buffer to be sent
+ * @len: data buffer size in bytes
+ */
+int slim_user_msg(struct slim_device *sb, u8 la, u8 mt, u8 mc,
+				struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+	if (!sb || !sb->ctrl || !msg || mt == SLIM_MSG_MT_CORE)
+		return -EINVAL;
+	if (!sb->ctrl->xfer_user_msg)
+		return -EPROTONOSUPPORT;
+	return sb->ctrl->xfer_user_msg(sb->ctrl, la, mt, mc, msg, buf, len);
+}
+EXPORT_SYMBOL(slim_user_msg);
+
+/*
+ * Queue bulk of message writes:
+ * slim_bulk_msg_write: Write bulk of messages (e.g. downloading FW)
+ * @sb: Client handle sending these messages
+ * @la: Destination device for these messages
+ * @mt: Message Type
+ * @mc: Message Code
+ * @msgs: List of messages to be written in bulk
+ * @n: Number of messages in the list
+ * @cb: Callback if client needs this to be non-blocking
+ * @ctx: Context for this callback
+ * If supported by controller, this message list will be sent in bulk to the HW
+ * If the client specifies this to be non-blocking, the callback will be
+ * called from atomic context.
+ */
+int slim_bulk_msg_write(struct slim_device *sb, u8 mt, u8 mc,
+			struct slim_val_inf msgs[], int n,
+			int (*comp_cb)(void *ctx, int err), void *ctx)
+{
+	int i, ret = 0;
+
+	if (!sb || !sb->ctrl || !msgs || n <= 0)
+		return -EINVAL;
+	if (!sb->ctrl->xfer_bulk_wr) {
+		pr_warn("controller does not support bulk WR, serializing");
+		for (i = 0; i < n; i++) {
+			struct slim_ele_access ele;
+
+			ele.comp = NULL;
+			ele.start_offset = msgs[i].start_offset;
+			ele.num_bytes = msgs[i].num_bytes;
+			ret = slim_xfer_msg(sb->ctrl, sb, &ele, mc,
+					msgs[i].rbuf, msgs[i].wbuf,
+					ele.num_bytes);
+			if (ret)
+				return ret;
+		}
+		return ret;
+	}
+	return sb->ctrl->xfer_bulk_wr(sb->ctrl, sb->laddr, mt, mc, msgs, n,
+					comp_cb, ctx);
+}
+EXPORT_SYMBOL(slim_bulk_msg_write);
+
+/*
+ * slim_alloc_mgrports: Allocate port on manager side.
+ * @sb: device/client handle.
+ * @req: Port request type.
+ * @nports: Number of ports requested
+ * @rh: output buffer to store the port handles
+ * @hsz: size of buffer storing handles
+ * context: can sleep
+ * This port will typically be used by SW, e.g. a client driver wants to receive
+ * some data from audio codec HW using a data channel.
+ * Port allocated using this API will be used to receive the data.
+ * If half-duplex ports are requested, two adjacent ports are allocated for
+ * 1 half-duplex port. So the handle-buffer size should be twice the number
+ * of half-duplex ports to be allocated.
+ * -EDQUOT is returned if all ports are in use.
+ */
+int slim_alloc_mgrports(struct slim_device *sb, enum slim_port_req req,
+				int nports, u32 *rh, int hsz)
+{
+	int i, j;
+	int ret = -EINVAL;
+	int nphysp = nports;
+	struct slim_controller *ctrl = sb->ctrl;
+
+	if (!rh || !ctrl)
+		return -EINVAL;
+	if (req == SLIM_REQ_HALF_DUP)
+		nphysp *= 2;
+	if (hsz/sizeof(u32) < nphysp)
+		return -EINVAL;
+	mutex_lock(&ctrl->m_ctrl);
+
+	for (i = 0; i < ctrl->nports; i++) {
+		bool multiok = true;
+
+		if (ctrl->ports[i].state != SLIM_P_FREE)
+			continue;
+		/* Start half duplex channel at even port */
+		if (req == SLIM_REQ_HALF_DUP && (i % 2))
+			continue;
+		/* Allocate ports contiguously for multi-ch */
+		if (ctrl->nports < (i + nphysp)) {
+			i = ctrl->nports;
+			break;
+		}
+		if (req == SLIM_REQ_MULTI_CH) {
+			multiok = true;
+			for (j = i; j < i + nphysp; j++) {
+				if (ctrl->ports[j].state != SLIM_P_FREE) {
+					multiok = false;
+					break;
+				}
+			}
+			if (!multiok)
+				continue;
+		}
+		break;
+	}
+	if (i >= ctrl->nports) {
+		ret = -EDQUOT;
+		goto alloc_err;
+	}
+	ret = 0;
+	for (j = i; j < i + nphysp; j++) {
+		ctrl->ports[j].state = SLIM_P_UNCFG;
+		ctrl->ports[j].req = req;
+		if (req == SLIM_REQ_HALF_DUP && (j % 2))
+			ctrl->ports[j].flow = SLIM_SINK;
+		else
+			ctrl->ports[j].flow = SLIM_SRC;
+		if (ctrl->alloc_port)
+			ret = ctrl->alloc_port(ctrl, j);
+		if (ret) {
+			for (; j >= i; j--)
+				ctrl->ports[j].state = SLIM_P_FREE;
+			goto alloc_err;
+		}
+		*rh++ = SLIM_PORT_HDL(SLIM_LA_MANAGER, 0, j);
+	}
+alloc_err:
+	mutex_unlock(&ctrl->m_ctrl);
+	return ret;
+}
+EXPORT_SYMBOL(slim_alloc_mgrports);
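+
+/*
+ * Illustrative sketch (hypothetical client): one half-duplex port uses
+ * two physical ports, so the handle buffer must be sized for both:
+ *
+ *	u32 ph[2];
+ *	ret = slim_alloc_mgrports(sb, SLIM_REQ_HALF_DUP, 1, ph, sizeof(ph));
+ */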
+
+/* Deallocate the port(s) allocated using the API above */
+int slim_dealloc_mgrports(struct slim_device *sb, u32 *hdl, int nports)
+{
+	int i;
+	struct slim_controller *ctrl = sb->ctrl;
+
+	if (!ctrl || !hdl)
+		return -EINVAL;
+
+	mutex_lock(&ctrl->m_ctrl);
+
+	for (i = 0; i < nports; i++) {
+		u8 pn;
+
+		pn = SLIM_HDL_TO_PORT(hdl[i]);
+
+		if (pn >= ctrl->nports || ctrl->ports[pn].state == SLIM_P_CFG) {
+			int j, ret;
+
+			if (pn >= ctrl->nports) {
+				dev_err(&ctrl->dev, "invalid port number");
+				ret = -EINVAL;
+			} else {
+				dev_err(&ctrl->dev,
+					"Can't dealloc connected port:%d", i);
+				ret = -EISCONN;
+			}
+			for (j = i - 1; j >= 0; j--) {
+				pn = SLIM_HDL_TO_PORT(hdl[j]);
+				ctrl->ports[pn].state = SLIM_P_UNCFG;
+			}
+			mutex_unlock(&ctrl->m_ctrl);
+			return ret;
+		}
+		if (ctrl->dealloc_port)
+			ctrl->dealloc_port(ctrl, pn);
+		ctrl->ports[pn].state = SLIM_P_FREE;
+	}
+	mutex_unlock(&ctrl->m_ctrl);
+	return 0;
+}
+EXPORT_SYMBOL(slim_dealloc_mgrports);
+
+/*
+ * slim_config_mgrports: Configure manager side ports
+ * @sb: device/client handle.
+ * @ph: array of port handles for which this configuration is valid
+ * @nports: Number of ports in ph
+ * @cfg: configuration requested for port(s)
+ * Configure port settings if they are different than the default ones.
+ * Returns success if the config could be applied. Returns -EISCONN if the
+ * port is in use
+ */
+int slim_config_mgrports(struct slim_device *sb, u32 *ph, int nports,
+				struct slim_port_cfg *cfg)
+{
+	int i;
+	struct slim_controller *ctrl;
+
+	if (!sb || !ph || !nports || !sb->ctrl || !cfg)
+		return -EINVAL;
+
+	ctrl = sb->ctrl;
+	mutex_lock(&ctrl->sched.m_reconf);
+	for (i = 0; i < nports; i++) {
+		u8 pn = SLIM_HDL_TO_PORT(ph[i]);
+
+		if (ctrl->ports[pn].state == SLIM_P_CFG) {
+			/* don't leak the reconf mutex on the error path */
+			mutex_unlock(&ctrl->sched.m_reconf);
+			return -EISCONN;
+		}
+		ctrl->ports[pn].cfg = *cfg;
+	}
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return 0;
+}
+EXPORT_SYMBOL(slim_config_mgrports);
+
+/*
+ * slim_get_slaveport: Get slave port handle
+ * @la: slave device logical address.
+ * @idx: port index at slave
+ * @rh: return handle
+ * @flw: Flow type (source or destination)
+ * This API only returns a slave port's representation as expected by slimbus
+ * driver. This port is not managed by the slimbus driver. Caller is expected
+ * to have visibility of this port since it's a device-port.
+ */
+int slim_get_slaveport(u8 la, int idx, u32 *rh, enum slim_port_flow flw)
+{
+	if (rh == NULL)
+		return -EINVAL;
+	*rh = SLIM_PORT_HDL(la, flw, idx);
+	return 0;
+}
+EXPORT_SYMBOL(slim_get_slaveport);
+
+static int connect_port_ch(struct slim_controller *ctrl, u8 ch, u32 ph,
+				enum slim_port_flow flow)
+{
+	int ret;
+	u8 buf[2];
+	u32 la = SLIM_HDL_TO_LA(ph);
+	u8 pn = (u8)SLIM_HDL_TO_PORT(ph);
+	DEFINE_SLIM_LDEST_TXN(txn, 0, 2, 6, NULL, buf, la);
+
+	if (flow == SLIM_SRC)
+		txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
+	else
+		txn.mc = SLIM_MSG_MC_CONNECT_SINK;
+	buf[0] = pn;
+	buf[1] = ctrl->chans[ch].chan;
+	if (la == SLIM_LA_MANAGER)
+		ctrl->ports[pn].flow = flow;
+	ret = slim_processtxn(ctrl, &txn, false);
+	if (!ret && la == SLIM_LA_MANAGER)
+		ctrl->ports[pn].state = SLIM_P_CFG;
+	return ret;
+}
+
+static int disconnect_port_ch(struct slim_controller *ctrl, u32 ph)
+{
+	int ret;
+	u32 la = SLIM_HDL_TO_LA(ph);
+	u8 pn = (u8)SLIM_HDL_TO_PORT(ph);
+	DEFINE_SLIM_LDEST_TXN(txn, 0, 1, 5, NULL, &pn, la);
+
+	txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
+	ret = slim_processtxn(ctrl, &txn, false);
+	if (ret)
+		return ret;
+	if (la == SLIM_LA_MANAGER) {
+		ctrl->ports[pn].state = SLIM_P_UNCFG;
+		ctrl->ports[pn].cfg.watermark = 0;
+		ctrl->ports[pn].cfg.port_opts = 0;
+		ctrl->ports[pn].ch = NULL;
+	}
+	return 0;
+}
+
+/*
+ * slim_connect_src: Connect source port to channel.
+ * @sb: client handle
+ * @srch: source handle to be connected to this channel
+ * @chanh: Channel with which the ports need to be associated with.
+ * Per slimbus specification, a channel may have 1 source port.
+ * Channel specified in chanh needs to be allocated first.
+ * Returns -EALREADY if source is already configured for this channel.
+ * Returns -ENOTCONN if channel is not allocated
+ * Returns -EINVAL if invalid direction is specified for non-manager port,
+ * or if the manager side port number is out of bounds, or in incorrect state
+ */
+int slim_connect_src(struct slim_device *sb, u32 srch, u16 chanh)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	int ret;
+	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
+	struct slim_ich *slc = &ctrl->chans[chan];
+	enum slim_port_flow flow = SLIM_HDL_TO_FLOW(srch);
+	u8 la = SLIM_HDL_TO_LA(srch);
+	u8 pn = SLIM_HDL_TO_PORT(srch);
+
+	/* manager ports don't have direction when they are allocated */
+	if (la != SLIM_LA_MANAGER && flow != SLIM_SRC)
+		return -EINVAL;
+
+	mutex_lock(&ctrl->sched.m_reconf);
+
+	if (la == SLIM_LA_MANAGER) {
+		if (pn >= ctrl->nports ||
+			ctrl->ports[pn].state != SLIM_P_UNCFG) {
+			ret = -EINVAL;
+			goto connect_src_err;
+		}
+	}
+
+	if (slc->state == SLIM_CH_FREE) {
+		ret = -ENOTCONN;
+		goto connect_src_err;
+	}
+	/*
+	 * Once channel is removed, its ports can be considered disconnected
+	 * So its ports can be reassigned. Source port is zeroed
+	 * when channel is deallocated.
+	 */
+	if (slc->srch) {
+		ret = -EALREADY;
+		goto connect_src_err;
+	}
+	ctrl->ports[pn].ch = &slc->prop;
+	ret = connect_port_ch(ctrl, chan, srch, SLIM_SRC);
+
+	if (!ret)
+		slc->srch = srch;
+
+connect_src_err:
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return ret;
+}
+EXPORT_SYMBOL(slim_connect_src);
+
+/*
+ * slim_connect_sink: Connect sink port(s) to channel.
+ * @sb: client handle
+ * @sinkh: sink handle(s) to be connected to this channel
+ * @nsink: number of sinks
+ * @chanh: Channel with which the ports need to be associated with.
+ * Per slimbus specification, a channel may have multiple sink-ports.
+ * Channel specified in chanh needs to be allocated first.
+ * Returns -EALREADY if sink is already configured for this channel.
+ * Returns -ENOTCONN if channel is not allocated
+ * Returns -EINVAL if invalid parameters are passed, or invalid direction is
+ * specified for non-manager port, or if the manager side port number is out of
+ * bounds, or in incorrect state
+ */
+int slim_connect_sink(struct slim_device *sb, u32 *sinkh, int nsink, u16 chanh)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	int j;
+	int ret = 0;
+	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
+	struct slim_ich *slc = &ctrl->chans[chan];
+	void *new_sinkh = NULL;
+
+	if (!sinkh || !nsink)
+		return -EINVAL;
+
+	mutex_lock(&ctrl->sched.m_reconf);
+
+	/*
+	 * Once channel is removed, its ports can be considered disconnected
+	 * So its ports can be reassigned. Sink ports are freed when channel
+	 * is deallocated.
+	 */
+	if (slc->state == SLIM_CH_FREE) {
+		ret = -ENOTCONN;
+		goto connect_sink_err;
+	}
+
+	for (j = 0; j < nsink; j++) {
+		enum slim_port_flow flow = SLIM_HDL_TO_FLOW(sinkh[j]);
+		u8 la = SLIM_HDL_TO_LA(sinkh[j]);
+		u8 pn = SLIM_HDL_TO_PORT(sinkh[j]);
+
+		if (la != SLIM_LA_MANAGER && flow != SLIM_SINK) {
+			ret = -EINVAL;
+		} else if (la == SLIM_LA_MANAGER &&
+			   (pn >= ctrl->nports ||
+			    ctrl->ports[pn].state != SLIM_P_UNCFG)) {
+			ret = -EINVAL;
+		} else {
+			ctrl->ports[pn].ch = &slc->prop;
+			ret = connect_port_ch(ctrl, chan, sinkh[j], SLIM_SINK);
+		}
+		if (ret) {
+			for (j = j - 1; j >= 0; j--)
+				disconnect_port_ch(ctrl, sinkh[j]);
+			goto connect_sink_err;
+		}
+	}
+
+	new_sinkh = krealloc(slc->sinkh, (sizeof(u32) * (slc->nsink + nsink)),
+				GFP_KERNEL);
+	if (!new_sinkh) {
+		ret = -ENOMEM;
+		for (j = 0; j < nsink; j++)
+			disconnect_port_ch(ctrl, sinkh[j]);
+		goto connect_sink_err;
+	}
+
+	slc->sinkh = new_sinkh;
+	memcpy(slc->sinkh + slc->nsink, sinkh, (sizeof(u32) * nsink));
+	slc->nsink += nsink;
+
+connect_sink_err:
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return ret;
+}
+EXPORT_SYMBOL(slim_connect_sink);
+
+/*
+ * slim_disconnect_ports: Disconnect port(s) from channel
+ * @sb: client handle
+ * @ph: ports to be disconnected
+ * @nph: number of ports.
+ * Disconnects ports from a channel.
+ */
+int slim_disconnect_ports(struct slim_device *sb, u32 *ph, int nph)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	int i;
+
+	mutex_lock(&ctrl->sched.m_reconf);
+
+	for (i = 0; i < nph; i++)
+		disconnect_port_ch(ctrl, ph[i]);
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return 0;
+}
+EXPORT_SYMBOL(slim_disconnect_ports);
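+
+/*
+ * Usage sketch (illustrative only, not part of the driver): a client that
+ * has already allocated a channel and obtained port handles might wire a
+ * source and one sink to it as below. The handles (srch, sinkh, chanh) and
+ * the unwind policy are assumptions for illustration.
+ *
+ *	static int example_wire_channel(struct slim_device *sb, u32 srch,
+ *					u32 sinkh, u16 chanh)
+ *	{
+ *		int ret = slim_connect_src(sb, srch, chanh);
+ *
+ *		if (ret)
+ *			return ret;
+ *		ret = slim_connect_sink(sb, &sinkh, 1, chanh);
+ *		if (ret)
+ *			slim_disconnect_ports(sb, &srch, 1);
+ *		return ret;
+ *	}
+ */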
+
+/*
+ * slim_port_xfer: Schedule buffer to be transferred/received using port-handle.
+ * @sb: client handle
+ * @ph: port-handle
+ * @iobuf: buffer to be transferred or populated
+ * @len: buffer size.
+ * @comp: completion signal to indicate transfer done or error.
+ * context: can sleep
+ * Returns number of bytes transferred/received if used synchronously.
+ * Will return 0 if used asynchronously.
+ * Client will call slim_port_get_xfer_status to get error and/or number of
+ * bytes transferred if used asynchronously.
+ */
+int slim_port_xfer(struct slim_device *sb, u32 ph, phys_addr_t iobuf, u32 len,
+				struct completion *comp)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u8 pn = SLIM_HDL_TO_PORT(ph);
+
+	dev_dbg(&ctrl->dev, "port xfer: num:%d", pn);
+	return ctrl->port_xfer(ctrl, pn, iobuf, len, comp);
+}
+EXPORT_SYMBOL(slim_port_xfer);
+
+/*
+ * slim_port_get_xfer_status: Poll for port transfers, or get transfer status
+ *	after completion is done.
+ * @sb: client handle
+ * @ph: port-handle
+ * @done_buf: return pointer (iobuf from slim_port_xfer) which is processed.
+ * @done_len: Number of bytes transferred.
+ * This can be called when the port_xfer completion is signalled.
+ * The API will return port transfer error (underflow/overflow/disconnect)
+ * and/or done_len will reflect number of bytes transferred. Note that
+ * done_len may be valid even if port error (overflow/underflow) has happened.
+ * e.g. If the transfer was scheduled with a few bytes to be transferred and
+ * client has not supplied more data to be transferred, done_len will indicate
+ * number of bytes transferred with underflow error. To avoid frequent underflow
+ * errors, multiple transfers can be queued (e.g. ping-pong buffers) so that
+ * channel has data to be transferred even if client is not ready to transfer
+ * data all the time. done_buf will indicate address of the last buffer
+ * processed from the multiple transfers.
+ */
+enum slim_port_err slim_port_get_xfer_status(struct slim_device *sb, u32 ph,
+			phys_addr_t *done_buf, u32 *done_len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u8 pn = SLIM_HDL_TO_PORT(ph);
+	u32 la = SLIM_HDL_TO_LA(ph);
+	enum slim_port_err err;
+
+	dev_dbg(&ctrl->dev, "get status port num:%d", pn);
+	/*
+	 * The framework only has insight into ports managed by the manager's
+	 * ported device; it cannot track ports owned by a slave device
+	 */
+	if (la != SLIM_LA_MANAGER) {
+		if (done_buf)
+			*done_buf = 0;
+		if (done_len)
+			*done_len = 0;
+		return SLIM_P_NOT_OWNED;
+	}
+	err = ctrl->port_xfer_status(ctrl, pn, done_buf, done_len);
+	if (err == SLIM_P_INPROGRESS)
+		err = ctrl->ports[pn].err;
+	return err;
+}
+EXPORT_SYMBOL(slim_port_get_xfer_status);
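+
+/*
+ * Usage sketch (illustrative, assuming a DMA-able buffer physical address
+ * phys and a port handle ph obtained from the port APIs): queue one
+ * asynchronous transfer and fetch its status once the completion fires.
+ * Per the documentation above, the returned status may indicate
+ * underflow/overflow even when done_len is valid.
+ *
+ *	static void example_xfer(struct slim_device *sb, u32 ph,
+ *				 phys_addr_t phys, u32 len)
+ *	{
+ *		DECLARE_COMPLETION_ONSTACK(comp);
+ *		phys_addr_t done_buf;
+ *		u32 done_len;
+ *
+ *		if (slim_port_xfer(sb, ph, phys, len, &comp) < 0)
+ *			return;
+ *		wait_for_completion(&comp);
+ *		slim_port_get_xfer_status(sb, ph, &done_buf, &done_len);
+ *	}
+ */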
+
+static void slim_add_ch(struct slim_controller *ctrl, struct slim_ich *slc)
+{
+	struct slim_ich **arr;
+	int i, j;
+	int *len;
+	int sl = slc->seglen << slc->rootexp;
+	/* Channel is already active and other end is transmitting data */
+	if (slc->state >= SLIM_CH_ACTIVE)
+		return;
+	if (slc->coeff == SLIM_COEFF_1) {
+		arr = ctrl->sched.chc1;
+		len = &ctrl->sched.num_cc1;
+	} else {
+		arr = ctrl->sched.chc3;
+		len = &ctrl->sched.num_cc3;
+		sl *= 3;
+	}
+
+	*len += 1;
+
+	/* Insert the channel based on rootexp and seglen */
+	for (i = 0; i < *len - 1; i++) {
+		/*
+		 * Primary key: exp low to high.
+		 * Secondary key: seglen: high to low
+		 */
+		if ((slc->rootexp > arr[i]->rootexp) ||
+			((slc->rootexp == arr[i]->rootexp) &&
+			(slc->seglen < arr[i]->seglen)))
+			continue;
+		else
+			break;
+	}
+	for (j = *len - 1; j > i; j--)
+		arr[j] = arr[j - 1];
+	arr[i] = slc;
+	if (!ctrl->allocbw)
+		ctrl->sched.usedslots += sl;
+}
+
+static int slim_remove_ch(struct slim_controller *ctrl, struct slim_ich *slc)
+{
+	struct slim_ich **arr;
+	int i;
+	u32 la, ph;
+	int *len;
+
+	if (slc->coeff == SLIM_COEFF_1) {
+		arr = ctrl->sched.chc1;
+		len = &ctrl->sched.num_cc1;
+	} else {
+		arr = ctrl->sched.chc3;
+		len = &ctrl->sched.num_cc3;
+	}
+
+	for (i = 0; i < *len; i++) {
+		if (arr[i] == slc)
+			break;
+	}
+	if (i >= *len)
+		return -EXFULL;
+	for (; i < *len - 1; i++)
+		arr[i] = arr[i + 1];
+	*len -= 1;
+	arr[*len] = NULL;
+
+	slc->state = SLIM_CH_ALLOCATED;
+	slc->def = 0;
+	slc->newintr = 0;
+	slc->newoff = 0;
+	for (i = 0; i < slc->nsink; i++) {
+		ph = slc->sinkh[i];
+		la = SLIM_HDL_TO_LA(ph);
+		/*
+		 * For ports managed by the manager's ported device, there is
+		 * no need to send a disconnect. It is the client's
+		 * responsibility to call disconnect on ports owned by a
+		 * slave device
+		 */
+		if (la == SLIM_LA_MANAGER) {
+			ctrl->ports[SLIM_HDL_TO_PORT(ph)].state = SLIM_P_UNCFG;
+			ctrl->ports[SLIM_HDL_TO_PORT(ph)].ch = NULL;
+		}
+	}
+
+	ph = slc->srch;
+	la = SLIM_HDL_TO_LA(ph);
+	if (la == SLIM_LA_MANAGER) {
+		u8 pn = SLIM_HDL_TO_PORT(ph);
+
+		ctrl->ports[pn].state = SLIM_P_UNCFG;
+		ctrl->ports[pn].cfg.watermark = 0;
+		ctrl->ports[pn].cfg.port_opts = 0;
+	}
+
+	kfree(slc->sinkh);
+	slc->sinkh = NULL;
+	slc->srch = 0;
+	slc->nsink = 0;
+	return 0;
+}
+
+static u32 slim_calc_prrate(struct slim_controller *ctrl, struct slim_ch *prop)
+{
+	u32 rate = 0, rate4k = 0, rate11k = 0;
+	u32 exp = 0;
+	u32 pr = 0;
+	bool exact = true;
+	bool done = false;
+	enum slim_ch_rate ratefam;
+
+	if (prop->prot >= SLIM_ASYNC_SMPLX)
+		return 0;
+	if (prop->baser == SLIM_RATE_1HZ) {
+		rate = prop->ratem / 4000;
+		rate4k = rate;
+		if (rate * 4000 == prop->ratem)
+			ratefam = SLIM_RATE_4000HZ;
+		else {
+			rate = prop->ratem / 11025;
+			rate11k = rate;
+			if (rate * 11025 == prop->ratem)
+				ratefam = SLIM_RATE_11025HZ;
+			else
+				ratefam = SLIM_RATE_1HZ;
+		}
+	} else {
+		ratefam = prop->baser;
+		rate = prop->ratem;
+	}
+	if (ratefam == SLIM_RATE_1HZ) {
+		exact = false;
+		if ((rate4k + 1) * 4000 < (rate11k + 1) * 11025) {
+			rate = rate4k + 1;
+			ratefam = SLIM_RATE_4000HZ;
+		} else {
+			rate = rate11k + 1;
+			ratefam = SLIM_RATE_11025HZ;
+		}
+	}
+	/* convert rate to coeff-exp */
+	while (!done) {
+		while ((rate & 0x1) != 0x1) {
+			rate >>= 1;
+			exp++;
+		}
+		if (rate > 3) {
+			/* roundup if not exact */
+			rate++;
+			exact = false;
+		} else
+			done = true;
+	}
+	if (ratefam == SLIM_RATE_4000HZ) {
+		if (rate == 1)
+			pr = 0x10;
+		else {
+			pr = 0;
+			exp++;
+		}
+	} else {
+		pr = 8;
+		exp++;
+	}
+	if (exp <= 7) {
+		pr |= exp;
+		if (exact)
+			pr |= 0x80;
+	} else
+		pr = 0;
+	return pr;
+}
+
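+/*
+ * Worked example (derived from the routine above, illustrative): 48 kHz
+ * audio expressed as ratem = 12 on the 4000 Hz base reduces to coefficient 3
+ * with exponent 2; the 4 kHz family then takes the else branch and bumps the
+ * exponent to 3, giving pr = 0x03, and the exact-rate bit 0x80 yields a
+ * presence-rate code of 0x83.
+ */
+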
+static int slim_nextdefine_ch(struct slim_device *sb, u8 chan)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u32 chrate = 0;
+	u32 exp = 0;
+	u32 coeff = 0;
+	bool exact = true;
+	bool done = false;
+	int ret = 0;
+	struct slim_ich *slc = &ctrl->chans[chan];
+	struct slim_ch *prop = &slc->prop;
+
+	slc->prrate = slim_calc_prrate(ctrl, prop);
+	dev_dbg(&ctrl->dev, "ch:%d, chan PR rate:%x\n", chan, slc->prrate);
+	if (prop->baser == SLIM_RATE_4000HZ)
+		chrate = 4000 * prop->ratem;
+	else if (prop->baser == SLIM_RATE_11025HZ)
+		chrate = 11025 * prop->ratem;
+	else
+		chrate = prop->ratem;
+	/* max allowed sample freq = 768 seg/frame */
+	if (chrate > 3600000)
+		return -EDQUOT;
+	if (prop->baser == SLIM_RATE_4000HZ &&
+			ctrl->a_framer->superfreq == 4000)
+		coeff = prop->ratem;
+	else if (prop->baser == SLIM_RATE_11025HZ &&
+			ctrl->a_framer->superfreq == 3675)
+		coeff = 3 * prop->ratem;
+	else {
+		u32 tempr = 0;
+
+		tempr = chrate * SLIM_CL_PER_SUPERFRAME_DIV8;
+		coeff = tempr / ctrl->a_framer->rootfreq;
+		if (coeff * ctrl->a_framer->rootfreq != tempr) {
+			coeff++;
+			exact = false;
+		}
+	}
+
+	/* convert coeff to coeff-exponent */
+	exp = 0;
+	while (!done) {
+		while ((coeff & 0x1) != 0x1) {
+			coeff >>= 1;
+			exp++;
+		}
+		if (coeff > 3) {
+			coeff++;
+			exact = false;
+		} else
+			done = true;
+	}
+	if (prop->prot == SLIM_HARD_ISO && !exact)
+		return -EPROTONOSUPPORT;
+	else if (prop->prot == SLIM_AUTO_ISO) {
+		if (exact)
+			prop->prot = SLIM_HARD_ISO;
+		else
+			prop->prot = SLIM_PUSH;
+	}
+	slc->rootexp = exp;
+	slc->seglen = prop->sampleszbits/SLIM_CL_PER_SL;
+	if (prop->prot != SLIM_HARD_ISO)
+		slc->seglen++;
+	if (prop->prot >= SLIM_EXT_SMPLX)
+		slc->seglen++;
+	/* convert coeff to enum */
+	if (coeff == 1) {
+		if (exp > 9)
+			ret = -EIO;
+		coeff = SLIM_COEFF_1;
+	} else {
+		if (exp > 8)
+			ret = -EIO;
+		coeff = SLIM_COEFF_3;
+	}
+	slc->coeff = coeff;
+
+	return ret;
+}
+
+/*
+ * slim_alloc_ch: Allocate a slimbus channel and return its handle.
+ * @sb: client handle.
+ * @chanh: return channel handle
+ * Slimbus channels are limited to 256 per the specification, although a
+ * given controller may support fewer.
+ * -EXFULL is returned if all channels are in use.
+ */
+int slim_alloc_ch(struct slim_device *sb, u16 *chanh)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u16 i;
+
+	if (!ctrl)
+		return -EINVAL;
+	mutex_lock(&ctrl->sched.m_reconf);
+	for (i = 0; i < ctrl->nchans; i++) {
+		if (ctrl->chans[i].state == SLIM_CH_FREE)
+			break;
+	}
+	if (i >= ctrl->nchans) {
+		mutex_unlock(&ctrl->sched.m_reconf);
+		return -EXFULL;
+	}
+	*chanh = i;
+	ctrl->chans[i].nextgrp = 0;
+	ctrl->chans[i].state = SLIM_CH_ALLOCATED;
+	ctrl->chans[i].chan = (u8)(ctrl->reserved + i);
+
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return 0;
+}
+EXPORT_SYMBOL(slim_alloc_ch);
+
+/*
+ * slim_query_ch: Get a reference-counted handle for a channel number. Every
+ * channel is reference counted (by up to one producer and the remaining
+ * clients as consumers).
+ * @sb: client handle
+ * @ch: slimbus channel number
+ * @chanh: return channel handle
+ * If the requested channel number is not in use, it is allocated and its
+ * reference count is set to one. If the channel was already allocated, this
+ * API returns a handle to that channel and increments the reference count.
+ * -EXFULL is returned if all channels are in use
+ */
+int slim_query_ch(struct slim_device *sb, u8 ch, u16 *chanh)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u16 i, j;
+	int ret = 0;
+
+	if (!ctrl || !chanh)
+		return -EINVAL;
+	mutex_lock(&ctrl->sched.m_reconf);
+	/* start with modulo number */
+	i = ch % ctrl->nchans;
+
+	for (j = 0; j < ctrl->nchans; j++) {
+		if (ctrl->chans[i].chan == ch) {
+			*chanh = i;
+			ctrl->chans[i].ref++;
+			if (ctrl->chans[i].state == SLIM_CH_FREE)
+				ctrl->chans[i].state = SLIM_CH_ALLOCATED;
+			goto query_out;
+		}
+		i = (i + 1) % ctrl->nchans;
+	}
+
+	/* Channel not in table yet */
+	ret = -EXFULL;
+	for (j = 0; j < ctrl->nchans; j++) {
+		if (ctrl->chans[i].state == SLIM_CH_FREE) {
+			ctrl->chans[i].state =
+				SLIM_CH_ALLOCATED;
+			*chanh = i;
+			ctrl->chans[i].ref++;
+			ctrl->chans[i].chan = ch;
+			ctrl->chans[i].nextgrp = 0;
+			ret = 0;
+			break;
+		}
+		i = (i + 1) % ctrl->nchans;
+	}
+query_out:
+	mutex_unlock(&ctrl->sched.m_reconf);
+	dev_dbg(&ctrl->dev, "query ch:%d,hdl:%d,ref:%d,ret:%d",
+				ch, i, ctrl->chans[i].ref, ret);
+	return ret;
+}
+EXPORT_SYMBOL(slim_query_ch);
+
+/*
+ * slim_dealloc_ch: Deallocate a channel allocated using the APIs above.
+ * -EISCONN is returned if an attempt is made to deallocate the channel
+ *  without removing it first.
+ * -ENOTCONN is returned if deallocation is attempted on a channel that is
+ *  not allocated.
+ */
+int slim_dealloc_ch(struct slim_device *sb, u16 chanh)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
+	struct slim_ich *slc = &ctrl->chans[chan];
+
+	if (!ctrl)
+		return -EINVAL;
+
+	mutex_lock(&ctrl->sched.m_reconf);
+	if (slc->state == SLIM_CH_FREE) {
+		mutex_unlock(&ctrl->sched.m_reconf);
+		return -ENOTCONN;
+	}
+	if (slc->ref > 1) {
+		slc->ref--;
+		mutex_unlock(&ctrl->sched.m_reconf);
+		dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
+					slc->chan, chanh, slc->ref);
+		return 0;
+	}
+	if (slc->state >= SLIM_CH_PENDING_ACTIVE) {
+		dev_err(&ctrl->dev, "Channel:%d should be removed first", chan);
+		mutex_unlock(&ctrl->sched.m_reconf);
+		return -EISCONN;
+	}
+	slc->ref--;
+	slc->state = SLIM_CH_FREE;
+	mutex_unlock(&ctrl->sched.m_reconf);
+	dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
+				slc->chan, chanh, slc->ref);
+	return 0;
+}
+EXPORT_SYMBOL(slim_dealloc_ch);
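+
+/*
+ * Usage sketch (illustrative): clients sharing a channel number through
+ * slim_query_ch each take a reference; each side later drops its reference
+ * with slim_dealloc_ch, and the channel is only freed on the last call.
+ * The channel number 130 is an assumption for illustration.
+ *
+ *	u16 chanh;
+ *	int ret = slim_query_ch(sb, 130, &chanh);
+ *
+ *	if (!ret) {
+ *		... use the channel ...
+ *		slim_dealloc_ch(sb, chanh);
+ *	}
+ */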
+
+/*
+ * slim_get_ch_state: Channel state.
+ * This API returns the channel's state (active, suspended, inactive, etc.)
+ */
+enum slim_ch_state slim_get_ch_state(struct slim_device *sb, u16 chanh)
+{
+	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
+	struct slim_ich *slc = &sb->ctrl->chans[chan];
+
+	return slc->state;
+}
+EXPORT_SYMBOL(slim_get_ch_state);
+
+/*
+ * slim_define_ch: Define a channel. This API defines channel parameters for
+ *	a given channel.
+ * @sb: client handle.
+ * @prop: slim_ch structure with channel parameters desired to be used.
+ * @chanh: list of channels to be defined.
+ * @nchan: number of channels in a group (1 if grp is false)
+ * @grp: Are the channels grouped
+ * @grph: return group handle if grouping of channels is desired.
+ * Channels can be grouped if multiple channels use the same parameters
+ * (e.g. 5.1 audio has 6 channels with the same parameters; they are all
+ * grouped under one handle to avoid calling the API repeatedly).
+ * -EISCONN is returned if channel is already used with different parameters.
+ * -ENXIO is returned if the channel is not yet allocated.
+ */
+int slim_define_ch(struct slim_device *sb, struct slim_ch *prop, u16 *chanh,
+			u8 nchan, bool grp, u16 *grph)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	int i, ret = 0;
+
+	if (!ctrl || !chanh || !prop || !nchan)
+		return -EINVAL;
+	mutex_lock(&ctrl->sched.m_reconf);
+	for (i = 0; i < nchan; i++) {
+		u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
+		struct slim_ich *slc = &ctrl->chans[chan];
+
+		dev_dbg(&ctrl->dev, "define_ch: ch:%d, state:%d", chan,
+				(int)ctrl->chans[chan].state);
+		if (slc->state < SLIM_CH_ALLOCATED) {
+			ret = -ENXIO;
+			goto err_define_ch;
+		}
+		if (slc->state >= SLIM_CH_DEFINED && slc->ref >= 2) {
+			if (prop->ratem != slc->prop.ratem ||
+			prop->sampleszbits != slc->prop.sampleszbits ||
+			prop->baser != slc->prop.baser) {
+				ret = -EISCONN;
+				goto err_define_ch;
+			}
+		} else if (slc->state > SLIM_CH_DEFINED) {
+			ret = -EISCONN;
+			goto err_define_ch;
+		} else {
+			ctrl->chans[chan].prop = *prop;
+			ret = slim_nextdefine_ch(sb, chan);
+			if (ret)
+				goto err_define_ch;
+		}
+		if (i < (nchan - 1))
+			ctrl->chans[chan].nextgrp = chanh[i + 1];
+		if (i == 0)
+			ctrl->chans[chan].nextgrp |= SLIM_START_GRP;
+		if (i == (nchan - 1))
+			ctrl->chans[chan].nextgrp |= SLIM_END_GRP;
+	}
+
+	if (grp)
+		*grph = ((nchan << 8) | SLIM_HDL_TO_CHIDX(chanh[0]));
+	for (i = 0; i < nchan; i++) {
+		u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
+		struct slim_ich *slc = &ctrl->chans[chan];
+
+		if (slc->state == SLIM_CH_ALLOCATED)
+			slc->state = SLIM_CH_DEFINED;
+	}
+err_define_ch:
+	dev_dbg(&ctrl->dev, "define_ch: ch:%d, ret:%d", *chanh, ret);
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return ret;
+}
+EXPORT_SYMBOL(slim_define_ch);
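+
+/*
+ * Usage sketch (illustrative): allocate and define a 2-channel group with
+ * shared parameters. The slim_ch values below (SLIM_AUTO_ISO protocol,
+ * ratem = 12 on the 4000 Hz base, i.e. 48 kHz, 16-bit samples) are
+ * assumptions for illustration; real values depend on the stream.
+ *
+ *	u16 chans[2], grph;
+ *	struct slim_ch prop = {
+ *		.prot = SLIM_AUTO_ISO,
+ *		.baser = SLIM_RATE_4000HZ,
+ *		.ratem = 12,
+ *		.sampleszbits = 16,
+ *	};
+ *	int ret;
+ *
+ *	ret = slim_alloc_ch(sb, &chans[0]);
+ *	if (!ret)
+ *		ret = slim_alloc_ch(sb, &chans[1]);
+ *	if (!ret)
+ *		ret = slim_define_ch(sb, &prop, chans, 2, true, &grph);
+ */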
+
+static u32 getsubfrmcoding(u32 *ctrlw, u32 *subfrml, u32 *msgsl)
+{
+	u32 code = 0;
+
+	if (*ctrlw == *subfrml) {
+		*ctrlw = 8;
+		*subfrml = 8;
+		*msgsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME
+				- SLIM_GDE_SLOTS_PER_SUPERFRAME;
+		return 0;
+	}
+	if (*subfrml == 6) {
+		code = 0;
+		*msgsl = 256;
+	} else if (*subfrml == 8) {
+		code = 1;
+		*msgsl = 192;
+	} else if (*subfrml == 24) {
+		code = 2;
+		*msgsl = 64;
+	} else { /* 32 */
+		code = 3;
+		*msgsl = 48;
+	}
+
+	if (*ctrlw < 8) {
+		if (*ctrlw >= 6) {
+			*ctrlw = 6;
+			code |= 0x14;
+		} else {
+			if (*ctrlw == 5)
+				*ctrlw = 4;
+			code |= (*ctrlw << 2);
+		}
+	} else {
+		code -= 2;
+		if (*ctrlw >= 24) {
+			*ctrlw = 24;
+			code |= 0x1e;
+		} else if (*ctrlw >= 16) {
+			*ctrlw = 16;
+			code |= 0x1c;
+		} else if (*ctrlw >= 12) {
+			*ctrlw = 12;
+			code |= 0x1a;
+		} else {
+			*ctrlw = 8;
+			code |= 0x18;
+		}
+	}
+
+	*msgsl = (*msgsl * *ctrlw) - SLIM_FRM_SLOTS_PER_SUPERFRAME -
+				SLIM_GDE_SLOTS_PER_SUPERFRAME;
+	return code;
+}
+
+static void shiftsegoffsets(struct slim_controller *ctrl, struct slim_ich **ach,
+				int sz, u32 shft)
+{
+	int i;
+
+	for (i = 0; i < sz; i++) {
+		struct slim_ich *slc;
+
+		if (ach[i] == NULL)
+			continue;
+		slc = ach[i];
+		if (slc->state == SLIM_CH_PENDING_REMOVAL)
+			continue;
+		slc->newoff += shft;
+		/* segment offset must remain less than the interval */
+		if (slc->newoff >= slc->newintr)
+			slc->newoff -= slc->newintr;
+	}
+}
+
+static inline int slim_sched_4k_coeff1_chans(struct slim_controller *ctrl,
+			struct slim_ich **slc, int *coeff, int *opensl1,
+			u32 expshft, u32 curintr, u32 curmaxsl,
+			int curexp, int finalexp)
+{
+	int coeff1;
+	struct slim_ich *slc1;
+
+	if (unlikely(!coeff || !slc || !ctrl || !opensl1))
+		return -EINVAL;
+
+	coeff1 = *coeff;
+	slc1 = *slc;
+	while ((coeff1 < ctrl->sched.num_cc1) &&
+	       (curexp == (int)(slc1->rootexp + expshft))) {
+		if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
+			coeff1++;
+			slc1 = ctrl->sched.chc1[coeff1];
+			continue;
+		}
+		if (opensl1[1] >= opensl1[0] ||
+		    (finalexp == (int)slc1->rootexp &&
+		     curintr <= 24 && opensl1[0] == curmaxsl)) {
+			opensl1[1] -= slc1->seglen;
+			slc1->newoff = curmaxsl + opensl1[1];
+			if (opensl1[1] < 0 && opensl1[0] == curmaxsl) {
+				opensl1[0] += opensl1[1];
+				opensl1[1] = 0;
+				if (opensl1[0] < 0) {
+					dev_dbg(&ctrl->dev,
+						"reconfig failed:%d\n",
+						__LINE__);
+					return -EXFULL;
+				}
+			}
+		} else {
+			if (slc1->seglen > opensl1[0]) {
+				dev_dbg(&ctrl->dev,
+					"reconfig failed:%d\n",	__LINE__);
+				return -EXFULL;
+			}
+			slc1->newoff = opensl1[0] - slc1->seglen;
+			opensl1[0] = slc1->newoff;
+		}
+		slc1->newintr = curintr;
+		coeff1++;
+		slc1 = ctrl->sched.chc1[coeff1];
+	}
+	*coeff = coeff1;
+	*slc = slc1;
+	return 0;
+}
+
+static int slim_sched_chans(struct slim_device *sb, u32 clkgear,
+			u32 *ctrlw, u32 *subfrml)
+{
+	int coeff1, coeff3;
+	enum slim_ch_coeff bias;
+	struct slim_controller *ctrl = sb->ctrl;
+	int last1 = ctrl->sched.num_cc1 - 1;
+	int last3 = ctrl->sched.num_cc3 - 1;
+
+	/*
+	 * Find first channels with coeff 1 & 3 as starting points for
+	 * scheduling
+	 */
+	for (coeff3 = 0; coeff3 < ctrl->sched.num_cc3; coeff3++) {
+		struct slim_ich *slc = ctrl->sched.chc3[coeff3];
+
+		if (slc->state == SLIM_CH_PENDING_REMOVAL)
+			continue;
+		else
+			break;
+	}
+	for (coeff1 = 0; coeff1 < ctrl->sched.num_cc1; coeff1++) {
+		struct slim_ich *slc = ctrl->sched.chc1[coeff1];
+
+		if (slc->state == SLIM_CH_PENDING_REMOVAL)
+			continue;
+		else
+			break;
+	}
+	if (coeff3 == ctrl->sched.num_cc3 && coeff1 == ctrl->sched.num_cc1) {
+		*ctrlw = 8;
+		*subfrml = 8;
+		return 0;
+	} else if (coeff3 == ctrl->sched.num_cc3)
+		bias = SLIM_COEFF_1;
+	else
+		bias = SLIM_COEFF_3;
+
+	/*
+	 * Find the last channel in the coeff1 and coeff3 lists; these are
+	 * used to know when scheduling of all channels is done
+	 */
+	while (last1 >= 0) {
+		if (ctrl->sched.chc1[last1] != NULL &&
+			(ctrl->sched.chc1[last1])->state !=
+			SLIM_CH_PENDING_REMOVAL)
+			break;
+		last1--;
+	}
+	while (last3 >= 0) {
+		if (ctrl->sched.chc3[last3] != NULL &&
+			(ctrl->sched.chc3[last3])->state !=
+			SLIM_CH_PENDING_REMOVAL)
+			break;
+		last3--;
+	}
+
+	if (bias == SLIM_COEFF_1) {
+		struct slim_ich *slc1 = ctrl->sched.chc1[coeff1];
+		u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
+		int curexp, finalexp;
+		u32 curintr, curmaxsl;
+		int opensl1[2];
+		int maxctrlw1;
+		int ret;
+
+		finalexp = (ctrl->sched.chc1[last1])->rootexp;
+		curexp = (int)expshft - 1;
+
+		curintr = (SLIM_MAX_INTR_COEFF_1 * 2) >> (curexp + 1);
+		curmaxsl = curintr >> 1;
+		opensl1[0] = opensl1[1] = curmaxsl;
+
+		while ((coeff1 < ctrl->sched.num_cc1) || (curintr > 24)) {
+			curintr >>= 1;
+			curmaxsl >>= 1;
+
+			/* update 4K family open slot records */
+			if (opensl1[1] < opensl1[0])
+				opensl1[1] -= curmaxsl;
+			else
+				opensl1[1] = opensl1[0] - curmaxsl;
+			opensl1[0] = curmaxsl;
+			if (opensl1[1] < 0) {
+				opensl1[0] += opensl1[1];
+				opensl1[1] = 0;
+			}
+			if (opensl1[0] <= 0) {
+				dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
+					__LINE__);
+				return -EXFULL;
+			}
+			curexp++;
+			/* schedule 4k family channels */
+			ret = slim_sched_4k_coeff1_chans(ctrl, &slc1, &coeff1,
+					opensl1, expshft, curintr, curmaxsl,
+					curexp, finalexp);
+			if (ret)
+				return ret;
+		}
+		/* Leave some slots for messaging space */
+		if (opensl1[1] <= 0 && opensl1[0] <= 0)
+			return -EXFULL;
+		if (opensl1[1] > opensl1[0]) {
+			int temp = opensl1[0];
+
+			opensl1[0] = opensl1[1];
+			opensl1[1] = temp;
+			shiftsegoffsets(ctrl, ctrl->sched.chc1,
+					ctrl->sched.num_cc1, curmaxsl);
+		}
+		/* choose subframe mode to maximize bw */
+		maxctrlw1 = opensl1[0];
+		if (opensl1[0] == curmaxsl)
+			maxctrlw1 += opensl1[1];
+		if (curintr >= 24) {
+			*subfrml = 24;
+			*ctrlw = maxctrlw1;
+		} else if (curintr == 12) {
+			if (maxctrlw1 > opensl1[1] * 4) {
+				*subfrml = 24;
+				*ctrlw = maxctrlw1;
+			} else {
+				*subfrml = 6;
+				*ctrlw = opensl1[1];
+			}
+		} else {
+			*subfrml = 6;
+			*ctrlw = maxctrlw1;
+		}
+	} else {
+		struct slim_ich *slc1 = NULL;
+		struct slim_ich *slc3 = ctrl->sched.chc3[coeff3];
+		u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
+		int curexp, finalexp, exp1;
+		u32 curintr, curmaxsl;
+		int opensl3[2];
+		int opensl1[6];
+		bool opensl1valid = false;
+		int maxctrlw1, maxctrlw3, i;
+
+		finalexp = (ctrl->sched.chc3[last3])->rootexp;
+		if (last1 >= 0) {
+			slc1 = ctrl->sched.chc1[coeff1];
+			exp1 = (ctrl->sched.chc1[last1])->rootexp;
+			if (exp1 > finalexp)
+				finalexp = exp1;
+		}
+		curexp = (int)expshft - 1;
+
+		curintr = (SLIM_MAX_INTR_COEFF_3 * 2) >> (curexp + 1);
+		curmaxsl = curintr >> 1;
+		opensl3[0] = opensl3[1] = curmaxsl;
+
+		while (coeff1 < ctrl->sched.num_cc1 ||
+			coeff3 < ctrl->sched.num_cc3 ||
+			curintr > 32) {
+			curintr >>= 1;
+			curmaxsl >>= 1;
+
+			/* update 12k family open slot records */
+			if (opensl3[1] < opensl3[0])
+				opensl3[1] -= curmaxsl;
+			else
+				opensl3[1] = opensl3[0] - curmaxsl;
+			opensl3[0] = curmaxsl;
+			if (opensl3[1] < 0) {
+				opensl3[0] += opensl3[1];
+				opensl3[1] = 0;
+			}
+			if (opensl3[0] <= 0) {
+				dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
+						__LINE__);
+				return -EXFULL;
+			}
+			curexp++;
+
+			/* schedule 12k family channels */
+			while (coeff3 < ctrl->sched.num_cc3 &&
+				curexp == (int)slc3->rootexp + expshft) {
+				if (slc3->state == SLIM_CH_PENDING_REMOVAL) {
+					coeff3++;
+					slc3 = ctrl->sched.chc3[coeff3];
+					continue;
+				}
+				opensl1valid = false;
+				if (opensl3[1] >= opensl3[0] ||
+					(finalexp == (int)slc3->rootexp &&
+					 curintr <= 32 &&
+					 opensl3[0] == curmaxsl &&
+					 last1 < 0)) {
+					opensl3[1] -= slc3->seglen;
+					slc3->newoff = curmaxsl + opensl3[1];
+					if (opensl3[1] < 0 &&
+						opensl3[0] == curmaxsl) {
+						opensl3[0] += opensl3[1];
+						opensl3[1] = 0;
+					}
+					if (opensl3[0] < 0) {
+						dev_dbg(&ctrl->dev,
+						"reconfig failed:%d\n",
+						__LINE__);
+						return -EXFULL;
+					}
+				} else {
+					if (slc3->seglen > opensl3[0]) {
+						dev_dbg(&ctrl->dev,
+						"reconfig failed:%d\n",
+						__LINE__);
+						return -EXFULL;
+					}
+					slc3->newoff = opensl3[0] -
+							slc3->seglen;
+					opensl3[0] = slc3->newoff;
+				}
+				slc3->newintr = curintr;
+				coeff3++;
+				slc3 = ctrl->sched.chc3[coeff3];
+			}
+			/* update 4k openslot records */
+			if (!opensl1valid) {
+				for (i = 0; i < 3; i++) {
+					opensl1[i * 2] = opensl3[0];
+					opensl1[(i * 2) + 1] = opensl3[1];
+				}
+			} else {
+				int opensl1p[6];
+
+				memcpy(opensl1p, opensl1, sizeof(opensl1));
+				for (i = 0; i < 3; i++) {
+					if (opensl1p[i] < opensl1p[i + 3])
+						opensl1[(i * 2) + 1] =
+							opensl1p[i];
+					else
+						opensl1[(i * 2) + 1] =
+							opensl1p[i + 3];
+				}
+				for (i = 0; i < 3; i++) {
+					opensl1[(i * 2) + 1] -= curmaxsl;
+					opensl1[i * 2] = curmaxsl;
+					if (opensl1[(i * 2) + 1] < 0) {
+						opensl1[i * 2] +=
+							opensl1[(i * 2) + 1];
+						opensl1[(i * 2) + 1] = 0;
+					}
+					if (opensl1[i * 2] < 0) {
+						dev_dbg(&ctrl->dev,
+						"reconfig failed:%d\n",
+						__LINE__);
+						return -EXFULL;
+					}
+				}
+			}
+			/* schedule 4k family channels */
+			while (coeff1 < ctrl->sched.num_cc1 &&
+				curexp == (int)slc1->rootexp + expshft) {
+				/* searchorder effective when opensl valid */
+				static const int srcho[] = { 5, 2, 4, 1, 3, 0 };
+				int maxopensl = 0;
+				int maxi = 0;
+
+				if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
+					coeff1++;
+					slc1 = ctrl->sched.chc1[coeff1];
+					continue;
+				}
+				opensl1valid = true;
+				for (i = 0; i < 6; i++) {
+					if (opensl1[srcho[i]] > maxopensl) {
+						maxopensl = opensl1[srcho[i]];
+						maxi = srcho[i];
+					}
+				}
+				opensl1[maxi] -= slc1->seglen;
+				slc1->newoff = (curmaxsl * maxi) +
+						opensl1[maxi];
+				if (opensl1[maxi] < 0 && (maxi & 1) == 1 &&
+				    opensl1[maxi - 1] == curmaxsl) {
+					opensl1[maxi - 1] += opensl1[maxi];
+					if (opensl3[0] > opensl1[maxi - 1])
+						opensl3[0] = opensl1[maxi - 1];
+					opensl3[1] = 0;
+					opensl1[maxi] = 0;
+					if (opensl1[maxi - 1] < 0) {
+						dev_dbg(&ctrl->dev,
+							"reconfig failed:%d\n",
+							__LINE__);
+						return -EXFULL;
+					}
+				} else if (opensl1[maxi] < 0) {
+					dev_dbg(&ctrl->dev,
+						"reconfig failed:%d\n",
+						__LINE__);
+					return -EXFULL;
+				} else if (opensl3[maxi & 1] > opensl1[maxi]) {
+					opensl3[maxi & 1] = opensl1[maxi];
+				}
+				slc1->newintr = curintr * 3;
+				coeff1++;
+				slc1 = ctrl->sched.chc1[coeff1];
+			}
+		}
+		/* Leave some slots for messaging space */
+		if (opensl3[1] <= 0 && opensl3[0] <= 0)
+			return -EXFULL;
+		/* swap 1st and 2nd bucket if 2nd bucket has more open slots */
+		if (opensl3[1] > opensl3[0]) {
+			int temp = opensl3[0];
+
+			opensl3[0] = opensl3[1];
+			opensl3[1] = temp;
+			temp = opensl1[5];
+			opensl1[5] = opensl1[4];
+			opensl1[4] = opensl1[3];
+			opensl1[3] = opensl1[2];
+			opensl1[2] = opensl1[1];
+			opensl1[1] = opensl1[0];
+			opensl1[0] = temp;
+			shiftsegoffsets(ctrl, ctrl->sched.chc1,
+					ctrl->sched.num_cc1, curmaxsl);
+			shiftsegoffsets(ctrl, ctrl->sched.chc3,
+					ctrl->sched.num_cc3, curmaxsl);
+		}
+		/* subframe mode to maximize BW */
+		maxctrlw3 = opensl3[0];
+		maxctrlw1 = opensl1[0];
+		if (opensl3[0] == curmaxsl)
+			maxctrlw3 += opensl3[1];
+		for (i = 0; i < 5 && opensl1[i] == curmaxsl; i++)
+			maxctrlw1 += opensl1[i + 1];
+		if (curintr >= 32) {
+			*subfrml = 32;
+			*ctrlw = maxctrlw3;
+		} else if (curintr == 16) {
+			if (maxctrlw3 > (opensl3[1] * 4)) {
+				*subfrml = 32;
+				*ctrlw = maxctrlw3;
+			} else {
+				*subfrml = 8;
+				*ctrlw = opensl3[1];
+			}
+		} else {
+			if ((maxctrlw1 * 8) >= (maxctrlw3 * 24)) {
+				*subfrml = 24;
+				*ctrlw = maxctrlw1;
+			} else {
+				*subfrml = 8;
+				*ctrlw = maxctrlw3;
+			}
+		}
+	}
+	return 0;
+}
+
+#ifdef DEBUG
+static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
+				u32 subfrml, u32 clkgear)
+{
+	int sl, i;
+	int cc1 = 0;
+	int cc3 = 0;
+	struct slim_ich *slc = NULL;
+
+	if (!ctrl->sched.slots)
+		return 0;
+	memset(ctrl->sched.slots, 0, SLIM_SL_PER_SUPERFRAME);
+	dev_dbg(&ctrl->dev, "Clock gear is:%d\n", clkgear);
+	for (sl = 0; sl < SLIM_SL_PER_SUPERFRAME; sl += subfrml) {
+		for (i = 0; i < ctrlw; i++)
+			ctrl->sched.slots[sl + i] = 33;
+	}
+	while (cc1 < ctrl->sched.num_cc1) {
+		slc = ctrl->sched.chc1[cc1];
+		if (slc == NULL) {
+			dev_err(&ctrl->dev, "SLC1 null in verify: chan%d\n",
+				cc1);
+			return -EIO;
+		}
+		dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
+				(slc - ctrl->chans), slc->newoff,
+				slc->newintr, slc->seglen);
+
+		if (slc->state != SLIM_CH_PENDING_REMOVAL) {
+			for (sl = slc->newoff;
+				sl < SLIM_SL_PER_SUPERFRAME;
+				sl += slc->newintr) {
+				for (i = 0; i < slc->seglen; i++) {
+					if (ctrl->sched.slots[sl + i])
+						return -EXFULL;
+					ctrl->sched.slots[sl + i] = cc1 + 1;
+				}
+			}
+		}
+		cc1++;
+	}
+	while (cc3 < ctrl->sched.num_cc3) {
+		slc = ctrl->sched.chc3[cc3];
+		if (slc == NULL) {
+			dev_err(&ctrl->dev, "SLC3 null in verify: chan%d\n",
+				cc3);
+			return -EIO;
+		}
+		dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
+				(slc - ctrl->chans), slc->newoff,
+				slc->newintr, slc->seglen);
+		if (slc->state != SLIM_CH_PENDING_REMOVAL) {
+			for (sl = slc->newoff;
+				sl < SLIM_SL_PER_SUPERFRAME;
+				sl += slc->newintr) {
+				for (i = 0; i < slc->seglen; i++) {
+					if (ctrl->sched.slots[sl + i])
+						return -EXFULL;
+					ctrl->sched.slots[sl + i] = cc3 + 1;
+				}
+			}
+		}
+		cc3++;
+	}
+
+	return 0;
+}
+#else
+static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
+				u32 subfrml, u32 clkgear)
+{
+	return 0;
+}
+#endif
+
+static void slim_sort_chan_grp(struct slim_controller *ctrl,
+				struct slim_ich *slc)
+{
+	u8 last = (u8)-1;
+	u8 second = 0;
+
+	for (; last > 0; last--) {
+		struct slim_ich *slc1 = slc;
+		struct slim_ich *slc2;
+		u8 next = SLIM_HDL_TO_CHIDX(slc1->nextgrp);
+
+		slc2 = &ctrl->chans[next];
+		for (second = 1; second <= last && slc2 &&
+			(slc2->state == SLIM_CH_ACTIVE ||
+			 slc2->state == SLIM_CH_PENDING_ACTIVE); second++) {
+			if (slc1->newoff > slc2->newoff) {
+				u32 temp = slc2->newoff;
+
+				slc2->newoff = slc1->newoff;
+				slc1->newoff = temp;
+			}
+			if (slc2->nextgrp & SLIM_END_GRP) {
+				last = second;
+				break;
+			}
+			slc1 = slc2;
+			next = SLIM_HDL_TO_CHIDX(slc1->nextgrp);
+			slc2 = &ctrl->chans[next];
+		}
+		if (slc2 == NULL)
+			last = second - 1;
+	}
+}
+
+static int slim_allocbw(struct slim_device *sb, int *subfrmc, int *clkgear)
+{
+	u32 msgsl = 0;
+	u32 ctrlw = 0;
+	u32 subfrml = 0;
+	int ret = -EIO;
+	struct slim_controller *ctrl = sb->ctrl;
+	u32 usedsl = ctrl->sched.usedslots + ctrl->sched.pending_msgsl;
+	u32 availsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME -
+			SLIM_GDE_SLOTS_PER_SUPERFRAME;
+	*clkgear = SLIM_MAX_CLK_GEAR;
+
+	dev_dbg(&ctrl->dev, "used sl:%u, available sl:%u\n", usedsl, availsl);
+	dev_dbg(&ctrl->dev, "pending: chan sl:%u, msg sl:%u, clkgear:%u\n",
+				ctrl->sched.usedslots,
+				ctrl->sched.pending_msgsl, *clkgear);
+	/*
+	 * If the number of used slots is 0, all channels are inactive.
+	 * It is very likely that the manager will request clock pause soon.
+	 * Keeping the bus at MAX_GEAR ensures the clock pause sequence takes
+	 * the minimum amount of time.
+	 */
+	if (ctrl->sched.usedslots != 0) {
+		while ((usedsl * 2 <= availsl) && (*clkgear > ctrl->min_cg)) {
+			*clkgear -= 1;
+			usedsl *= 2;
+		}
+	}
+
+	/*
+	 * Try scheduling data channels at the current clock gear; if not all
+	 * channels can be scheduled, or the reserved message BW can't be
+	 * satisfied, increase the clock gear and try again
+	 */
+	for (; *clkgear <= ctrl->max_cg; (*clkgear)++) {
+		ret = slim_sched_chans(sb, *clkgear, &ctrlw, &subfrml);
+
+		if (ret == 0) {
+			*subfrmc = getsubfrmcoding(&ctrlw, &subfrml, &msgsl);
+			if ((msgsl >> (ctrl->max_cg - *clkgear) <
+				ctrl->sched.pending_msgsl) &&
+				(*clkgear < ctrl->max_cg))
+				continue;
+			else
+				break;
+		}
+	}
+	if (ret == 0) {
+		int i;
+		/* Sort channel-groups */
+		for (i = 0; i < ctrl->sched.num_cc1; i++) {
+			struct slim_ich *slc = ctrl->sched.chc1[i];
+
+			if (slc->state == SLIM_CH_PENDING_REMOVAL)
+				continue;
+			if ((slc->nextgrp & SLIM_START_GRP) &&
+				!(slc->nextgrp & SLIM_END_GRP)) {
+				slim_sort_chan_grp(ctrl, slc);
+			}
+		}
+		for (i = 0; i < ctrl->sched.num_cc3; i++) {
+			struct slim_ich *slc = ctrl->sched.chc3[i];
+
+			if (slc->state == SLIM_CH_PENDING_REMOVAL)
+				continue;
+			if ((slc->nextgrp & SLIM_START_GRP) &&
+				!(slc->nextgrp & SLIM_END_GRP)) {
+				slim_sort_chan_grp(ctrl, slc);
+			}
+		}
+
+		ret = slim_verifychansched(ctrl, ctrlw, subfrml, *clkgear);
+	}
+
+	return ret;
+}
+
+static void slim_change_existing_chans(struct slim_controller *ctrl, int coeff)
+{
+	struct slim_ich **arr;
+	int len, i;
+
+	if (coeff == SLIM_COEFF_1) {
+		arr = ctrl->sched.chc1;
+		len = ctrl->sched.num_cc1;
+	} else {
+		arr = ctrl->sched.chc3;
+		len = ctrl->sched.num_cc3;
+	}
+	for (i = 0; i < len; i++) {
+		struct slim_ich *slc = arr[i];
+
+		if (slc->state == SLIM_CH_ACTIVE ||
+			slc->state == SLIM_CH_SUSPENDED) {
+			slc->offset = slc->newoff;
+			slc->interval = slc->newintr;
+		}
+	}
+}
+
+static void slim_chan_changes(struct slim_device *sb, bool revert)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+
+	while (!list_empty(&sb->mark_define)) {
+		struct slim_ich *slc;
+		struct slim_pending_ch *pch =
+				list_entry(sb->mark_define.next,
+					struct slim_pending_ch, pending);
+		slc = &ctrl->chans[pch->chan];
+		if (revert) {
+			if (slc->state == SLIM_CH_PENDING_ACTIVE) {
+				u32 sl = slc->seglen << slc->rootexp;
+
+				if (slc->coeff == SLIM_COEFF_3)
+					sl *= 3;
+				if (!ctrl->allocbw)
+					ctrl->sched.usedslots -= sl;
+				slim_remove_ch(ctrl, slc);
+				slc->state = SLIM_CH_DEFINED;
+			}
+		} else {
+			slc->state = SLIM_CH_ACTIVE;
+			slc->def++;
+		}
+		list_del_init(&pch->pending);
+		kfree(pch);
+	}
+
+	while (!list_empty(&sb->mark_removal)) {
+		struct slim_pending_ch *pch =
+				list_entry(sb->mark_removal.next,
+					struct slim_pending_ch, pending);
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		u32 sl = slc->seglen << slc->rootexp;
+
+		if (revert || slc->def > 0) {
+			if (slc->coeff == SLIM_COEFF_3)
+				sl *= 3;
+			if (!ctrl->allocbw)
+				ctrl->sched.usedslots += sl;
+			if (revert)
+				slc->def++;
+			slc->state = SLIM_CH_ACTIVE;
+		} else
+			slim_remove_ch(ctrl, slc);
+		list_del_init(&pch->pending);
+		kfree(pch);
+	}
+
+	while (!list_empty(&sb->mark_suspend)) {
+		struct slim_pending_ch *pch =
+				list_entry(sb->mark_suspend.next,
+					struct slim_pending_ch, pending);
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+
+		if (revert)
+			slc->state = SLIM_CH_ACTIVE;
+		list_del_init(&pch->pending);
+		kfree(pch);
+	}
+	/* Change already active channel if reconfig succeeded */
+	if (!revert) {
+		slim_change_existing_chans(ctrl, SLIM_COEFF_1);
+		slim_change_existing_chans(ctrl, SLIM_COEFF_3);
+	}
+}
+
+/*
+ * slim_reconfigure_now: Request reconfiguration now.
+ * @sb: client handle
+ * This API does what the commit flag in other scheduling APIs does.
+ * -EXFULL is returned if there is no space in TDM to reserve the
+ * bandwidth. -EBUSY is returned if reconfiguration request is already in
+ * progress.
+ */
+int slim_reconfigure_now(struct slim_device *sb)
+{
+	u8 i;
+	u8 wbuf[4];
+	u32 clkgear, subframe;
+	u32 curexp;
+	int ret;
+	struct slim_controller *ctrl = sb->ctrl;
+	u32 expshft;
+	u32 segdist;
+	struct slim_pending_ch *pch;
+	DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION, 0, 3,
+				NULL, NULL, sb->laddr);
+
+	mutex_lock(&ctrl->sched.m_reconf);
+	/*
+	 * If there are no pending changes from this client, avoid sending
+	 * the reconfiguration sequence
+	 */
+	if (sb->pending_msgsl == sb->cur_msgsl &&
+		list_empty(&sb->mark_define) &&
+		list_empty(&sb->mark_suspend)) {
+		struct list_head *pos, *next;
+
+		list_for_each_safe(pos, next, &sb->mark_removal) {
+			struct slim_ich *slc;
+
+			pch = list_entry(pos, struct slim_pending_ch, pending);
+			slc = &ctrl->chans[pch->chan];
+			if (slc->def > 0)
+				slc->def--;
+			/* Disconnect source port to free it up */
+			if (SLIM_HDL_TO_LA(slc->srch) == sb->laddr)
+				slc->srch = 0;
+			/*
+			 * If controller overrides BW allocation,
+			 * delete this in remove channel itself
+			 */
+			if (slc->def != 0 && !ctrl->allocbw) {
+				list_del(&pch->pending);
+				kfree(pch);
+			}
+		}
+		if (list_empty(&sb->mark_removal)) {
+			mutex_unlock(&ctrl->sched.m_reconf);
+			pr_info("SLIM_CL: skip reconfig sequence\n");
+			return 0;
+		}
+	}
+
+	ctrl->sched.pending_msgsl += sb->pending_msgsl - sb->cur_msgsl;
+	list_for_each_entry(pch, &sb->mark_define, pending) {
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+
+		slim_add_ch(ctrl, slc);
+		if (slc->state < SLIM_CH_ACTIVE)
+			slc->state = SLIM_CH_PENDING_ACTIVE;
+	}
+
+	list_for_each_entry(pch, &sb->mark_removal, pending) {
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		u32 sl = slc->seglen << slc->rootexp;
+
+		if (slc->coeff == SLIM_COEFF_3)
+			sl *= 3;
+		if (!ctrl->allocbw)
+			ctrl->sched.usedslots -= sl;
+		slc->state = SLIM_CH_PENDING_REMOVAL;
+	}
+	list_for_each_entry(pch, &sb->mark_suspend, pending) {
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+
+		slc->state = SLIM_CH_SUSPENDED;
+	}
+
+	/*
+	 * Controller can override default channel scheduling algorithm.
+	 * (e.g. if controller needs to use fixed channel scheduling based
+	 * on number of channels)
+	 */
+	if (ctrl->allocbw)
+		ret = ctrl->allocbw(sb, &subframe, &clkgear);
+	else
+		ret = slim_allocbw(sb, &subframe, &clkgear);
+
+	if (!ret) {
+		ret = slim_processtxn(ctrl, &txn, false);
+		dev_dbg(&ctrl->dev, "sending begin_reconfig:ret:%d\n", ret);
+	}
+
+	if (!ret && subframe != ctrl->sched.subfrmcode) {
+		wbuf[0] = (u8)(subframe & 0xFF);
+		txn.mc = SLIM_MSG_MC_NEXT_SUBFRAME_MODE;
+		txn.len = 1;
+		txn.rl = 4;
+		txn.wbuf = wbuf;
+		ret = slim_processtxn(ctrl, &txn, false);
+		dev_dbg(&ctrl->dev, "sending subframe:%d,ret:%d\n",
+				(int)wbuf[0], ret);
+	}
+	if (!ret && clkgear != ctrl->clkgear) {
+		wbuf[0] = (u8)(clkgear & 0xFF);
+		txn.mc = SLIM_MSG_MC_NEXT_CLOCK_GEAR;
+		txn.len = 1;
+		txn.rl = 4;
+		txn.wbuf = wbuf;
+		ret = slim_processtxn(ctrl, &txn, false);
+		dev_dbg(&ctrl->dev, "sending clkgear:%d,ret:%d\n",
+				(int)wbuf[0], ret);
+	}
+	if (ret)
+		goto revert_reconfig;
+
+	expshft = SLIM_MAX_CLK_GEAR - clkgear;
+	/* activate/remove channel */
+	list_for_each_entry(pch, &sb->mark_define, pending) {
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		/* Define content */
+		wbuf[0] = slc->chan;
+		wbuf[1] = slc->prrate;
+		wbuf[2] = slc->prop.dataf | (slc->prop.auxf << 4);
+		wbuf[3] = slc->prop.sampleszbits / SLIM_CL_PER_SL;
+		txn.mc = SLIM_MSG_MC_NEXT_DEFINE_CONTENT;
+		txn.len = 4;
+		txn.rl = 7;
+		txn.wbuf = wbuf;
+		dev_dbg(&ctrl->dev, "define content, activate:%x, %x, %x, %x\n",
+				wbuf[0], wbuf[1], wbuf[2], wbuf[3]);
+		/* Right now, channel link bit is not supported */
+		ret = slim_processtxn(ctrl, &txn, false);
+		if (ret)
+			goto revert_reconfig;
+
+		txn.mc = SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL;
+		txn.len = 1;
+		txn.rl = 4;
+		ret = slim_processtxn(ctrl, &txn, false);
+		if (ret)
+			goto revert_reconfig;
+	}
+
+	list_for_each_entry(pch, &sb->mark_removal, pending) {
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+
+		dev_dbg(&ctrl->dev, "remove chan:%x\n", pch->chan);
+		wbuf[0] = slc->chan;
+		txn.mc = SLIM_MSG_MC_NEXT_REMOVE_CHANNEL;
+		txn.len = 1;
+		txn.rl = 4;
+		txn.wbuf = wbuf;
+		ret = slim_processtxn(ctrl, &txn, false);
+		if (ret)
+			goto revert_reconfig;
+	}
+	list_for_each_entry(pch, &sb->mark_suspend, pending) {
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+
+		dev_dbg(&ctrl->dev, "suspend chan:%x\n", pch->chan);
+		wbuf[0] = slc->chan;
+		txn.mc = SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL;
+		txn.len = 1;
+		txn.rl = 4;
+		txn.wbuf = wbuf;
+		ret = slim_processtxn(ctrl, &txn, false);
+		if (ret)
+			goto revert_reconfig;
+	}
+
+	/* Define CC1 channel */
+	for (i = 0; i < ctrl->sched.num_cc1; i++) {
+		struct slim_ich *slc = ctrl->sched.chc1[i];
+
+		if (slc->state == SLIM_CH_PENDING_REMOVAL)
+			continue;
+		curexp = slc->rootexp + expshft;
+		segdist = (slc->newoff << curexp) & 0x1FF;
+		expshft = SLIM_MAX_CLK_GEAR - clkgear;
+		dev_dbg(&ctrl->dev, "new-intr:%d, old-intr:%d, dist:%d\n",
+				slc->newintr, slc->interval, segdist);
+		dev_dbg(&ctrl->dev, "new-off:%d, old-off:%d\n",
+				slc->newoff, slc->offset);
+
+		if (slc->state < SLIM_CH_ACTIVE || slc->def < slc->ref ||
+			slc->newintr != slc->interval ||
+			slc->newoff != slc->offset) {
+			segdist |= 0x200;
+			segdist >>= curexp;
+			segdist |= (slc->newoff << (curexp + 1)) & 0xC00;
+			wbuf[0] = slc->chan;
+			wbuf[1] = (u8)(segdist & 0xFF);
+			wbuf[2] = (u8)((segdist & 0xF00) >> 8) |
+					(slc->prop.prot << 4);
+			wbuf[3] = slc->seglen;
+			txn.mc = SLIM_MSG_MC_NEXT_DEFINE_CHANNEL;
+			txn.len = 4;
+			txn.rl = 7;
+			txn.wbuf = wbuf;
+			ret = slim_processtxn(ctrl, &txn, false);
+			if (ret)
+				goto revert_reconfig;
+		}
+	}
+
+	/* Define CC3 channels */
+	for (i = 0; i < ctrl->sched.num_cc3; i++) {
+		struct slim_ich *slc = ctrl->sched.chc3[i];
+
+		if (slc->state == SLIM_CH_PENDING_REMOVAL)
+			continue;
+		curexp = slc->rootexp + expshft;
+		segdist = (slc->newoff << curexp) & 0x1FF;
+		expshft = SLIM_MAX_CLK_GEAR - clkgear;
+		dev_dbg(&ctrl->dev, "new-intr:%d, old-intr:%d, dist:%d\n",
+				slc->newintr, slc->interval, segdist);
+		dev_dbg(&ctrl->dev, "new-off:%d, old-off:%d\n",
+				slc->newoff, slc->offset);
+
+		if (slc->state < SLIM_CH_ACTIVE || slc->def < slc->ref ||
+			slc->newintr != slc->interval ||
+			slc->newoff != slc->offset) {
+			segdist |= 0x200;
+			segdist >>= curexp;
+			segdist |= 0xC00;
+			wbuf[0] = slc->chan;
+			wbuf[1] = (u8)(segdist & 0xFF);
+			wbuf[2] = (u8)((segdist & 0xF00) >> 8) |
+					(slc->prop.prot << 4);
+			wbuf[3] = (u8)(slc->seglen);
+			txn.mc = SLIM_MSG_MC_NEXT_DEFINE_CHANNEL;
+			txn.len = 4;
+			txn.rl = 7;
+			txn.wbuf = wbuf;
+			ret = slim_processtxn(ctrl, &txn, false);
+			if (ret)
+				goto revert_reconfig;
+		}
+	}
+	txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
+	txn.len = 0;
+	txn.rl = 3;
+	txn.wbuf = NULL;
+	ret = slim_processtxn(ctrl, &txn, false);
+	dev_dbg(&ctrl->dev, "reconfig now:ret:%d\n", ret);
+	if (!ret) {
+		ctrl->sched.subfrmcode = subframe;
+		ctrl->clkgear = clkgear;
+		ctrl->sched.msgsl = ctrl->sched.pending_msgsl;
+		sb->cur_msgsl = sb->pending_msgsl;
+		slim_chan_changes(sb, false);
+		mutex_unlock(&ctrl->sched.m_reconf);
+		return 0;
+	}
+
+revert_reconfig:
+	/* Revert channel changes */
+	slim_chan_changes(sb, true);
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return ret;
+}
+EXPORT_SYMBOL(slim_reconfigure_now);
+
+static int add_pending_ch(struct list_head *listh, u8 chan)
+{
+	struct slim_pending_ch *pch;
+
+	pch = kmalloc(sizeof(struct slim_pending_ch), GFP_KERNEL);
+	if (!pch)
+		return -ENOMEM;
+	pch->chan = chan;
+	list_add_tail(&pch->pending, listh);
+	return 0;
+}
+
+/*
+ * slim_control_ch: Channel control API.
+ * @sb: client handle
+ * @chanh: group or channel handle to be controlled
+ * @chctrl: Control command (activate/suspend/remove)
+ * @commit: flag to indicate whether the control should take effect right-away.
+ * This API activates, removes or suspends a channel (or group of channels).
+ * chanh indicates the channel or group handle (returned by the define_ch API).
+ * Reconfiguration may be time-consuming since it can change all other active
+ * channel allocations on the bus, the clock gear used by the slimbus, and the
+ * control space width used for messaging.
+ * commit makes sure that multiple channels can be activated/deactivated before
+ * reconfiguration is started.
+ * -EXFULL is returned if there is no space in TDM to reserve the bandwidth.
+ * -EISCONN/-ENOTCONN is returned if the channel is already connected or not
+ * yet defined.
+ * -EINVAL is returned if individual control of a grouped-channel is attempted.
+ */
+int slim_control_ch(struct slim_device *sb, u16 chanh,
+			enum slim_ch_control chctrl, bool commit)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	int ret = 0;
+	/* Get rid of the group flag in MSB if any */
+	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
+	u8 nchan = 0;
+	struct slim_ich *slc = &ctrl->chans[chan];
+
+	if (!(slc->nextgrp & SLIM_START_GRP))
+		return -EINVAL;
+
+	mutex_lock(&sb->sldev_reconf);
+	mutex_lock(&ctrl->sched.m_reconf);
+	do {
+		struct slim_pending_ch *pch;
+		bool add_mark_removal = true;
+
+		slc = &ctrl->chans[chan];
+		dev_dbg(&ctrl->dev, "chan:%d,ctrl:%d,def:%d", chan, chctrl,
+					slc->def);
+		if (slc->state < SLIM_CH_DEFINED) {
+			ret = -ENOTCONN;
+			break;
+		}
+		if (chctrl == SLIM_CH_SUSPEND) {
+			ret = add_pending_ch(&sb->mark_suspend, chan);
+			if (ret)
+				break;
+		} else if (chctrl == SLIM_CH_ACTIVATE) {
+			if (slc->state > SLIM_CH_ACTIVE) {
+				ret = -EISCONN;
+				break;
+			}
+			ret = add_pending_ch(&sb->mark_define, chan);
+			if (ret)
+				break;
+		} else {
+			if (slc->state < SLIM_CH_ACTIVE) {
+				ret = -ENOTCONN;
+				break;
+			}
+			/*
+			 * If a channel removal request comes while the
+			 * channel is pending in mark_define, remove it from
+			 * the define list instead of adding it to the
+			 * removal list
+			 */
+			if (!list_empty(&sb->mark_define)) {
+				struct list_head *pos, *next;
+
+				list_for_each_safe(pos, next,
+						  &sb->mark_define) {
+					pch = list_entry(pos,
+						struct slim_pending_ch,
+						pending);
+					if (pch->chan == chan) {
+						list_del(&pch->pending);
+						kfree(pch);
+						add_mark_removal = false;
+						break;
+					}
+				}
+			}
+			if (add_mark_removal) {
+				ret = add_pending_ch(&sb->mark_removal, chan);
+				if (ret)
+					break;
+			}
+		}
+
+		nchan++;
+		if (nchan < SLIM_GRP_TO_NCHAN(chanh))
+			chan = SLIM_HDL_TO_CHIDX(slc->nextgrp);
+	} while (nchan < SLIM_GRP_TO_NCHAN(chanh));
+	mutex_unlock(&ctrl->sched.m_reconf);
+	if (!ret && commit)
+		ret = slim_reconfigure_now(sb);
+	mutex_unlock(&sb->sldev_reconf);
+	return ret;
+}
+EXPORT_SYMBOL(slim_control_ch);
+
+/*
+ * slim_reservemsg_bw: Request to reserve bandwidth for messages.
+ * @sb: client handle
+ * @bw_bps: message bandwidth in bits per second to be requested
+ * @commit: indicates whether the reconfiguration needs to be acted upon.
+ * This call can be grouped with a slim_control_ch call, with only one of the
+ * two specifying the commit flag, to avoid triggering reconfiguration too
+ * frequently. -EXFULL is returned if there is no space in TDM to reserve the
+ * bandwidth. -EBUSY is returned if reconfiguration is requested, but a request
+ * is already in progress.
+ */
+int slim_reservemsg_bw(struct slim_device *sb, u32 bw_bps, bool commit)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	int ret = 0;
+	int sl;
+
+	mutex_lock(&sb->sldev_reconf);
+	if ((bw_bps >> 3) >= ctrl->a_framer->rootfreq)
+		sl = SLIM_SL_PER_SUPERFRAME;
+	else {
+		sl = (bw_bps * (SLIM_CL_PER_SUPERFRAME_DIV8/SLIM_CL_PER_SL/2) +
+			(ctrl->a_framer->rootfreq/2 - 1)) /
+			(ctrl->a_framer->rootfreq/2);
+	}
+	dev_dbg(&ctrl->dev, "request:bw:%d, slots:%d, current:%d\n", bw_bps, sl,
+						sb->cur_msgsl);
+	sb->pending_msgsl = sl;
+	if (commit)
+		ret = slim_reconfigure_now(sb);
+	mutex_unlock(&sb->sldev_reconf);
+	return ret;
+}
+EXPORT_SYMBOL(slim_reservemsg_bw);
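+
+/*
+ * Usage sketch (illustrative): group a channel activation with a message
+ * bandwidth reservation so that only one reconfiguration sequence is sent;
+ * only the last call sets the commit flag. The group handle grph and the
+ * 100 kbps figure are assumptions for illustration.
+ *
+ *	ret = slim_control_ch(sb, grph, SLIM_CH_ACTIVATE, false);
+ *	if (!ret)
+ *		ret = slim_reservemsg_bw(sb, 100000, true);
+ */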
+
+/*
+ * slim_ctrl_clk_pause: Called by a slimbus controller to request the clock
+ *	to be paused, or to be woken up out of clock pause
+ * @ctrl: controller requesting bus to be paused or woken up
+ * @wakeup: Wakeup this controller from clock pause.
+ * @restart: Restart time value per spec used for clock pause. This value
+ *	isn't used when the controller is to be woken up.
+ * This API executes the clock pause reconfiguration sequence if wakeup is
+ * false. If wakeup is true, the controller's wakeup callback is invoked.
+ * Once paused, the slimbus clock is idle and can later be disabled by the
+ * controller.
+ */
+int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart)
+{
+	int ret = 0;
+	int i;
+	DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_CLK_PAUSE_SEQ_FLG |
+				SLIM_MSG_MC_BEGIN_RECONFIGURATION, 0, 3,
+				NULL, NULL, 0);
+
+	if (!wakeup && restart > SLIM_CLK_UNSPECIFIED)
+		return -EINVAL;
+	mutex_lock(&ctrl->m_ctrl);
+	if (wakeup) {
+		if (ctrl->clk_state == SLIM_CLK_ACTIVE) {
+			mutex_unlock(&ctrl->m_ctrl);
+			return 0;
+		}
+		wait_for_completion(&ctrl->pause_comp);
+		/*
+		 * Slimbus framework will call controller wakeup
+		 * Controller should make sure that it sets active framer
+		 * out of clock pause by doing appropriate setting
+		 */
+		if (ctrl->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup)
+			ret = ctrl->wakeup(ctrl);
+		/*
+		 * If wakeup fails, make sure that next attempt can succeed.
+		 * Since we already consumed pause_comp, complete it so
+		 * that next wakeup isn't blocked forever
+		 */
+		if (!ret)
+			ctrl->clk_state = SLIM_CLK_ACTIVE;
+		else
+			complete(&ctrl->pause_comp);
+		mutex_unlock(&ctrl->m_ctrl);
+		return ret;
+	}
+
+	switch (ctrl->clk_state) {
+	case SLIM_CLK_ENTERING_PAUSE:
+	case SLIM_CLK_PAUSE_FAILED:
+		/*
+		 * If controller is already trying to enter clock pause,
+		 * let it finish.
+		 * In case of error, retry
+		 * In both cases, previous clock pause has signalled
+		 * completion.
+		 */
+		wait_for_completion(&ctrl->pause_comp);
+		/* retry upon failure */
+		if (ctrl->clk_state == SLIM_CLK_PAUSE_FAILED) {
+			ctrl->clk_state = SLIM_CLK_ACTIVE;
+		} else {
+			mutex_unlock(&ctrl->m_ctrl);
+			/*
+			 * Signal completion so that wakeup can wait on
+			 * it.
+			 */
+			complete(&ctrl->pause_comp);
+			return 0;
+		}
+		break;
+	case SLIM_CLK_PAUSED:
+		/* already paused */
+		mutex_unlock(&ctrl->m_ctrl);
+		return 0;
+	case SLIM_CLK_ACTIVE:
+	default:
+		break;
+	}
+	/* Pending response for a message */
+	for (i = 0; i < ctrl->last_tid; i++) {
+		if (ctrl->txnt[i]) {
+			pr_info("slim_clk_pause: txn-rsp for %d pending\n", i);
+			mutex_unlock(&ctrl->m_ctrl);
+			return -EBUSY;
+		}
+		}
+	}
+	ctrl->clk_state = SLIM_CLK_ENTERING_PAUSE;
+	mutex_unlock(&ctrl->m_ctrl);
+
+	mutex_lock(&ctrl->sched.m_reconf);
+	/* Data channels active */
+	if (ctrl->sched.usedslots) {
+		pr_info("slim_clk_pause: data channel active\n");
+		ret = -EBUSY;
+		goto clk_pause_ret;
+	}
+
+	ret = slim_processtxn(ctrl, &txn, false);
+	if (ret)
+		goto clk_pause_ret;
+
+	txn.mc = SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_NEXT_PAUSE_CLOCK;
+	txn.len = 1;
+	txn.rl = 4;
+	txn.wbuf = &restart;
+	ret = slim_processtxn(ctrl, &txn, false);
+	if (ret)
+		goto clk_pause_ret;
+
+	txn.mc = SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_RECONFIGURE_NOW;
+	txn.len = 0;
+	txn.rl = 3;
+	txn.wbuf = NULL;
+	ret = slim_processtxn(ctrl, &txn, false);
+	if (ret)
+		goto clk_pause_ret;
+
+clk_pause_ret:
+	if (ret)
+		ctrl->clk_state = SLIM_CLK_PAUSE_FAILED;
+	else
+		ctrl->clk_state = SLIM_CLK_PAUSED;
+	complete(&ctrl->pause_comp);
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return ret;
+}
+EXPORT_SYMBOL(slim_ctrl_clk_pause);
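+
+/*
+ * Usage sketch (illustrative): a controller driver might invoke this from
+ * its suspend and resume paths. The wrapper names below are assumptions for
+ * illustration; SLIM_CLK_UNSPECIFIED requests restart on any activity.
+ *
+ *	static int example_ctrl_suspend(struct slim_controller *ctrl)
+ *	{
+ *		return slim_ctrl_clk_pause(ctrl, false, SLIM_CLK_UNSPECIFIED);
+ *	}
+ *
+ *	static int example_ctrl_resume(struct slim_controller *ctrl)
+ *	{
+ *		return slim_ctrl_clk_pause(ctrl, true, 0);
+ *	}
+ */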
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Slimbus module");
+MODULE_ALIAS("platform:slimbus");
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index c99a5d6..30d2a7f 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -247,6 +247,14 @@
 	 use this memory and no unauthorized access is made to the
 	 buffer
 
+config QCOM_EARLY_RANDOM
+	bool "Initialize random pool very early"
+	help
+	  The standard random pool may not initialize until late in the boot
+	  process, which means that any calls to get random numbers before then
+	  may not be truly random. Select this option to make an early call
+	  to get some random data to put in the pool. If unsure, say N.
+
 config MSM_SMEM
 	depends on ARCH_QCOM
 	depends on REMOTE_SPINLOCK_MSM
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 943efd7..c55ebf1 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -13,6 +13,7 @@
 obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
 CFLAGS_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
 obj-$(CONFIG_QCOM_SCM)  +=      scm.o scm-boot.o
+obj-$(CONFIG_QCOM_EARLY_RANDOM)	+= early_random.o
 obj-$(CONFIG_SOC_BUS) += socinfo.o
 obj-$(CONFIG_MSM_BOOT_STATS) += boot_stats.o
 obj-$(CONFIG_MSM_CORE_HANG_DETECT) += core_hang_detect.o
diff --git a/drivers/soc/qcom/early_random.c b/drivers/soc/qcom/early_random.c
new file mode 100644
index 0000000..0c562ec
--- /dev/null
+++ b/drivers/soc/qcom/early_random.c
@@ -0,0 +1,63 @@
+/* Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/io.h>
+
+#include <soc/qcom/scm.h>
+
+#include <asm/cacheflush.h>
+
+#define TZ_SVC_CRYPTO	10
+#define PRNG_CMD_ID	0x01
+
+struct tz_prng_data {
+	uint8_t		*out_buf;
+	uint32_t	out_buf_sz;
+} __packed;
+
+DEFINE_SCM_BUFFER(common_scm_buf)
+#define RANDOM_BUFFER_SIZE	PAGE_SIZE
+char random_buffer[RANDOM_BUFFER_SIZE] __aligned(PAGE_SIZE);
+
+void __init init_random_pool(void)
+{
+	struct tz_prng_data data;
+	int ret;
+	u32 resp;
+	struct scm_desc desc;
+
+	data.out_buf = (uint8_t *) virt_to_phys(random_buffer);
+	desc.args[0] = (unsigned long) data.out_buf;
+	desc.args[1] = data.out_buf_sz = SZ_512;
+	desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+
+	dmac_flush_range(random_buffer, random_buffer + RANDOM_BUFFER_SIZE);
+
+	if (!is_scm_armv8())
+		ret = scm_call_noalloc(TZ_SVC_CRYPTO, PRNG_CMD_ID, &data,
+				sizeof(data), &resp, sizeof(resp),
+				common_scm_buf,
+				SCM_BUFFER_SIZE(common_scm_buf));
+	else
+		ret = scm_call2(SCM_SIP_FNID(TZ_SVC_CRYPTO, PRNG_CMD_ID),
+					&desc);
+
+	if (!ret) {
+		dmac_inv_range(random_buffer, random_buffer +
+						RANDOM_BUFFER_SIZE);
+		add_device_randomness(random_buffer, SZ_512);
+	}
+}
+
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index f323db2..a30c8e3 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -230,6 +230,9 @@
 config USB_F_CCID
 	tristate
 
+config USB_F_GSI
+	tristate
+
 # this first set of drivers all depend on bulk-capable hardware.
 
 config USB_CONFIGFS
@@ -539,6 +542,13 @@
 	  USB CCID function driver creates transport layer between the
 	  userspace CCID component and the Windows Host.
 
+config USB_CONFIGFS_F_GSI
+	bool "USB GSI function"
+	select USB_F_GSI
+	depends on USB_CONFIGFS
+	help
+	  Generic function driver to support h/w acceleration to IPA over GSI.
+
 choice
 	tristate "USB Gadget Drivers"
 	default USB_ETH
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index 637c419..e78080c 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -60,3 +60,5 @@
 obj-$(CONFIG_USB_F_CDEV)	+= usb_f_cdev.o
 usb_f_ccid-y			:= f_ccid.o
 obj-$(CONFIG_USB_F_CCID)   	+= usb_f_ccid.o
+usb_f_gsi-y			:= f_gsi.o rndis.o
+obj-$(CONFIG_USB_F_GSI)         += usb_f_gsi.o
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index 2ca16a57..cfb57ac 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -136,12 +136,47 @@
 	.bInterfaceProtocol     = 0,
 };
 
+static struct usb_endpoint_descriptor acc_superspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor acc_superspeed_in_comp_desc = {
+	.bLength =		sizeof(acc_superspeed_in_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_endpoint_descriptor acc_superspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor acc_superspeed_out_comp_desc = {
+	.bLength =		sizeof(acc_superspeed_out_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
 static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
 	.bLength                = USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType        = USB_DT_ENDPOINT,
 	.bEndpointAddress       = USB_DIR_IN,
 	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
-	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+	.wMaxPacketSize         = cpu_to_le16(512),
 };
 
 static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
@@ -149,7 +184,7 @@
 	.bDescriptorType        = USB_DT_ENDPOINT,
 	.bEndpointAddress       = USB_DIR_OUT,
 	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
-	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+	.wMaxPacketSize         = cpu_to_le16(512),
 };
 
 static struct usb_endpoint_descriptor acc_fullspeed_in_desc = {
@@ -180,6 +215,15 @@
 	NULL,
 };
 
+static struct usb_descriptor_header *ss_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_superspeed_in_desc,
+	(struct usb_descriptor_header *) &acc_superspeed_in_comp_desc,
+	(struct usb_descriptor_header *) &acc_superspeed_out_desc,
+	(struct usb_descriptor_header *) &acc_superspeed_out_comp_desc,
+	NULL,
+};
+
 static struct usb_string acc_string_defs[] = {
 	[INTERFACE_STRING_INDEX].s	= "Android Accessory Interface",
 	{  },	/* end of list */
@@ -772,6 +816,9 @@
 	.read = acc_read,
 	.write = acc_write,
 	.unlocked_ioctl = acc_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = acc_ioctl,
+#endif
 	.open = acc_open,
 	.release = acc_release,
 };
@@ -957,6 +1004,14 @@
 			acc_fullspeed_out_desc.bEndpointAddress;
 	}
 
+	/* support super speed hardware */
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		acc_superspeed_in_desc.bEndpointAddress =
+			acc_fullspeed_in_desc.bEndpointAddress;
+		acc_superspeed_out_desc.bEndpointAddress =
+			acc_fullspeed_out_desc.bEndpointAddress;
+	}
+
 	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
 			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
 			f->name, dev->ep_in->name, dev->ep_out->name);
@@ -1313,6 +1368,7 @@
 	dev->function.strings = acc_strings,
 	dev->function.fs_descriptors = fs_acc_descs;
 	dev->function.hs_descriptors = hs_acc_descs;
+	dev->function.ss_descriptors = ss_acc_descs;
 	dev->function.bind = acc_function_bind_configfs;
 	dev->function.unbind = acc_function_unbind;
 	dev->function.set_alt = acc_function_set_alt;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index cc718a4..cc3ac26 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3534,9 +3534,11 @@
 
 	ffs->func = func;
 	ret = ffs_func_eps_enable(func);
-	if (likely(ret >= 0))
+	if (likely(ret >= 0)) {
 		ffs_event_add(ffs, FUNCTIONFS_ENABLE);
-
+		/* Disable USB LPM later on bus_suspend */
+		usb_gadget_autopm_get_async(ffs->gadget);
+	}
 	ffs_log("exit: ret %d", ret);
 
 	return ret;
@@ -3544,8 +3546,13 @@
 
 static void ffs_func_disable(struct usb_function *f)
 {
+	struct ffs_function *func = ffs_func_from_usb(f);
+	struct ffs_data *ffs = func->ffs;
+
 	ffs_log("enter");
 	ffs_func_set_alt(f, 0, (unsigned)-1);
+	/* matching put to allow LPM on disconnect */
+	usb_gadget_autopm_put_async(ffs->gadget);
 	ffs_log("exit");
 }
 
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
new file mode 100644
index 0000000..03ee7ee
--- /dev/null
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -0,0 +1,3066 @@
+/* Copyright (c) 2015-2016, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "f_gsi.h"
+#include "rndis.h"
+
+#define log_event_err(x, ...) pr_err(x, ##__VA_ARGS__)
+#define log_event_dbg(x, ...) pr_debug(x, ##__VA_ARGS__)
+#define log_event_info(x, ...) pr_info(x, ##__VA_ARGS__)
+
+static unsigned int gsi_in_aggr_size;
+module_param(gsi_in_aggr_size, uint, 0644);
+MODULE_PARM_DESC(gsi_in_aggr_size,
+		"Aggr size of bus transfer to host");
+
+static unsigned int gsi_out_aggr_size;
+module_param(gsi_out_aggr_size, uint, 0644);
+MODULE_PARM_DESC(gsi_out_aggr_size,
+		"Aggr size of bus transfer to device");
+
+static unsigned int num_in_bufs = GSI_NUM_IN_BUFFERS;
+module_param(num_in_bufs, uint, 0644);
+MODULE_PARM_DESC(num_in_bufs,
+		"Number of IN buffers");
+
+static unsigned int num_out_bufs = GSI_NUM_OUT_BUFFERS;
+module_param(num_out_bufs, uint, 0644);
+MODULE_PARM_DESC(num_out_bufs,
+		"Number of OUT buffers");
+
+static struct workqueue_struct *ipa_usb_wq;
+
+static void gsi_rndis_ipa_reset_trigger(struct f_gsi *rndis);
+static void ipa_disconnect_handler(struct gsi_data_port *d_port);
+static int gsi_ctrl_send_notification(struct f_gsi *gsi,
+		enum gsi_ctrl_notify_state);
+static int gsi_alloc_trb_buffer(struct f_gsi *gsi);
+static void gsi_free_trb_buffer(struct f_gsi *gsi);
+
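+/*
+ * MAXQUEUELEN-slot ring buffer of events feeding the IPA state machine:
+ * post_event() is the producer, ipa_work_handler() consumes via
+ * read_event()/peek_event(). head == tail means empty; one slot is kept
+ * free to distinguish a full queue from an empty one.
+ */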
+static void post_event(struct gsi_data_port *port, u8 event)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->evt_q.q_lock, flags);
+
+	/* Check for overflow before advancing tail; one slot is kept
+	 * free so that head == tail always means "empty".
+	 */
+	if ((port->evt_q.tail + 1) % MAXQUEUELEN == port->evt_q.head) {
+		log_event_err("%s: event queue overflow error", __func__);
+		spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+		return;
+	}
+
+	/* Advance tail with wraparound and add event to queue */
+	port->evt_q.tail = (port->evt_q.tail + 1) % MAXQUEUELEN;
+	port->evt_q.event[port->evt_q.tail] = event;
+	spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+}
+
+static void __maybe_unused post_event_to_evt_queue(struct gsi_data_port *port,
+								u8 event)
+{
+	post_event(port, event);
+	queue_work(port->ipa_usb_wq, &port->usb_ipa_w);
+}
+
+static u8 read_event(struct gsi_data_port *port)
+{
+	u8 event;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->evt_q.q_lock, flags);
+	if (port->evt_q.head == port->evt_q.tail) {
+		log_event_dbg("%s: event queue empty", __func__);
+		spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+		return EVT_NONE;
+	}
+
+	port->evt_q.head++;
+	/* Check for wraparound and make room */
+	port->evt_q.head = port->evt_q.head % MAXQUEUELEN;
+
+	event = port->evt_q.event[port->evt_q.head];
+	spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+
+	return event;
+}
+
+static u8 peek_event(struct gsi_data_port *port)
+{
+	u8 event;
+	unsigned long flags;
+	u8 peek_index = 0;
+
+	spin_lock_irqsave(&port->evt_q.q_lock, flags);
+	if (port->evt_q.head == port->evt_q.tail) {
+		log_event_dbg("%s: event queue empty", __func__);
+		spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+		return EVT_NONE;
+	}
+
+	peek_index = (port->evt_q.head + 1) % MAXQUEUELEN;
+	event = port->evt_q.event[peek_index];
+	spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+
+	return event;
+}
+
+static void __maybe_unused reset_event_queue(struct gsi_data_port *port)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->evt_q.q_lock, flags);
+	port->evt_q.head = port->evt_q.tail = MAXQUEUELEN - 1;
+	memset(&port->evt_q.event[0], EVT_NONE, MAXQUEUELEN);
+	spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+}
+
+static int gsi_wakeup_host(struct f_gsi *gsi)
+{
+	int ret;
+	struct usb_gadget *gadget;
+	struct usb_function *func;
+
+	func = &gsi->function;
+	gadget = gsi->function.config->cdev->gadget;
+
+	log_event_dbg("Entering %s", __func__);
+
+	if (!gadget) {
+		log_event_err("FAILED: d_port->cdev->gadget == NULL");
+		return -ENODEV;
+	}
+
+	/*
+	 * In Super-Speed mode, a suspended function may issue remote wakeup
+	 * only if the host has enabled Function Remote Wakeup for it.
+	 * Note - We deviate here from the USB 3.0 spec and allow
+	 * non-suspended functions to issue remote wakeup even if the host
+	 * has not enabled it. This is done in order to support hosts that
+	 * are not fully USB 3.0 compliant.
+	 */
+	if ((gadget->speed == USB_SPEED_SUPER) && (func->func_is_suspended)) {
+		log_event_dbg("%s: Calling usb_func_wakeup", __func__);
+		ret = usb_func_wakeup(func);
+	} else {
+		log_event_dbg("%s: Calling usb_gadget_wakeup", __func__);
+		ret = usb_gadget_wakeup(gadget);
+	}
+
+	if ((ret == -EBUSY) || (ret == -EAGAIN))
+		log_event_dbg("RW delayed due to LPM exit.");
+	else if (ret)
+		log_event_err("wakeup failed. ret=%d.", ret);
+
+	return ret;
+}
+
+/*
+ * Callback invoked when the network interface is up and userspace is
+ * ready to answer DHCP requests, or on remote wakeup
+ */
+int ipa_usb_notify_cb(enum ipa_usb_notify_event event,
+	void *driver_data)
+{
+	struct f_gsi *gsi = driver_data;
+	unsigned long flags;
+
+	if (!gsi) {
+		log_event_err("%s: invalid driver data", __func__);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&gsi->d_port.lock, flags);
+
+	switch (event) {
+	case IPA_USB_DEVICE_READY:
+
+		if (gsi->d_port.net_ready_trigger) {
+			log_event_err("%s: Already triggered", __func__);
+			spin_unlock_irqrestore(&gsi->d_port.lock, flags);
+			return 1;
+		}
+
+		log_event_dbg("%s: Set net_ready_trigger", __func__);
+		gsi->d_port.net_ready_trigger = true;
+
+		if (gsi->prot_id == IPA_USB_ECM)
+			gsi_ctrl_send_notification(gsi,
+					GSI_CTRL_NOTIFY_CONNECT);
+
+		/*
+		 * Do not post EVT_CONNECTED for RNDIS.
+		 * Data path for RNDIS is enabled on EVT_HOST_READY.
+		 */
+		if (gsi->prot_id != IPA_USB_RNDIS) {
+			post_event(&gsi->d_port, EVT_CONNECTED);
+			queue_work(gsi->d_port.ipa_usb_wq,
+					&gsi->d_port.usb_ipa_w);
+		}
+		break;
+
+	case IPA_USB_REMOTE_WAKEUP:
+		gsi_wakeup_host(gsi);
+		break;
+
+	case IPA_USB_SUSPEND_COMPLETED:
+		post_event(&gsi->d_port, EVT_IPA_SUSPEND);
+		queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
+		break;
+	}
+
+	spin_unlock_irqrestore(&gsi->d_port.lock, flags);
+	return 1;
+}
+
+static int ipa_connect_channels(struct gsi_data_port *d_port)
+{
+	int ret;
+	struct f_gsi *gsi = d_port_to_gsi(d_port);
+	struct ipa_usb_xdci_chan_params *in_params =
+				&d_port->ipa_in_channel_params;
+	struct ipa_usb_xdci_chan_params *out_params =
+				&d_port->ipa_out_channel_params;
+	struct ipa_usb_xdci_connect_params *conn_params =
+				&d_port->ipa_conn_pms;
+	struct usb_composite_dev *cdev = gsi->function.config->cdev;
+	struct gsi_channel_info gsi_channel_info;
+	struct ipa_req_chan_out_params ipa_in_channel_out_params;
+	struct ipa_req_chan_out_params ipa_out_channel_out_params;
+
+	usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
+		GSI_EP_OP_PREPARE_TRBS);
+	usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
+			GSI_EP_OP_STARTXFER);
+	d_port->in_xfer_rsc_index = usb_gsi_ep_op(d_port->in_ep, NULL,
+			GSI_EP_OP_GET_XFER_IDX);
+
+	memset(in_params, 0x0, sizeof(*in_params));
+	gsi_channel_info.ch_req = &d_port->in_request;
+	usb_gsi_ep_op(d_port->in_ep, (void *)&gsi_channel_info,
+			GSI_EP_OP_GET_CH_INFO);
+	in_params->client =
+		(gsi->prot_id != IPA_USB_DIAG) ? IPA_CLIENT_USB_CONS :
+						IPA_CLIENT_USB_DPL_CONS;
+	in_params->ipa_ep_cfg.mode.mode = IPA_BASIC;
+	in_params->teth_prot = gsi->prot_id;
+	in_params->gevntcount_low_addr =
+		gsi_channel_info.gevntcount_low_addr;
+	in_params->gevntcount_hi_addr =
+		gsi_channel_info.gevntcount_hi_addr;
+	in_params->dir = GSI_CHAN_DIR_FROM_GSI;
+	in_params->xfer_ring_len = gsi_channel_info.xfer_ring_len;
+	in_params->xfer_ring_base_addr = gsi_channel_info.xfer_ring_base_addr;
+	in_params->xfer_scratch.last_trb_addr_iova =
+					gsi_channel_info.last_trb_addr;
+	in_params->xfer_ring_base_addr = in_params->xfer_ring_base_addr_iova =
+					gsi_channel_info.xfer_ring_base_addr;
+	in_params->data_buff_base_len = d_port->in_request.buf_len *
+					d_port->in_request.num_bufs;
+	in_params->data_buff_base_addr = in_params->data_buff_base_addr_iova =
+					d_port->in_request.dma;
+	in_params->xfer_scratch.const_buffer_size =
+		gsi_channel_info.const_buffer_size;
+	in_params->xfer_scratch.depcmd_low_addr =
+		gsi_channel_info.depcmd_low_addr;
+	in_params->xfer_scratch.depcmd_hi_addr =
+		gsi_channel_info.depcmd_hi_addr;
+
+	if (d_port->out_ep) {
+		usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
+			GSI_EP_OP_PREPARE_TRBS);
+		usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
+				GSI_EP_OP_STARTXFER);
+		d_port->out_xfer_rsc_index =
+			usb_gsi_ep_op(d_port->out_ep,
+				NULL, GSI_EP_OP_GET_XFER_IDX);
+		memset(out_params, 0x0, sizeof(*out_params));
+		gsi_channel_info.ch_req = &d_port->out_request;
+		usb_gsi_ep_op(d_port->out_ep, (void *)&gsi_channel_info,
+				GSI_EP_OP_GET_CH_INFO);
+
+		out_params->client = IPA_CLIENT_USB_PROD;
+		out_params->ipa_ep_cfg.mode.mode = IPA_BASIC;
+		out_params->teth_prot = gsi->prot_id;
+		out_params->gevntcount_low_addr =
+			gsi_channel_info.gevntcount_low_addr;
+		out_params->gevntcount_hi_addr =
+			gsi_channel_info.gevntcount_hi_addr;
+		out_params->dir = GSI_CHAN_DIR_TO_GSI;
+		out_params->xfer_ring_len =
+			gsi_channel_info.xfer_ring_len;
+		out_params->xfer_ring_base_addr =
+			out_params->xfer_ring_base_addr_iova =
+			gsi_channel_info.xfer_ring_base_addr;
+		out_params->data_buff_base_len = d_port->out_request.buf_len *
+			d_port->out_request.num_bufs;
+		out_params->data_buff_base_addr =
+			out_params->data_buff_base_addr_iova =
+			d_port->out_request.dma;
+		out_params->xfer_scratch.last_trb_addr_iova =
+			gsi_channel_info.last_trb_addr;
+		out_params->xfer_scratch.const_buffer_size =
+			gsi_channel_info.const_buffer_size;
+		out_params->xfer_scratch.depcmd_low_addr =
+			gsi_channel_info.depcmd_low_addr;
+		out_params->xfer_scratch.depcmd_hi_addr =
+			gsi_channel_info.depcmd_hi_addr;
+	}
+
+	/* Populate connection params */
+	conn_params->max_pkt_size =
+		(cdev->gadget->speed == USB_SPEED_SUPER) ?
+		IPA_USB_SUPER_SPEED_1024B : IPA_USB_HIGH_SPEED_512B;
+	conn_params->ipa_to_usb_xferrscidx =
+			d_port->in_xfer_rsc_index;
+	conn_params->usb_to_ipa_xferrscidx =
+			d_port->out_xfer_rsc_index;
+	conn_params->usb_to_ipa_xferrscidx_valid =
+			(gsi->prot_id != IPA_USB_DIAG) ? true : false;
+	conn_params->ipa_to_usb_xferrscidx_valid = true;
+	conn_params->teth_prot = gsi->prot_id;
+	conn_params->teth_prot_params.max_xfer_size_bytes_to_dev = 23700;
+	if (gsi_out_aggr_size)
+		conn_params->teth_prot_params.max_xfer_size_bytes_to_dev
+				= gsi_out_aggr_size;
+	else
+		conn_params->teth_prot_params.max_xfer_size_bytes_to_dev
+				= d_port->out_aggr_size;
+	if (gsi_in_aggr_size)
+		conn_params->teth_prot_params.max_xfer_size_bytes_to_host
+					= gsi_in_aggr_size;
+	else
+		conn_params->teth_prot_params.max_xfer_size_bytes_to_host
+					= d_port->in_aggr_size;
+	conn_params->teth_prot_params.max_packet_number_to_dev =
+		DEFAULT_MAX_PKT_PER_XFER;
+	conn_params->max_supported_bandwidth_mbps =
+		(cdev->gadget->speed == USB_SPEED_SUPER) ? 3600 : 400;
+
+	memset(&ipa_in_channel_out_params, 0x0,
+				sizeof(ipa_in_channel_out_params));
+	memset(&ipa_out_channel_out_params, 0x0,
+				sizeof(ipa_out_channel_out_params));
+
+	log_event_dbg("%s: Calling xdci_connect", __func__);
+	ret = ipa_usb_xdci_connect(out_params, in_params,
+					&ipa_out_channel_out_params,
+					&ipa_in_channel_out_params,
+					conn_params);
+	if (ret) {
+		log_event_err("%s: IPA connect failed %d", __func__, ret);
+		return ret;
+	}
+	log_event_dbg("%s: xdci_connect done", __func__);
+
+	log_event_dbg("%s: IN CH HDL %x", __func__,
+			ipa_in_channel_out_params.clnt_hdl);
+	log_event_dbg("%s: IN CH DBL addr %x", __func__,
+			ipa_in_channel_out_params.db_reg_phs_addr_lsb);
+
+	log_event_dbg("%s: OUT CH HDL %x", __func__,
+			ipa_out_channel_out_params.clnt_hdl);
+	log_event_dbg("%s: OUT CH DBL addr %x", __func__,
+			ipa_out_channel_out_params.db_reg_phs_addr_lsb);
+
+	d_port->in_channel_handle = ipa_in_channel_out_params.clnt_hdl;
+	d_port->in_db_reg_phs_addr_lsb =
+		ipa_in_channel_out_params.db_reg_phs_addr_lsb;
+	d_port->in_db_reg_phs_addr_msb =
+		ipa_in_channel_out_params.db_reg_phs_addr_msb;
+
+	if (gsi->prot_id != IPA_USB_DIAG) {
+		d_port->out_channel_handle =
+			ipa_out_channel_out_params.clnt_hdl;
+		d_port->out_db_reg_phs_addr_lsb =
+			ipa_out_channel_out_params.db_reg_phs_addr_lsb;
+		d_port->out_db_reg_phs_addr_msb =
+			ipa_out_channel_out_params.db_reg_phs_addr_msb;
+	}
+	return ret;
+}
+
+static void ipa_data_path_enable(struct gsi_data_port *d_port)
+{
+	struct f_gsi *gsi = d_port_to_gsi(d_port);
+	struct usb_gsi_request req;
+	u64 dbl_register_addr;
+	bool block_db = false;
+
+	log_event_dbg("in_db_reg_phs_addr_lsb = %x",
+			gsi->d_port.in_db_reg_phs_addr_lsb);
+	usb_gsi_ep_op(gsi->d_port.in_ep,
+			(void *)&gsi->d_port.in_db_reg_phs_addr_lsb,
+			GSI_EP_OP_STORE_DBL_INFO);
+
+	if (gsi->d_port.out_ep) {
+		log_event_dbg("out_db_reg_phs_addr_lsb = %x",
+				gsi->d_port.out_db_reg_phs_addr_lsb);
+		usb_gsi_ep_op(gsi->d_port.out_ep,
+				(void *)&gsi->d_port.out_db_reg_phs_addr_lsb,
+				GSI_EP_OP_STORE_DBL_INFO);
+
+		usb_gsi_ep_op(gsi->d_port.out_ep, &gsi->d_port.out_request,
+				GSI_EP_OP_ENABLE_GSI);
+	}
+
+	/* Unblock doorbell to GSI */
+	usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+				GSI_EP_OP_SET_CLR_BLOCK_DBL);
+
+	dbl_register_addr = gsi->d_port.in_db_reg_phs_addr_msb;
+	dbl_register_addr = dbl_register_addr << 32;
+	dbl_register_addr =
+		dbl_register_addr | gsi->d_port.in_db_reg_phs_addr_lsb;
+
+	/* use temp gsi request to pass 64 bit dbl reg addr and num_bufs */
+	req.buf_base_addr = &dbl_register_addr;
+
+	req.num_bufs = gsi->d_port.in_request.num_bufs;
+	usb_gsi_ep_op(gsi->d_port.in_ep, &req, GSI_EP_OP_RING_IN_DB);
+
+	if (gsi->d_port.out_ep) {
+		usb_gsi_ep_op(gsi->d_port.out_ep, &gsi->d_port.out_request,
+			GSI_EP_OP_UPDATEXFER);
+	}
+}
+
+static void ipa_disconnect_handler(struct gsi_data_port *d_port)
+{
+	struct f_gsi *gsi = d_port_to_gsi(d_port);
+	bool block_db = true;
+
+	log_event_dbg("%s: EP Disable for data", __func__);
+
+	/*
+	 * Block the doorbell to GSI to prevent the USB wrapper from
+	 * ringing it while the IPA clocks are off.
+	 */
+	usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+				GSI_EP_OP_SET_CLR_BLOCK_DBL);
+
+	usb_gsi_ep_op(gsi->d_port.in_ep, NULL, GSI_EP_OP_DISABLE);
+
+	if (gsi->d_port.out_ep)
+		usb_gsi_ep_op(gsi->d_port.out_ep, NULL, GSI_EP_OP_DISABLE);
+	gsi->d_port.net_ready_trigger = false;
+}
+
+static void ipa_disconnect_work_handler(struct gsi_data_port *d_port)
+{
+	int ret;
+	struct f_gsi *gsi = d_port_to_gsi(d_port);
+
+	log_event_dbg("%s: Calling xdci_disconnect", __func__);
+
+	ret = ipa_usb_xdci_disconnect(gsi->d_port.out_channel_handle,
+			gsi->d_port.in_channel_handle, gsi->prot_id);
+	if (ret)
+		log_event_err("%s: IPA disconnect failed %d",
+				__func__, ret);
+
+	log_event_dbg("%s: xdci_disconnect done", __func__);
+
+	/* invalidate channel handles */
+	gsi->d_port.in_channel_handle = -EINVAL;
+	gsi->d_port.out_channel_handle = -EINVAL;
+
+	usb_gsi_ep_op(gsi->d_port.in_ep, NULL, GSI_EP_OP_FREE_TRBS);
+
+	if (gsi->d_port.out_ep)
+		usb_gsi_ep_op(gsi->d_port.out_ep, NULL, GSI_EP_OP_FREE_TRBS);
+
+	/* free buffers allocated with each TRB */
+	gsi_free_trb_buffer(gsi);
+}
+
+static int ipa_suspend_work_handler(struct gsi_data_port *d_port)
+{
+	int ret = 0;
+	bool block_db, f_suspend;
+	struct f_gsi *gsi = d_port_to_gsi(d_port);
+
+	f_suspend = gsi->function.func_wakeup_allowed;
+	if (!usb_gsi_ep_op(gsi->d_port.in_ep, (void *) &f_suspend,
+				GSI_EP_OP_CHECK_FOR_SUSPEND)) {
+		ret = -EFAULT;
+		goto done;
+	}
+	log_event_dbg("%s: Calling xdci_suspend", __func__);
+
+	ret = ipa_usb_xdci_suspend(gsi->d_port.out_channel_handle,
+			gsi->d_port.in_channel_handle, gsi->prot_id, true);
+	if (!ret) {
+		d_port->sm_state = STATE_SUSPENDED;
+		log_event_dbg("%s: STATE SUSPENDED", __func__);
+		goto done;
+	}
+
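+	/*
+	 * xdci_suspend failed: unblock the doorbell and wake the host so
+	 * that it resumes us rather than leaving data pending.
+	 */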
+	if (ret == -EFAULT) {
+		block_db = false;
+		usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+					GSI_EP_OP_SET_CLR_BLOCK_DBL);
+		gsi_wakeup_host(gsi);
+	} else if (ret == -EINPROGRESS) {
+		d_port->sm_state = STATE_SUSPEND_IN_PROGRESS;
+	} else {
+		log_event_err("%s: Error %d for %d", __func__, ret,
+							gsi->prot_id);
+	}
+
+	log_event_dbg("%s: xdci_suspend ret %d", __func__, ret);
+
+done:
+	return ret;
+}
+
+static void ipa_resume_work_handler(struct gsi_data_port *d_port)
+{
+	bool block_db;
+	struct f_gsi *gsi = d_port_to_gsi(d_port);
+	int ret;
+
+	log_event_dbg("%s: Calling xdci_resume", __func__);
+
+	ret = ipa_usb_xdci_resume(gsi->d_port.out_channel_handle,
+					gsi->d_port.in_channel_handle,
+					gsi->prot_id);
+	if (ret)
+		log_event_dbg("%s: xdci_resume ret %d", __func__, ret);
+
+	log_event_dbg("%s: xdci_resume done", __func__);
+
+	block_db = false;
+	usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+			GSI_EP_OP_SET_CLR_BLOCK_DBL);
+}
+
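+/*
+ * IPA data path state machine; summary of the transitions implemented
+ * below:
+ *
+ *   INITIALIZED --CONNECT_IN_PROGRESS--> CONNECT_IN_PROGRESS
+ *   INITIALIZED --HOST_READY--> CONNECTED (RNDIS flow control case)
+ *   CONNECT_IN_PROGRESS --HOST_READY/CONNECTED--> CONNECTED
+ *   CONNECTED --SUSPEND--> SUSPEND_IN_PROGRESS or SUSPENDED
+ *   SUSPEND_IN_PROGRESS --IPA_SUSPEND--> SUSPENDED
+ *   SUSPEND_IN_PROGRESS/SUSPENDED --RESUMED--> CONNECTED
+ *   connected states --DISCONNECTED--> INITIALIZED
+ */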
+static void ipa_work_handler(struct work_struct *w)
+{
+	struct gsi_data_port *d_port = container_of(w, struct gsi_data_port,
+						  usb_ipa_w);
+	u8 event;
+	int ret = 0;
+	struct usb_gadget *gadget = d_port->gadget;
+	struct device *dev;
+	struct device *gad_dev;
+	struct f_gsi *gsi;
+	bool block_db;
+
+	event = read_event(d_port);
+
+	log_event_dbg("%s: event = %x sm_state %x", __func__,
+			event, d_port->sm_state);
+
+	if (!gadget) {
+		log_event_err("%s: gadget is NULL", __func__);
+		return;
+	}
+
+	/* dev is embedded in gadget, so only its parent needs checking */
+	dev = &gadget->dev;
+	if (!dev->parent) {
+		log_event_err("%s: dev->parent is NULL", __func__);
+		return;
+	}
+	gad_dev = dev->parent;
+
+	gsi = d_port_to_gsi(d_port);
+
+	switch (d_port->sm_state) {
+	case STATE_UNINITIALIZED:
+		break;
+	case STATE_INITIALIZED:
+		if (event == EVT_CONNECT_IN_PROGRESS) {
+			usb_gadget_autopm_get(d_port->gadget);
+			log_event_dbg("%s: get = %d", __func__,
+				atomic_read(&gad_dev->power.usage_count));
+			/* allocate buffers used with each TRB */
+			ret = gsi_alloc_trb_buffer(gsi);
+			if (ret) {
+				log_event_err("%s: gsi_alloc_trb_failed\n",
+								__func__);
+				break;
+			}
+			ipa_connect_channels(d_port);
+			d_port->sm_state = STATE_CONNECT_IN_PROGRESS;
+			log_event_dbg("%s: ST_INIT_EVT_CONN_IN_PROG",
+					__func__);
+		} else if (event == EVT_HOST_READY) {
+			/*
+			 * When in a composition such as RNDIS + ADB,
+			 * RNDIS host sends a GEN_CURRENT_PACKET_FILTER msg
+			 * to enable/disable flow control eg. during RNDIS
+			 * adaptor disable/enable from device manager.
+			 * In the case of the msg to disable flow control,
+			 * connect IPA channels and enable data path.
+			 * EVT_HOST_READY is posted to the state machine
+			 * in the handler for this msg.
+			 */
+			usb_gadget_autopm_get(d_port->gadget);
+			log_event_dbg("%s: get = %d", __func__,
+				atomic_read(&gad_dev->power.usage_count));
+			/* allocate buffers used with each TRB */
+			ret = gsi_alloc_trb_buffer(gsi);
+			if (ret) {
+				log_event_err("%s: gsi_alloc_trb_failed\n",
+								__func__);
+				break;
+			}
+			ipa_connect_channels(d_port);
+			ipa_data_path_enable(d_port);
+			d_port->sm_state = STATE_CONNECTED;
+			log_event_dbg("%s: ST_INIT_EVT_HOST_READY", __func__);
+		}
+		break;
+	case STATE_CONNECT_IN_PROGRESS:
+		if (event == EVT_HOST_READY) {
+			ipa_data_path_enable(d_port);
+			d_port->sm_state = STATE_CONNECTED;
+			log_event_dbg("%s: ST_CON_IN_PROG_EVT_HOST_READY",
+					 __func__);
+		} else if (event == EVT_CONNECTED) {
+			ipa_data_path_enable(d_port);
+			d_port->sm_state = STATE_CONNECTED;
+			log_event_dbg("%s: ST_CON_IN_PROG_EVT_CON %d",
+					__func__, __LINE__);
+		} else if (event == EVT_SUSPEND) {
+			if (peek_event(d_port) == EVT_DISCONNECTED) {
+				read_event(d_port);
+				ipa_disconnect_work_handler(d_port);
+				d_port->sm_state = STATE_INITIALIZED;
+				usb_gadget_autopm_put_async(d_port->gadget);
+				log_event_dbg("%s: ST_CON_IN_PROG_EVT_SUS_DIS",
+						__func__);
+				log_event_dbg("%s: put_async1 = %d", __func__,
+						atomic_read(
+						&gad_dev->power.usage_count));
+				break;
+			}
+			ret = ipa_suspend_work_handler(d_port);
+			if (!ret) {
+				usb_gadget_autopm_put_async(d_port->gadget);
+				log_event_dbg("%s: ST_CON_IN_PROG_EVT_SUS",
+						__func__);
+				log_event_dbg("%s: put_async2 = %d", __func__,
+						atomic_read(
+						&gad_dev->power.usage_count));
+			}
+		} else if (event == EVT_DISCONNECTED) {
+			ipa_disconnect_work_handler(d_port);
+			d_port->sm_state = STATE_INITIALIZED;
+			usb_gadget_autopm_put_async(d_port->gadget);
+			log_event_dbg("%s: ST_CON_IN_PROG_EVT_DIS",
+						__func__);
+			log_event_dbg("%s: put_async3 = %d",
+					__func__, atomic_read(
+						&gad_dev->power.usage_count));
+		}
+		break;
+	case STATE_CONNECTED:
+		if (event == EVT_DISCONNECTED || event == EVT_HOST_NRDY) {
+			if (peek_event(d_port) == EVT_HOST_READY) {
+				read_event(d_port);
+				log_event_dbg("%s: NO_OP NRDY_RDY", __func__);
+				break;
+			}
+
+			if (event == EVT_HOST_NRDY) {
+				log_event_dbg("%s: ST_CON_HOST_NRDY\n",
+								__func__);
+				block_db = true;
+				/* stop USB ringing doorbell to GSI(OUT_EP) */
+				usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+						GSI_EP_OP_SET_CLR_BLOCK_DBL);
+				gsi_rndis_ipa_reset_trigger(gsi);
+				usb_gsi_ep_op(d_port->in_ep, NULL,
+						GSI_EP_OP_ENDXFER);
+				usb_gsi_ep_op(d_port->out_ep, NULL,
+						GSI_EP_OP_ENDXFER);
+			}
+
+			ipa_disconnect_work_handler(d_port);
+			d_port->sm_state = STATE_INITIALIZED;
+			usb_gadget_autopm_put_async(d_port->gadget);
+			log_event_dbg("%s: ST_CON_EVT_DIS", __func__);
+			log_event_dbg("%s: put_async4 = %d",
+					__func__, atomic_read(
+						&gad_dev->power.usage_count));
+		} else if (event == EVT_SUSPEND) {
+			if (peek_event(d_port) == EVT_DISCONNECTED) {
+				read_event(d_port);
+				ipa_disconnect_work_handler(d_port);
+				d_port->sm_state = STATE_INITIALIZED;
+				usb_gadget_autopm_put_async(d_port->gadget);
+				log_event_dbg("%s: ST_CON_EVT_SUS_DIS",
+						__func__);
+				log_event_dbg("%s: put_async5 = %d",
+						__func__, atomic_read(
+						&gad_dev->power.usage_count));
+				break;
+			}
+			ret = ipa_suspend_work_handler(d_port);
+			if (!ret) {
+				usb_gadget_autopm_put_async(d_port->gadget);
+				log_event_dbg("%s: ST_CON_EVT_SUS",
+						__func__);
+				log_event_dbg("%s: put_async6 = %d",
+						__func__, atomic_read(
+						&gad_dev->power.usage_count));
+			}
+		} else if (event == EVT_CONNECTED) {
+			d_port->sm_state = STATE_CONNECTED;
+			log_event_dbg("%s: ST_CON_EVT_CON", __func__);
+		}
+		break;
+	case STATE_DISCONNECTED:
+		if (event == EVT_CONNECT_IN_PROGRESS) {
+			ipa_connect_channels(d_port);
+			d_port->sm_state = STATE_CONNECT_IN_PROGRESS;
+			log_event_dbg("%s: ST_DIS_EVT_CON_IN_PROG", __func__);
+		} else if (event == EVT_UNINITIALIZED) {
+			d_port->sm_state = STATE_UNINITIALIZED;
+			log_event_dbg("%s: ST_DIS_EVT_UNINIT", __func__);
+		}
+		break;
+	case STATE_SUSPEND_IN_PROGRESS:
+		if (event == EVT_IPA_SUSPEND) {
+			d_port->sm_state = STATE_SUSPENDED;
+			usb_gadget_autopm_put_async(d_port->gadget);
+			log_event_dbg("%s: ST_SUS_IN_PROG_EVT_IPA_SUS",
+					__func__);
+			log_event_dbg("%s: put_async6 = %d",
+						__func__, atomic_read(
+						&gad_dev->power.usage_count));
+		} else	if (event == EVT_RESUMED) {
+			ipa_resume_work_handler(d_port);
+			d_port->sm_state = STATE_CONNECTED;
+			/*
+			 * Increment usage count here to disallow gadget
+			 * parent suspend. This counter will decrement
+			 * after IPA disconnect is done in disconnect work
+			 * (due to cable disconnect) or in suspended state.
+			 */
+			usb_gadget_autopm_get_noresume(d_port->gadget);
+			log_event_dbg("%s: ST_SUS_IN_PROG_EVT_RES", __func__);
+			log_event_dbg("%s: get_nores1 = %d", __func__,
+					atomic_read(
+						&gad_dev->power.usage_count));
+		} else if (event == EVT_DISCONNECTED) {
+			ipa_disconnect_work_handler(d_port);
+			d_port->sm_state = STATE_INITIALIZED;
+			usb_gadget_autopm_put_async(d_port->gadget);
+			log_event_dbg("%s: ST_SUS_IN_PROG_EVT_DIS", __func__);
+			log_event_dbg("%s: put_async7 = %d", __func__,
+					atomic_read(
+						&gad_dev->power.usage_count));
+		}
+		break;
+
+	case STATE_SUSPENDED:
+		if (event == EVT_RESUMED) {
+			usb_gadget_autopm_get(d_port->gadget);
+			log_event_dbg("%s: ST_SUS_EVT_RES", __func__);
+			log_event_dbg("%s: get = %d", __func__,
+				atomic_read(&gad_dev->power.usage_count));
+			ipa_resume_work_handler(d_port);
+			d_port->sm_state = STATE_CONNECTED;
+		} else if (event == EVT_DISCONNECTED) {
+			ipa_disconnect_work_handler(d_port);
+			d_port->sm_state = STATE_INITIALIZED;
+			log_event_dbg("%s: ST_SUS_EVT_DIS", __func__);
+		}
+		break;
+	default:
+		log_event_dbg("%s: Invalid state to SM", __func__);
+	}
+
+	if (peek_event(d_port) != EVT_NONE) {
+		log_event_dbg("%s: New events to process", __func__);
+		queue_work(d_port->ipa_usb_wq, &d_port->usb_ipa_w);
+	}
+}
+
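+/*
+ * Allocate a control packet and its payload buffer. Returns
+ * ERR_PTR(-ENOMEM) on failure, so callers must check with IS_ERR().
+ */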
+static struct gsi_ctrl_pkt *gsi_ctrl_pkt_alloc(unsigned int len, gfp_t flags)
+{
+	struct gsi_ctrl_pkt *pkt;
+
+	pkt = kzalloc(sizeof(struct gsi_ctrl_pkt), flags);
+	if (!pkt)
+		return ERR_PTR(-ENOMEM);
+
+	pkt->buf = kmalloc(len, flags);
+	if (!pkt->buf) {
+		kfree(pkt);
+		return ERR_PTR(-ENOMEM);
+	}
+	pkt->len = len;
+
+	return pkt;
+}
+
+static void gsi_ctrl_pkt_free(struct gsi_ctrl_pkt *pkt)
+{
+	if (pkt) {
+		kfree(pkt->buf);
+		kfree(pkt);
+	}
+}
+
+static void gsi_ctrl_clear_cpkt_queues(struct f_gsi *gsi, bool skip_req_q)
+{
+	struct gsi_ctrl_pkt *cpkt = NULL;
+	struct list_head *act, *tmp;
+	unsigned long flags;
+
+	/*
+	 * Called both from process context (ioctl) and from the notify
+	 * request completion path, so use the irqsave variant like every
+	 * other user of this lock.
+	 */
+	spin_lock_irqsave(&gsi->c_port.lock, flags);
+	if (skip_req_q)
+		goto clean_resp_q;
+
+	list_for_each_safe(act, tmp, &gsi->c_port.cpkt_req_q) {
+		cpkt = list_entry(act, struct gsi_ctrl_pkt, list);
+		list_del(&cpkt->list);
+		gsi_ctrl_pkt_free(cpkt);
+	}
+clean_resp_q:
+	list_for_each_safe(act, tmp, &gsi->c_port.cpkt_resp_q) {
+		cpkt = list_entry(act, struct gsi_ctrl_pkt, list);
+		list_del(&cpkt->list);
+		gsi_ctrl_pkt_free(cpkt);
+	}
+	spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+}
+
+static int gsi_ctrl_send_cpkt_tomodem(struct f_gsi *gsi, void *buf, size_t len)
+{
+	unsigned long flags;
+	struct gsi_ctrl_port *c_port = &gsi->c_port;
+	struct gsi_ctrl_pkt *cpkt;
+
+	spin_lock_irqsave(&c_port->lock, flags);
+	/* drop cpkt if port is not open */
+	if (!gsi->c_port.is_open) {
+		log_event_dbg("%s: ctrl device %s is not open",
+			   __func__, gsi->c_port.name);
+		c_port->cpkt_drop_cnt++;
+		spin_unlock_irqrestore(&c_port->lock, flags);
+		return -ENODEV;
+	}
+
+	cpkt = gsi_ctrl_pkt_alloc(len, GFP_ATOMIC);
+	if (IS_ERR(cpkt)) {
+		log_event_err("%s: Reset func pkt allocation failed", __func__);
+		spin_unlock_irqrestore(&c_port->lock, flags);
+		return -ENOMEM;
+	}
+
+	memcpy(cpkt->buf, buf, len);
+	cpkt->len = len;
+
+	list_add_tail(&cpkt->list, &c_port->cpkt_req_q);
+	c_port->host_to_modem++;
+	spin_unlock_irqrestore(&c_port->lock, flags);
+
+	log_event_dbg("%s: Wake up read queue", __func__);
+	wake_up(&c_port->read_wq);
+
+	return 0;
+}
+
+static int gsi_ctrl_dev_open(struct inode *ip, struct file *fp)
+{
+	struct gsi_ctrl_port *c_port = container_of(fp->private_data,
+						struct gsi_ctrl_port,
+						ctrl_device);
+
+	if (!c_port) {
+		log_event_err("%s: gsi ctrl port %p", __func__, c_port);
+		return -ENODEV;
+	}
+
+	log_event_dbg("%s: open ctrl dev %s", __func__, c_port->name);
+
+	if (c_port->is_open) {
+		log_event_err("%s: Already opened", __func__);
+		return -EBUSY;
+	}
+
+	c_port->is_open = true;
+
+	return 0;
+}
+
+static int gsi_ctrl_dev_release(struct inode *ip, struct file *fp)
+{
+	struct gsi_ctrl_port *c_port = container_of(fp->private_data,
+						struct gsi_ctrl_port,
+						ctrl_device);
+
+	if (!c_port) {
+		log_event_err("%s: gsi ctrl port %p", __func__, c_port);
+		return -ENODEV;
+	}
+
+	log_event_dbg("close ctrl dev %s", c_port->name);
+
+	c_port->is_open = false;
+
+	return 0;
+}
+
+static ssize_t
+gsi_ctrl_dev_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
+{
+	struct gsi_ctrl_port *c_port = container_of(fp->private_data,
+						struct gsi_ctrl_port,
+						ctrl_device);
+
+	struct gsi_ctrl_pkt *cpkt = NULL;
+	unsigned long flags;
+	int ret = 0;
+
+	log_event_dbg("%s: Enter %zu", __func__, count);
+
+	if (!c_port) {
+		log_event_err("%s: gsi ctrl port %p", __func__, c_port);
+		return -ENODEV;
+	}
+
+	if (count > GSI_MAX_CTRL_PKT_SIZE) {
+		log_event_err("Large buffer size %zu, max allowed is %d",
+			count, GSI_MAX_CTRL_PKT_SIZE);
+		return -EINVAL;
+	}
+
+	/* block until a new packet is available */
+	spin_lock_irqsave(&c_port->lock, flags);
+	while (list_empty(&c_port->cpkt_req_q)) {
+		log_event_dbg("Requests list is empty. Wait.");
+		spin_unlock_irqrestore(&c_port->lock, flags);
+		ret = wait_event_interruptible(c_port->read_wq,
+			!list_empty(&c_port->cpkt_req_q));
+		if (ret < 0) {
+			log_event_err("Waiting failed");
+			return -ERESTARTSYS;
+		}
+		log_event_dbg("Received request packet");
+		spin_lock_irqsave(&c_port->lock, flags);
+	}
+
+	cpkt = list_first_entry(&c_port->cpkt_req_q, struct gsi_ctrl_pkt,
+							list);
+	list_del(&cpkt->list);
+	spin_unlock_irqrestore(&c_port->lock, flags);
+
+	if (cpkt->len > count) {
+		log_event_err("cpkt size large:%d > buf size:%zu",
+				cpkt->len, count);
+		gsi_ctrl_pkt_free(cpkt);
+		return -ENOMEM;
+	}
+
+	log_event_dbg("%s: cpkt size:%d", __func__, cpkt->len);
+
+	ret = copy_to_user(buf, cpkt->buf, cpkt->len);
+	if (ret) {
+		log_event_err("copy_to_user failed: err %d", ret);
+		ret = -EFAULT;
+	} else {
+		log_event_dbg("%s: copied %d bytes to user", __func__,
+							cpkt->len);
+		ret = cpkt->len;
+		c_port->copied_to_modem++;
+	}
+
+	gsi_ctrl_pkt_free(cpkt);
+
+	log_event_dbg("%s: Exit %zu", __func__, count);
+
+	return ret;
+}
+
+static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
+		size_t count, loff_t *pos)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct gsi_ctrl_pkt *cpkt;
+	struct gsi_ctrl_port *c_port = container_of(fp->private_data,
+						struct gsi_ctrl_port,
+						ctrl_device);
+	struct f_gsi *gsi = c_port_to_gsi(c_port);
+	struct usb_request *req = c_port->notify_req;
+
+	log_event_dbg("Enter %zu", count);
+
+	if (!c_port || !req || !req->buf) {
+		log_event_err("%s: c_port %p req %p req->buf %p",
+			__func__, c_port, req, req ? req->buf : req);
+		return -ENODEV;
+	}
+
+	if (!count || count > GSI_MAX_CTRL_PKT_SIZE) {
+		log_event_err("error: ctrl pkt length %zu", count);
+		return -EINVAL;
+	}
+
+	if (!atomic_read(&gsi->connected)) {
+		log_event_err("USB cable not connected\n");
+		return -ECONNRESET;
+	}
+
+	if (gsi->function.func_is_suspended &&
+			!gsi->function.func_wakeup_allowed) {
+		c_port->cpkt_drop_cnt++;
+		log_event_err("drop ctrl pkt of len %zu", count);
+		return -ENOTSUPP;
+	}
+
+	cpkt = gsi_ctrl_pkt_alloc(count, GFP_KERNEL);
+	if (IS_ERR(cpkt)) {
+		log_event_err("failed to allocate ctrl pkt");
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(cpkt->buf, buf, count);
+	if (ret) {
+		/* copy_from_user() returns the number of bytes not copied */
+		log_event_err("copy_from_user failed: %d bytes not copied",
+				ret);
+		gsi_ctrl_pkt_free(cpkt);
+		return -EFAULT;
+	}
+	c_port->copied_from_modem++;
+
+	spin_lock_irqsave(&c_port->lock, flags);
+	list_add_tail(&cpkt->list, &c_port->cpkt_resp_q);
+	spin_unlock_irqrestore(&c_port->lock, flags);
+
+	ret = gsi_ctrl_send_notification(gsi,
+			GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE);
+
+	c_port->modem_to_host++;
+	log_event_dbg("Exit %zu", count);
+
+	return ret ? ret : count;
+}
+
+static long gsi_ctrl_dev_ioctl(struct file *fp, unsigned int cmd,
+		unsigned long arg)
+{
+	struct gsi_ctrl_port *c_port = container_of(fp->private_data,
+						struct gsi_ctrl_port,
+						ctrl_device);
+	struct f_gsi *gsi = c_port_to_gsi(c_port);
+	struct ep_info info;
+	int val, ret = 0;
+
+	if (!c_port) {
+		log_event_err("%s: gsi ctrl port %p", __func__, c_port);
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case QTI_CTRL_MODEM_OFFLINE:
+		if (gsi->prot_id == IPA_USB_DIAG) {
+			log_event_dbg("%s:Modem Offline not handled", __func__);
+			goto exit_ioctl;
+		}
+		atomic_set(&c_port->ctrl_online, 0);
+		gsi_ctrl_send_notification(gsi, GSI_CTRL_NOTIFY_OFFLINE);
+		gsi_ctrl_clear_cpkt_queues(gsi, true);
+		break;
+	case QTI_CTRL_MODEM_ONLINE:
+		if (gsi->prot_id == IPA_USB_DIAG) {
+			log_event_dbg("%s:Modem Online not handled", __func__);
+			goto exit_ioctl;
+		}
+
+		atomic_set(&c_port->ctrl_online, 1);
+		break;
+	case QTI_CTRL_GET_LINE_STATE:
+		val = atomic_read(&gsi->connected);
+		ret = copy_to_user((void __user *)arg, &val, sizeof(val));
+		if (ret) {
+			log_event_err("copy_to_user fail LINE_STATE");
+			ret = -EFAULT;
+		}
+		log_event_dbg("%s: Sent line_state: %d for prot id:%d",
+				__func__,
+				atomic_read(&gsi->connected), gsi->prot_id);
+		break;
+	case QTI_CTRL_EP_LOOKUP:
+	case GSI_MBIM_EP_LOOKUP:
+		log_event_dbg("%s: EP_LOOKUP for prot id:%d", __func__,
+							gsi->prot_id);
+		if (!atomic_read(&gsi->connected)) {
+			log_event_dbg("EP_LOOKUP failed: not connected");
+			ret = -EAGAIN;
+			break;
+		}
+
+		if (gsi->prot_id == IPA_USB_DIAG &&
+				(gsi->d_port.in_channel_handle == -EINVAL)) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		if (gsi->d_port.in_channel_handle == -EINVAL &&
+			gsi->d_port.out_channel_handle == -EINVAL) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		info.ph_ep_info.ep_type = GSI_MBIM_DATA_EP_TYPE_HSUSB;
+		info.ph_ep_info.peripheral_iface_id = gsi->data_id;
+		info.ipa_ep_pair.cons_pipe_num =
+		(gsi->prot_id == IPA_USB_DIAG) ? -1 :
+				gsi->d_port.out_channel_handle;
+		info.ipa_ep_pair.prod_pipe_num = gsi->d_port.in_channel_handle;
+
+		log_event_dbg("%s: prot id :%d ep_type:%d intf:%d",
+				__func__, gsi->prot_id, info.ph_ep_info.ep_type,
+				info.ph_ep_info.peripheral_iface_id);
+
+		log_event_dbg("%s: ipa_cons_idx:%d ipa_prod_idx:%d",
+				__func__, info.ipa_ep_pair.cons_pipe_num,
+				info.ipa_ep_pair.prod_pipe_num);
+
+		ret = copy_to_user((void __user *)arg, &info,
+			sizeof(info));
+		if (ret) {
+			log_event_err("copy_to_user fail MBIM");
+			ret = -EFAULT;
+		}
+		break;
+	case GSI_MBIM_GET_NTB_SIZE:
+		ret = copy_to_user((void __user *)arg,
+			&gsi->d_port.ntb_info.ntb_input_size,
+			sizeof(gsi->d_port.ntb_info.ntb_input_size));
+		if (ret) {
+			log_event_err("copy_to_user fail NTB_SIZE");
+			ret = -EFAULT;
+		}
+		log_event_dbg("Sent NTB size %d",
+				gsi->d_port.ntb_info.ntb_input_size);
+		break;
+	case GSI_MBIM_GET_DATAGRAM_COUNT:
+		ret = copy_to_user((void __user *)arg,
+			&gsi->d_port.ntb_info.ntb_max_datagrams,
+			sizeof(gsi->d_port.ntb_info.ntb_max_datagrams));
+		if (ret) {
+			log_event_err("copy_to_user fail DATAGRAM");
+			ret = -EFAULT;
+		}
+		log_event_dbg("Sent NTB datagrams count %d",
+			gsi->d_port.ntb_info.ntb_max_datagrams);
+		break;
+	default:
+		log_event_err("wrong parameter");
+		ret = -EINVAL;
+	}
+
+exit_ioctl:
+	return ret;
+}
+
+static unsigned int gsi_ctrl_dev_poll(struct file *fp, poll_table *wait)
+{
+	struct gsi_ctrl_port *c_port = container_of(fp->private_data,
+						struct gsi_ctrl_port,
+						ctrl_device);
+	unsigned long flags;
+	unsigned int mask = 0;
+
+	if (!c_port) {
+		log_event_err("%s: gsi ctrl port %p", __func__, c_port);
+		/* poll handlers return an event mask, not a -errno */
+		return POLLERR;
+	}
+
+	poll_wait(fp, &c_port->read_wq, wait);
+
+	spin_lock_irqsave(&c_port->lock, flags);
+	if (!list_empty(&c_port->cpkt_req_q)) {
+		mask |= POLLIN | POLLRDNORM;
+		log_event_dbg("%s sets POLLIN for %s", __func__, c_port->name);
+	}
+	spin_unlock_irqrestore(&c_port->lock, flags);
+
+	return mask;
+}
+
+/* file operations for rmnet/mbim/dpl devices */
+static const struct file_operations gsi_ctrl_dev_fops = {
+	.owner = THIS_MODULE,
+	.open = gsi_ctrl_dev_open,
+	.release = gsi_ctrl_dev_release,
+	.read = gsi_ctrl_dev_read,
+	.write = gsi_ctrl_dev_write,
+	.unlocked_ioctl = gsi_ctrl_dev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = gsi_ctrl_dev_ioctl,
+#endif
+	.poll = gsi_ctrl_dev_poll,
+};
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
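+/*
+ * SS: 13 bursts * 1024 bytes * 8 uframes * 1000 ms * 8 bits ~= 852 Mbps
+ * HS: 13 packets * 512 bytes * 8 uframes * 1000 ms * 8 bits ~= 426 Mbps
+ * FS: 19 packets *  64 bytes * 1 frame   * 1000 ms * 8 bits ~= 9.7 Mbps
+ */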
+static unsigned int gsi_xfer_bitrate(struct usb_gadget *g)
+{
+	if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+		return 13 * 1024 * 8 * 1000 * 8;
+	else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return 13 * 512 * 8 * 1000 * 8;
+	else
+		return 19 * 64 * 1 * 1000 * 8;
+}
+
+static int gsi_function_ctrl_port_init(struct f_gsi *gsi)
+{
+	int ret;
+	int sz = GSI_CTRL_NAME_LEN;
+	bool ctrl_dev_create = true;
+
+	if (!gsi) {
+		log_event_err("%s: gsi prot ctx is NULL", __func__);
+		return -EINVAL;
+	}
+
+	INIT_LIST_HEAD(&gsi->c_port.cpkt_req_q);
+	INIT_LIST_HEAD(&gsi->c_port.cpkt_resp_q);
+
+	spin_lock_init(&gsi->c_port.lock);
+
+	init_waitqueue_head(&gsi->c_port.read_wq);
+
+	if (gsi->prot_id == IPA_USB_RMNET)
+		strlcat(gsi->c_port.name, GSI_RMNET_CTRL_NAME, sz);
+	else if (gsi->prot_id == IPA_USB_MBIM)
+		strlcat(gsi->c_port.name, GSI_MBIM_CTRL_NAME, sz);
+	else if (gsi->prot_id == IPA_USB_DIAG)
+		strlcat(gsi->c_port.name, GSI_DPL_CTRL_NAME, sz);
+	else
+		ctrl_dev_create = false;
+
+	if (!ctrl_dev_create)
+		return 0;
+
+	gsi->c_port.ctrl_device.name = gsi->c_port.name;
+	gsi->c_port.ctrl_device.fops = &gsi_ctrl_dev_fops;
+	gsi->c_port.ctrl_device.minor = MISC_DYNAMIC_MINOR;
+
+	ret = misc_register(&gsi->c_port.ctrl_device);
+	if (ret) {
+		log_event_err("%s: misc register failed prot id %d",
+				__func__, gsi->prot_id);
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct net_device *gsi_rndis_get_netdev(const char *netname)
+{
+	struct net_device *net_dev;
+
+	net_dev = dev_get_by_name(&init_net, netname);
+	if (!net_dev)
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Decrement net_dev refcount as it was incremented in
+	 * dev_get_by_name().
+	 */
+	dev_put(net_dev);
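+	/* no reference is held on return; the pointer is only valid while
+	 * the netdev itself stays registered
+	 */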
+	return net_dev;
+}
+
+static void gsi_rndis_open(struct f_gsi *rndis)
+{
+	struct usb_composite_dev *cdev = rndis->function.config->cdev;
+
+	log_event_dbg("%s", __func__);
+
+	rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3,
+				gsi_xfer_bitrate(cdev->gadget) / 100);
+	rndis_signal_connect(rndis->params);
+}
+
+static void gsi_rndis_ipa_reset_trigger(struct f_gsi *rndis)
+{
+	unsigned long flags;
+
+	if (!rndis) {
+		log_event_err("%s: gsi prot ctx is %p", __func__, rndis);
+		return;
+	}
+
+	spin_lock_irqsave(&rndis->d_port.lock, flags);
+	rndis->d_port.net_ready_trigger = false;
+	spin_unlock_irqrestore(&rndis->d_port.lock, flags);
+}
+
+void gsi_rndis_flow_ctrl_enable(bool enable, struct rndis_params *param)
+{
+	struct f_gsi *rndis = param->v;
+	struct gsi_data_port *d_port;
+
+	if (!rndis) {
+		log_event_err("%s: gsi prot ctx is %p", __func__, rndis);
+		return;
+	}
+
+	d_port = &rndis->d_port;
+
+	if (enable) {
+		log_event_dbg("%s: posting HOST_NRDY\n", __func__);
+		post_event(d_port, EVT_HOST_NRDY);
+	} else {
+		log_event_dbg("%s: posting HOST_READY\n", __func__);
+		post_event(d_port, EVT_HOST_READY);
+	}
+
+	queue_work(rndis->d_port.ipa_usb_wq, &rndis->d_port.usb_ipa_w);
+}
+
+static int queue_notification_request(struct f_gsi *gsi)
+{
+	int ret;
+	unsigned long flags;
+	struct usb_cdc_notification *event;
+	struct gsi_ctrl_pkt *cpkt;
+
+	ret = usb_func_ep_queue(&gsi->function, gsi->c_port.notify,
+			   gsi->c_port.notify_req, GFP_ATOMIC);
+	if (ret == -ENOTSUPP || (ret < 0 && ret != -EAGAIN)) {
+		spin_lock_irqsave(&gsi->c_port.lock, flags);
+		/* check if device disconnected while we dropped lock */
+		if (atomic_read(&gsi->connected) &&
+			!list_empty(&gsi->c_port.cpkt_resp_q)) {
+			cpkt = list_first_entry(&gsi->c_port.cpkt_resp_q,
+					struct gsi_ctrl_pkt, list);
+			list_del(&cpkt->list);
+			atomic_dec(&gsi->c_port.notify_count);
+			log_event_err("%s: drop ctrl pkt of len %d error %d",
+						__func__, cpkt->len, ret);
+			gsi_ctrl_pkt_free(cpkt);
+		}
+		gsi->c_port.cpkt_drop_cnt++;
+		spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+	} else {
+		ret = 0;
+		event = gsi->c_port.notify_req->buf;
+		log_event_dbg("%s: Queued Notify type %02x", __func__,
+				event->bNotificationType);
+	}
+
+	return ret;
+}
+
+static int gsi_ctrl_send_notification(struct f_gsi *gsi,
+		enum gsi_ctrl_notify_state state)
+{
+	__le32 *data;
+	struct usb_cdc_notification *event;
+	struct usb_request *req = gsi->c_port.notify_req;
+	struct usb_composite_dev *cdev = gsi->function.config->cdev;
+
+	if (!atomic_read(&gsi->connected)) {
+		log_event_dbg("%s: cable disconnect", __func__);
+		return -ENODEV;
+	}
+
+	event = req->buf;
+
+	switch (state) {
+	case GSI_CTRL_NOTIFY_NONE:
+		if (atomic_read(&gsi->c_port.notify_count) > 0)
+			log_event_dbg("GSI_CTRL_NOTIFY_NONE %d",
+			atomic_read(&gsi->c_port.notify_count));
+		else
+			log_event_dbg("No pending notifications");
+		return 0;
+	case GSI_CTRL_NOTIFY_CONNECT:
+		event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+		event->wValue = cpu_to_le16(1);
+		event->wLength = cpu_to_le16(0);
+		gsi->c_port.notify_state = GSI_CTRL_NOTIFY_SPEED;
+		break;
+	case GSI_CTRL_NOTIFY_SPEED:
+		event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
+		event->wValue = cpu_to_le16(0);
+		event->wLength = cpu_to_le16(8);
+
+		/* SPEED_CHANGE data is up/down speeds in bits/sec */
+		data = req->buf + sizeof(*event);
+		data[0] = cpu_to_le32(gsi_xfer_bitrate(cdev->gadget));
+		data[1] = data[0];
+
+		log_event_dbg("notify speed %d",
+				gsi_xfer_bitrate(cdev->gadget));
+		gsi->c_port.notify_state = GSI_CTRL_NOTIFY_NONE;
+		break;
+	case GSI_CTRL_NOTIFY_OFFLINE:
+		event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+		event->wValue = cpu_to_le16(0);
+		event->wLength = cpu_to_le16(0);
+		gsi->c_port.notify_state = GSI_CTRL_NOTIFY_NONE;
+		break;
+	case GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE:
+		event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+		event->wValue = cpu_to_le16(0);
+		event->wLength = cpu_to_le16(0);
+		gsi->c_port.notify_state = GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE;
+
+		if (gsi->prot_id == IPA_USB_RNDIS) {
+			data = req->buf;
+			data[0] = cpu_to_le32(1);
+			data[1] = cpu_to_le32(0);
+		}
+		break;
+	default:
+		log_event_err("%s:unknown notify state", __func__);
+		return -EINVAL;
+	}
+
+	log_event_dbg("send Notify type %02x", event->bNotificationType);
+
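+	/*
+	 * Only the first notification actually queues the request; while
+	 * the count is elevated, the completion handler keeps re-queuing
+	 * it until the count drains back to zero.
+	 */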
+	if (atomic_inc_return(&gsi->c_port.notify_count) != 1) {
+		log_event_dbg("delay ep_queue: notify req is busy %d",
+			atomic_read(&gsi->c_port.notify_count));
+		return 0;
+	}
+
+	return queue_notification_request(gsi);
+}
+
+static void gsi_ctrl_notify_resp_complete(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	struct f_gsi *gsi = req->context;
+	struct usb_cdc_notification *event = req->buf;
+	int status = req->status;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		gsi->c_port.notify_state = GSI_CTRL_NOTIFY_NONE;
+		atomic_set(&gsi->c_port.notify_count, 0);
+		log_event_dbg("ESHUTDOWN/ECONNRESET, connection gone");
+		gsi_ctrl_clear_cpkt_queues(gsi, false);
+		gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
+		break;
+	default:
+		log_event_err("Unknown event %02x --> %d",
+			event->bNotificationType, req->status);
+		/* FALLTHROUGH */
+	case 0:
+		/*
+		 * Handle multiple pending RESPONSE_AVAILABLE notifications
+		 * by re-queuing the same request until we are done; all
+		 * other notification types require queuing a new request.
+		 */
+		if (!atomic_dec_and_test(&gsi->c_port.notify_count)) {
+			log_event_dbg("notify_count = %d",
+				atomic_read(&gsi->c_port.notify_count));
+			queue_notification_request(gsi);
+		} else if (gsi->c_port.notify_state != GSI_CTRL_NOTIFY_NONE &&
+				gsi->c_port.notify_state !=
+				GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE) {
+			gsi_ctrl_send_notification(gsi,
+					gsi->c_port.notify_state);
+		}
+		break;
+	}
+}
+
+static void gsi_rndis_response_available(void *_rndis)
+{
+	struct f_gsi *gsi = _rndis;
+
+	gsi_ctrl_send_notification(gsi, GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE);
+}
+
+static void gsi_rndis_command_complete(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct f_gsi *rndis = req->context;
+	int status;
+
+	status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
+	if (status < 0)
+		log_event_err("RNDIS command error %d, %d/%d",
+			status, req->actual, req->length);
+}
+
+static void
+gsi_ctrl_set_ntb_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	/* now for SET_NTB_INPUT_SIZE only */
+	unsigned int in_size = 0;
+	struct f_gsi *gsi = req->context;
+	struct gsi_ntb_info *ntb = NULL;
+
+	log_event_dbg("dev:%p", gsi);
+
+	req->context = NULL;
+	if (req->status || req->actual != req->length) {
+		log_event_err("Bad control-OUT transfer");
+		goto invalid;
+	}
+
+	if (req->length == 4) {
+		in_size = get_unaligned_le32(req->buf);
+		if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
+		in_size > le32_to_cpu(mbim_gsi_ntb_parameters.dwNtbInMaxSize))
+			goto invalid;
+	} else if (req->length == 8) {
+		ntb = (struct gsi_ntb_info *)req->buf;
+		in_size = get_unaligned_le32(&(ntb->ntb_input_size));
+		if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
+		in_size > le32_to_cpu(mbim_gsi_ntb_parameters.dwNtbInMaxSize))
+			goto invalid;
+
+		gsi->d_port.ntb_info.ntb_max_datagrams =
+			get_unaligned_le16(&(ntb->ntb_max_datagrams));
+	} else {
+		goto invalid;
+	}
+
+	log_event_dbg("Set NTB INPUT SIZE %d", in_size);
+
+	gsi->d_port.ntb_info.ntb_input_size = in_size;
+	return;
+
+invalid:
+	log_event_err("Illegal NTB INPUT SIZE %d from host", in_size);
+	usb_ep_set_halt(ep);
+}
+
+static void gsi_ctrl_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_gsi *gsi = req->context;
+
+	gsi_ctrl_send_cpkt_tomodem(gsi, req->buf, req->actual);
+}
+
+static void gsi_ctrl_reset_cmd_complete(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct f_gsi *gsi = req->context;
+
+	gsi_ctrl_send_cpkt_tomodem(gsi, req->buf, 0);
+}
+
+static int
+gsi_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_gsi *gsi = func_to_gsi(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request *req = cdev->req;
+	int id, value = -EOPNOTSUPP;
+	u16 w_index = le16_to_cpu(ctrl->wIndex);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u16 w_length = le16_to_cpu(ctrl->wLength);
+	struct gsi_ctrl_pkt *cpkt;
+	u8 *buf;
+	u32 n;
+
+	if (!atomic_read(&gsi->connected)) {
+		log_event_dbg("usb cable is not connected");
+		return -ENOTCONN;
+	}
+
+	/* rmnet and dpl do not have a ctrl_id */
+	if (gsi->ctrl_id == -ENODEV)
+		id = gsi->data_id;
+	else
+		id = gsi->ctrl_id;
+
+	/* composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 */
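+	/* e.g. (0x21 << 8) | 0x00 selects SEND_ENCAPSULATED_COMMAND, where
+	 * 0x21 is USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
+	 */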
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_RESET_FUNCTION:
+
+		log_event_dbg("USB_CDC_RESET_FUNCTION");
+		value = 0;
+		req->complete = gsi_ctrl_reset_cmd_complete;
+		req->context = gsi;
+		break;
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		log_event_dbg("USB_CDC_SEND_ENCAPSULATED_COMMAND");
+
+		if (w_value || w_index != id)
+			goto invalid;
+		/* read the request; process it later */
+		value = w_length;
+		req->context = gsi;
+		if (gsi->prot_id == IPA_USB_RNDIS)
+			req->complete = gsi_rndis_command_complete;
+		else
+			req->complete = gsi_ctrl_cmd_complete;
+		/* later, rndis_response_available() sends a notification */
+		break;
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		log_event_dbg("USB_CDC_GET_ENCAPSULATED_RESPONSE");
+		if (w_value || w_index != id)
+			goto invalid;
+
+		if (gsi->prot_id == IPA_USB_RNDIS) {
+			/* return the result */
+			buf = rndis_get_next_response(gsi->params, &n);
+			if (buf) {
+				memcpy(req->buf, buf, n);
+				rndis_free_response(gsi->params, buf);
+				value = n;
+			}
+			break;
+		}
+
+		spin_lock(&gsi->c_port.lock);
+		if (list_empty(&gsi->c_port.cpkt_resp_q)) {
+			log_event_dbg("ctrl resp queue empty");
+			spin_unlock(&gsi->c_port.lock);
+			break;
+		}
+
+		cpkt = list_first_entry(&gsi->c_port.cpkt_resp_q,
+					struct gsi_ctrl_pkt, list);
+		list_del(&cpkt->list);
+		spin_unlock(&gsi->c_port.lock);
+
+		value = min_t(unsigned int, w_length, cpkt->len);
+		memcpy(req->buf, cpkt->buf, value);
+		gsi_ctrl_pkt_free(cpkt);
+
+		log_event_dbg("copied encap_resp %d bytes",
+			value);
+		break;
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		log_event_dbg("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE DTR:%d\n",
+				__func__, w_value & GSI_CTRL_DTR ? 1 : 0);
+		gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
+		value = 0;
+		break;
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SET_ETHERNET_PACKET_FILTER:
+		/* see 6.2.30: no data, wIndex = interface,
+		 * wValue = packet filter bitmap
+		 */
+		if (w_length != 0 || w_index != id)
+			goto invalid;
+		log_event_dbg("packet filter %02x", w_value);
+		/* REVISIT locking of cdc_filter.  This assumes the UDC
+		 * driver won't have a concurrent packet TX irq running on
+		 * another CPU; or that if it does, this write is atomic...
+		 */
+		gsi->d_port.cdc_filter = w_value;
+		value = 0;
+		break;
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_GET_NTB_PARAMETERS:
+		log_event_dbg("USB_CDC_GET_NTB_PARAMETERS");
+
+		if (w_length == 0 || w_value != 0 || w_index != id)
+			break;
+
+		value = w_length > sizeof(mbim_gsi_ntb_parameters) ?
+			sizeof(mbim_gsi_ntb_parameters) : w_length;
+		memcpy(req->buf, &mbim_gsi_ntb_parameters, value);
+		break;
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_GET_NTB_INPUT_SIZE:
+
+		log_event_dbg("USB_CDC_GET_NTB_INPUT_SIZE");
+
+		if (w_length < 4 || w_value != 0 || w_index != id)
+			break;
+
+		put_unaligned_le32(gsi->d_port.ntb_info.ntb_input_size,
+				req->buf);
+		value = 4;
+		log_event_dbg("Reply to host INPUT SIZE %d",
+			 gsi->d_port.ntb_info.ntb_input_size);
+		break;
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_SET_NTB_INPUT_SIZE:
+		log_event_dbg("USB_CDC_SET_NTB_INPUT_SIZE");
+
+		if (w_length != 4 && w_length != 8) {
+			log_event_err("wrong NTB length %d", w_length);
+			break;
+		}
+
+		if (w_value != 0 || w_index != id)
+			break;
+
+		req->complete = gsi_ctrl_set_ntb_cmd_complete;
+		req->length = w_length;
+		req->context = gsi;
+
+		value = req->length;
+		break;
+	default:
+invalid:
+		log_event_err("inval ctrl req%02x.%02x v%04x i%04x l%d",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		log_event_dbg("req%02x.%02x v%04x i%04x l%d",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = (value < w_length);
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			log_event_err("response on err %d", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+/*
+ * Because the data interface supports multiple altsettings,
+ * function *MUST* implement a get_alt() method.
+ */
+static int gsi_get_alt(struct usb_function *f, unsigned int intf)
+{
+	struct f_gsi *gsi = func_to_gsi(f);
+
+	/* RNDIS, RMNET and DPL only support alt 0 */
+	if (intf == gsi->ctrl_id || gsi->prot_id == IPA_USB_RNDIS ||
+			gsi->prot_id == IPA_USB_RMNET ||
+			gsi->prot_id == IPA_USB_DIAG)
+		return 0;
+	else if (intf == gsi->data_id)
+		return gsi->data_interface_up;
+
+	return -EINVAL;
+}
+
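+/*
+ * TRB data buffers are DMA-coherent; they are allocated when the channel
+ * is brought up in the state machine above and freed again from
+ * ipa_disconnect_work_handler().
+ */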
+static int gsi_alloc_trb_buffer(struct f_gsi *gsi)
+{
+	u32 len_in = 0, len_out = 0;
+	int ret = 0;
+
+	log_event_dbg("allocate TRB buffers\n");
+
+	if (gsi->d_port.in_ep && !gsi->d_port.in_request.buf_base_addr) {
+		log_event_dbg("IN: num_bufs=%zu, buf_len=%zu\n",
+			gsi->d_port.in_request.num_bufs,
+			gsi->d_port.in_request.buf_len);
+
+		len_in = gsi->d_port.in_request.buf_len *
+				gsi->d_port.in_request.num_bufs;
+		gsi->d_port.in_request.buf_base_addr =
+			dma_zalloc_coherent(gsi->d_port.gadget->dev.parent,
+			len_in, &gsi->d_port.in_request.dma, GFP_KERNEL);
+		if (!gsi->d_port.in_request.buf_base_addr) {
+			dev_err(&gsi->d_port.gadget->dev,
+					"IN buf_base_addr allocate failed %s\n",
+					gsi->function.name);
+			ret = -ENOMEM;
+			goto fail1;
+		}
+	}
+
+	if (gsi->d_port.out_ep && !gsi->d_port.out_request.buf_base_addr) {
+		log_event_dbg("OUT: num_bufs:=%zu, buf_len=%zu\n",
+			gsi->d_port.out_request.num_bufs,
+			gsi->d_port.out_request.buf_len);
+
+		len_out = gsi->d_port.out_request.buf_len *
+				gsi->d_port.out_request.num_bufs;
+		gsi->d_port.out_request.buf_base_addr =
+			dma_zalloc_coherent(gsi->d_port.gadget->dev.parent,
+			len_out, &gsi->d_port.out_request.dma, GFP_KERNEL);
+		if (!gsi->d_port.out_request.buf_base_addr) {
+			dev_err(&gsi->d_port.gadget->dev,
+					"OUT buf_base_addr allocate failed %s\n",
+					gsi->function.name);
+			ret = -ENOMEM;
+			goto fail;
+		}
+	}
+
+	log_event_dbg("finished allocating trb's buffer\n");
+	return ret;
+
+fail:
+	if (len_in && gsi->d_port.in_request.buf_base_addr) {
+		dma_free_coherent(gsi->d_port.gadget->dev.parent, len_in,
+				gsi->d_port.in_request.buf_base_addr,
+				gsi->d_port.in_request.dma);
+		gsi->d_port.in_request.buf_base_addr = NULL;
+	}
+fail1:
+	return ret;
+}
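+
+/*
+ * Editor's note (a sketch, not driver code): each direction's coherent
+ * TRB buffer is sized as buf_len * num_bufs. Assuming the num_in_bufs
+ * module parameter keeps the GSI_NUM_IN_BUFFERS default from f_gsi.h
+ * below, the RNDIS IN path, for example, works out to:
+ *
+ *	len_in = GSI_IN_BUFF_SIZE * GSI_NUM_IN_BUFFERS
+ *	       = 2048 * 15 = 30720 bytes
+ *
+ * allocated once with dma_zalloc_coherent() and reused until
+ * gsi_free_trb_buffer() releases it.
+ */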
+
+static void gsi_free_trb_buffer(struct f_gsi *gsi)
+{
+	u32 len;
+
+	log_event_dbg("freeing trb's buffer\n");
+
+	if (gsi->d_port.out_ep &&
+			gsi->d_port.out_request.buf_base_addr) {
+		len = gsi->d_port.out_request.buf_len *
+			gsi->d_port.out_request.num_bufs;
+		dma_free_coherent(gsi->d_port.gadget->dev.parent, len,
+			gsi->d_port.out_request.buf_base_addr,
+			gsi->d_port.out_request.dma);
+		gsi->d_port.out_request.buf_base_addr = NULL;
+	}
+
+	if (gsi->d_port.in_ep &&
+			gsi->d_port.in_request.buf_base_addr) {
+		len = gsi->d_port.in_request.buf_len *
+			gsi->d_port.in_request.num_bufs;
+		dma_free_coherent(gsi->d_port.gadget->dev.parent, len,
+			gsi->d_port.in_request.buf_base_addr,
+			gsi->d_port.in_request.dma);
+		gsi->d_port.in_request.buf_base_addr = NULL;
+	}
+}
+
+static int gsi_set_alt(struct usb_function *f, unsigned int intf,
+						unsigned int alt)
+{
+	struct f_gsi	 *gsi = func_to_gsi(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct net_device	*net;
+	int ret;
+
+	log_event_dbg("intf=%u, alt=%u", intf, alt);
+
+	/* Control interface has only altsetting 0 */
+	if (intf == gsi->ctrl_id || gsi->prot_id == IPA_USB_RMNET) {
+		if (alt != 0)
+			goto fail;
+
+		if (!gsi->c_port.notify)
+			goto fail;
+
+		if (gsi->c_port.notify->driver_data) {
+			log_event_dbg("reset gsi control %d", intf);
+			usb_ep_disable(gsi->c_port.notify);
+		}
+
+		ret = config_ep_by_speed(cdev->gadget, f,
+					gsi->c_port.notify);
+		if (ret) {
+			gsi->c_port.notify->desc = NULL;
+			log_event_err("Config-fail notify ep %s: err %d",
+				gsi->c_port.notify->name, ret);
+			goto fail;
+		}
+
+		ret = usb_ep_enable(gsi->c_port.notify);
+		if (ret) {
+			log_event_err("usb ep#%s enable failed, err#%d",
+				gsi->c_port.notify->name, ret);
+			goto fail;
+		}
+		gsi->c_port.notify->driver_data = gsi;
+	}
+
+	/* Data interface has two altsettings, 0 and 1 */
+	if (intf == gsi->data_id) {
+		gsi->d_port.net_ready_trigger = false;
+		/* For RNDIS, RMNET and DPL alt is always 0; update it here */
+		if (gsi->prot_id == IPA_USB_RNDIS ||
+				gsi->prot_id == IPA_USB_RMNET ||
+				gsi->prot_id == IPA_USB_DIAG) {
+			if (gsi->d_port.in_ep &&
+				!gsi->d_port.in_ep->driver_data)
+				alt = 1;
+			else
+				alt = 0;
+		}
+
+		if (alt > 1)
+			goto notify_ep_disable;
+
+		if (gsi->data_interface_up == alt)
+			return 0;
+
+		if (gsi->d_port.in_ep && gsi->d_port.in_ep->driver_data)
+			gsi->d_port.ntb_info.ntb_input_size =
+				MBIM_NTB_DEFAULT_IN_SIZE;
+		if (alt == 1) {
+			if (gsi->d_port.in_ep && !gsi->d_port.in_ep->desc
+				&& config_ep_by_speed(cdev->gadget, f,
+					gsi->d_port.in_ep)) {
+				gsi->d_port.in_ep->desc = NULL;
+				goto notify_ep_disable;
+			}
+
+			if (gsi->d_port.out_ep && !gsi->d_port.out_ep->desc
+				&& config_ep_by_speed(cdev->gadget, f,
+					gsi->d_port.out_ep)) {
+				gsi->d_port.out_ep->desc = NULL;
+				goto notify_ep_disable;
+			}
+
+			/* Configure EPs for GSI */
+			if (gsi->d_port.in_ep) {
+				if (gsi->prot_id == IPA_USB_DIAG)
+					gsi->d_port.in_ep->ep_intr_num = 3;
+				else
+					gsi->d_port.in_ep->ep_intr_num = 2;
+				usb_gsi_ep_op(gsi->d_port.in_ep,
+					&gsi->d_port.in_request,
+						GSI_EP_OP_CONFIG);
+			}
+
+			if (gsi->d_port.out_ep) {
+				gsi->d_port.out_ep->ep_intr_num = 1;
+				usb_gsi_ep_op(gsi->d_port.out_ep,
+					&gsi->d_port.out_request,
+						GSI_EP_OP_CONFIG);
+			}
+
+			gsi->d_port.gadget = cdev->gadget;
+
+			if (gsi->prot_id == IPA_USB_RNDIS) {
+				gsi_rndis_open(gsi);
+				net = gsi_rndis_get_netdev("rndis0");
+				if (IS_ERR(net))
+					goto notify_ep_disable;
+
+				log_event_dbg("RNDIS RX/TX early activation");
+				gsi->d_port.cdc_filter = 0;
+				rndis_set_param_dev(gsi->params, net,
+						&gsi->d_port.cdc_filter);
+			}
+
+			if (gsi->prot_id == IPA_USB_ECM)
+				gsi->d_port.cdc_filter = DEFAULT_FILTER;
+
+			/*
+			 * For RNDIS the event is posted from the flow control
+			 * handler which is invoked when the host sends the
+			 * GEN_CURRENT_PACKET_FILTER message.
+			 */
+			if (gsi->prot_id != IPA_USB_RNDIS)
+				post_event(&gsi->d_port,
+						EVT_CONNECT_IN_PROGRESS);
+			queue_work(gsi->d_port.ipa_usb_wq,
+					&gsi->d_port.usb_ipa_w);
+		}
+
+		if (alt == 0 && ((gsi->d_port.in_ep &&
+				!gsi->d_port.in_ep->driver_data) ||
+				(gsi->d_port.out_ep &&
+				!gsi->d_port.out_ep->driver_data)))
+			ipa_disconnect_handler(&gsi->d_port);
+
+		gsi->data_interface_up = alt;
+		log_event_dbg("DATA_INTERFACE id = %d, status = %d",
+				gsi->data_id, gsi->data_interface_up);
+	}
+
+	atomic_set(&gsi->connected, 1);
+
+	/* send 0 len pkt to qti to notify state change */
+	if (gsi->prot_id == IPA_USB_DIAG)
+		gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
+
+	return 0;
+
+notify_ep_disable:
+	if (gsi->c_port.notify && gsi->c_port.notify->driver_data)
+		usb_ep_disable(gsi->c_port.notify);
+fail:
+	return -EINVAL;
+}
+
+static void gsi_disable(struct usb_function *f)
+{
+	struct f_gsi *gsi = func_to_gsi(f);
+
+	atomic_set(&gsi->connected, 0);
+
+	if (gsi->prot_id == IPA_USB_RNDIS)
+		rndis_uninit(gsi->params);
+
+	/* Disable Control Path */
+	if (gsi->c_port.notify &&
+		gsi->c_port.notify->driver_data) {
+		usb_ep_disable(gsi->c_port.notify);
+		gsi->c_port.notify->driver_data = NULL;
+		gsi->c_port.notify_state = GSI_CTRL_NOTIFY_NONE;
+	}
+
+	atomic_set(&gsi->c_port.notify_count, 0);
+
+	gsi_ctrl_clear_cpkt_queues(gsi, false);
+	/* send 0 len pkt to qti/qbi to notify state change */
+	gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
+
+	/* Disable Data Path - only if it was initialized already (alt=1) */
+	if (!gsi->data_interface_up) {
+		log_event_dbg("%s: data intf is closed", __func__);
+		return;
+	}
+
+	gsi->data_interface_up = false;
+
+	log_event_dbg("%s deactivated", gsi->function.name);
+	ipa_disconnect_handler(&gsi->d_port);
+	post_event(&gsi->d_port, EVT_DISCONNECTED);
+	queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
+}
+
+static void gsi_suspend(struct usb_function *f)
+{
+	bool block_db;
+	struct f_gsi *gsi = func_to_gsi(f);
+	bool remote_wakeup_allowed;
+
+	/* Check if function is already suspended in gsi_func_suspend() */
+	if (f->func_is_suspended) {
+		log_event_dbg("%s: func already suspended, return\n", __func__);
+		return;
+	}
+
+	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+		remote_wakeup_allowed = f->func_wakeup_allowed;
+	else
+		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+	log_event_info("%s: remote_wakeup_allowed %d",
+					__func__, remote_wakeup_allowed);
+
+	if (!remote_wakeup_allowed) {
+		if (gsi->prot_id == IPA_USB_RNDIS)
+			rndis_flow_control(gsi->params, true);
+		/*
+		 * When remote wakeup is disabled, IPA is disconnected
+		 * because it cannot send new data until the USB bus is
+		 * resumed. Endpoint descriptors info is saved before it
+		 * gets reset by the BAM disconnect API. This lets us
+		 * restore this info when the USB bus is resumed.
+		 */
+		if (gsi->d_port.in_ep)
+			gsi->in_ep_desc_backup = gsi->d_port.in_ep->desc;
+		if (gsi->d_port.out_ep)
+			gsi->out_ep_desc_backup = gsi->d_port.out_ep->desc;
+
+		ipa_disconnect_handler(&gsi->d_port);
+
+		post_event(&gsi->d_port, EVT_DISCONNECTED);
+		queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
+		log_event_dbg("%s: Disconnecting", __func__);
+	} else {
+		block_db = true;
+		usb_gsi_ep_op(gsi->d_port.in_ep, (void *)&block_db,
+				GSI_EP_OP_SET_CLR_BLOCK_DBL);
+		post_event(&gsi->d_port, EVT_SUSPEND);
+		queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
+	}
+
+	log_event_dbg("gsi suspended");
+}
+
+static void gsi_resume(struct usb_function *f)
+{
+	struct f_gsi *gsi = func_to_gsi(f);
+	bool remote_wakeup_allowed;
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	log_event_dbg("%s", __func__);
+
+	/*
+	 * If the function is in USB3 Function Suspend state, resume is
+	 * canceled. In this case resume is done by a Function Resume request.
+	 */
+	if ((cdev->gadget->speed == USB_SPEED_SUPER) &&
+		f->func_is_suspended)
+		return;
+
+	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+		remote_wakeup_allowed = f->func_wakeup_allowed;
+	else
+		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+	if (!remote_wakeup_allowed) {
+
+		/* Configure EPs for GSI */
+		if (gsi->d_port.out_ep) {
+			gsi->d_port.out_ep->desc = gsi->out_ep_desc_backup;
+			gsi->d_port.out_ep->ep_intr_num = 1;
+			usb_gsi_ep_op(gsi->d_port.out_ep,
+				&gsi->d_port.out_request, GSI_EP_OP_CONFIG);
+		}
+		gsi->d_port.in_ep->desc = gsi->in_ep_desc_backup;
+		if (gsi->prot_id != IPA_USB_DIAG)
+			gsi->d_port.in_ep->ep_intr_num = 2;
+		else
+			gsi->d_port.in_ep->ep_intr_num = 3;
+
+		usb_gsi_ep_op(gsi->d_port.in_ep, &gsi->d_port.in_request,
+				GSI_EP_OP_CONFIG);
+		post_event(&gsi->d_port, EVT_CONNECT_IN_PROGRESS);
+
+		/*
+		 * Linux host does not send RNDIS_MSG_INIT or non-zero
+		 * RNDIS_MESSAGE_PACKET_FILTER after performing bus resume.
+		 * Trigger state machine explicitly on resume.
+		 */
+		if (gsi->prot_id == IPA_USB_RNDIS)
+			rndis_flow_control(gsi->params, false);
+	} else
+		post_event(&gsi->d_port, EVT_RESUMED);
+
+	queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
+
+	if (gsi->c_port.notify && !gsi->c_port.notify->desc)
+		config_ep_by_speed(cdev->gadget, f, gsi->c_port.notify);
+
+	atomic_set(&gsi->c_port.notify_count, 0);
+	log_event_dbg("%s: completed", __func__);
+}
+
+static int gsi_func_suspend(struct usb_function *f, u8 options)
+{
+	bool func_wakeup_allowed;
+
+	log_event_dbg("func susp %u cmd for %s",
+		options, f->name ? f->name : "");
+
+	func_wakeup_allowed =
+		((options & FUNC_SUSPEND_OPT_RW_EN_MASK) != 0);
+
+	if (options & FUNC_SUSPEND_OPT_SUSP_MASK) {
+		f->func_wakeup_allowed = func_wakeup_allowed;
+		if (!f->func_is_suspended) {
+			gsi_suspend(f);
+			f->func_is_suspended = true;
+		}
+	} else {
+		if (f->func_is_suspended) {
+			f->func_is_suspended = false;
+			gsi_resume(f);
+		}
+		f->func_wakeup_allowed = func_wakeup_allowed;
+	}
+
+	return 0;
+}
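+
+/*
+ * Editor's note (a sketch): gsi_func_suspend() services the USB 3.0
+ * SET_FEATURE(FUNCTION_SUSPEND) request. Assuming the bit layout the
+ * mask names suggest (bit 0 = function suspend, bit 1 = function
+ * remote wakeup enable):
+ *
+ *	options == 0x3: suspend the function with remote wakeup armed
+ *	options == 0x1: suspend the function without remote wakeup
+ *	options == 0x0: resume the function
+ */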
+
+static int gsi_update_function_bind_params(struct f_gsi *gsi,
+	struct usb_composite_dev *cdev,
+	struct gsi_function_bind_info *info)
+{
+	struct usb_ep *ep;
+	struct usb_cdc_notification *event;
+	struct usb_function *f = &gsi->function;
+	int status;
+
+	/* maybe allocate device-global string IDs */
+	if (info->string_defs[0].id != 0)
+		goto skip_string_id_alloc;
+
+	if (info->ctrl_str_idx >= 0 && info->ctrl_desc) {
+		/* ctrl interface label */
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		info->string_defs[info->ctrl_str_idx].id = status;
+		info->ctrl_desc->iInterface = status;
+	}
+
+	if (info->data_str_idx >= 0 && info->data_desc) {
+		/* data interface label */
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		info->string_defs[info->data_str_idx].id = status;
+		info->data_desc->iInterface = status;
+	}
+
+	if (info->iad_str_idx >= 0 && info->iad_desc) {
+		/* IAD iFunction label */
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		info->string_defs[info->iad_str_idx].id = status;
+		info->iad_desc->iFunction = status;
+	}
+
+	if (info->mac_str_idx >= 0 && info->cdc_eth_desc) {
+		/* IAD iFunction label */
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		info->string_defs[info->mac_str_idx].id = status;
+		info->cdc_eth_desc->iMACAddress = status;
+	}
+
+skip_string_id_alloc:
+	if (info->ctrl_desc)
+		info->ctrl_desc->bInterfaceNumber = gsi->ctrl_id;
+
+	if (info->iad_desc)
+		info->iad_desc->bFirstInterface = gsi->ctrl_id;
+
+	if (info->union_desc) {
+		info->union_desc->bMasterInterface0 = gsi->ctrl_id;
+		info->union_desc->bSlaveInterface0 = gsi->data_id;
+	}
+
+	if (info->data_desc)
+		info->data_desc->bInterfaceNumber = gsi->data_id;
+
+	if (info->data_nop_desc)
+		info->data_nop_desc->bInterfaceNumber = gsi->data_id;
+
+	/* allocate instance-specific endpoints */
+	if (info->fs_in_desc) {
+		ep = usb_ep_autoconfig_by_name(cdev->gadget,
+				info->fs_in_desc, info->in_epname);
+		if (!ep)
+			goto fail;
+		gsi->d_port.in_ep = ep;
+		msm_ep_config(gsi->d_port.in_ep);
+		ep->driver_data = cdev;	/* claim */
+	}
+
+	if (info->fs_out_desc) {
+		ep = usb_ep_autoconfig_by_name(cdev->gadget,
+				info->fs_out_desc, info->out_epname);
+		if (!ep)
+			goto fail;
+		gsi->d_port.out_ep = ep;
+		msm_ep_config(gsi->d_port.out_ep);
+		ep->driver_data = cdev;	/* claim */
+	}
+
+	if (info->fs_notify_desc) {
+		ep = usb_ep_autoconfig(cdev->gadget, info->fs_notify_desc);
+		if (!ep)
+			goto fail;
+		gsi->c_port.notify = ep;
+		ep->driver_data = cdev;	/* claim */
+
+		atomic_set(&gsi->c_port.notify_count, 0);
+
+		/* allocate notification request and buffer */
+		gsi->c_port.notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+		if (!gsi->c_port.notify_req)
+			goto fail;
+
+		gsi->c_port.notify_req->buf =
+			kmalloc(info->notify_buf_len, GFP_KERNEL);
+		if (!gsi->c_port.notify_req->buf)
+			goto fail;
+
+		gsi->c_port.notify_req->length = info->notify_buf_len;
+		gsi->c_port.notify_req->context = gsi;
+		gsi->c_port.notify_req->complete =
+				gsi_ctrl_notify_resp_complete;
+		event = gsi->c_port.notify_req->buf;
+		event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+				| USB_RECIP_INTERFACE;
+
+		if (gsi->ctrl_id == -ENODEV)
+			event->wIndex = cpu_to_le16(gsi->data_id);
+		else
+			event->wIndex = cpu_to_le16(gsi->ctrl_id);
+
+		event->wLength = cpu_to_le16(0);
+		gsi->c_port.notify_state = GSI_CTRL_NOTIFY_NONE;
+	}
+
+	gsi->d_port.in_request.buf_len = info->in_req_buf_len;
+	gsi->d_port.in_request.num_bufs = info->in_req_num_buf;
+	if (gsi->d_port.out_ep) {
+		gsi->d_port.out_request.buf_len = info->out_req_buf_len;
+		gsi->d_port.out_request.num_bufs = info->out_req_num_buf;
+	}
+
+	/* Initialize event queue */
+	spin_lock_init(&gsi->d_port.evt_q.q_lock);
+	gsi->d_port.evt_q.head = gsi->d_port.evt_q.tail = MAXQUEUELEN - 1;
+
+	/* copy descriptors, and track endpoint copies */
+	f->fs_descriptors = usb_copy_descriptors(info->fs_desc_hdr);
+	if (!f->fs_descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(cdev->gadget)) {
+		if (info->fs_in_desc)
+			info->hs_in_desc->bEndpointAddress =
+					info->fs_in_desc->bEndpointAddress;
+		if (info->fs_out_desc)
+			info->hs_out_desc->bEndpointAddress =
+					info->fs_out_desc->bEndpointAddress;
+		if (info->fs_notify_desc)
+			info->hs_notify_desc->bEndpointAddress =
+					info->fs_notify_desc->bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(info->hs_desc_hdr);
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(cdev->gadget)) {
+		if (info->fs_in_desc)
+			info->ss_in_desc->bEndpointAddress =
+					info->fs_in_desc->bEndpointAddress;
+
+		if (info->fs_out_desc)
+			info->ss_out_desc->bEndpointAddress =
+					info->fs_out_desc->bEndpointAddress;
+		if (info->fs_notify_desc)
+			info->ss_notify_desc->bEndpointAddress =
+					info->fs_notify_desc->bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(info->ss_desc_hdr);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	if (gadget_is_superspeed(cdev->gadget) && f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(cdev->gadget) && f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->fs_descriptors)
+		usb_free_descriptors(f->fs_descriptors);
+	if (gsi->c_port.notify_req) {
+		kfree(gsi->c_port.notify_req->buf);
+		usb_ep_free_request(gsi->c_port.notify, gsi->c_port.notify_req);
+	}
+	/* we might as well release our claims on endpoints */
+	if (gsi->c_port.notify)
+		gsi->c_port.notify->driver_data = NULL;
+	if (gsi->d_port.out_ep && gsi->d_port.out_ep->desc)
+		gsi->d_port.out_ep->driver_data = NULL;
+	if (gsi->d_port.in_ep && gsi->d_port.in_ep->desc)
+		gsi->d_port.in_ep->driver_data = NULL;
+	log_event_err("%s: bind failed for %s", __func__, f->name);
+	return -ENOMEM;
+}
+
+static void ipa_ready_callback(void *user_data)
+{
+	struct f_gsi *gsi = user_data;
+
+	log_event_info("%s: ipa is ready\n", __func__);
+
+	gsi->d_port.ipa_ready = true;
+	wake_up_interruptible(&gsi->d_port.wait_for_ipa_ready);
+}
+
+static int gsi_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct gsi_function_bind_info info = {0};
+	struct f_gsi *gsi = func_to_gsi(f);
+	struct rndis_params *params;
+	int status;
+
+	if (gsi->prot_id == IPA_USB_RMNET ||
+		gsi->prot_id == IPA_USB_DIAG)
+		gsi->ctrl_id = -ENODEV;
+	else {
+		status = gsi->ctrl_id = usb_interface_id(c, f);
+		if (status < 0)
+			goto fail;
+	}
+
+	status = gsi->data_id = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+
+	switch (gsi->prot_id) {
+	case IPA_USB_RNDIS:
+		info.string_defs = rndis_gsi_string_defs;
+		info.ctrl_desc = &rndis_gsi_control_intf;
+		info.ctrl_str_idx = 0;
+		info.data_desc = &rndis_gsi_data_intf;
+		info.data_str_idx = 1;
+		info.iad_desc = &rndis_gsi_iad_descriptor;
+		info.iad_str_idx = 2;
+		info.union_desc = &rndis_gsi_union_desc;
+		info.fs_in_desc = &rndis_gsi_fs_in_desc;
+		info.fs_out_desc = &rndis_gsi_fs_out_desc;
+		info.fs_notify_desc = &rndis_gsi_fs_notify_desc;
+		info.hs_in_desc = &rndis_gsi_hs_in_desc;
+		info.hs_out_desc = &rndis_gsi_hs_out_desc;
+		info.hs_notify_desc = &rndis_gsi_hs_notify_desc;
+		info.ss_in_desc = &rndis_gsi_ss_in_desc;
+		info.ss_out_desc = &rndis_gsi_ss_out_desc;
+		info.ss_notify_desc = &rndis_gsi_ss_notify_desc;
+		info.fs_desc_hdr = gsi_eth_fs_function;
+		info.hs_desc_hdr = gsi_eth_hs_function;
+		info.ss_desc_hdr = gsi_eth_ss_function;
+		info.in_epname = "gsi-epin";
+		info.out_epname = "gsi-epout";
+		info.in_req_buf_len = GSI_IN_BUFF_SIZE;
+		gsi->d_port.in_aggr_size = GSI_IN_RNDIS_AGGR_SIZE;
+		info.in_req_num_buf = num_in_bufs;
+		gsi->d_port.out_aggr_size = GSI_OUT_AGGR_SIZE;
+		info.out_req_buf_len = GSI_OUT_AGGR_SIZE;
+		info.out_req_num_buf = num_out_bufs;
+		info.notify_buf_len = sizeof(struct usb_cdc_notification);
+
+		params = rndis_register(gsi_rndis_response_available, gsi,
+				gsi_rndis_flow_ctrl_enable);
+		if (IS_ERR(params))
+			goto fail;
+
+		gsi->params = params;
+
+		rndis_set_param_medium(gsi->params, RNDIS_MEDIUM_802_3, 0);
+
+		/* export host's Ethernet address in CDC format */
+		random_ether_addr(gsi->d_port.ipa_init_params.device_ethaddr);
+		random_ether_addr(gsi->d_port.ipa_init_params.host_ethaddr);
+		log_event_dbg("setting host_ethaddr=%pM, device_ethaddr = %pM",
+		gsi->d_port.ipa_init_params.host_ethaddr,
+		gsi->d_port.ipa_init_params.device_ethaddr);
+		memcpy(gsi->ethaddr, &gsi->d_port.ipa_init_params.host_ethaddr,
+				ETH_ALEN);
+		rndis_set_host_mac(gsi->params, gsi->ethaddr);
+
+		if (gsi->manufacturer && gsi->vendorID &&
+			rndis_set_param_vendor(gsi->params, gsi->vendorID,
+				gsi->manufacturer))
+			goto dereg_rndis;
+
+		log_event_dbg("%s: max_pkt_per_xfer : %d", __func__,
+					DEFAULT_MAX_PKT_PER_XFER);
+		rndis_set_max_pkt_xfer(gsi->params, DEFAULT_MAX_PKT_PER_XFER);
+
+		/* In case of aggregated packets the QC device will request
+		 * alignment to 4 bytes (2^2).
+		 */
+		log_event_dbg("%s: pkt_alignment_factor : %d", __func__,
+					DEFAULT_PKT_ALIGNMENT_FACTOR);
+		rndis_set_pkt_alignment_factor(gsi->params,
+					DEFAULT_PKT_ALIGNMENT_FACTOR);
+		break;
+	case IPA_USB_MBIM:
+		info.string_defs = mbim_gsi_string_defs;
+		info.ctrl_desc = &mbim_gsi_control_intf;
+		info.ctrl_str_idx = 0;
+		info.data_desc = &mbim_gsi_data_intf;
+		info.data_str_idx = 1;
+		info.data_nop_desc = &mbim_gsi_data_nop_intf;
+		info.iad_desc = &mbim_gsi_iad_desc;
+		info.iad_str_idx = -1;
+		info.union_desc = &mbim_gsi_union_desc;
+		info.fs_in_desc = &mbim_gsi_fs_in_desc;
+		info.fs_out_desc = &mbim_gsi_fs_out_desc;
+		info.fs_notify_desc = &mbim_gsi_fs_notify_desc;
+		info.hs_in_desc = &mbim_gsi_hs_in_desc;
+		info.hs_out_desc = &mbim_gsi_hs_out_desc;
+		info.hs_notify_desc = &mbim_gsi_hs_notify_desc;
+		info.ss_in_desc = &mbim_gsi_ss_in_desc;
+		info.ss_out_desc = &mbim_gsi_ss_out_desc;
+		info.ss_notify_desc = &mbim_gsi_ss_notify_desc;
+		info.fs_desc_hdr = mbim_gsi_fs_function;
+		info.hs_desc_hdr = mbim_gsi_hs_function;
+		info.ss_desc_hdr = mbim_gsi_ss_function;
+		info.in_epname = "gsi-epin";
+		info.out_epname = "gsi-epout";
+		gsi->d_port.in_aggr_size = GSI_IN_MBIM_AGGR_SIZE;
+		info.in_req_buf_len = GSI_IN_MBIM_AGGR_SIZE;
+		info.in_req_num_buf = num_in_bufs;
+		gsi->d_port.out_aggr_size = GSI_OUT_AGGR_SIZE;
+		info.out_req_buf_len = GSI_OUT_MBIM_BUF_LEN;
+		info.out_req_num_buf = num_out_bufs;
+		info.notify_buf_len = sizeof(struct usb_cdc_notification);
+		mbim_gsi_desc.wMaxSegmentSize = cpu_to_le16(0x800);
+
+		/*
+		 * If MBIM is bound in a config other than the first, tell
+		 * Windows about it by returning the configuration number as
+		 * a string in the OS descriptor's subCompatibleID field.
+		 * Windows only supports up to config #4.
+		 */
+		if (c->bConfigurationValue >= 2 &&
+				c->bConfigurationValue <= 4) {
+			log_event_dbg("MBIM in configuration %d",
+					c->bConfigurationValue);
+			mbim_gsi_ext_config_desc.function.subCompatibleID[0] =
+				c->bConfigurationValue + '0';
+		}
+		break;
+	case IPA_USB_RMNET:
+		info.string_defs = rmnet_gsi_string_defs;
+		info.data_desc = &rmnet_gsi_interface_desc;
+		info.data_str_idx = 0;
+		info.fs_in_desc = &rmnet_gsi_fs_in_desc;
+		info.fs_out_desc = &rmnet_gsi_fs_out_desc;
+		info.fs_notify_desc = &rmnet_gsi_fs_notify_desc;
+		info.hs_in_desc = &rmnet_gsi_hs_in_desc;
+		info.hs_out_desc = &rmnet_gsi_hs_out_desc;
+		info.hs_notify_desc = &rmnet_gsi_hs_notify_desc;
+		info.ss_in_desc = &rmnet_gsi_ss_in_desc;
+		info.ss_out_desc = &rmnet_gsi_ss_out_desc;
+		info.ss_notify_desc = &rmnet_gsi_ss_notify_desc;
+		info.fs_desc_hdr = rmnet_gsi_fs_function;
+		info.hs_desc_hdr = rmnet_gsi_hs_function;
+		info.ss_desc_hdr = rmnet_gsi_ss_function;
+		info.in_epname = "gsi-epin";
+		info.out_epname = "gsi-epout";
+		gsi->d_port.in_aggr_size = GSI_IN_RMNET_AGGR_SIZE;
+		info.in_req_buf_len = GSI_IN_BUFF_SIZE;
+		info.in_req_num_buf = num_in_bufs;
+		gsi->d_port.out_aggr_size = GSI_OUT_AGGR_SIZE;
+		info.out_req_buf_len = GSI_OUT_RMNET_BUF_LEN;
+		info.out_req_num_buf = num_out_bufs;
+		info.notify_buf_len = sizeof(struct usb_cdc_notification);
+		break;
+	case IPA_USB_ECM:
+		info.string_defs = ecm_gsi_string_defs;
+		info.ctrl_desc = &ecm_gsi_control_intf;
+		info.ctrl_str_idx = 0;
+		info.data_desc = &ecm_gsi_data_intf;
+		info.data_str_idx = 2;
+		info.data_nop_desc = &ecm_gsi_data_nop_intf;
+		info.cdc_eth_desc = &ecm_gsi_desc;
+		info.mac_str_idx = 1;
+		info.union_desc = &ecm_gsi_union_desc;
+		info.fs_in_desc = &ecm_gsi_fs_in_desc;
+		info.fs_out_desc = &ecm_gsi_fs_out_desc;
+		info.fs_notify_desc = &ecm_gsi_fs_notify_desc;
+		info.hs_in_desc = &ecm_gsi_hs_in_desc;
+		info.hs_out_desc = &ecm_gsi_hs_out_desc;
+		info.hs_notify_desc = &ecm_gsi_hs_notify_desc;
+		info.ss_in_desc = &ecm_gsi_ss_in_desc;
+		info.ss_out_desc = &ecm_gsi_ss_out_desc;
+		info.ss_notify_desc = &ecm_gsi_ss_notify_desc;
+		info.fs_desc_hdr = ecm_gsi_fs_function;
+		info.hs_desc_hdr = ecm_gsi_hs_function;
+		info.ss_desc_hdr = ecm_gsi_ss_function;
+		info.in_epname = "gsi-epin";
+		info.out_epname = "gsi-epout";
+		gsi->d_port.in_aggr_size = GSI_ECM_AGGR_SIZE;
+		info.in_req_buf_len = GSI_IN_BUFF_SIZE;
+		info.in_req_num_buf = num_in_bufs;
+		gsi->d_port.out_aggr_size = GSI_ECM_AGGR_SIZE;
+		info.out_req_buf_len = GSI_OUT_ECM_BUF_LEN;
+		info.out_req_num_buf = GSI_ECM_NUM_OUT_BUFFERS;
+		info.notify_buf_len = GSI_CTRL_NOTIFY_BUFF_LEN;
+
+		/* export host's Ethernet address in CDC format */
+		random_ether_addr(gsi->d_port.ipa_init_params.device_ethaddr);
+		random_ether_addr(gsi->d_port.ipa_init_params.host_ethaddr);
+		log_event_dbg("setting host_ethaddr=%pM, device_ethaddr = %pM",
+		gsi->d_port.ipa_init_params.host_ethaddr,
+		gsi->d_port.ipa_init_params.device_ethaddr);
+
+		snprintf(gsi->ethaddr, sizeof(gsi->ethaddr),
+		"%02X%02X%02X%02X%02X%02X",
+		gsi->d_port.ipa_init_params.host_ethaddr[0],
+		gsi->d_port.ipa_init_params.host_ethaddr[1],
+		gsi->d_port.ipa_init_params.host_ethaddr[2],
+		gsi->d_port.ipa_init_params.host_ethaddr[3],
+		gsi->d_port.ipa_init_params.host_ethaddr[4],
+		gsi->d_port.ipa_init_params.host_ethaddr[5]);
+		info.string_defs[1].s = gsi->ethaddr;
+		break;
+	case IPA_USB_DIAG:
+		info.string_defs = qdss_gsi_string_defs;
+		info.data_desc = &qdss_gsi_data_intf_desc;
+		info.data_str_idx = 0;
+		info.fs_in_desc = &qdss_gsi_hs_data_desc;
+		info.hs_in_desc = &qdss_gsi_hs_data_desc;
+		info.ss_in_desc = &qdss_gsi_ss_data_desc;
+		info.fs_desc_hdr = qdss_gsi_hs_data_only_desc;
+		info.hs_desc_hdr = qdss_gsi_hs_data_only_desc;
+		info.ss_desc_hdr = qdss_gsi_ss_data_only_desc;
+		info.in_epname = "gsi-epin";
+		info.out_epname = "";
+		info.in_req_buf_len = 16384;
+		info.in_req_num_buf = num_in_bufs;
+		info.notify_buf_len = sizeof(struct usb_cdc_notification);
+		break;
+	default:
+		log_event_err("%s: Invalid prot id %d", __func__,
+							gsi->prot_id);
+		return -EINVAL;
+	}
+
+	status = gsi_update_function_bind_params(gsi, cdev, &info);
+	if (status)
+		goto dereg_rndis;
+
+	status = ipa_register_ipa_ready_cb(ipa_ready_callback, gsi);
+	if (!status) {
+		log_event_info("%s: ipa is not ready", __func__);
+		status = wait_event_interruptible_timeout(
+			gsi->d_port.wait_for_ipa_ready, gsi->d_port.ipa_ready,
+			msecs_to_jiffies(GSI_IPA_READY_TIMEOUT));
+		if (!status) {
+			log_event_err("%s: ipa ready timeout", __func__);
+			status = -ETIMEDOUT;
+			goto dereg_rndis;
+		}
+	}
+
+	gsi->d_port.ipa_usb_notify_cb = ipa_usb_notify_cb;
+	status = ipa_usb_init_teth_prot(gsi->prot_id,
+		&gsi->d_port.ipa_init_params, gsi->d_port.ipa_usb_notify_cb,
+		gsi);
+	if (status) {
+		log_event_err("%s: failed to init teth prot %d",
+						__func__, gsi->prot_id);
+		goto dereg_rndis;
+	}
+
+	gsi->d_port.sm_state = STATE_INITIALIZED;
+
+	DBG(cdev, "%s: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			f->name,
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			gsi->d_port.in_ep ? gsi->d_port.in_ep->name : "",
+			gsi->d_port.out_ep ? gsi->d_port.out_ep->name : "",
+			gsi->c_port.notify ? gsi->c_port.notify->name : "");
+	return 0;
+
+dereg_rndis:
+	rndis_deregister(gsi->params);
+fail:
+	return status;
+}
+
+static void gsi_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_gsi *gsi = func_to_gsi(f);
+
+	/*
+	 * Use drain_workqueue() to ensure that:
+	 * 1. any work item currently running completes, and
+	 * 2. all pending work items are flushed, leaving the workqueue
+	 *    empty.
+	 * This guarantees that ipa_usb_deinit_teth_prot() cannot fail in
+	 * the ipa driver due to an unexpected state.
+	 */
+	drain_workqueue(gsi->d_port.ipa_usb_wq);
+	ipa_usb_deinit_teth_prot(gsi->prot_id);
+
+	if (gsi->prot_id == IPA_USB_RNDIS) {
+		gsi->d_port.sm_state = STATE_UNINITIALIZED;
+		rndis_deregister(gsi->params);
+	}
+
+	if (gsi->prot_id == IPA_USB_MBIM)
+		mbim_gsi_ext_config_desc.function.subCompatibleID[0] = 0;
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		usb_free_descriptors(f->ss_descriptors);
+		f->ss_descriptors = NULL;
+	}
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		usb_free_descriptors(f->hs_descriptors);
+		f->hs_descriptors = NULL;
+	}
+	usb_free_descriptors(f->fs_descriptors);
+	f->fs_descriptors = NULL;
+
+	if (gsi->c_port.notify) {
+		kfree(gsi->c_port.notify_req->buf);
+		usb_ep_free_request(gsi->c_port.notify, gsi->c_port.notify_req);
+	}
+}
+
+
+static void gsi_free_func(struct usb_function *f)
+{
+	pr_debug("%s\n", __func__);
+}
+
+static int gsi_bind_config(struct f_gsi *gsi)
+{
+	int status = 0;
+	enum ipa_usb_teth_prot prot_id = gsi->prot_id;
+
+	log_event_dbg("%s: prot id %d", __func__, prot_id);
+
+	switch (prot_id) {
+	case IPA_USB_RNDIS:
+		gsi->function.name = "rndis";
+		gsi->function.strings = rndis_gsi_strings;
+		break;
+	case IPA_USB_ECM:
+		gsi->function.name = "cdc_ethernet";
+		gsi->function.strings = ecm_gsi_strings;
+		break;
+	case IPA_USB_RMNET:
+		gsi->function.name = "rmnet";
+		gsi->function.strings = rmnet_gsi_strings;
+		break;
+	case IPA_USB_MBIM:
+		gsi->function.name = "mbim";
+		gsi->function.strings = mbim_gsi_strings;
+		break;
+	case IPA_USB_DIAG:
+		gsi->function.name = "dpl";
+		gsi->function.strings = qdss_gsi_strings;
+		break;
+	default:
+		log_event_err("%s: invalid prot id %d", __func__, prot_id);
+		return -EINVAL;
+	}
+
+	/* descriptors are per-instance copies */
+	gsi->function.bind = gsi_bind;
+	gsi->function.unbind = gsi_unbind;
+	gsi->function.set_alt = gsi_set_alt;
+	gsi->function.get_alt = gsi_get_alt;
+	gsi->function.setup = gsi_setup;
+	gsi->function.disable = gsi_disable;
+	gsi->function.free_func = gsi_free_func;
+	gsi->function.suspend = gsi_suspend;
+	gsi->function.func_suspend = gsi_func_suspend;
+	gsi->function.resume = gsi_resume;
+
+	INIT_WORK(&gsi->d_port.usb_ipa_w, ipa_work_handler);
+
+	return status;
+}
+
+static struct f_gsi *gsi_function_init(enum ipa_usb_teth_prot prot_id)
+{
+	struct f_gsi *gsi;
+	int ret = 0;
+
+	if (prot_id >= IPA_USB_MAX_TETH_PROT_SIZE) {
+		log_event_err("%s: invalid prot id %d", __func__, prot_id);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	gsi = kzalloc(sizeof(*gsi), GFP_KERNEL);
+	if (!gsi) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	spin_lock_init(&gsi->d_port.lock);
+
+	init_waitqueue_head(&gsi->d_port.wait_for_ipa_ready);
+
+	gsi->d_port.in_channel_handle = -EINVAL;
+	gsi->d_port.out_channel_handle = -EINVAL;
+
+	gsi->prot_id = prot_id;
+
+	gsi->d_port.ipa_usb_wq = ipa_usb_wq;
+
+	ret = gsi_function_ctrl_port_init(gsi);
+	if (ret) {
+		kfree(gsi);
+		goto error;
+	}
+
+	return gsi;
+error:
+	return ERR_PTR(ret);
+}
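+
+/*
+ * Editor's sketch of intended use, mirroring gsi_set_inst_name() below:
+ *
+ *	struct f_gsi *gsi = gsi_function_init(IPA_USB_RNDIS);
+ *
+ *	if (IS_ERR(gsi))
+ *		return PTR_ERR(gsi);
+ *	opts->gsi = gsi;
+ */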
+
+static void gsi_opts_release(struct config_item *item)
+{
+	struct gsi_opts *opts = to_gsi_opts(item);
+
+	usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations gsi_item_ops = {
+	.release	= gsi_opts_release,
+};
+
+static ssize_t gsi_info_show(struct config_item *item, char *page)
+{
+	struct ipa_usb_xdci_chan_params *ipa_chnl_params;
+	struct ipa_usb_xdci_connect_params *con_pms;
+	struct f_gsi *gsi = to_gsi_opts(item)->gsi;
+	int ret, j = 0;
+	unsigned int len = 0;
+	char *buf;
+
+	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (gsi && atomic_read(&gsi->connected)) {
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+			"Info: Prot_id:%d\n", gsi->prot_id);
+		ipa_chnl_params = &gsi->d_port.ipa_in_channel_params;
+		con_pms = &gsi->d_port.ipa_conn_pms;
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%55s\n",
+		"==================================================");
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10s\n", "Ctrl Name: ", gsi->c_port.name);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Notify State: ",
+				gsi->c_port.notify_state);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Notify Count: ",
+				gsi->c_port.notify_count.counter);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Ctrl Online: ",
+				gsi->c_port.ctrl_online.counter);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Ctrl Open: ",
+				gsi->c_port.is_open);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Ctrl Host to Modem: ",
+				gsi->c_port.host_to_modem);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Ctrl Modem to Host: ",
+				gsi->c_port.modem_to_host);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Ctrl Cpd to Modem: ",
+				gsi->c_port.copied_to_modem);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Ctrl Cpd From Modem: ",
+				gsi->c_port.copied_from_modem);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Ctrl Pkt Drops: ",
+				gsi->c_port.cpkt_drop_cnt);
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%25s\n",
+		"==============");
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Protocol ID: ", gsi->prot_id);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "SM State: ", gsi->d_port.sm_state);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "IN XferRscIndex: ",
+				gsi->d_port.in_xfer_rsc_index);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10d\n", "IN Chnl Hdl: ",
+				gsi->d_port.in_channel_handle);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "IN Chnl Dbl Addr: ",
+				gsi->d_port.in_db_reg_phs_addr_lsb);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "IN TRB Ring Len: ",
+				ipa_chnl_params->xfer_ring_len);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "IN TRB Base Addr: ", (unsigned int)
+			ipa_chnl_params->xfer_ring_base_addr);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "GEVENTCNTLO IN Addr: ",
+			ipa_chnl_params->gevntcount_low_addr);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "DEPCMDLO IN Addr: ",
+		ipa_chnl_params->xfer_scratch.depcmd_low_addr);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "IN LastTRB Addr Off: ",
+		ipa_chnl_params->xfer_scratch.last_trb_addr_iova);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "IN Buffer Size: ",
+		ipa_chnl_params->xfer_scratch.const_buffer_size);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "IN/DL Aggr Size: ",
+		con_pms->teth_prot_params.max_xfer_size_bytes_to_host);
+
+		ipa_chnl_params = &gsi->d_port.ipa_out_channel_params;
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%25s\n",
+		"==============");
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "OUT XferRscIndex: ",
+			gsi->d_port.out_xfer_rsc_index);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10d\n", "OUT Channel Hdl: ",
+			gsi->d_port.out_channel_handle);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "OUT Channel Dbl Addr: ",
+			gsi->d_port.out_db_reg_phs_addr_lsb);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "OUT TRB Ring Len: ",
+			ipa_chnl_params->xfer_ring_len);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "OUT TRB Base Addr: ", (unsigned int)
+			ipa_chnl_params->xfer_ring_base_addr);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "GEVENTCNTLO OUT Addr: ",
+			ipa_chnl_params->gevntcount_low_addr);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "DEPCMDLO OUT Addr: ",
+			ipa_chnl_params->xfer_scratch.depcmd_low_addr);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "OUT LastTRB Addr Off: ",
+		ipa_chnl_params->xfer_scratch.last_trb_addr_iova);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "OUT Buffer Size: ",
+		ipa_chnl_params->xfer_scratch.const_buffer_size);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "OUT/UL Aggr Size: ",
+		con_pms->teth_prot_params.max_xfer_size_bytes_to_dev);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "OUT/UL Packets to dev: ",
+		con_pms->teth_prot_params.max_packet_number_to_dev);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Net_ready_trigger:",
+		gsi->d_port.net_ready_trigger);
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%25s\n",
+		"USB Bus Events");
+		for (j = 0; j < MAXQUEUELEN; j++)
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+				"%d\t", gsi->d_port.evt_q.event[j]);
+		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Eventq head: ",
+				gsi->d_port.evt_q.head);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Eventq tail: ",
+				gsi->d_port.evt_q.tail);
+	}
+
+	if (len > PAGE_SIZE)
+		len = PAGE_SIZE;
+
+	ret = scnprintf(page, len, "%s", buf);
+
+	kfree(buf);
+
+	return ret;
+}
+
+CONFIGFS_ATTR_RO(gsi_, info);
+
+static struct configfs_attribute *gsi_attrs[] = {
+	&gsi_attr_info,
+	NULL,
+};
+
+static struct config_item_type gsi_func_type = {
+	.ct_item_ops	= &gsi_item_ops,
+	.ct_attrs	= gsi_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+static int gsi_set_inst_name(struct usb_function_instance *fi,
+	const char *name)
+{
+	int ret, name_len;
+	struct f_gsi *gsi;
+	struct gsi_opts *opts = container_of(fi, struct gsi_opts, func_inst);
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	ret = name_to_prot_id(name);
+	if (ret < 0) {
+		pr_err("%s: failed to find prot id for %s instance\n",
+		__func__, name);
+		return -EINVAL;
+	}
+
+	gsi = gsi_function_init(ret);
+	if (IS_ERR(gsi))
+		return PTR_ERR(gsi);
+
+	opts->gsi = gsi;
+
+	return 0;
+}
+
+static void gsi_free_inst(struct usb_function_instance *f)
+{
+	struct gsi_opts *opts = container_of(f, struct gsi_opts, func_inst);
+
+	if (opts->gsi->c_port.ctrl_device.fops)
+		misc_deregister(&opts->gsi->c_port.ctrl_device);
+
+	kfree(opts->gsi);
+	kfree(opts);
+}
+
+static struct usb_function_instance *gsi_alloc_inst(void)
+{
+	struct gsi_opts *opts;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		return ERR_PTR(-ENOMEM);
+
+	opts->func_inst.set_inst_name = gsi_set_inst_name;
+	opts->func_inst.free_func_inst = gsi_free_inst;
+	config_group_init_type_name(&opts->func_inst.group, "",
+				    &gsi_func_type);
+
+	return &opts->func_inst;
+}
+
+static struct usb_function *gsi_alloc(struct usb_function_instance *fi)
+{
+	struct gsi_opts *opts;
+	int ret;
+
+	opts = container_of(fi, struct gsi_opts, func_inst);
+
+	ret = gsi_bind_config(opts->gsi);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return &opts->gsi->function;
+}
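+
+/*
+ * Editor's note (a sketch; the configfs mount point may differ):
+ * userspace instantiates this function roughly as
+ *
+ *	mkdir /config/usb_gadget/g1/functions/gsi.rndis
+ *
+ * which invokes gsi_alloc_inst() and gsi_set_inst_name("rndis"), the
+ * latter mapping the name to IPA_USB_RNDIS via name_to_prot_id().
+ * Linking the function into a configuration then calls gsi_alloc().
+ */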
+
+DECLARE_USB_FUNCTION(gsi, gsi_alloc_inst, gsi_alloc);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("GSI function driver");
+
+static int __init fgsi_init(void)
+{
+	ipa_usb_wq = alloc_workqueue("k_ipa_usb",
+				WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (!ipa_usb_wq) {
+		log_event_err("Failed to create workqueue for IPA");
+		return -ENOMEM;
+	}
+
+	return usb_function_register(&gsiusb_func);
+}
+module_init(fgsi_init);
+
+static void __exit fgsi_exit(void)
+{
+	if (ipa_usb_wq)
+		destroy_workqueue(ipa_usb_wq);
+	usb_function_unregister(&gsiusb_func);
+}
+module_exit(fgsi_exit);
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
new file mode 100644
index 0000000..ccc7a48
--- /dev/null
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -0,0 +1,1371 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details
+ */
+
+#ifndef _F_GSI_H
+#define _F_GSI_H
+
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/ipa.h>
+#include <uapi/linux/usb/cdc.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/usb_ctrl_qti.h>
+#include <linux/etherdevice.h>
+#include <linux/debugfs.h>
+#include <linux/ipa_usb.h>
+
+#define GSI_RMNET_CTRL_NAME "rmnet_ctrl"
+#define GSI_MBIM_CTRL_NAME "android_mbim"
+#define GSI_DPL_CTRL_NAME "dpl_ctrl"
+#define GSI_CTRL_NAME_LEN (sizeof(GSI_MBIM_CTRL_NAME)+2)
+#define GSI_MAX_CTRL_PKT_SIZE 4096
+#define GSI_CTRL_DTR (1 << 0)
+
+
+#define GSI_NUM_IN_BUFFERS 15
+#define GSI_IN_BUFF_SIZE 2048
+#define GSI_NUM_OUT_BUFFERS 15
+#define GSI_ECM_NUM_OUT_BUFFERS 31
+#define GSI_OUT_AGGR_SIZE 24576
+
+#define GSI_IN_RNDIS_AGGR_SIZE 9216
+#define GSI_IN_MBIM_AGGR_SIZE 16384
+#define GSI_IN_RMNET_AGGR_SIZE 16384
+#define GSI_ECM_AGGR_SIZE 2048
+
+#define GSI_OUT_MBIM_BUF_LEN 16384
+#define GSI_OUT_RMNET_BUF_LEN 16384
+#define GSI_OUT_ECM_BUF_LEN 2048
+
+#define GSI_IPA_READY_TIMEOUT 5000
+
+#define ETH_ADDR_STR_LEN 14
+
+/* mbim and ecm */
+#define GSI_CTRL_NOTIFY_BUFF_LEN 16
+
+/* default max packets per transfer value */
+#define DEFAULT_MAX_PKT_PER_XFER 15
+
+/* default pkt alignment factor */
+#define DEFAULT_PKT_ALIGNMENT_FACTOR 4
+
+#define GSI_MBIM_IOCTL_MAGIC 'o'
+#define GSI_MBIM_GET_NTB_SIZE  _IOR(GSI_MBIM_IOCTL_MAGIC, 2, u32)
+#define GSI_MBIM_GET_DATAGRAM_COUNT  _IOR(GSI_MBIM_IOCTL_MAGIC, 3, u16)
+#define GSI_MBIM_EP_LOOKUP _IOR(GSI_MBIM_IOCTL_MAGIC, 4, struct ep_info)
+#define GSI_MBIM_DATA_EP_TYPE_HSUSB 0x2
+/* ID for Microsoft OS String */
+#define GSI_MBIM_OS_STRING_ID 0xEE
+
+#define EVT_NONE			0
+#define EVT_UNINITIALIZED		1
+#define EVT_INITIALIZED			2
+#define EVT_CONNECT_IN_PROGRESS		3
+#define EVT_CONNECTED			4
+#define EVT_HOST_NRDY			5
+#define EVT_HOST_READY			6
+#define EVT_DISCONNECTED		7
+#define	EVT_SUSPEND			8
+#define	EVT_IPA_SUSPEND			9
+#define	EVT_RESUMED			10
+
+enum connection_state {
+	STATE_UNINITIALIZED,
+	STATE_INITIALIZED,
+	STATE_CONNECT_IN_PROGRESS,
+	STATE_CONNECTED,
+	STATE_DISCONNECTED,
+	STATE_SUSPEND_IN_PROGRESS,
+	STATE_SUSPENDED
+};
+
+#define MAXQUEUELEN 128
+struct event_queue {
+	u8 event[MAXQUEUELEN];
+	u8 head, tail;
+	spinlock_t q_lock;
+};
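+
+/*
+ * Editor's note: evt_q is a small ring buffer of the EVT_* bus events
+ * above, guarded by q_lock; head and tail both start at MAXQUEUELEN - 1
+ * (see gsi_update_function_bind_params()). A sketch of the presumed
+ * enqueue step (post_event() itself is defined earlier in this file):
+ *
+ *	queue->tail = (queue->tail + 1) % MAXQUEUELEN;
+ *	queue->event[queue->tail] = event;
+ */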
+
+struct gsi_ntb_info {
+	u32	ntb_input_size;
+	u16	ntb_max_datagrams;
+	u16	reserved;
+};
+
+struct gsi_ctrl_pkt {
+	void			*buf;
+	int			len;
+	struct list_head	list;
+};
+
+struct gsi_function_bind_info {
+	struct usb_string *string_defs;
+	int ctrl_str_idx;
+	int data_str_idx;
+	int iad_str_idx;
+	int mac_str_idx;
+	struct usb_interface_descriptor *ctrl_desc;
+	struct usb_interface_descriptor *data_desc;
+	struct usb_interface_assoc_descriptor *iad_desc;
+	struct usb_cdc_ether_desc *cdc_eth_desc;
+	struct usb_cdc_union_desc *union_desc;
+	struct usb_interface_descriptor *data_nop_desc;
+	struct usb_endpoint_descriptor *fs_in_desc;
+	struct usb_endpoint_descriptor *fs_out_desc;
+	struct usb_endpoint_descriptor *fs_notify_desc;
+	struct usb_endpoint_descriptor *hs_in_desc;
+	struct usb_endpoint_descriptor *hs_out_desc;
+	struct usb_endpoint_descriptor *hs_notify_desc;
+	struct usb_endpoint_descriptor *ss_in_desc;
+	struct usb_endpoint_descriptor *ss_out_desc;
+	struct usb_endpoint_descriptor *ss_notify_desc;
+
+	struct usb_descriptor_header **fs_desc_hdr;
+	struct usb_descriptor_header **hs_desc_hdr;
+	struct usb_descriptor_header **ss_desc_hdr;
+	const char *in_epname;
+	const char *out_epname;
+
+	u32 in_req_buf_len;
+	u32 in_req_num_buf;
+	u32 out_req_buf_len;
+	u32 out_req_num_buf;
+	u32 notify_buf_len;
+};
+
+enum gsi_ctrl_notify_state {
+	GSI_CTRL_NOTIFY_NONE,
+	GSI_CTRL_NOTIFY_CONNECT,
+	GSI_CTRL_NOTIFY_SPEED,
+	GSI_CTRL_NOTIFY_OFFLINE,
+	GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE,
+};
+
+struct gsi_ctrl_port {
+	char name[GSI_CTRL_NAME_LEN];
+	struct miscdevice ctrl_device;
+
+	struct usb_ep *notify;
+	struct usb_request *notify_req;
+	int notify_state;
+	atomic_t notify_count;
+
+	atomic_t ctrl_online;
+
+	bool is_open;
+
+	wait_queue_head_t read_wq;
+
+	struct list_head cpkt_req_q;
+	struct list_head cpkt_resp_q;
+	unsigned long cpkts_len;
+
+	spinlock_t lock;
+
+	int ipa_cons_clnt_hdl;
+	int ipa_prod_clnt_hdl;
+
+	unsigned int host_to_modem;
+	unsigned int copied_to_modem;
+	unsigned int copied_from_modem;
+	unsigned int modem_to_host;
+	unsigned int cpkt_drop_cnt;
+};
+
+struct gsi_data_port {
+	struct usb_ep *in_ep;
+	struct usb_ep *out_ep;
+	struct usb_gsi_request in_request;
+	struct usb_gsi_request out_request;
+	struct usb_gadget *gadget;
+	int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *driver_data);
+	struct ipa_usb_teth_params ipa_init_params;
+	int in_channel_handle;
+	int out_channel_handle;
+	u32 in_db_reg_phs_addr_lsb;
+	u32 in_db_reg_phs_addr_msb;
+	u32 out_db_reg_phs_addr_lsb;
+	u32 out_db_reg_phs_addr_msb;
+	u32 in_xfer_rsc_index;
+	u32 out_xfer_rsc_index;
+	u16 in_last_trb_addr;
+	u16 cdc_filter;
+	u32 in_aggr_size;
+	u32 out_aggr_size;
+
+	bool ipa_ready;
+	bool net_ready_trigger;
+	struct gsi_ntb_info ntb_info;
+
+	spinlock_t lock;
+
+	struct work_struct usb_ipa_w;
+	struct workqueue_struct *ipa_usb_wq;
+	enum connection_state sm_state;
+	struct event_queue evt_q;
+	wait_queue_head_t wait_for_ipa_ready;
+
+	/* Track these for debugfs */
+	struct ipa_usb_xdci_chan_params ipa_in_channel_params;
+	struct ipa_usb_xdci_chan_params ipa_out_channel_params;
+	struct ipa_usb_xdci_connect_params ipa_conn_pms;
+};
+
+struct f_gsi {
+	struct usb_function function;
+	enum ipa_usb_teth_prot prot_id;
+	int ctrl_id;
+	int data_id;
+	u32 vendorID;
+	u8 ethaddr[ETH_ADDR_STR_LEN];
+	const char *manufacturer;
+	struct rndis_params *params;
+	atomic_t connected;
+	bool data_interface_up;
+
+	const struct usb_endpoint_descriptor *in_ep_desc_backup;
+	const struct usb_endpoint_descriptor *out_ep_desc_backup;
+
+	struct gsi_data_port d_port;
+	struct gsi_ctrl_port c_port;
+};
+
+static inline struct f_gsi *func_to_gsi(struct usb_function *f)
+{
+	return container_of(f, struct f_gsi, function);
+}
+
+static inline struct f_gsi *d_port_to_gsi(struct gsi_data_port *d)
+{
+	return container_of(d, struct f_gsi, d_port);
+}
+
+static inline struct f_gsi *c_port_to_gsi(struct gsi_ctrl_port *d)
+{
+	return container_of(d, struct f_gsi, c_port);
+}
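+
+/*
+ * Editor's note: these helpers recover the enclosing f_gsi from a
+ * member pointer via container_of(), e.g. in any usb_function callback
+ * (a sketch):
+ *
+ *	static void some_cb(struct usb_function *f)
+ *	{
+ *		struct f_gsi *gsi = func_to_gsi(f);
+ *		...
+ *	}
+ */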
+
+/* for configfs support */
+#define MAX_INST_NAME_LEN	40
+
+struct gsi_opts {
+	struct usb_function_instance func_inst;
+	struct f_gsi *gsi;
+};
+
+static inline struct gsi_opts *to_gsi_opts(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct gsi_opts,
+			    func_inst.group);
+}
+
+static enum ipa_usb_teth_prot name_to_prot_id(const char *name)
+{
+	if (!name)
+		goto error;
+
+	if (!strncasecmp(name, "rndis", strlen("rndis")))
+		return IPA_USB_RNDIS;
+	if (!strncasecmp(name, "ecm", strlen("ecm")))
+		return IPA_USB_ECM;
+	if (!strncasecmp(name, "rmnet", strlen("rmnet")))
+		return IPA_USB_RMNET;
+	if (!strncasecmp(name, "mbim", strlen("mbim")))
+		return IPA_USB_MBIM;
+	if (!strncasecmp(name, "dpl", strlen("dpl")))
+		return IPA_USB_DIAG;
+
+error:
+	return -EINVAL;
+}
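+
+/*
+ * Editor's sketch: instance names are matched case-insensitively by
+ * prefix, so for example:
+ *
+ *	name_to_prot_id("rndis") -> IPA_USB_RNDIS
+ *	name_to_prot_id("DPL")   -> IPA_USB_DIAG
+ *	name_to_prot_id("foo")   -> -EINVAL
+ */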
+
+/* device descriptors */
+
+#define LOG2_STATUS_INTERVAL_MSEC 5
+#define MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
+
+/* rmnet device descriptors */
+
+static struct usb_interface_descriptor rmnet_gsi_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	3,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_gsi_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(MAX_NOTIFY_SIZE),
+	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_fs_in_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_gsi_fs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_gsi_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_fs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_fs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_fs_out_desc,
+	NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_gsi_hs_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(MAX_NOTIFY_SIZE),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_gsi_hs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_gsi_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_hs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_hs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_hs_out_desc,
+	NULL,
+};
+
+/* Super speed support */
+static struct usb_endpoint_descriptor rmnet_gsi_ss_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(MAX_NOTIFY_SIZE),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_gsi_ss_notify_comp_desc = {
+	.bLength =		sizeof(rmnet_gsi_ss_notify_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(MAX_NOTIFY_SIZE),
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_gsi_ss_in_comp_desc = {
+	.bLength =		sizeof(rmnet_gsi_ss_in_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	.bMaxBurst =		2,
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_gsi_ss_out_comp_desc = {
+	.bLength =		sizeof(rmnet_gsi_ss_out_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	.bMaxBurst =		2,
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_descriptor_header *rmnet_gsi_ss_function[] = {
+	(struct usb_descriptor_header *) &rmnet_gsi_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_ss_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_ss_notify_comp_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_ss_in_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_ss_in_comp_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_ss_out_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_ss_out_comp_desc,
+	NULL,
+};
+
+/* String descriptors */
+static struct usb_string rmnet_gsi_string_defs[] = {
+	[0].s = "RmNet",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_gsi_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		rmnet_gsi_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_gsi_strings[] = {
+	&rmnet_gsi_string_table,
+	NULL,
+};
+
+/* rndis device descriptors */
+
+/* interface descriptor: Supports "Wireless" RNDIS; auto-detected by Windows */
+static struct usb_interface_descriptor rndis_gsi_control_intf = {
+	.bLength =		sizeof(rndis_gsi_control_intf),
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	/* status endpoint is optional; this could be patched later */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_WIRELESS_CONTROLLER,
+	.bInterfaceSubClass =   0x01,
+	.bInterfaceProtocol =   0x03,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc rndis_gsi_header_desc = {
+	.bLength =		sizeof(rndis_gsi_header_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor rndis_gsi_call_mgmt_descriptor = {
+	.bLength =		sizeof(rndis_gsi_call_mgmt_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,
+
+	.bmCapabilities =	0x00,
+	.bDataInterface =	0x01,
+};
+
+static struct usb_cdc_acm_descriptor rndis_gsi_acm_descriptor = {
+	.bLength =		sizeof(rndis_gsi_acm_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ACM_TYPE,
+
+	.bmCapabilities =	0x00,
+};
+
+static struct usb_cdc_union_desc rndis_gsi_union_desc = {
+	.bLength =		sizeof(rndis_gsi_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+/* the data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor rndis_gsi_data_intf = {
+	.bLength =		sizeof(rndis_gsi_data_intf),
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+/*  Supports "Wireless" RNDIS; auto-detected by Windows */
+static struct usb_interface_assoc_descriptor
+rndis_gsi_iad_descriptor = {
+	.bLength =		sizeof(rndis_gsi_iad_descriptor),
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+	.bFirstInterface =	0, /* XXX, hardcoded */
+	.bInterfaceCount =	2, /* control + data */
+	.bFunctionClass =	USB_CLASS_WIRELESS_CONTROLLER,
+	.bFunctionSubClass =	0x01,
+	.bFunctionProtocol =	0x03,
+	/* .iFunction = DYNAMIC */
+};
+
+/* full speed support: */
+static struct usb_endpoint_descriptor rndis_gsi_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(MAX_NOTIFY_SIZE),
+	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor rndis_gsi_fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor rndis_gsi_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *gsi_eth_fs_function[] = {
+	(struct usb_descriptor_header *) &rndis_gsi_iad_descriptor,
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_gsi_control_intf,
+	(struct usb_descriptor_header *) &rndis_gsi_header_desc,
+	(struct usb_descriptor_header *) &rndis_gsi_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_gsi_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_gsi_union_desc,
+	(struct usb_descriptor_header *) &rndis_gsi_fs_notify_desc,
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_gsi_data_intf,
+	(struct usb_descriptor_header *) &rndis_gsi_fs_in_desc,
+	(struct usb_descriptor_header *) &rndis_gsi_fs_out_desc,
+	NULL,
+};
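+/*
+ * Each of the speed-specific lists (full, high, and super speed below) is a
+ * NULL-terminated descriptor set; the composite framework copies the one
+ * matching the connection speed when the function is bound (typically via
+ * usb_assign_descriptors()).
+ */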
+
+/* high speed support: */
+static struct usb_endpoint_descriptor rndis_gsi_hs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(MAX_NOTIFY_SIZE),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor rndis_gsi_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rndis_gsi_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *gsi_eth_hs_function[] = {
+	(struct usb_descriptor_header *) &rndis_gsi_iad_descriptor,
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_gsi_control_intf,
+	(struct usb_descriptor_header *) &rndis_gsi_header_desc,
+	(struct usb_descriptor_header *) &rndis_gsi_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_gsi_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_gsi_union_desc,
+	(struct usb_descriptor_header *) &rndis_gsi_hs_notify_desc,
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_gsi_data_intf,
+	(struct usb_descriptor_header *) &rndis_gsi_hs_in_desc,
+	(struct usb_descriptor_header *) &rndis_gsi_hs_out_desc,
+	NULL,
+};
+
+/* super speed support: */
+static struct usb_endpoint_descriptor rndis_gsi_ss_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(MAX_NOTIFY_SIZE),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_gsi_ss_intr_comp_desc = {
+	.bLength =		sizeof(rndis_gsi_ss_intr_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(MAX_NOTIFY_SIZE),
+};
+
+static struct usb_endpoint_descriptor rndis_gsi_ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor rndis_gsi_ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_gsi_ss_bulk_comp_desc = {
+	.bLength =		sizeof(rndis_gsi_ss_bulk_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	.bMaxBurst =		2,
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_descriptor_header *gsi_eth_ss_function[] = {
+	(struct usb_descriptor_header *) &rndis_gsi_iad_descriptor,
+
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_gsi_control_intf,
+	(struct usb_descriptor_header *) &rndis_gsi_header_desc,
+	(struct usb_descriptor_header *) &rndis_gsi_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_gsi_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_gsi_union_desc,
+	(struct usb_descriptor_header *) &rndis_gsi_ss_notify_desc,
+	(struct usb_descriptor_header *) &rndis_gsi_ss_intr_comp_desc,
+
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_gsi_data_intf,
+	(struct usb_descriptor_header *) &rndis_gsi_ss_in_desc,
+	(struct usb_descriptor_header *) &rndis_gsi_ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &rndis_gsi_ss_out_desc,
+	(struct usb_descriptor_header *) &rndis_gsi_ss_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+static struct usb_string rndis_gsi_string_defs[] = {
+	[0].s = "RNDIS Communications Control",
+	[1].s = "RNDIS Ethernet Data",
+	[2].s = "RNDIS",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rndis_gsi_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		rndis_gsi_string_defs,
+};
+
+static struct usb_gadget_strings *rndis_gsi_strings[] = {
+	&rndis_gsi_string_table,
+	NULL,
+};
+
+/* mbim device descriptors */
+#define MBIM_NTB_DEFAULT_IN_SIZE	(0x4000)
+
+static struct usb_cdc_ncm_ntb_parameters mbim_gsi_ntb_parameters = {
+	.wLength = sizeof(mbim_gsi_ntb_parameters),
+	.bmNtbFormatsSupported = cpu_to_le16(USB_CDC_NCM_NTB16_SUPPORTED),
+	.dwNtbInMaxSize = cpu_to_le32(MBIM_NTB_DEFAULT_IN_SIZE),
+	.wNdpInDivisor = cpu_to_le16(4),
+	.wNdpInPayloadRemainder = cpu_to_le16(0),
+	.wNdpInAlignment = cpu_to_le16(4),
+
+	.dwNtbOutMaxSize = cpu_to_le32(0x4000),
+	.wNdpOutDivisor = cpu_to_le16(4),
+	.wNdpOutPayloadRemainder = cpu_to_le16(0),
+	.wNdpOutAlignment = cpu_to_le16(4),
+	.wNtbOutMaxDatagrams = 16,
+};
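+/*
+ * Per the CDC-NCM framing rules that MBIM reuses, the divisor, payload
+ * remainder, and alignment fields above constrain where datagrams and NDP
+ * headers may be placed inside an NTB; the host queries them with the
+ * GET_NTB_PARAMETERS request.
+ */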
+
+/*
+ * Use a wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation.
+ */
+#define NCM_STATUS_BYTECOUNT		16	/* 8 byte header + data */
+
+static struct usb_interface_assoc_descriptor mbim_gsi_iad_desc = {
+	.bLength =		sizeof(mbim_gsi_iad_desc),
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+
+	/* .bFirstInterface =	DYNAMIC, */
+	.bInterfaceCount =	2,	/* control + data */
+	.bFunctionClass =	2,
+	.bFunctionSubClass =	0x0e,
+	.bFunctionProtocol =	0,
+	/* .iFunction =		DYNAMIC */
+};
+
+/* interface descriptor: */
+static struct usb_interface_descriptor mbim_gsi_control_intf = {
+	.bLength =		sizeof(mbim_gsi_control_intf),
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	0x02,
+	.bInterfaceSubClass =	0x0e,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc mbim_gsi_header_desc = {
+	.bLength =		sizeof(mbim_gsi_header_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc mbim_gsi_union_desc = {
+	.bLength =		sizeof(mbim_gsi_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+static struct usb_cdc_mbim_desc mbim_gsi_desc = {
+	.bLength =		sizeof(mbim_gsi_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_MBIM_TYPE,
+
+	.bcdMBIMVersion =	cpu_to_le16(0x0100),
+
+	.wMaxControlMessage =	cpu_to_le16(0x1000),
+	.bNumberFilters =	0x20,
+	.bMaxFilterSize =	0x80,
+	.wMaxSegmentSize =	cpu_to_le16(0xfe0),
+	.bmNetworkCapabilities = 0x20,
+};
+
+static struct usb_cdc_mbim_extended_desc mbim_gsi_ext_mbb_desc = {
+	.bLength =	sizeof(mbim_gsi_ext_mbb_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_MBIM_EXTENDED_TYPE,
+
+	.bcdMBIMExtendedVersion =		cpu_to_le16(0x0100),
+	.bMaxOutstandingCommandMessages =	64,
+	.wMTU =					cpu_to_le16(1500),
+};
+
+/* the default data interface has no endpoints ... */
+static struct usb_interface_descriptor mbim_gsi_data_nop_intf = {
+	.bLength =		sizeof(mbim_gsi_data_nop_intf),
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	0x0a,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0x02,
+	/* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+static struct usb_interface_descriptor mbim_gsi_data_intf = {
+	.bLength =		sizeof(mbim_gsi_data_intf),
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	0x0a,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0x02,
+	/* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor mbim_gsi_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(4 * NCM_STATUS_BYTECOUNT),
+	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor mbim_gsi_fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mbim_gsi_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *mbim_gsi_fs_function[] = {
+	(struct usb_descriptor_header *) &mbim_gsi_iad_desc,
+	/* MBIM control descriptors */
+	(struct usb_descriptor_header *) &mbim_gsi_control_intf,
+	(struct usb_descriptor_header *) &mbim_gsi_header_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_union_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_ext_mbb_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_fs_notify_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &mbim_gsi_data_nop_intf,
+	(struct usb_descriptor_header *) &mbim_gsi_data_intf,
+	(struct usb_descriptor_header *) &mbim_gsi_fs_in_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+static struct usb_endpoint_descriptor mbim_gsi_hs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(4 * NCM_STATUS_BYTECOUNT),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor mbim_gsi_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mbim_gsi_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *mbim_gsi_hs_function[] = {
+	(struct usb_descriptor_header *) &mbim_gsi_iad_desc,
+	/* MBIM control descriptors */
+	(struct usb_descriptor_header *) &mbim_gsi_control_intf,
+	(struct usb_descriptor_header *) &mbim_gsi_header_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_union_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_ext_mbb_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_hs_notify_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &mbim_gsi_data_nop_intf,
+	(struct usb_descriptor_header *) &mbim_gsi_data_intf,
+	(struct usb_descriptor_header *) &mbim_gsi_hs_in_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_hs_out_desc,
+	NULL,
+};
+
+/* Super Speed Support */
+static struct usb_endpoint_descriptor mbim_gsi_ss_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(4 * NCM_STATUS_BYTECOUNT),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor mbim_gsi_ss_notify_comp_desc = {
+	.bLength =		sizeof(mbim_gsi_ss_notify_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =         0, */
+	/* .bmAttributes =      0, */
+	.wBytesPerInterval =	cpu_to_le16(4 * NCM_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor mbim_gsi_ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mbim_gsi_ss_in_comp_desc = {
+	.bLength =              sizeof(mbim_gsi_ss_in_comp_desc),
+	.bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	.bMaxBurst =         2,
+	/* .bmAttributes =      0, */
+};
+
+static struct usb_endpoint_descriptor mbim_gsi_ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mbim_gsi_ss_out_comp_desc = {
+	.bLength =		sizeof(mbim_gsi_ss_out_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	.bMaxBurst =         2,
+	/* .bmAttributes =      0, */
+};
+
+static struct usb_descriptor_header *mbim_gsi_ss_function[] = {
+	(struct usb_descriptor_header *) &mbim_gsi_iad_desc,
+	/* MBIM control descriptors */
+	(struct usb_descriptor_header *) &mbim_gsi_control_intf,
+	(struct usb_descriptor_header *) &mbim_gsi_header_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_union_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_ext_mbb_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_ss_notify_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_ss_notify_comp_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &mbim_gsi_data_nop_intf,
+	(struct usb_descriptor_header *) &mbim_gsi_data_intf,
+	(struct usb_descriptor_header *) &mbim_gsi_ss_in_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_ss_in_comp_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_ss_out_desc,
+	(struct usb_descriptor_header *) &mbim_gsi_ss_out_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+static struct usb_string mbim_gsi_string_defs[] = {
+	[0].s = "MBIM Control",
+	[1].s = "MBIM Data",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings mbim_gsi_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		mbim_gsi_string_defs,
+};
+
+static struct usb_gadget_strings *mbim_gsi_strings[] = {
+	&mbim_gsi_string_table,
+	NULL,
+};
+
+/* Microsoft OS Descriptors */
+
+/*
+ * We specify our own bMS_VendorCode byte which Windows will use
+ * as the bRequest value in subsequent device get requests.
+ */
+#define MBIM_VENDOR_CODE	0xA5
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mbim_gsi_ext_config_desc_header {
+	__le32	dwLength;
+	__le16	bcdVersion;
+	__le16	wIndex;
+	__u8	bCount;
+	__u8	reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mbim_gsi_ext_config_desc_function {
+	__u8	bFirstInterfaceNumber;
+	__u8	bInterfaceCount;
+	__u8	compatibleID[8];
+	__u8	subCompatibleID[8];
+	__u8	reserved[6];
+};
+
+/* Microsoft Extended Configuration Descriptor */
+static struct {
+	struct mbim_gsi_ext_config_desc_header	header;
+	struct mbim_gsi_ext_config_desc_function    function;
+} mbim_gsi_ext_config_desc = {
+	.header = {
+		.dwLength = cpu_to_le32(sizeof(mbim_gsi_ext_config_desc)),
+		.bcdVersion = cpu_to_le16(0x0100),
+		.wIndex = cpu_to_le16(4),
+		.bCount = 1,
+	},
+	.function = {
+		.bFirstInterfaceNumber = 0,
+		.bInterfaceCount = 1,
+		.compatibleID = { 'A', 'L', 'T', 'R', 'C', 'F', 'G' },
+		/* .subCompatibleID = DYNAMIC */
+	},
+};
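+/*
+ * A host that recognizes the "MSFT100" OS string descriptor is expected to
+ * issue a vendor-specific device request with bRequest = MBIM_VENDOR_CODE
+ * and wIndex = 0x0004 to fetch the extended configuration descriptor above;
+ * the handler for that request is part of the function's setup path (not
+ * shown in this hunk).
+ */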
+/* ecm device descriptors */
+#define ECM_QC_LOG2_STATUS_INTERVAL_MSEC	5
+#define ECM_QC_STATUS_BYTECOUNT			16 /* 8 byte header + data */
+
+/* interface descriptor: */
+static struct usb_interface_descriptor ecm_gsi_control_intf = {
+	.bLength =		sizeof(ecm_gsi_control_intf),
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	/* status endpoint is optional; this could be patched later */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_ETHERNET,
+	.bInterfaceProtocol =	USB_CDC_PROTO_NONE,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc ecm_gsi_header_desc = {
+	.bLength =		sizeof(ecm_gsi_header_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc ecm_gsi_union_desc = {
+	.bLength =		sizeof(ecm_gsi_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+static struct usb_cdc_ether_desc ecm_gsi_desc = {
+	.bLength =		sizeof(ecm_gsi_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ETHERNET_TYPE,
+
+	/* this descriptor actually adds value, surprise! */
+	/* .iMACAddress = DYNAMIC */
+	.bmEthernetStatistics =	cpu_to_le32(0), /* no statistics */
+	.wMaxSegmentSize =	cpu_to_le16(ETH_FRAME_LEN),
+	.wNumberMCFilters =	cpu_to_le16(0),
+	.bNumberPowerFilters =	0,
+};
+
+/* the default data interface has no endpoints ... */
+
+static struct usb_interface_descriptor ecm_gsi_data_nop_intf = {
+	.bLength =		sizeof(ecm_gsi_data_nop_intf),
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	1,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor ecm_gsi_data_intf = {
+	.bLength =		sizeof(ecm_gsi_data_intf),
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	1,
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+static struct usb_endpoint_descriptor ecm_gsi_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *ecm_gsi_fs_function[] = {
+	/* CDC ECM control descriptors */
+	(struct usb_descriptor_header *) &ecm_gsi_control_intf,
+	(struct usb_descriptor_header *) &ecm_gsi_header_desc,
+	(struct usb_descriptor_header *) &ecm_gsi_union_desc,
+	(struct usb_descriptor_header *) &ecm_gsi_desc,
+	/* NOTE: status endpoint might need to be removed */
+	(struct usb_descriptor_header *) &ecm_gsi_fs_notify_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ecm_gsi_data_nop_intf,
+	(struct usb_descriptor_header *) &ecm_gsi_data_intf,
+	(struct usb_descriptor_header *) &ecm_gsi_fs_in_desc,
+	(struct usb_descriptor_header *) &ecm_gsi_fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+static struct usb_endpoint_descriptor ecm_gsi_hs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor ecm_gsi_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *ecm_gsi_hs_function[] = {
+	/* CDC ECM control descriptors */
+	(struct usb_descriptor_header *) &ecm_gsi_control_intf,
+	(struct usb_descriptor_header *) &ecm_gsi_header_desc,
+	(struct usb_descriptor_header *) &ecm_gsi_union_desc,
+	(struct usb_descriptor_header *) &ecm_gsi_desc,
+	/* NOTE: status endpoint might need to be removed */
+	(struct usb_descriptor_header *) &ecm_gsi_hs_notify_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ecm_gsi_data_nop_intf,
+	(struct usb_descriptor_header *) &ecm_gsi_data_intf,
+	(struct usb_descriptor_header *) &ecm_gsi_hs_in_desc,
+	(struct usb_descriptor_header *) &ecm_gsi_hs_out_desc,
+	NULL,
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_ss_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+	.bInterval =		ECM_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ecm_gsi_ss_notify_comp_desc = {
+	.bLength =		sizeof(ecm_gsi_ss_notify_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =         0, */
+	/* .bmAttributes =      0, */
+	.wBytesPerInterval =	cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ecm_gsi_ss_in_comp_desc = {
+	.bLength =		sizeof(ecm_gsi_ss_in_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	.bMaxBurst =         2,
+	/* .bmAttributes =      0, */
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ecm_gsi_ss_out_comp_desc = {
+	.bLength =		sizeof(ecm_gsi_ss_out_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	.bMaxBurst =         2,
+	/* .bmAttributes =      0, */
+};
+
+static struct usb_descriptor_header *ecm_gsi_ss_function[] = {
+	/* CDC ECM control descriptors */
+	(struct usb_descriptor_header *) &ecm_gsi_control_intf,
+	(struct usb_descriptor_header *) &ecm_gsi_header_desc,
+	(struct usb_descriptor_header *) &ecm_gsi_union_desc,
+	(struct usb_descriptor_header *) &ecm_gsi_desc,
+	/* NOTE: status endpoint might need to be removed */
+	(struct usb_descriptor_header *) &ecm_gsi_ss_notify_desc,
+	(struct usb_descriptor_header *) &ecm_gsi_ss_notify_comp_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ecm_gsi_data_nop_intf,
+	(struct usb_descriptor_header *) &ecm_gsi_data_intf,
+	(struct usb_descriptor_header *) &ecm_gsi_ss_in_desc,
+	(struct usb_descriptor_header *) &ecm_gsi_ss_in_comp_desc,
+	(struct usb_descriptor_header *) &ecm_gsi_ss_out_desc,
+	(struct usb_descriptor_header *) &ecm_gsi_ss_out_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+static struct usb_string ecm_gsi_string_defs[] = {
+	[0].s = "CDC Ethernet Control Model (ECM)",
+	[1].s = NULL /* DYNAMIC */,
+	[2].s = "CDC Ethernet Data",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings ecm_gsi_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		ecm_gsi_string_defs,
+};
+
+static struct usb_gadget_strings *ecm_gsi_strings[] = {
+	&ecm_gsi_string_table,
+	NULL,
+};
+
+/* qdss device descriptor */
+
+static struct usb_interface_descriptor qdss_gsi_data_intf_desc = {
+	.bLength            =	sizeof(qdss_gsi_data_intf_desc),
+	.bDescriptorType    =	USB_DT_INTERFACE,
+	.bAlternateSetting  =   0,
+	.bNumEndpoints      =	1,
+	.bInterfaceClass    =	0xff,
+	.bInterfaceSubClass =	0xff,
+	.bInterfaceProtocol =	0xff,
+};
+
+static struct usb_endpoint_descriptor qdss_gsi_hs_data_desc = {
+	.bLength              =	 USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType      =	 USB_DT_ENDPOINT,
+	.bEndpointAddress     =	 USB_DIR_IN,
+	.bmAttributes         =	 USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize       =	 cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor qdss_gsi_ss_data_desc = {
+	.bLength              =	 USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType      =	 USB_DT_ENDPOINT,
+	.bEndpointAddress     =	 USB_DIR_IN,
+	.bmAttributes         =  USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize       =	 cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor qdss_gsi_data_ep_comp_desc = {
+	.bLength              =	 sizeof(qdss_gsi_data_ep_comp_desc),
+	.bDescriptorType      =	 USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst            =	 1,
+	.bmAttributes         =	 0,
+	.wBytesPerInterval    =	 0,
+};
+
+static struct usb_descriptor_header *qdss_gsi_hs_data_only_desc[] = {
+	(struct usb_descriptor_header *) &qdss_gsi_data_intf_desc,
+	(struct usb_descriptor_header *) &qdss_gsi_hs_data_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *qdss_gsi_ss_data_only_desc[] = {
+	(struct usb_descriptor_header *) &qdss_gsi_data_intf_desc,
+	(struct usb_descriptor_header *) &qdss_gsi_ss_data_desc,
+	(struct usb_descriptor_header *) &qdss_gsi_data_ep_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+static struct usb_string qdss_gsi_string_defs[] = {
+	[0].s = "QDSS DATA",
+	{}, /* end of list */
+};
+
+static struct usb_gadget_strings qdss_gsi_string_table = {
+	.language =		0x0409,
+	.strings =		qdss_gsi_string_defs,
+};
+
+static struct usb_gadget_strings *qdss_gsi_strings[] = {
+	&qdss_gsi_string_table,
+	NULL,
+};
+#endif
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index 8002e4e..3802787 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -26,6 +26,8 @@
 #include <linux/err.h>
 #include <linux/interrupt.h>
 
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
 #include <linux/types.h>
 #include <linux/file.h>
 #include <linux/device.h>
@@ -73,6 +75,8 @@
 #define MTP_RESPONSE_DEVICE_BUSY    0x2019
 #define DRIVER_NAME "mtp"
 
+#define MAX_ITERATION		100	/* depth of the debugfs perf-stats ring buffer */
+
 unsigned int mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
 module_param(mtp_rx_req_len, uint, 0644);
 
@@ -122,6 +126,14 @@
 	uint16_t xfer_command;
 	uint32_t xfer_transaction_id;
 	int xfer_result;
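+	/*
+	 * Ring buffer of the most recent VFS read/write sizes and latencies
+	 * (in usec), exported through debugfs for MTP throughput analysis.
+	 */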
+	struct {
+		unsigned long vfs_rbytes;
+		unsigned long vfs_wbytes;
+		unsigned int vfs_rtime;
+		unsigned int vfs_wtime;
+	} perf[MAX_ITERATION];
+	unsigned int dbg_read_index;
+	unsigned int dbg_write_index;
 };
 
 static struct usb_interface_descriptor mtp_interface_desc = {
@@ -544,7 +556,7 @@
 		if (!req) {
 			if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
 				goto fail;
-			for (; i > 0; i--)
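+			/* free only the rx requests allocated so far */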
+			for (--i; i >= 0; i--)
 				mtp_request_free(dev->rx_req[i], dev->ep_out);
 			mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
 			goto retry_rx_alloc;
@@ -578,10 +590,6 @@
 
 	DBG(cdev, "mtp_read(%zu)\n", count);
 
-	len = ALIGN(count, dev->ep_out->maxpacket);
-	if (len > mtp_rx_req_len)
-		return -EINVAL;
-
 	/* we will block until we're online */
 	DBG(cdev, "mtp_read: waiting for online state\n");
 	ret = wait_event_interruptible(dev->read_wq,
@@ -590,6 +598,11 @@
 		r = ret;
 		goto done;
 	}
+
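+	/*
+	 * ep_out->maxpacket is only valid once the function is configured,
+	 * so this length check has to happen after the online wait above.
+	 */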
+	len = ALIGN(count, dev->ep_out->maxpacket);
+	if (len > mtp_rx_req_len)
+		return -EINVAL;
+
 	spin_lock_irq(&dev->lock);
 	if (dev->state == STATE_CANCELED) {
 		/* report cancelation to userspace */
@@ -614,7 +627,17 @@
 	}
 
 	/* wait for a request to complete */
-	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+	ret = wait_event_interruptible(dev->read_wq,
+				dev->rx_done || dev->state != STATE_BUSY);
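+	/*
+	 * A cancel may race with the completion: if the request is still
+	 * queued, dequeue it before reporting -ECANCELED.
+	 */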
+	if (dev->state == STATE_CANCELED) {
+		r = -ECANCELED;
+		if (!dev->rx_done)
+			usb_ep_dequeue(dev->ep_out, req);
+		spin_lock_irq(&dev->lock);
+		dev->state = STATE_CANCELED;
+		spin_unlock_irq(&dev->lock);
+		goto done;
+	}
 	if (ret < 0) {
 		r = ret;
 		usb_ep_dequeue(dev->ep_out, req);
@@ -751,6 +774,7 @@
 	int xfer, ret, hdr_size;
 	int r = 0;
 	int sendZLP = 0;
+	ktime_t start_time;
 
 	/* read our parameters */
 	smp_rmb();
@@ -806,21 +830,27 @@
 			header->transaction_id =
 					__cpu_to_le32(dev->xfer_transaction_id);
 		}
-
+		start_time = ktime_get();
 		ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
 								&offset);
 		if (ret < 0) {
 			r = ret;
 			break;
 		}
+
 		xfer = ret + hdr_size;
+		dev->perf[dev->dbg_read_index].vfs_rtime =
+			ktime_to_us(ktime_sub(ktime_get(), start_time));
+		dev->perf[dev->dbg_read_index].vfs_rbytes = xfer;
+		dev->dbg_read_index = (dev->dbg_read_index + 1) % MAX_ITERATION;
 		hdr_size = 0;
 
 		req->length = xfer;
 		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
 		if (ret < 0) {
 			DBG(cdev, "send_file_work: xfer error %d\n", ret);
-			dev->state = STATE_ERROR;
+			if (dev->state != STATE_OFFLINE)
+				dev->state = STATE_ERROR;
 			r = -EIO;
 			break;
 		}
@@ -852,6 +882,7 @@
 	int64_t count;
 	int ret, cur_buf = 0;
 	int r = 0;
+	ktime_t start_time;
 
 	/* read our parameters */
 	smp_rmb();
@@ -877,21 +908,29 @@
 			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
 			if (ret < 0) {
 				r = -EIO;
-				dev->state = STATE_ERROR;
+				if (dev->state != STATE_OFFLINE)
+					dev->state = STATE_ERROR;
 				break;
 			}
 		}
 
 		if (write_req) {
 			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
+			start_time = ktime_get();
 			ret = vfs_write(filp, write_req->buf, write_req->actual,
 				&offset);
 			DBG(cdev, "vfs_write %d\n", ret);
 			if (ret != write_req->actual) {
 				r = -EIO;
-				dev->state = STATE_ERROR;
+				if (dev->state != STATE_OFFLINE)
+					dev->state = STATE_ERROR;
 				break;
 			}
+			dev->perf[dev->dbg_write_index].vfs_wtime =
+				ktime_to_us(ktime_sub(ktime_get(), start_time));
+			dev->perf[dev->dbg_write_index].vfs_wbytes = ret;
+			dev->dbg_write_index =
+				(dev->dbg_write_index + 1) % MAX_ITERATION;
 			write_req = NULL;
 		}
 
@@ -899,8 +938,12 @@
 			/* wait for our last read to complete */
 			ret = wait_event_interruptible(dev->read_wq,
 				dev->rx_done || dev->state != STATE_BUSY);
-			if (dev->state == STATE_CANCELED) {
-				r = -ECANCELED;
+			if (dev->state == STATE_CANCELED
+					|| dev->state == STATE_OFFLINE) {
+				if (dev->state == STATE_OFFLINE)
+					r = -EIO;
+				else
+					r = -ECANCELED;
 				if (!dev->rx_done)
 					usb_ep_dequeue(dev->ep_out, read_req);
 				break;
@@ -966,95 +1009,71 @@
 	return ret;
 }
 
-static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+static long mtp_send_receive_ioctl(struct file *fp, unsigned int code,
+	struct mtp_file_range *mfr)
 {
 	struct mtp_dev *dev = fp->private_data;
 	struct file *filp = NULL;
+	struct work_struct *work;
 	int ret = -EINVAL;
 
 	if (mtp_lock(&dev->ioctl_excl))
 		return -EBUSY;
 
-	switch (code) {
-	case MTP_SEND_FILE:
-	case MTP_RECEIVE_FILE:
-	case MTP_SEND_FILE_WITH_HEADER:
-	{
-		struct mtp_file_range	mfr;
-		struct work_struct *work;
-
-		spin_lock_irq(&dev->lock);
-		if (dev->state == STATE_CANCELED) {
-			/* report cancelation to userspace */
-			dev->state = STATE_READY;
-			spin_unlock_irq(&dev->lock);
-			ret = -ECANCELED;
-			goto out;
-		}
-		if (dev->state == STATE_OFFLINE) {
-			spin_unlock_irq(&dev->lock);
-			ret = -ENODEV;
-			goto out;
-		}
-		dev->state = STATE_BUSY;
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED) {
+		/* report cancellation to userspace */
+		dev->state = STATE_READY;
 		spin_unlock_irq(&dev->lock);
-
-		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
-			ret = -EFAULT;
-			goto fail;
-		}
-		/* hold a reference to the file while we are working with it */
-		filp = fget(mfr.fd);
-		if (!filp) {
-			ret = -EBADF;
-			goto fail;
-		}
-
-		/* write the parameters */
-		dev->xfer_file = filp;
-		dev->xfer_file_offset = mfr.offset;
-		dev->xfer_file_length = mfr.length;
-		smp_wmb();
-
-		if (code == MTP_SEND_FILE_WITH_HEADER) {
-			work = &dev->send_file_work;
-			dev->xfer_send_header = 1;
-			dev->xfer_command = mfr.command;
-			dev->xfer_transaction_id = mfr.transaction_id;
-		} else if (code == MTP_SEND_FILE) {
-			work = &dev->send_file_work;
-			dev->xfer_send_header = 0;
-		} else {
-			work = &dev->receive_file_work;
-		}
-
-		/* We do the file transfer on a work queue so it will run
-		 * in kernel context, which is necessary for vfs_read and
-		 * vfs_write to use our buffers in the kernel address space.
-		 */
-		queue_work(dev->wq, work);
-		/* wait for operation to complete */
-		flush_workqueue(dev->wq);
-		fput(filp);
-
-		/* read the result */
-		smp_rmb();
-		ret = dev->xfer_result;
-		break;
-	}
-	case MTP_SEND_EVENT:
-	{
-		struct mtp_event	event;
-		/* return here so we don't change dev->state below,
-		 * which would interfere with bulk transfer state.
-		 */
-		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
-			ret = -EFAULT;
-		else
-			ret = mtp_send_event(dev, &event);
+		ret = -ECANCELED;
 		goto out;
 	}
+	if (dev->state == STATE_OFFLINE) {
+		spin_unlock_irq(&dev->lock);
+		ret = -ENODEV;
+		goto out;
 	}
+	dev->state = STATE_BUSY;
+	spin_unlock_irq(&dev->lock);
+
+	/* hold a reference to the file while we are working with it */
+	filp = fget(mfr->fd);
+	if (!filp) {
+		ret = -EBADF;
+		goto fail;
+	}
+
+	/* write the parameters */
+	dev->xfer_file = filp;
+	dev->xfer_file_offset = mfr->offset;
+	dev->xfer_file_length = mfr->length;
+	/* make sure write is done before parameters are read */
+	smp_wmb();
+
+	if (code == MTP_SEND_FILE_WITH_HEADER) {
+		work = &dev->send_file_work;
+		dev->xfer_send_header = 1;
+		dev->xfer_command = mfr->command;
+		dev->xfer_transaction_id = mfr->transaction_id;
+	} else if (code == MTP_SEND_FILE) {
+		work = &dev->send_file_work;
+		dev->xfer_send_header = 0;
+	} else {
+		work = &dev->receive_file_work;
+	}
+
+	/* We do the file transfer on a work queue so it will run
+	 * in kernel context, which is necessary for vfs_read and
+	 * vfs_write to use our buffers in the kernel address space.
+	 */
+	queue_work(dev->wq, work);
+	/* wait for operation to complete */
+	flush_workqueue(dev->wq);
+	fput(filp);
+
+	/* read the result */
+	smp_rmb();
+	ret = dev->xfer_result;
 
 fail:
 	spin_lock_irq(&dev->lock);
@@ -1069,6 +1088,113 @@
 	return ret;
 }
 
+static long mtp_ioctl(struct file *fp, unsigned int code, unsigned long value)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct mtp_file_range	mfr;
+	struct mtp_event	event;
+	int ret = -EINVAL;
+
+	switch (code) {
+	case MTP_SEND_FILE:
+	case MTP_RECEIVE_FILE:
+	case MTP_SEND_FILE_WITH_HEADER:
+		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		ret = mtp_send_receive_ioctl(fp, code, &mfr);
+		break;
+	case MTP_SEND_EVENT:
+		if (mtp_lock(&dev->ioctl_excl))
+			return -EBUSY;
+		/* return here so we don't change dev->state below,
+		 * which would interfere with bulk transfer state.
+		 */
+		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
+			ret = -EFAULT;
+		else
+			ret = mtp_send_event(dev, &event);
+		mtp_unlock(&dev->ioctl_excl);
+		break;
+	default:
+		DBG(dev->cdev, "unknown ioctl code: %d\n", code);
+	}
+fail:
+	return ret;
+}
+
+/*
+ * 32-bit userspace calling into a 64-bit kernel: handle the ioctl code
+ * and the userspace pointer.
+ */
+#ifdef CONFIG_COMPAT
+static long compat_mtp_ioctl(struct file *fp, unsigned int code,
+	unsigned long value)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct mtp_file_range	mfr;
+	struct __compat_mtp_file_range	cmfr;
+	struct mtp_event	event;
+	struct __compat_mtp_event cevent;
+	unsigned int cmd;
+	bool send_file = false;
+	int ret = -EINVAL;
+
+	switch (code) {
+	case COMPAT_MTP_SEND_FILE:
+		cmd = MTP_SEND_FILE;
+		send_file = true;
+		break;
+	case COMPAT_MTP_RECEIVE_FILE:
+		cmd = MTP_RECEIVE_FILE;
+		send_file = true;
+		break;
+	case COMPAT_MTP_SEND_FILE_WITH_HEADER:
+		cmd = MTP_SEND_FILE_WITH_HEADER;
+		send_file = true;
+		break;
+	case COMPAT_MTP_SEND_EVENT:
+		cmd = MTP_SEND_EVENT;
+		break;
+	default:
+		DBG(dev->cdev, "unknown compat_ioctl code: %d\n", code);
+		ret = -ENOIOCTLCMD;
+		goto fail;
+	}
+
+	if (send_file) {
+		if (copy_from_user(&cmfr, (void __user *)value, sizeof(cmfr))) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		mfr.fd = cmfr.fd;
+		mfr.offset = cmfr.offset;
+		mfr.length = cmfr.length;
+		mfr.command = cmfr.command;
+		mfr.transaction_id = cmfr.transaction_id;
+		ret = mtp_send_receive_ioctl(fp, cmd, &mfr);
+	} else {
+		if (mtp_lock(&dev->ioctl_excl))
+			return -EBUSY;
+		/* return here so we don't change dev->state below,
+		 * which would interfere with bulk transfer state.
+		 */
+		if (copy_from_user(&cevent, (void __user *)value,
+			sizeof(cevent))) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		event.length = cevent.length;
+		event.data = compat_ptr(cevent.data);
+		ret = mtp_send_event(dev, &event);
+		mtp_unlock(&dev->ioctl_excl);
+	}
+fail:
+	return ret;
+}
+#endif
+
 static int mtp_open(struct inode *ip, struct file *fp)
 {
 	printk(KERN_INFO "mtp_open\n");
@@ -1097,6 +1223,9 @@
 	.read = mtp_read,
 	.write = mtp_write,
 	.unlocked_ioctl = mtp_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = compat_mtp_ioctl,
+#endif
 	.open = mtp_open,
 	.release = mtp_release,
 };
@@ -1349,6 +1478,120 @@
 	VDBG(cdev, "%s disabled\n", dev->function.name);
 }
 
+static int debug_mtp_read_stats(struct seq_file *s, void *unused)
+{
+	struct mtp_dev *dev = _mtp_dev;
+	int i;
+	unsigned long flags;
+	unsigned int min, max = 0, sum = 0, iteration = 0;
+
+	seq_puts(s, "\n=======================\n");
+	seq_puts(s, "USB MTP OUT related VFS write stats:\n");
+	seq_puts(s, "\n=======================\n");
+	spin_lock_irqsave(&dev->lock, flags);
+	min = dev->perf[0].vfs_wtime;
+	for (i = 0; i < MAX_ITERATION; i++) {
+		seq_printf(s, "vfs write: bytes:%ld\t\t time:%d\n",
+				dev->perf[i].vfs_wbytes,
+				dev->perf[i].vfs_wtime);
+		if (dev->perf[i].vfs_wbytes == mtp_rx_req_len) {
+			sum += dev->perf[i].vfs_wtime;
+			if (min > dev->perf[i].vfs_wtime)
+				min = dev->perf[i].vfs_wtime;
+			if (max < dev->perf[i].vfs_wtime)
+				max = dev->perf[i].vfs_wtime;
+			iteration++;
+		}
+	}
+
+	seq_printf(s, "vfs_write(time in usec) min:%d\t max:%d\t avg:%d\n",
+						min, max, sum / iteration);
+	min = max = sum = iteration = 0;
+	seq_puts(s, "\n=======================\n");
+	seq_puts(s, "USB MTP IN related VFS read stats:\n");
+	seq_puts(s, "\n=======================\n");
+
+	min = dev->perf[0].vfs_rtime;
+	for (i = 0; i < MAX_ITERATION; i++) {
+		seq_printf(s, "vfs read: bytes:%ld\t\t time:%d\n",
+				dev->perf[i].vfs_rbytes,
+				dev->perf[i].vfs_rtime);
+		if (dev->perf[i].vfs_rbytes == mtp_tx_req_len) {
+			sum += dev->perf[i].vfs_rtime;
+			if (min > dev->perf[i].vfs_rtime)
+				min = dev->perf[i].vfs_rtime;
+			if (max < dev->perf[i].vfs_rtime)
+				max = dev->perf[i].vfs_rtime;
+			iteration++;
+		}
+	}
+
+	seq_printf(s, "vfs_read(time in usec) min:%d\t max:%d\t avg:%d\n",
+						min, max, sum / iteration);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return 0;
+}
+
+static ssize_t debug_mtp_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	int clear_stats;
+	unsigned long flags;
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (buf == NULL) {
+		pr_err("[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (kstrtoint_from_user(buf, count, 0, &clear_stats) || clear_stats != 0) {
+		pr_err("Wrong value. To clear stats, enter value as 0.\n");
+		goto done;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	memset(&dev->perf[0], 0, MAX_ITERATION * sizeof(dev->perf[0]));
+	dev->dbg_read_index = 0;
+	dev->dbg_write_index = 0;
+	spin_unlock_irqrestore(&dev->lock, flags);
+done:
+	return count;
+}
+
+static int debug_mtp_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debug_mtp_read_stats, inode->i_private);
+}
+
+static const struct file_operations debug_mtp_ops = {
+	.open = debug_mtp_open,
+	.read = seq_read,
+	.write = debug_mtp_reset_stats,
+	.release = single_release,
+	.llseek = seq_lseek,
+};
+
+static struct dentry *dent_mtp;
+static void mtp_debugfs_init(void)
+{
+	struct dentry *dent_mtp_status;
+
+	dent_mtp = debugfs_create_dir("usb_mtp", NULL);
+	if (!dent_mtp || IS_ERR(dent_mtp))
+		return;
+
+	dent_mtp_status = debugfs_create_file("status", 0644, dent_mtp,
+						NULL, &debug_mtp_ops);
+	if (!dent_mtp_status || IS_ERR(dent_mtp_status)) {
+		debugfs_remove(dent_mtp);
+		dent_mtp = NULL;
+		return;
+	}
+}
+
+static void mtp_debugfs_remove(void)
+{
+	debugfs_remove_recursive(dent_mtp);
+}
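+/*
+ * Typical usage, assuming debugfs is mounted at /sys/kernel/debug:
+ *   cat /sys/kernel/debug/usb_mtp/status       - dump per-transfer VFS timings
+ *   echo 0 > /sys/kernel/debug/usb_mtp/status  - reset the collected stats
+ */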
+
 static int __mtp_setup(struct mtp_instance *fi_mtp)
 {
 	struct mtp_dev *dev;
@@ -1385,6 +1628,7 @@
 	if (ret)
 		goto err2;
 
+	mtp_debugfs_init();
 	return 0;
 
 err2:
@@ -1409,6 +1653,7 @@
 	if (!dev)
 		return;
 
+	mtp_debugfs_remove();
 	misc_deregister(&mtp_device);
 	destroy_workqueue(dev->wq);
 	_mtp_dev = NULL;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index d0fc40b..579fd0a 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -502,6 +502,14 @@
 #define DMI_MATCH(a, b)	{ .slot = a, .substr = b }
 #define DMI_EXACT_MATCH(a, b)	{ .slot = a, .substr = b, .exact_match = 1 }
 
+#define SLIMBUS_NAME_SIZE	32
+#define SLIMBUS_MODULE_PREFIX "slim:"
+
+struct slim_device_id {
+	char name[SLIMBUS_NAME_SIZE];
+	kernel_ulong_t driver_data;	/* Data private to the driver */
+};
+
 #define PLATFORM_NAME_SIZE	20
 #define PLATFORM_MODULE_PREFIX	"platform:"
 
diff --git a/include/linux/of_slimbus.h b/include/linux/of_slimbus.h
new file mode 100644
index 0000000..f686cdc
--- /dev/null
+++ b/include/linux/of_slimbus.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_OF_SLIMBUS_H
+#define _LINUX_OF_SLIMBUS_H
+
+#include <linux/slimbus/slimbus.h>
+#include <linux/of_irq.h>
+
+#ifdef CONFIG_OF_SLIMBUS
+/*
+ * of_register_slim_devices() - Register devices in the SLIMbus Device Tree
+ * @ctrl: slim_controller which devices should be registered to.
+ *
+ * This routine scans the SLIMbus Device Tree, allocating resources and
+ * creating slim_devices according to the SLIMbus Device Tree
+ * hierarchy. Details of this hierarchy can be found in
+ * Documentation/devicetree/bindings/slimbus. This routine is normally
+ * called from the probe routine of the driver registering as a
+ * slim_controller.
+ */
+extern int of_register_slim_devices(struct slim_controller *ctrl);
+#else
+static inline int of_register_slim_devices(struct slim_controller *ctrl)
+{
+	return 0;
+}
+#endif /* CONFIG_OF_SLIMBUS */
+
+#endif /* _LINUX_OF_SLIMBUS_H */
diff --git a/include/linux/slimbus/slimbus.h b/include/linux/slimbus/slimbus.h
new file mode 100644
index 0000000..f1b1a7f
--- /dev/null
+++ b/include/linux/slimbus/slimbus.h
@@ -0,0 +1,1213 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_SLIMBUS_H
+#define _LINUX_SLIMBUS_H
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/mod_devicetable.h>
+
+/* Interfaces between SLIMbus manager drivers and SLIMbus infrastructure. */
+
+extern struct bus_type slimbus_type;
+
+/* Standard values per SLIMbus spec needed by controllers and devices */
+#define SLIM_CL_PER_SUPERFRAME		6144
+#define SLIM_CL_PER_SUPERFRAME_DIV8	(SLIM_CL_PER_SUPERFRAME >> 3)
+#define SLIM_MAX_TXNS			256
+#define SLIM_MAX_CLK_GEAR		10
+#define SLIM_MIN_CLK_GEAR		1
+#define SLIM_CL_PER_SL			4
+#define SLIM_SL_PER_SUPERFRAME		(SLIM_CL_PER_SUPERFRAME >> 2)
+#define SLIM_FRM_SLOTS_PER_SUPERFRAME	16
+#define SLIM_GDE_SLOTS_PER_SUPERFRAME	2
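+/*
+ * Each slot is 4 cells wide (SLIM_CL_PER_SL), so a 6144-cell superframe
+ * carries 6144 / 4 = 1536 slots (SLIM_SL_PER_SUPERFRAME).
+ */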
+
+/*
+ * SLIMbus message types. Related to interpretation of message code.
+ * Values are defined in Table 32 (slimbus spec 1.01.01)
+ */
+#define SLIM_MSG_MT_CORE			0x0
+#define SLIM_MSG_MT_DEST_REFERRED_CLASS		0x1
+#define SLIM_MSG_MT_DEST_REFERRED_USER		0x2
+#define SLIM_MSG_MT_SRC_REFERRED_CLASS		0x5
+#define SLIM_MSG_MT_SRC_REFERRED_USER		0x6
+
+/*
+ * SLIMbus core type Message Codes.
+ * Values are defined in Table 65 (slimbus spec 1.01.01)
+ */
+/* Device management messages */
+#define SLIM_MSG_MC_REPORT_PRESENT               0x1
+#define SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS       0x2
+#define SLIM_MSG_MC_RESET_DEVICE                 0x4
+#define SLIM_MSG_MC_CHANGE_LOGICAL_ADDRESS       0x8
+#define SLIM_MSG_MC_CHANGE_ARBITRATION_PRIORITY  0x9
+#define SLIM_MSG_MC_REQUEST_SELF_ANNOUNCEMENT    0xC
+#define SLIM_MSG_MC_REPORT_ABSENT                0xF
+
+/* Data channel management messages */
+#define SLIM_MSG_MC_CONNECT_SOURCE               0x10
+#define SLIM_MSG_MC_CONNECT_SINK                 0x11
+#define SLIM_MSG_MC_DISCONNECT_PORT              0x14
+#define SLIM_MSG_MC_CHANGE_CONTENT               0x18
+
+/* Information management messages */
+#define SLIM_MSG_MC_REQUEST_INFORMATION          0x20
+#define SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION    0x21
+#define SLIM_MSG_MC_REPLY_INFORMATION            0x24
+#define SLIM_MSG_MC_CLEAR_INFORMATION            0x28
+#define SLIM_MSG_MC_REPORT_INFORMATION           0x29
+
+/* Reconfiguration messages */
+#define SLIM_MSG_MC_BEGIN_RECONFIGURATION        0x40
+#define SLIM_MSG_MC_NEXT_ACTIVE_FRAMER           0x44
+#define SLIM_MSG_MC_NEXT_SUBFRAME_MODE           0x45
+#define SLIM_MSG_MC_NEXT_CLOCK_GEAR              0x46
+#define SLIM_MSG_MC_NEXT_ROOT_FREQUENCY          0x47
+#define SLIM_MSG_MC_NEXT_PAUSE_CLOCK             0x4A
+#define SLIM_MSG_MC_NEXT_RESET_BUS               0x4B
+#define SLIM_MSG_MC_NEXT_SHUTDOWN_BUS            0x4C
+#define SLIM_MSG_MC_NEXT_DEFINE_CHANNEL          0x50
+#define SLIM_MSG_MC_NEXT_DEFINE_CONTENT          0x51
+#define SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL        0x54
+#define SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL      0x55
+#define SLIM_MSG_MC_NEXT_REMOVE_CHANNEL          0x58
+#define SLIM_MSG_MC_RECONFIGURE_NOW              0x5F
+
+/*
+ * Clock pause flag to indicate that the reconfig message
+ * corresponds to clock pause sequence
+ */
+#define SLIM_MSG_CLK_PAUSE_SEQ_FLG		(1U << 8)
+
+/* Value management messages */
+#define SLIM_MSG_MC_REQUEST_VALUE                0x60
+#define SLIM_MSG_MC_REQUEST_CHANGE_VALUE         0x61
+#define SLIM_MSG_MC_REPLY_VALUE                  0x64
+#define SLIM_MSG_MC_CHANGE_VALUE                 0x68
+
+/* Clock pause values defined in Table 66 (slimbus spec 1.01.01) */
+#define SLIM_CLK_FAST				0
+#define SLIM_CLK_CONST_PHASE			1
+#define SLIM_CLK_UNSPECIFIED			2
+
+struct slim_controller;
+struct slim_device;
+
+/* Destination type Values defined in Table 33 (slimbus spec 1.01.01) */
+#define SLIM_MSG_DEST_LOGICALADDR	0
+#define SLIM_MSG_DEST_ENUMADDR		1
+#define	SLIM_MSG_DEST_BROADCAST		3
+
+/*
+ * @start_offset: Specifies starting offset in information/value element map
+ * @num_bytes: Can be 1, 2, 3, 4, 6, 8, 12, 16 per spec. This ensures that the
+ *	message will fit in the 40-byte message limit and that the slice size is
+ *	compatible with the values in table 21 (slimbus spec 1.01.01).
+ * @comp: Completion to indicate end of message-transfer. Used if client wishes
+ *	to use the API asynchronously.
+ */
+struct slim_ele_access {
+	u16			start_offset;
+	u8			num_bytes;
+	struct completion	*comp;
+};
+
+/*
+ * struct slim_framer - Represents Slimbus framer.
+ * Every controller may have multiple framers.
+ * Manager is responsible for framer hand-over.
+ * @e_addr: 6 byte Elemental address of the framer.
+ * @rootfreq: Root frequency at which the framer can run. This is the maximum
+ *	frequency (clock gear 10 per slimbus spec) at which the bus can operate.
+ * @superfreq: Superframes per root frequency. Every superframe is 6144 cells
+ *	(bits) per slimbus specification.
+ */
+struct slim_framer {
+	u8	e_addr[6];
+	int	rootfreq;
+	int	superfreq;
+};
+#define to_slim_framer(d) container_of(d, struct slim_framer, dev)
+
+/*
+ * struct slim_addrt: slimbus address used internally by the slimbus framework.
+ * @valid: If the device is still there or if the address can be reused.
+ * @eaddr: 6-bytes-long elemental address
+ * @laddr: It is possible that the controller will set a predefined logical
+ *	address rather than the one assigned by the framework (i.e. the logical
+ *	address may not be the same as the index into this table). This entry
+ *	stores the logical address value for this enumeration address.
+ */
+struct slim_addrt {
+	bool	valid;
+	u8	eaddr[6];
+	u8	laddr;
+};
+
+/*
+ * struct slim_val_inf: slimbus value/information element transaction
+ * @start_offset: Specifies starting offset in information/value element map
+ * @num_bytes: number of bytes to be read/written
+ * @wbuf: buffer if this transaction has 'write' component in it
+ * @rbuf: buffer if this transaction has 'read' component in it
+ */
+struct slim_val_inf {
+	u16 start_offset;
+	u8 num_bytes;
+	u8 *wbuf;
+	u8 *rbuf;
+};
+
+/*
+ * struct slim_msg_txn: Message to be sent by the controller.
+ * Linux framework uses this structure with drivers implementing controller.
+ * This structure has packet header, payload and buffer to be filled (if any)
+ * For the header information, refer to Table 34-36.
+ * @rl: Header field. remaining length.
+ * @mt: Header field. Message type.
+ * @mc: Header field. LSB is message code for type mt. Framework will set MSB to
+ *	SLIM_MSG_CLK_PAUSE_SEQ_FLG in case "mc" in the reconfiguration sequence
+ *	is for pausing the clock.
+ * @dt: Header field. Destination type.
+ * @ec: Element size. Used for elemental access APIs.
+ * @len: Length of payload. (excludes ec)
+ * @tid: Transaction ID. Used for messages expecting response.
+ *	(e.g. relevant for mc = SLIM_MSG_MC_REQUEST_INFORMATION)
+ * @la: Logical address of the device this message is going to.
+ *	(Not used when destination type is broadcast.)
+ * @async: If this transaction is async
+ * @rbuf: Buffer to be populated by controller when response is received.
+ * @wbuf: Payload of the message. (e.g. channel number for DATA channel APIs)
+ * @comp: Completion structure. Used by controller to notify response.
+ *	(Field is relevant when tid is used)
+ */
+struct slim_msg_txn {
+	u8			rl;
+	u8			mt;
+	u16			mc;
+	u8			dt;
+	u16			ec;
+	u8			len;
+	u8			tid;
+	u8			la;
+	bool			async;
+	u8			*rbuf;
+	const u8		*wbuf;
+	struct completion	*comp;
+};
+
+/* Internal port state used by slimbus framework to manage data-ports */
+enum slim_port_state {
+	SLIM_P_FREE,
+	SLIM_P_UNCFG,
+	SLIM_P_CFG,
+};
+
+/*
+ * enum slim_port_req: Request port type by user through APIs to manage ports
+ * User can request default, half-duplex or port to be used in multi-channel
+ * configuration. Default indicates a simplex port.
+ */
+enum slim_port_req {
+	SLIM_REQ_DEFAULT,
+	SLIM_REQ_HALF_DUP,
+	SLIM_REQ_MULTI_CH,
+};
+
+/*
+ * enum slim_port_opts: Port options requested.
+ * User can request no configuration, packed data, and/or MSB aligned data port
+ */
+enum slim_port_opts {
+	SLIM_OPT_NONE = 0,
+	SLIM_OPT_NO_PACK = 1U,
+	SLIM_OPT_ALIGN_MSB = 1U << 1,
+};
+
+/* enum slim_port_flow: Port flow type (inbound/outbound). */
+enum slim_port_flow {
+	SLIM_SRC,
+	SLIM_SINK,
+};
+
+/* enum slim_port_err: Port errors */
+enum slim_port_err {
+	SLIM_P_INPROGRESS,
+	SLIM_P_OVERFLOW,
+	SLIM_P_UNDERFLOW,
+	SLIM_P_DISCONNECT,
+	SLIM_P_NOT_OWNED,
+};
+
+/*
+ * struct slim_port_cfg: Port config for the manager port
+ * port_opts: port options (bit-map) for this port
+ * watermark: watermark level set for this port
+ */
+struct slim_port_cfg {
+	u32 port_opts;
+	u32 watermark;
+};
+
+/*
+ * struct slim_port: Internal structure used by framework to manage ports
+ * @err: Port error if any for this port. Refer to enum above.
+ * @state: Port state. Refer to enum above.
+ * @req: Port request for this port.
+ * @cfg: Port configuration for this port.
+ * @flow: Flow type of this port.
+ * @ch: Channel association of this port.
+ * @xcomp: Completion to indicate error, data transfer done event.
+ * @ctrl: Controller to which this port belongs. This is useful to associate
+ *	port with the SW since port hardware interrupts may only contain port
+ *	information.
+ */
+struct slim_port {
+	enum slim_port_err	err;
+	enum slim_port_state	state;
+	enum slim_port_req	req;
+	struct slim_port_cfg	cfg;
+	enum slim_port_flow	flow;
+	struct slim_ch		*ch;
+	struct completion	*xcomp;
+	struct slim_controller	*ctrl;
+};
+
+/*
+ * enum slim_ch_state: Channel state of a channel.
+ * Channel transition happens from free-to-allocated-to-defined-to-pending-
+ * active-to-active.
+ * Once active, channel can be removed or suspended. Suspended channels are
+ * still scheduled, but data transfer doesn't happen.
+ * Removed channels are not deallocated until dealloc_ch API is used.
+ * Deallocation resets the channel state back to free.
+ * Removed channels can be defined with different parameters.
+ */
+enum slim_ch_state {
+	SLIM_CH_FREE,
+	SLIM_CH_ALLOCATED,
+	SLIM_CH_DEFINED,
+	SLIM_CH_PENDING_ACTIVE,
+	SLIM_CH_ACTIVE,
+	SLIM_CH_SUSPENDED,
+	SLIM_CH_PENDING_REMOVAL,
+};
+
+/*
+ * enum slim_ch_proto: Channel protocol used by the channel.
+ * Hard Isochronous channel is not scheduled if current frequency doesn't allow
+ * the channel to be run without flow-control.
+ * Auto isochronous channel will be scheduled as hard-isochronous or push-pull
+ * depending on current bus frequency.
+ * Currently, Push-pull or async or extended channels are not supported.
+ * For more details, refer to slimbus spec
+ */
+enum slim_ch_proto {
+	SLIM_HARD_ISO,
+	SLIM_AUTO_ISO,
+	SLIM_PUSH,
+	SLIM_PULL,
+	SLIM_ASYNC_SMPLX,
+	SLIM_ASYNC_HALF_DUP,
+	SLIM_EXT_SMPLX,
+	SLIM_EXT_HALF_DUP,
+};
+
+/*
+ * enum slim_ch_rate: Most commonly used frequency rate families.
+ * Use 1HZ for push-pull transport.
+ * 4KHz and 11.025KHz are most commonly used in audio applications.
+ * Typically, slimbus runs at frequencies to support channels running at 4KHz
+ * and/or 11.025KHz isochronously.
+ */
+enum slim_ch_rate {
+	SLIM_RATE_1HZ,
+	SLIM_RATE_4000HZ,
+	SLIM_RATE_11025HZ,
+};
+
+/*
+ * enum slim_ch_coeff: Coefficient of a channel used internally by framework.
+ * Coefficient is applicable to channels running isochronously.
+ * Coefficient is calculated based on channel rate multiplier.
+ * (If the rate multiplier is a power of 2, it is a coeff.1 channel;
+ * otherwise it is a coeff.3 channel.)
+ */
+enum slim_ch_coeff {
+	SLIM_COEFF_1,
+	SLIM_COEFF_3,
+};
+
+/*
+ * enum slim_ch_control: Channel control.
+ * Activate will schedule channel and/or group of channels in the TDM frame.
+ * Suspend will keep the schedule but data-transfer won't happen.
+ * Remove will remove the channel/group from the TDM frame.
+ */
+enum slim_ch_control {
+	SLIM_CH_ACTIVATE,
+	SLIM_CH_SUSPEND,
+	SLIM_CH_REMOVE,
+};
+
+/* enum slim_ch_dataf: Data format per table 60 from slimbus spec 1.01.01 */
+enum slim_ch_dataf {
+	SLIM_CH_DATAF_NOT_DEFINED = 0,
+	SLIM_CH_DATAF_LPCM_AUDIO = 1,
+	SLIM_CH_DATAF_IEC61937_COMP_AUDIO = 2,
+	SLIM_CH_DATAF_PACKED_PDM_AUDIO = 3,
+};
+
+/* enum slim_ch_auxf: Auxiliary field format per table 59 from slimbus spec */
+enum slim_ch_auxf {
+	SLIM_CH_AUXF_NOT_APPLICABLE = 0,
+	SLIM_CH_AUXF_ZCUV_TUNNEL_IEC60958 = 1,
+	SLIM_CH_USER_DEFINED = 0xF,
+};
+
+/*
+ * struct slim_ch: Channel structure used externally by users of channel APIs.
+ * @prot: Desired slimbus protocol.
+ * @baser: Desired base rate. (Typical isochronous rates are 4KHz or
+ *	11.025KHz.)
+ * @dataf: Data format.
+ * @auxf: Auxiliary format.
+ * @ratem: Channel rate multiplier. (e.g. a 48KHz channel will have a 4KHz
+ *	base rate and 12 as the rate multiplier.)
+ * @sampleszbits: Sample size in bits.
+ */
+struct slim_ch {
+	enum slim_ch_proto	prot;
+	enum slim_ch_rate	baser;
+	enum slim_ch_dataf	dataf;
+	enum slim_ch_auxf	auxf;
+	u32			ratem;
+	u32			sampleszbits;
+};
+
+/*
+ * struct slim_ich: Internal channel structure used by slimbus framework.
+ * @prop: structure passed by the client.
+ * @coeff: Coefficient of this channel.
+ * @state: Current state of the channel.
+ * @nextgrp: If this channel is part of group, next channel in this group.
+ * @prrate: Presence rate of this channel (per table 62 of the spec)
+ * @offset: Offset of this channel in the superframe.
+ * @newoff: Used during scheduling to hold temporary new offset until the offset
+ *	is accepted/rejected by slimbus reconfiguration.
+ * @interval: Interval of this channel per superframe.
+ * @newintr: Used during scheduling to hold the new interval temporarily.
+ * @seglen: Segment length of this channel.
+ * @rootexp: root exponent of this channel. Rate can be found using rootexp and
+ *	coefficient. Used during scheduling.
+ * @srch: Source port used by this channel.
+ * @sinkh: Sink ports used by this channel.
+ * @nsink: number of sink ports used by this channel.
+ * @chan: Channel number sent on hardware lines for this channel. May not be
+ *	equal to the array index into chans if the client requested a number
+ *	beyond the controller's channel array.
+ * @ref: Reference count to keep track of how many clients (up to 2) are using
+ *	this channel.
+ * @def: Used to keep track of how many times the channel definition is sent
+ *	to hardware and thus decide whether channel-remove can be sent for the
+ *	channel. Channel definition may be sent up to twice (once per producer
+ *	and once per consumer). Channel removal should be sent only once to
+ *	avoid clients getting underflow/overflow errors.
+ */
+struct slim_ich {
+	struct slim_ch		prop;
+	enum slim_ch_coeff	coeff;
+	enum slim_ch_state	state;
+	u16			nextgrp;
+	u32			prrate;
+	u32			offset;
+	u32			newoff;
+	u32			interval;
+	u32			newintr;
+	u32			seglen;
+	u8			rootexp;
+	u32			srch;
+	u32			*sinkh;
+	int			nsink;
+	u8			chan;
+	int			ref;
+	int			def;
+};
+
+/*
+ * struct slim_sched: Framework uses this structure internally for scheduling.
+ * @chc3: Array of all active coefficient 3 channels.
+ * @num_cc3: Number of active coefficient 3 channels.
+ * @chc1: Array of all active coefficient 1 channels.
+ * @num_cc1: Number of active coefficient 1 channels.
+ * @subfrmcode: Current subframe-code used by TDM. This is decided based on
+ *	requested message bandwidth and current channels scheduled.
+ * @usedslots: Slots used by all active channels.
+ * @msgsl: Slots used by message-bandwidth.
+ * @pending_msgsl: Used to store pending request of message bandwidth (in slots)
+ *	until the scheduling is accepted by reconfiguration.
+ * @m_reconf: This mutex is held until current reconfiguration (data channel
+ *	scheduling, message bandwidth reservation) is done. Message APIs can
+ *	use the bus concurrently when this mutex is held since elemental access
+ *	messages can be sent on the bus when reconfiguration is in progress.
+ * @slots: Used for debugging to verify the current schedule in TDM.
+ */
+struct slim_sched {
+	struct slim_ich	**chc3;
+	int		num_cc3;
+	struct slim_ich	**chc1;
+	int		num_cc1;
+	u32		subfrmcode;
+	u32		usedslots;
+	u32		msgsl;
+	u32		pending_msgsl;
+	struct mutex	m_reconf;
+	u8		*slots;
+};
+
+/*
+ * enum slim_clk_state: Slimbus controller's clock state used internally for
+ *	maintaining current clock state.
+ * @SLIM_CLK_ACTIVE: Slimbus clock is active
+ * @SLIM_CLK_PAUSE_FAILED: Slimbus controller failed to enter clock pause.
+ *	Hardware-wise, this state is the same as active, but the framework will
+ *	wait on a completion before transitioning to SLIM_CLK_ACTIVE
+ * @SLIM_CLK_ENTERING_PAUSE: Slimbus clock pause sequence is being sent on the
+ *	bus. If this succeeds, state changes to SLIM_CLK_PAUSED. If the
+ *	transition fails, state changes to SLIM_CLK_PAUSE_FAILED
+ * @SLIM_CLK_PAUSED: Slimbus controller clock has paused.
+ */
+enum slim_clk_state {
+	SLIM_CLK_ACTIVE,
+	SLIM_CLK_ENTERING_PAUSE,
+	SLIM_CLK_PAUSE_FAILED,
+	SLIM_CLK_PAUSED,
+};
+
+/*
+ * struct slim_controller: Represents manager for a SlimBUS
+ *				(similar to 'master' on I2C)
+ * @dev: Device interface to this driver
+ * @nr: Board-specific number identifier for this controller/bus
+ * @list: Link with other slimbus controllers
+ * @name: Name for this controller
+ * @clkgear: Current clock gear in which this bus is running
+ * @min_cg: Minimum clock gear supported by this controller (default value: 1)
+ * @max_cg: Maximum clock gear supported by this controller (default value: 10)
+ * @clk_state: Controller's clock state from enum slim_clk_state
+ * @pause_comp: Signals completion of clock pause sequence. This is useful when
+ *	a client initiates a slimbus transaction while the controller may be
+ *	entering clock pause.
+ * @a_framer: Active framer which is clocking the bus managed by this controller
+ * @m_ctrl: Mutex protecting controller data structures (ports, channels etc)
+ * @addrt: Logical address table
+ * @num_dev: Number of active slimbus slaves on this bus
+ * @devs: List of devices on this controller
+ * @wq: Workqueue per controller used to notify devices when they report present
+ * @txnt: Table of transactions indexed by transaction ID
+ * @last_tid: size of the table txnt (can't grow beyond 256 since TID is 8-bits)
+ * @txn_lock: Lock protecting the transaction table (txnt) and last_tid
+ * @ports: Ports associated with this controller
+ * @nports: Number of ports supported by the controller
+ * @chans: Channels associated with this controller
+ * @nchans: Number of channels supported
+ * @reserved: Reserved channels that controller wants to use internally.
+ *		Clients will be assigned channel numbers after this number
+ * @sched: scheduler structure used by the controller
+ * @dev_released: completion used to signal when sysfs has released this
+ *	controller so that it can be deleted during shutdown
+ * @xfer_msg: Transfer a message on this controller (this can be a broadcast
+ *	control/status message like data channel setup, or a unicast message
+ *	like value element read/write).
+ * @set_laddr: Setup logical address at laddr for the slave with elemental
+ *	address e_addr. Drivers implementing controller will be expected to
+ *	send unicast message to this device with its logical address.
+ * @allocbw: Controller can override default reconfiguration and channel
+ *	scheduling algorithm.
+ * @get_laddr: It is possible that the controller needs to maintain a fixed
+ *	logical address table; get_laddr can be used in that case so that the
+ *	controller can do this assignment itself.
+ * @wakeup: This function pointer implements controller-specific procedure
+ *	to wake it up from clock-pause. Framework will call this to bring
+ *	the controller out of clock pause.
+ * @alloc_port: Allocate a port and make it ready for data transfer. This is
+ *	called by framework to make sure controller can take necessary steps
+ *	to initialize its port
+ * @dealloc_port: Counter-part of alloc_port. This is called by framework so
+ *	that controller can free resources associated with this port
+ * @framer_handover: If this controller has multiple framers, this API will
+ *	be called to switch between framers if controller desires to change
+ *	the active framer.
+ * @port_xfer: Called to schedule a transfer on port pn. iobuf is a physical
+ *	address and the buffer may have to be DMA-friendly since data channels
+ *	will use data from these buffers without SW intervention.
+ * @port_xfer_status: Called by framework when client calls get_xfer_status
+ *	API. Returns how much buffer is actually processed and the port
+ *	errors (e.g. overflow/underflow) if any.
+ * @xfer_user_msg: Send user message to specified logical address. Underlying
+ *	controller has to support sending user messages. Returns error if any.
+ * @xfer_bulk_wr: Send bulk of write messages to specified logical address.
+ *	Underlying controller has to support this. Typically useful to transfer
+ *	messages to download firmware, or messages where strict ordering for
+ *	the slave is necessary.
+ */
+struct slim_controller {
+	struct device		dev;
+	unsigned int		nr;
+	struct list_head	list;
+	char			name[SLIMBUS_NAME_SIZE];
+	int			clkgear;
+	int			min_cg;
+	int			max_cg;
+	enum slim_clk_state	clk_state;
+	struct completion	pause_comp;
+	struct slim_framer	*a_framer;
+	struct mutex		m_ctrl;
+	struct slim_addrt	*addrt;
+	u8			num_dev;
+	struct list_head	devs;
+	struct workqueue_struct *wq;
+	struct slim_msg_txn	*txnt[SLIM_MAX_TXNS];
+	u8			last_tid;
+	spinlock_t		txn_lock;
+	struct slim_port	*ports;
+	int			nports;
+	struct slim_ich		*chans;
+	int			nchans;
+	u8			reserved;
+	struct slim_sched	sched;
+	struct completion	dev_released;
+	int			(*xfer_msg)(struct slim_controller *ctrl,
+				struct slim_msg_txn *txn);
+	int			(*set_laddr)(struct slim_controller *ctrl,
+				const u8 *ea, u8 elen, u8 laddr);
+	int			(*allocbw)(struct slim_device *sb,
+				int *subfrmc, int *clkgear);
+	int			(*get_laddr)(struct slim_controller *ctrl,
+				const u8 *ea, u8 elen, u8 *laddr);
+	int			(*wakeup)(struct slim_controller *ctrl);
+	int			(*alloc_port)(struct slim_controller *ctrl,
+				u8 port);
+	void			(*dealloc_port)(struct slim_controller *ctrl,
+				u8 port);
+	int			(*framer_handover)(struct slim_controller *ctrl,
+				struct slim_framer *new_framer);
+	int			(*port_xfer)(struct slim_controller *ctrl,
+				u8 pn, phys_addr_t iobuf, u32 len,
+				struct completion *comp);
+	enum slim_port_err	(*port_xfer_status)(struct slim_controller *ctr,
+				u8 pn, phys_addr_t *done_buf, u32 *done_len);
+	int			(*xfer_user_msg)(struct slim_controller *ctrl,
+				u8 la, u8 mt, u8 mc,
+				struct slim_ele_access *msg, u8 *buf, u8 len);
+	int			(*xfer_bulk_wr)(struct slim_controller *ctrl,
+				u8 la, u8 mt, u8 mc, struct slim_val_inf msgs[],
+				int n, int (*comp_cb)(void *ctx, int err),
+				void *ctx);
+};
+#define to_slim_controller(d) container_of(d, struct slim_controller, dev)
+
+/*
+ * struct slim_driver: Manage Slimbus generic/slave device driver
+ * @probe: Binds this driver to a slimbus device.
+ * @remove: Unbinds this driver from the slimbus device.
+ * @shutdown: Standard shutdown callback used during powerdown/halt.
+ * @suspend: Standard suspend callback used during system suspend
+ * @resume: Standard resume callback used during system resume
+ * @device_up: This callback is called when the device reports present and
+ *		gets a logical address assigned to it
+ * @device_down: This callback is called when device reports absent, or the
+ *		bus goes down. Device will report present when bus is up and
+ *		device_up callback will be called again when that happens
+ * @reset_device: This callback is called after the framer is booted.
+ *		The driver should reset the device so that the device
+ *		acquires sync and becomes operational.
+ * @driver: Slimbus device drivers should initialize name and owner field of
+ *	this structure
+ * @id_table: List of slimbus devices supported by this driver
+ */
+struct slim_driver {
+	int (*probe)(struct slim_device *sldev);
+	int (*remove)(struct slim_device *sldev);
+	void (*shutdown)(struct slim_device *sldev);
+	int (*suspend)(struct slim_device *sldev, pm_message_t pmesg);
+	int (*resume)(struct slim_device *sldev);
+	int (*device_up)(struct slim_device *sldev);
+	int (*device_down)(struct slim_device *sldev);
+	int (*reset_device)(struct slim_device *sldev);
+
+	struct device_driver		driver;
+	const struct slim_device_id	*id_table;
+};
+#define to_slim_driver(d) container_of(d, struct slim_driver, driver)
+
+/*
+ * struct slim_pending_ch: List of pending channels used by framework.
+ * @chan: Channel number
+ * @pending: list of channels
+ */
+struct slim_pending_ch {
+	u8	chan;
+	struct	list_head pending;
+};
+
+/*
+ * Client/device handle (struct slim_device):
+ * ------------------------------------------
+ *  This is the client/device handle returned when a slimbus
+ *  device is registered with a controller. This structure can be provided
+ *  during register_board_info, or can be allocated using slim_add_device API.
+ *  Pointer to this structure is used by client-driver as a handle.
+ *  @dev: Driver model representation of the device.
+ *  @name: Name of driver to use with this device.
+ *  @e_addr: 6-byte elemental address of this device.
+ *  @driver: Device's driver. Pointer to access routines.
+ *  @ctrl: Slimbus controller managing the bus hosting this device.
+ *  @laddr: 1-byte Logical address of this device.
+ *  @reported: Flag to indicate whether this device reported present. The flag
+ *	is set when device reports present, and is reset when it reports
+ *	absent. This flag, along with the notified flag below, is used to call
+ *	the device_up or device_down callbacks for this device's driver.
+ *  @mark_define: List of channels pending definition/activation.
+ *  @mark_suspend: List of channels pending suspend.
+ *  @mark_removal: List of channels pending removal.
+ *  @notified: Flag to indicate whether this device has been notified. The
+ *	device may report present multiple times, but should be notified only
+ *	the first time it reports present.
+ *  @dev_list: List of devices on a controller
+ *  @wd: Work structure associated with workqueue for presence notification
+ *  @sldev_reconf: Mutex to protect the pending data-channel lists.
+ *  @pending_msgsl: Message bandwidth reservation request by this client in
+ *	slots that's pending reconfiguration.
+ *  @cur_msgsl: Message bandwidth reserved by this client in slots.
+ *  The mark_define, mark_suspend and mark_removal lists above are managed by
+ *  the framework. They are populated when the client calls a channel control
+ *  API without the reconfig-flag set, and are emptied when the
+ *  reconfiguration is done by this client.
+ */
+struct slim_device {
+	struct device		dev;
+	const char		*name;
+	u8			e_addr[6];
+	struct slim_driver	*driver;
+	struct slim_controller	*ctrl;
+	u8			laddr;
+	bool			reported;
+	struct list_head	mark_define;
+	struct list_head	mark_suspend;
+	struct list_head	mark_removal;
+	bool			notified;
+	struct list_head	dev_list;
+	struct work_struct	wd;
+	struct mutex		sldev_reconf;
+	u32			pending_msgsl;
+	u32			cur_msgsl;
+};
+#define to_slim_device(d) container_of(d, struct slim_device, dev)
+
+/*
+ * struct slim_boardinfo: Declare board info for Slimbus device bringup.
+ * @bus_num: Controller number (bus) on which this device will sit.
+ * @slim_slave: Device to be registered with slimbus.
+ */
+struct slim_boardinfo {
+	int			bus_num;
+	struct slim_device	*slim_slave;
+};
+
+/*
+ * slim_get_logical_addr: Return the logical address of a slimbus device.
+ * @sb: client handle requesting the address.
+ * @e_addr: Elemental address of the device.
+ * @e_len: Length of e_addr
+ * @laddr: output buffer to store the address
+ * context: can sleep
+ * -EINVAL is returned in case of invalid parameters, and -ENXIO is returned if
+ *  the device with this elemental address is not found.
+ */
+extern int slim_get_logical_addr(struct slim_device *sb, const u8 *e_addr,
+					u8 e_len, u8 *laddr);
+
+/* Message APIs: Unicast message APIs used by slimbus slave drivers */
+
+/*
+ * Message API access routines.
+ * @sb: client handle requesting elemental message reads, writes.
+ * @msg: Input structure for start-offset, number of bytes to read.
+ * @rbuf: data buffer to be filled with values read.
+ * @len: data buffer size
+ * @wbuf: data buffer containing value/information to be written
+ * context: can sleep
+ * Returns:
+ * -EINVAL: Invalid parameters
+ * -ETIMEDOUT: If controller could not complete the request. This may happen if
+ *  the bus lines are not clocked, the controller is not powered on, or the
+ *  slave with the given address is not enumerated/responding.
+ */
+extern int slim_request_val_element(struct slim_device *sb,
+					struct slim_ele_access *msg, u8 *buf,
+					u8 len);
+extern int slim_request_inf_element(struct slim_device *sb,
+					struct slim_ele_access *msg, u8 *buf,
+					u8 len);
+extern int slim_change_val_element(struct slim_device *sb,
+					struct slim_ele_access *msg,
+					const u8 *buf, u8 len);
+extern int slim_clear_inf_element(struct slim_device *sb,
+					struct slim_ele_access *msg, u8 *buf,
+					u8 len);
+extern int slim_request_change_val_element(struct slim_device *sb,
+					struct slim_ele_access *msg, u8 *rbuf,
+					const u8 *wbuf, u8 len);
+extern int slim_request_clear_inf_element(struct slim_device *sb,
+					struct slim_ele_access *msg, u8 *rbuf,
+					const u8 *wbuf, u8 len);
+
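+/*
+ * Example (an illustrative sketch, not part of this patch): a blocking,
+ * 1-byte value-element read from a client driver. The slim_ele_access field
+ * names (start_offset, num_bytes, comp) are assumed from the definition
+ * earlier in this header, and the register offset is hypothetical.
+ *
+ *	struct slim_ele_access msg = {
+ *		.start_offset = 0x800,
+ *		.num_bytes = 1,
+ *		.comp = NULL,
+ *	};
+ *	u8 val;
+ *	int ret = slim_request_val_element(sb, &msg, &val, 1);
+ *	if (ret)
+ *		dev_err(&sb->dev, "value element read failed: %d\n", ret);
+ */
+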
+/*
+ * Broadcast message API:
+ * call this API directly with sbdev = NULL.
+ * For broadcast reads, make sure that buffers are big enough to incorporate
+ * replies from all logical addresses.
+ * Not all controllers support broadcast.
+ */
+extern int slim_xfer_msg(struct slim_controller *ctrl,
+			struct slim_device *sbdev, struct slim_ele_access *msg,
+			u16 mc, u8 *rbuf, const u8 *wbuf, u8 len);
+
+/*
+ * User message:
+ * slim_user_msg: Send user message that is interpreted by destination device
+ * @sb: Client handle sending the message
+ * @la: Destination device for this user message
+ * @mt: Message Type (Source-referred, or Destination-referred)
+ * @mc: Message Code
+ * @msg: Message structure (start offset, number of bytes) to be sent
+ * @buf: data buffer to be sent
+ * @len: data buffer size in bytes
+ */
+extern int slim_user_msg(struct slim_device *sb, u8 la, u8 mt, u8 mc,
+				struct slim_ele_access *msg, u8 *buf, u8 len);
+
+/*
+ * Queue bulk of message writes:
+ * slim_bulk_msg_write: Write bulk of messages (e.g. downloading FW)
+ * @sb: Client handle sending these messages. The destination is this
+ *	device's logical address.
+ * @mt: Message Type
+ * @mc: Message Code
+ * @msgs: List of messages to be written in bulk
+ * @n: Number of messages in the list
+ * @comp_cb: Callback if client needs this to be non-blocking
+ * @ctx: Context for this callback
+ * If supported by controller, this message list will be sent in bulk to the HW
+ * If the client specifies this to be non-blocking, the callback will be
+ * called from atomic context.
+ */
+extern int slim_bulk_msg_write(struct slim_device *sb, u8 mt, u8 mc,
+			struct slim_val_inf msgs[], int n,
+			int (*comp_cb)(void *ctx, int err), void *ctx);
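+
+/*
+ * Example (an illustrative sketch): queueing two write messages in bulk,
+ * non-blocking, e.g. for firmware download. The slim_val_inf field names
+ * (start_offset, num_bytes, wbuf) are assumed from its definition earlier in
+ * this header; FW_ADDR0/FW_ADDR1, blk0/blk1 and the mt/mc values are
+ * hypothetical, device-specific choices.
+ *
+ *	static int fw_wr_done(void *ctx, int err)
+ *	{
+ *		complete((struct completion *)ctx);
+ *		return 0;
+ *	}
+ *
+ *	struct slim_val_inf msgs[] = {
+ *		{ .start_offset = FW_ADDR0, .num_bytes = 16, .wbuf = blk0 },
+ *		{ .start_offset = FW_ADDR1, .num_bytes = 16, .wbuf = blk1 },
+ *	};
+ *	ret = slim_bulk_msg_write(sb, mt, mc, msgs, ARRAY_SIZE(msgs),
+ *				  fw_wr_done, &done);
+ */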
+/* end of message apis */
+
+/* Port management for manager device APIs */
+
+/*
+ * slim_alloc_mgrports: Allocate port on manager side.
+ * @sb: device/client handle.
+ * @req: Port request type.
+ * @nports: Number of ports requested
+ * @rh: output buffer to store the port handles
+ * @hsz: size of buffer storing handles
+ * context: can sleep
+ * This port will typically be used by SW, e.g. a client driver wants to
+ * receive some data from audio codec HW using a data channel.
+ * Port allocated using this API will be used to receive the data.
+ * If half-duplex ports are requested, two adjacent ports are allocated for
+ * 1 half-duplex port. So the handle-buffer size should be twice the number
+ * of half-duplex ports to be allocated.
+ * -EDQUOT is returned if all ports are in use.
+ */
+extern int slim_alloc_mgrports(struct slim_device *sb, enum slim_port_req req,
+				int nports, u32 *rh, int hsz);
+
+/* Deallocate the port(s) allocated using the API above */
+extern int slim_dealloc_mgrports(struct slim_device *sb, u32 *hdl, int hsz);
+
+/*
+ * slim_config_mgrports: Configure manager side ports
+ * @sb: device/client handle.
+ * @ph: array of port handles for which this configuration is valid
+ * @nports: Number of ports in ph
+ * @cfg: configuration requested for port(s)
+ * Configure port settings if they are different than the default ones.
+ * Returns success if the config could be applied. Returns -EISCONN if the
+ * port is in use
+ */
+extern int slim_config_mgrports(struct slim_device *sb, u32 *ph, int nports,
+				struct slim_port_cfg *cfg);
+
+/*
+ * slim_port_xfer: Schedule buffer to be transferred/received using port-handle.
+ * @sb: client handle
+ * @ph: port-handle
+ * @iobuf: buffer to be transferred or populated
+ * @len: buffer size.
+ * @comp: completion signal to indicate transfer done or error.
+ * context: can sleep
+ * Returns number of bytes transferred/received if used synchronously.
+ * Will return 0 if used asynchronously.
+ * Client will call slim_port_get_xfer_status to get error and/or number of
+ * bytes transferred if used asynchronously.
+ */
+extern int slim_port_xfer(struct slim_device *sb, u32 ph, phys_addr_t iobuf,
+				u32 len, struct completion *comp);
+
+/*
+ * slim_port_get_xfer_status: Poll for port transfers, or get transfer status
+ *	after completion is done.
+ * @sb: client handle
+ * @ph: port-handle
+ * @done_buf: return pointer (iobuf from slim_port_xfer) which is processed.
+ * @done_len: Number of bytes transferred.
+ * This can be called when the port_xfer completion is signalled.
+ * The API will return the port transfer error (underflow/overflow/disconnect)
+ * and/or done_len will reflect the number of bytes transferred. Note that
+ * done_len may be valid even if a port error (overflow/underflow) has happened.
+ * e.g. If the transfer was scheduled with a few bytes to be transferred and
+ * client has not supplied more data to be transferred, done_len will indicate
+ * number of bytes transferred with underflow error. To avoid frequent underflow
+ * errors, multiple transfers can be queued (e.g. ping-pong buffers) so that
+ * channel has data to be transferred even if client is not ready to transfer
+ * data all the time. done_buf will indicate address of the last buffer
+ * processed from the multiple transfers.
+ */
+extern enum slim_port_err slim_port_get_xfer_status(struct slim_device *sb,
+			u32 ph, phys_addr_t *done_buf, u32 *done_len);
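+
+/*
+ * Example (an illustrative sketch): receiving data on a manager port with
+ * ping-pong buffers, as suggested above. SLIM_REQ_DEFAULT is assumed from
+ * enum slim_port_req earlier in this header; buf_a_phys/buf_b_phys are
+ * DMA-friendly physical addresses set up by the client.
+ *
+ *	u32 ph;
+ *	ret = slim_alloc_mgrports(sb, SLIM_REQ_DEFAULT, 1, &ph, sizeof(ph));
+ *	...
+ *	slim_port_xfer(sb, ph, buf_a_phys, len, &comp_a);
+ *	slim_port_xfer(sb, ph, buf_b_phys, len, &comp_b);
+ *	wait_for_completion(&comp_a);
+ *	err = slim_port_get_xfer_status(sb, ph, &done_buf, &done_len);
+ *	(requeue buf_a while buf_b is being filled, and so on)
+ */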
+
+/*
+ * slim_connect_src: Connect source port to channel.
+ * @sb: client handle
+ * @srch: source handle to be connected to this channel
+ * @chanh: Channel with which the ports need to be associated.
+ * Per slimbus specification, a channel may have 1 source port.
+ * Channel specified in chanh needs to be allocated first.
+ * Returns -EALREADY if source is already configured for this channel.
+ * Returns -ENOTCONN if channel is not allocated
+ * Returns -EINVAL if invalid direction is specified for non-manager port,
+ * or if the manager side port number is out of bounds, or in incorrect state
+ */
+extern int slim_connect_src(struct slim_device *sb, u32 srch, u16 chanh);
+
+/*
+ * slim_connect_sink: Connect sink port(s) to channel.
+ * @sb: client handle
+ * @sinkh: sink handle(s) to be connected to this channel
+ * @nsink: number of sinks
+ * @chanh: Channel with which the ports need to be associated.
+ * Per slimbus specification, a channel may have multiple sink-ports.
+ * Channel specified in chanh needs to be allocated first.
+ * Returns -EALREADY if sink is already configured for this channel.
+ * Returns -ENOTCONN if channel is not allocated
+ * Returns -EINVAL if invalid parameters are passed, or invalid direction is
+ * specified for non-manager port, or if the manager side port number is out of
+ * bounds, or in incorrect state
+ */
+extern int slim_connect_sink(struct slim_device *sb, u32 *sinkh, int nsink,
+				u16 chanh);
+/*
+ * slim_disconnect_ports: Disconnect port(s) from channel
+ * @sb: client handle
+ * @ph: ports to be disconnected
+ * @nph: number of ports.
+ * Disconnects ports from a channel.
+ */
+extern int slim_disconnect_ports(struct slim_device *sb, u32 *ph, int nph);
+
+/*
+ * slim_get_slaveport: Get slave port handle
+ * @la: slave device logical address.
+ * @idx: port index at slave
+ * @rh: return handle
+ * @flw: Flow type (source or destination)
+ * This API only returns a slave port's representation as expected by slimbus
+ * driver. This port is not managed by the slimbus driver. Caller is expected
+ * to have visibility of this port since it's a device-port.
+ */
+extern int slim_get_slaveport(u8 la, int idx, u32 *rh, enum slim_port_flow flw);
+
+
+/* Channel functions. */
+
+/*
+ * slim_alloc_ch: Allocate a slimbus channel and return its handle.
+ * @sb: client handle.
+ * @chanh: return channel handle
+ * Slimbus channels are limited to 256 per specification.
+ * -EXFULL is returned if all channels are in use.
+ * Although slimbus specification supports 256 channels, a controller may not
+ * support that many channels.
+ */
+extern int slim_alloc_ch(struct slim_device *sb, u16 *chanh);
+
+/*
+ * slim_query_ch: Get reference-counted handle for a channel number. (Every
+ * channel is reference counted: one reference as the producer and the others
+ * as consumers.)
+ * @sb: client handle
+ * @chan: slimbus channel number
+ * @chanh: return channel handle
+ * If the requested channel number is not in use, it is allocated, and its
+ * reference count is set to one. If the channel was already allocated, this
+ * API will return a handle to that channel and the reference count is
+ * incremented.
+ * -EXFULL is returned if all channels are in use
+ */
+extern int slim_query_ch(struct slim_device *sb, u8 chan, u16 *chanh);
+/*
+ * slim_dealloc_ch: Deallocate channel allocated using the API above
+ * -EISCONN is returned if an attempt is made to deallocate the channel
+ *  without removing it first.
+ *  -ENOTCONN is returned if deallocation is attempted on a channel that's
+ *  not allocated.
+ */
+extern int slim_dealloc_ch(struct slim_device *sb, u16 chanh);
+
+
+/*
+ * slim_define_ch: Define a channel. This API defines channel parameters for a
+ *	given channel.
+ * @sb: client handle.
+ * @prop: slim_ch structure with channel parameters desired to be used.
+ * @chanh: list of channels to be defined.
+ * @nchan: number of channels in a group (1 if grp is false)
+ * @grp: Are the channels grouped
+ * @grph: return group handle if grouping of channels is desired.
+ *	Channels can be grouped if multiple channels use the same parameters
+ *	(e.g. 5.1 audio has 6 channels with the same parameters. They will all
+ *	be grouped and given 1 handle for simplicity and to avoid repeatedly
+ *	calling the API.)
+ * -EISCONN is returned if channel is already used with different parameters.
+ * -ENXIO is returned if the channel is not yet allocated.
+ */
+extern int slim_define_ch(struct slim_device *sb, struct slim_ch *prop,
+				u16 *chanh, u8 nchan, bool grp, u16 *grph);
+
+/*
+ * slim_control_ch: Channel control API.
+ * @sb: client handle
+ * @grpchanh: group or channel handle to be controlled
+ * @chctrl: Control command (activate/suspend/remove)
+ * @commit: flag to indicate whether the control should take effect right-away.
+ * This API activates, removes or suspends a channel (or group of channels).
+ * grpchanh indicates the channel or group handle (returned by the define_ch
+ * API). Reconfiguration may be time-consuming since it can change all other
+ * active channel allocations on the bus, the clock gear used by the
+ * slimbus, and the control space width used for messaging.
+ * commit makes sure that multiple channels can be activated/deactivated before
+ * reconfiguration is started.
+ * -EXFULL is returned if there is no space in TDM to reserve the bandwidth.
+ * -EISCONN/-ENOTCONN is returned if the channel is already connected or not
+ * yet defined.
+ * -EINVAL is returned if individual control of a grouped-channel is attempted.
+ */
+extern int slim_control_ch(struct slim_device *sb, u16 grpchanh,
+				enum slim_ch_control chctrl, bool commit);
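+
+/*
+ * Example (an illustrative sketch): allocating, defining, connecting and
+ * activating one 48KHz, 16-bit LPCM channel. The port handles srcph/sinkph
+ * are assumed to have been obtained via slim_get_slaveport() /
+ * slim_alloc_mgrports(); error handling is omitted for brevity.
+ *
+ *	struct slim_ch prop = {
+ *		.prot = SLIM_AUTO_ISO,
+ *		.baser = SLIM_RATE_4000HZ,
+ *		.dataf = SLIM_CH_DATAF_LPCM_AUDIO,
+ *		.auxf = SLIM_CH_AUXF_NOT_APPLICABLE,
+ *		.ratem = 12,
+ *		.sampleszbits = 16,
+ *	};
+ *	u16 chanh;
+ *
+ *	slim_alloc_ch(sb, &chanh);
+ *	slim_define_ch(sb, &prop, &chanh, 1, false, NULL);
+ *	slim_connect_src(sb, srcph, chanh);
+ *	slim_connect_sink(sb, &sinkph, 1, chanh);
+ *	slim_control_ch(sb, chanh, SLIM_CH_ACTIVATE, true);
+ *
+ * With commit set to false, more channels can be batched and committed later
+ * via slim_reconfigure_now().
+ */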
+
+/*
+ * slim_get_ch_state: Channel state.
+ * This API returns the channel's state (active, suspended, inactive etc)
+ */
+extern enum slim_ch_state slim_get_ch_state(struct slim_device *sb,
+						u16 chanh);
+
+/*
+ * slim_reservemsg_bw: Request to reserve bandwidth for messages.
+ * @sb: client handle
+ * @bw_bps: message bandwidth in bits per second to be requested
+ * @commit: indicates whether the reconfiguration needs to be acted upon.
+ * This API call can be grouped with slim_control_ch API call with only one of
+ * the APIs specifying the commit flag to avoid reconfiguration being called too
+ * frequently. -EXFULL is returned if there is no space in TDM to reserve the
+ * bandwidth. -EBUSY is returned if reconfiguration is requested, but a request
+ * is already in progress.
+ */
+extern int slim_reservemsg_bw(struct slim_device *sb, u32 bw_bps, bool commit);
+
+/*
+ * slim_reconfigure_now: Request reconfiguration now.
+ * @sb: client handle
+ * This API does what commit flag in other scheduling APIs do.
+ * -EXFULL is returned if there is no space in TDM to reserve the
+ * bandwidth. -EBUSY is returned if reconfiguration request is already in
+ * progress.
+ */
+extern int slim_reconfigure_now(struct slim_device *sb);
+
+/*
+ * slim_ctrl_clk_pause: Called by slimbus controller to request clock to be
+ *	paused or woken up out of clock pause
+ * @ctrl: controller requesting bus to be paused or woken up
+ * @wakeup: Wakeup this controller from clock pause.
+ * @restart: Restart time value per spec used for clock pause. This value
+ *	isn't used when controller is to be woken up.
+ * This API executes the clock pause reconfiguration sequence if wakeup is
+ * false. If wakeup is true, the controller's wakeup is called. Once paused,
+ * the Slimbus clock is idle and can be disabled by the controller later.
+ */
+extern int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup,
+		u8 restart);
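+
+/*
+ * Example (an illustrative sketch): a controller driver's idle/resume paths.
+ * restart_val stands in for the spec's restart-time encoding and is a
+ * hypothetical value here.
+ *
+ *	Entering a low-power state:
+ *		ret = slim_ctrl_clk_pause(ctrl, false, restart_val);
+ *	On resume, before any transaction:
+ *		ret = slim_ctrl_clk_pause(ctrl, true, 0);
+ */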
+
+/*
+ * slim_driver_register: Client driver registration with slimbus
+ * @drv: Client driver to be associated with client-device.
+ * This API will register the client driver with the slimbus
+ * It is called from the driver's module-init function.
+ */
+extern int slim_driver_register(struct slim_driver *drv);
+
+/*
+ * slim_driver_unregister: Undo effects of slim_driver_register
+ * @drv: Client driver to be unregistered
+ */
+extern void slim_driver_unregister(struct slim_driver *drv);
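+
+/*
+ * Example (an illustrative sketch): minimal client driver registration. The
+ * "my-codec" name is hypothetical, and the struct slim_device_id layout
+ * (device name plus driver data) is assumed from mod_devicetable.h.
+ *
+ *	static const struct slim_device_id my_codec_ids[] = {
+ *		{ "my-codec", 0 },
+ *		{ }
+ *	};
+ *
+ *	static struct slim_driver my_codec_driver = {
+ *		.probe = my_codec_probe,
+ *		.device_up = my_codec_device_up,
+ *		.driver = {
+ *			.name = "my-codec",
+ *			.owner = THIS_MODULE,
+ *		},
+ *		.id_table = my_codec_ids,
+ *	};
+ *
+ * module_init()/module_exit() then call slim_driver_register() and
+ * slim_driver_unregister() respectively.
+ */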
+
+/*
+ * slim_add_numbered_controller: Controller bring-up.
+ * @ctrl: Controller to be registered.
+ * A controller is registered with the framework using this API. ctrl->nr is the
+ * desired number with which slimbus framework registers the controller.
+ * Function will return -EBUSY if the number is in use.
+ */
+extern int slim_add_numbered_controller(struct slim_controller *ctrl);
+
+/*
+ * slim_del_controller: Controller tear-down.
+ * A controller added with the above API is torn down using this API.
+ */
+extern int slim_del_controller(struct slim_controller *ctrl);
+
+/*
+ * slim_add_device: Add a new device without register board info.
+ * @ctrl: Controller to which this device is to be added.
+ * Called when device doesn't have an explicit client-driver to be probed, or
+ * the client-driver is a module installed dynamically.
+ */
+extern int slim_add_device(struct slim_controller *ctrl,
+			struct slim_device *sbdev);
+
+/* slim_remove_device: Remove the effect of slim_add_device() */
+extern void slim_remove_device(struct slim_device *sbdev);
+
+/*
+ * slim_assign_laddr: Assign logical address to a device enumerated.
+ * @ctrl: Controller with which device is enumerated.
+ * @e_addr: 6-byte elemental address of the device.
+ * @e_len: buffer length for e_addr
+ * @laddr: Return logical address (if valid flag is false)
+ * @valid: true if laddr holds a valid address that controller wants to
+ *	set for this enumeration address. Otherwise framework sets index into
+ *	address table as logical address.
+ * Called by controller in response to REPORT_PRESENT. Framework will assign
+ * a logical address to this enumeration address.
+ * Function returns -EXFULL to indicate that all logical addresses are already
+ * taken.
+ */
+extern int slim_assign_laddr(struct slim_controller *ctrl, const u8 *e_addr,
+				u8 e_len, u8 *laddr, bool valid);
+
+/*
+ * slim_report_absent: Controller calls this function when a device
+ *	reports absent, OR when the device cannot be communicated with
+ * @sbdev: Device that cannot be reached, or that sent report absent
+ */
+void slim_report_absent(struct slim_device *sbdev);
+
+/*
+ * slim_framer_booted: This function is called by controller after the active
+ * framer has booted (using Bus Reset sequence, or after it has shutdown and has
+ * come back up). Components and devices on the bus may be in an undefined
+ * state, and this function triggers their drivers to reset those devices,
+ * bringing them back to the Reset state so that they can acquire sync, report
+ * present and be operational again.
+ */
+void slim_framer_booted(struct slim_controller *ctrl);
+
+/*
+ * slim_msg_response: Deliver Message response received from a device to the
+ *	framework.
+ * @ctrl: Controller handle
+ * @reply: Reply received from the device
+ * @len: Length of the reply
+ * @tid: Transaction ID received with which framework can associate reply.
+ * Called by controller to inform framework about the response received.
+ * This helps make the API asynchronous; the controller driver doesn't need
+ * to manage another table beyond the framework-managed one mapping TIDs to
+ * buffers.
+ */
+extern void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid,
+				u8 len);
+
+/*
+ * slim_busnum_to_ctrl: Map bus number to controller
+ * @busnum: Bus number
+ * Returns controller representing this bus number
+ */
+extern struct slim_controller *slim_busnum_to_ctrl(u32 busnum);
+
+/*
+ * slim_ctrl_add_boarddevs: Add devices registered by board-info
+ * @ctrl: Controller to which these devices are to be added.
+ * This API is called by the controller when it is up and running.
+ * If devices on a controller were registered before the controller itself,
+ * this will make sure that they get probed when the controller is up.
+ */
+extern void slim_ctrl_add_boarddevs(struct slim_controller *ctrl);
+
+extern const
+struct slim_device_id *slim_get_device_id(const struct slim_device *sdev);
+
+/*
+ * slim_register_board_info: Board-initialization routine.
+ * @info: List of all devices on all controllers present on the board.
+ * @n: number of entries.
+ * API enumerates respective devices on corresponding controller.
+ * Called from board-init function.
+ */
+#ifdef CONFIG_SLIMBUS
+extern int slim_register_board_info(struct slim_boardinfo const *info,
+					unsigned int n);
+#else
+static inline int slim_register_board_info(struct slim_boardinfo const *info,
+					unsigned int n)
+{
+	return 0;
+}
+#endif
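+
+/*
+ * Example (an illustrative sketch): registering one slave from a board file.
+ * The device name and 6-byte enumeration address are hypothetical.
+ *
+ *	static struct slim_device my_codec = {
+ *		.name = "my-codec",
+ *		.e_addr = {0x00, 0x01, 0x60, 0x00, 0x17, 0x02},
+ *	};
+ *
+ *	static struct slim_boardinfo my_slim_devs[] = {
+ *		{ .bus_num = 0, .slim_slave = &my_codec },
+ *	};
+ *
+ *	(from board-init code)
+ *	slim_register_board_info(my_slim_devs, ARRAY_SIZE(my_slim_devs));
+ */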
+
+static inline void *slim_get_ctrldata(const struct slim_controller *dev)
+{
+	return dev_get_drvdata(&dev->dev);
+}
+
+static inline void slim_set_ctrldata(struct slim_controller *dev, void *data)
+{
+	dev_set_drvdata(&dev->dev, data);
+}
+
+static inline void *slim_get_devicedata(const struct slim_device *dev)
+{
+	return dev_get_drvdata(&dev->dev);
+}
+
+static inline void slim_set_clientdata(struct slim_device *dev, void *data)
+{
+	dev_set_drvdata(&dev->dev, data);
+}
+#endif /* _LINUX_SLIMBUS_H */
diff --git a/include/linux/usb/f_mtp.h b/include/linux/usb/f_mtp.h
index 4e84177..8def143 100644
--- a/include/linux/usb/f_mtp.h
+++ b/include/linux/usb/f_mtp.h
@@ -19,5 +19,35 @@
 #define __LINUX_USB_F_MTP_H
 
 #include <uapi/linux/usb/f_mtp.h>
+#include <linux/ioctl.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
 
+#ifdef __KERNEL__
+
+#ifdef CONFIG_COMPAT
+struct __compat_mtp_file_range {
+	compat_int_t	fd;
+	compat_loff_t	offset;
+	int64_t		length;
+	uint16_t	command;
+	uint32_t	transaction_id;
+};
+
+struct __compat_mtp_event {
+	compat_size_t	length;
+	compat_caddr_t	data;
+};
+
+#define COMPAT_MTP_SEND_FILE              _IOW('M', 0, \
+						struct __compat_mtp_file_range)
+#define COMPAT_MTP_RECEIVE_FILE           _IOW('M', 1, \
+						struct __compat_mtp_file_range)
+#define COMPAT_MTP_SEND_EVENT             _IOW('M', 3, \
+						struct __compat_mtp_event)
+#define COMPAT_MTP_SEND_FILE_WITH_HEADER  _IOW('M', 4, \
+						struct __compat_mtp_file_range)
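+
+/*
+ * Illustrative sketch (not part of this patch): a 32-bit compat_ioctl
+ * handler would translate these to the native ioctls field by field, e.g.
+ * for MTP_SEND_FILE (assumed, along with struct mtp_file_range, to come from
+ * the uapi header included above):
+ *
+ *	case COMPAT_MTP_SEND_FILE: {
+ *		struct __compat_mtp_file_range cmfr;
+ *		struct mtp_file_range mfr;
+ *
+ *		if (copy_from_user(&cmfr, compat_ptr(arg), sizeof(cmfr)))
+ *			return -EFAULT;
+ *		mfr.fd = cmfr.fd;
+ *		mfr.offset = cmfr.offset;
+ *		mfr.length = cmfr.length;
+ *		mfr.command = cmfr.command;
+ *		mfr.transaction_id = cmfr.transaction_id;
+ *		(then invoke the native MTP_SEND_FILE path with &mfr)
+ *	}
+ */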
+#endif
+#endif
 #endif /* __LINUX_USB_F_MTP_H */
diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h
index 573d1a8..7ed5a4c 100644
--- a/include/uapi/linux/msm_kgsl.h
+++ b/include/uapi/linux/msm_kgsl.h
@@ -50,6 +50,7 @@
 #define KGSL_CONTEXT_IFH_NOP            0x00010000
 #define KGSL_CONTEXT_SECURE             0x00020000
 #define KGSL_CONTEXT_NO_SNAPSHOT        0x00040000
+#define KGSL_CONTEXT_SPARSE             0x00080000
 
 #define KGSL_CONTEXT_PREEMPT_STYLE_MASK       0x0E000000
 #define KGSL_CONTEXT_PREEMPT_STYLE_SHIFT      25
@@ -89,6 +90,7 @@
 #define KGSL_CMDBATCH_END_OF_FRAME	KGSL_CONTEXT_END_OF_FRAME   /* 0x100 */
 #define KGSL_CMDBATCH_SYNC		KGSL_CONTEXT_SYNC           /* 0x400 */
 #define KGSL_CMDBATCH_PWR_CONSTRAINT	KGSL_CONTEXT_PWR_CONSTRAINT /* 0x800 */
+#define KGSL_CMDBATCH_SPARSE		0x1000
 
 /*
  * Reserve bits [16:19] and bits [28:31] for possible bits shared between
@@ -1561,4 +1563,34 @@
 #define IOCTL_KGSL_SPARSE_BIND \
 	_IOW(KGSL_IOC_TYPE, 0x54, struct kgsl_sparse_bind)
 
+/**
+ * struct kgsl_gpu_sparse_command - Argument for
+ * IOCTL_KGSL_GPU_SPARSE_COMMAND
+ * @flags: Current flags for the object
+ * @sparselist: List of kgsl_sparse_binding_object to bind/unbind
+ * @synclist: List of kgsl_command_syncpoints
+ * @sparsesize: Size of kgsl_sparse_binding_object
+ * @numsparse: Number of elements in the sparse list
+ * @syncsize: Size of kgsl_command_syncpoint structure
+ * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
+ * @context_id: Context ID submitting the kgsl_gpu_command
+ * @timestamp: Timestamp for the submitted commands
+ * @id: Virtual ID to bind/unbind
+ */
+struct kgsl_gpu_sparse_command {
+	uint64_t flags;
+	uint64_t __user sparselist;
+	uint64_t __user synclist;
+	unsigned int sparsesize;
+	unsigned int numsparse;
+	unsigned int syncsize;
+	unsigned int numsyncs;
+	unsigned int context_id;
+	unsigned int timestamp;
+	unsigned int id;
+};
+
+#define IOCTL_KGSL_GPU_SPARSE_COMMAND \
+	_IOWR(KGSL_IOC_TYPE, 0x55, struct kgsl_gpu_sparse_command)
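+
+/*
+ * Illustrative userspace sketch (not part of this patch): submitting a
+ * sparse bind on an open KGSL device fd. The binds array of
+ * kgsl_sparse_binding_object entries, nbinds, ctxt_id and virt_obj_id are
+ * all assumed to be set up by the caller.
+ *
+ *	struct kgsl_gpu_sparse_command cmd = {
+ *		.flags = KGSL_CMDBATCH_SPARSE,
+ *		.sparselist = (uint64_t)(uintptr_t)binds,
+ *		.sparsesize = sizeof(binds[0]),
+ *		.numsparse = nbinds,
+ *		.context_id = ctxt_id,
+ *		.id = virt_obj_id,
+ *	};
+ *
+ *	ret = ioctl(fd, IOCTL_KGSL_GPU_SPARSE_COMMAND, &cmd);
+ *	(cmd.timestamp may be updated on return, since the ioctl is _IOWR)
+ */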
+
 #endif /* _UAPI_MSM_KGSL_H */
diff --git a/include/uapi/linux/usb/cdc.h b/include/uapi/linux/usb/cdc.h
index e2bc417..30258fb 100644
--- a/include/uapi/linux/usb/cdc.h
+++ b/include/uapi/linux/usb/cdc.h
@@ -231,6 +231,7 @@
 
 #define USB_CDC_SEND_ENCAPSULATED_COMMAND	0x00
 #define USB_CDC_GET_ENCAPSULATED_RESPONSE	0x01
+#define USB_CDC_RESET_FUNCTION			0x05
 #define USB_CDC_REQ_SET_LINE_CODING		0x20
 #define USB_CDC_REQ_GET_LINE_CODING		0x21
 #define USB_CDC_REQ_SET_CONTROL_LINE_STATE	0x22
diff --git a/init/main.c b/init/main.c
index 2858be7..c91ca2c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -485,11 +485,6 @@
 	smp_setup_processor_id();
 	debug_objects_early_init();
 
-	/*
-	 * Set up the the initial canary ASAP:
-	 */
-	boot_init_stack_canary();
-
 	cgroup_init_early();
 
 	local_irq_disable();
@@ -503,6 +498,10 @@
 	page_address_init();
 	pr_notice("%s", linux_banner);
 	setup_arch(&command_line);
+	/*
+	 * Set up the initial canary ASAP:
+	 */
+	boot_init_stack_canary();
 	mm_init_cpumask(&init_mm);
 	setup_command_line(command_line);
 	setup_nr_cpu_ids();