Merge "coresight: tmc: Add IOMMU support to TMC ETR"
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
new file mode 100644
index 0000000..2c23b4b
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
@@ -0,0 +1,270 @@
+MSM Bus Devices
+
+The bus devices (fabrics/NoCs) are the interconnects between various
+components on chipsets. These devices form the backbone of the chip
+topology. The entire topology of the chipset is built using the
+device-tree data of these bus devices.
+
+To add the bus devices, the following properties are required:
+
+compatible:		The bus devices need to be compatible with
+			msm-bus-fabric
+cell-id:		A 32 bit integer unique per bus per chipset. The IDs
+			for buses are in multiples of 1024.
+label:			Bus name
+qcom,fabclk-dual:	Dual set (active/sleep) bus clock name
+qcom,fabclk-active:	Active set bus clock name
+qcom,nfab:		An integer property which specifies the total number
+			of buses on the chipset.
+
+The following properties are optional as a bus might not support
+these features:
+
+qcom,ntieredslaves:	Number of tiered slaves on the bus.
+qcom,qos-freq:		QoS frequency (In KHz)
+qcom,hw-sel:		A string which decides whether QoS data
+			should be sent to RPM, set using BIMC or NoCs.
+			It can be set to "RPM", "NoC" or "BIMC".
+qcom,qos-baseoffset:	Base address offset of QoS registers from the bus device
+			base address.
+qcom,qos-delta:	 	Address delta between QoS registers of different masters.
+qcom,rpm-en:		A boolean flag indicating whether RPM transactions are
+			supported for nodes of the bus.
+qcom,ahb:		A boolean flag indicating whether the bus is ahb type.
+qcom,virt:		A boolean property indicating this is a virtual bus.
+reg:			Register space of the bus device. Not required in case
+			the bus is virtual.
+qcom,nr-lim-thresh:	The threshold below which to apply throttling of
+			non-real-time masters.
+qcom,eff-fact:		The DDR efficiency factor to be assumed. This only
+			comes into play for buses that connect to the DDR.
+
+
+The following properties are optional as collecting data via coresight might
+not be supported for every bus. The documentation for coresight properties
+can be found in:
+Documentation/devicetree/bindings/coresight/coresight.txt
+
+coresight-id		Unique integer identifier for the bus.
+coresight-name		Unique descriptive name of the bus.
+coresight-nr-inports	Number of input ports on the bus.
+coresight-outports	List of output port numbers on the bus.
+coresight-child-list	List of phandles pointing to the children of this
+			component.
+coresight-child-ports	List of input port numbers of the children.
+
+
+Any interconnect on the bus is represented as a child node.
+A child node can be of type: master, slave or a gateway.
+A gateway is an interconnect between buses and can be of both
+master and slave type.
+
+The following properties are available to characterize a child node.
+The properties can be chosen depending on the type of child node.
+
+cell-id:		For a master the ID is between 0 - 512
+			For a slave the ID is between 512 - 1024
+label:			Name of the master/slave/gateway
+qcom,masterp:		Hardware master port number(s)
+qcom,tier:		The tier to which a master/slave belongs.
+			Note that tiering might not be supported on
+			all architectures.
+qcom,hw-sel:		A string which decides whether QoS data should be sent
+			to RPM, set using BIMC or NoCs.
+			It can be set to "RPM", "NoC" or "BIMC".
+qcom,mode:		Used for masters on NoC/BIMC. Indicates which of the
+			four modes (Fixed/Limiter/Bypass/Regulator) the master
+			belongs to.
+qcom,perm-mode:		Permissible mode switches. Indicates which of the four
+			modes are supported by the master node. Generally,
+			modes are set at boot-up and not switched at run-time.
+qcom,qport:		QoS port number. This can be different from the
+			master-port number.
+qcom,ws:		Window size (in Hz), used for NoC/BIMC masters to
+			calculate saturation values.
+qcom,mas-hw-id:		A unique hardware ID agreed upon by processors across
+			the system. This ID is assigned to every master. It can
+			be used to send master specific data from
+			Apps/Modem/LPASS to RPM.
+qcom,slv-hw-id:		A unique hardware ID agreed upon by processors across
+			the system. This ID is assigned to every slave. It can
+			be used to send slave specific data from
+			Apps/Modem/LPASS to RPM.
+qcom,slaveclk-dual:	Dual set (active/sleep) slave clock name
+qcom,slaveclk-active:	Active set slave clock name
+qcom,gateway:		Flag indicating whether a particular node is a gateway.
+qcom,slavep:		Hardware slave port number(s).
+qcom,buswidth:		Width of the interconnect between a node and the bus.
+			(In Bytes).
+qcom,prio-rd:		Read priority for a BIMC bus master (Can be 0/1/2)
+qcom,prio-wr:		Write priority for a BIMC bus master (Can be 0/1/2)
+qcom,prio0:		Priority low signal for a NoC bus master
+			(Can be 0/1/2).
+qcom,prio1:		Priority high signal for a NoC bus master
+			(Can be 0/1/2)
+qcom,dual-conf:		Indicates whether a BIMC/NoC master can be configured
+			in multiple modes at run-time. (Boolean)
+qcom,mode-thresh:	Threshold mode for a BIMC/NoC master. Beyond a certain
+			threshold frequency, a threshold mode can be used.
+			(Can be Fixed/Limiter/Bypass/Regulator)
+qcom,bimc,bw:		Bandwidth limit for a BIMC master using dual modes.
+			This bandwidth is used to calculate Grant count and
+			other parameters used in Limiter and Regulator modes
+			for static BKE configuration. It is defined in KBytes/s.
+qcom,bimc,gp:		Grant Period for configuring a master in limiter
+			mode. This is an integer value in nano-seconds.
+qcom,bimc,thmp:		Medium threshold percentage for BIMC masters.
+			This percentage is used to calculate medium threshold
+			value for BIMC Masters in Limiter mode for static
+			configuration. This can be any integer value between
+			1 and 100.
+qcom,thresh:		Threshold beyond which the mode usage is switched from
+			the mode specified by qcom,mode to the one specified by
+			qcom,mode-thresh. These thresholds can be set up in
+			increasing order, so the requested IB is evaluated at
+			each threshold level before deciding to switch QoS modes
+			and apply the corresponding qcom,bimc,bw limiting
+			bandwidth as needed. This is specified in KBytes/s.
+qcom,rt-mas:		Indicates if a master node is a realtime master with
+			hard deadlines.
+qcom,nr-lim:		Indicates that this is a non-real-time master which can
+			be throttled in case of concurrent scenarios.
+qcom,floor-bw:		Represents the floor bandwidth below which this master
+			cannot be throttled. This floor bandwidth is specified in
+			KBytes/s.
+qcom,ff:		The fudge factor used by clients when voting for
+			bandwidth from the node.
+qcom,bcm-name:		The name used to fetch details about the bcm device from
+			the command DB driver.
+qcom,drv-id:		The DRV id associated with the RSC, used to differentiate
+			between RSCs owned by different execution environments.
+qcom,defer-init-qos:	Flag to force defer initial QoS configuration at probe time.
+qcom,sbm-offset:	The offset used to determine location of Sideband
+			Manager used in the disconnect mechanism when clients
+			remove bandwidth votes.
+qcom,disable-ports:	The ports to disable on the sideband manager when the
+			bandwidth requirement affecting the node reduces to 0.
+node-reg-names:		Names of the regulators associated with the bus node,
+			used to obtain the regulator phandles.
+
+Example:
+
+
+	msm-mmss-noc@fc478000 {
+		compatible = "msm-bus-fabric";
+		reg = <0xfc478000 0x00004000>;
+		cell-id = <2048>;
+		label = "msm_mmss_noc";
+		qcom,fabclk-dual = "bus_clk";
+		qcom,fabclk-active = "bus_a_clk";
+		qcom,ntieredslaves = <0>;
+		qcom,qos-freq = <4800>;
+		qcom,hw-sel = "NoC";
+		qcom,rpm-en;
+		qcom,nfab = <6>;
+		qcom,sbm-offset = <20000>;
+
+		mas-gfx3d {
+			cell-id = <26>;
+			label = "mas-gfx3d";
+			qcom,masterp = <2 3>;
+			qcom,tier = <2>;
+			qcom,hw-sel = "NoC";
+			qcom,perm-mode = "Bypass";
+			qcom,mode = "Bypass";
+			qcom,ws = <10000>;
+			qcom,qport = <2 3>;
+			qcom,mas-hw-id = <6>;
+			qcom,disable-ports = <1 2>;
+		};
+
+		mas-jpeg {
+			cell-id = <62>;
+			label = "mas-jpeg";
+			qcom,masterp = <4>;
+			qcom,tier = <2>;
+			qcom,hw-sel = "NoC";
+			qcom,perm-mode = "Bypass";
+			qcom,mode = "Bypass";
+			qcom,qport = <0>;
+			qcom,ws = <10000>;
+			qcom,mas-hw-id = <7>;
+		};
+	};
+
+	msm-bimc@fc380000 {
+		compatible = "msm-bus-fabric";
+		reg = <0xfc380000 0x0006A000>;
+		cell-id = <0>;
+		label = "msm_bimc";
+		qcom,fabclk-dual = "mem_clk";
+		qcom,fabclk-active = "mem_a_clk";
+		qcom,ntieredslaves = <0>;
+		qcom,qos-freq = <19200>;
+		qcom,hw-sel = "BIMC";
+		qcom,rpm-en;
+
+		coresight-id = <55>;
+		coresight-name = "coresight-bimc";
+		coresight-nr-inports = <0>;
+		coresight-outports = <0>;
+		coresight-child-list = <&funnel_in1>;
+		coresight-child-ports = <3>;
+
+		mas-ampss-m0 {
+			cell-id = <1>;
+			label = "mas-ampss-m0";
+			qcom,masterp = <0>;
+			qcom,tier = <2>;
+			qcom,hw-sel = "BIMC";
+			qcom,mode = "Limiter";
+			qcom,qport = <0>;
+			qcom,ws = <10000>;
+			qcom,mas-hw-id = <0>;
+			qcom,prio-rd = <0>;
+			qcom,prio-wr = <0>;
+			qcom,mode-thresh = "Fixed";
+			qcom,thresh = <2000000>;
+			qcom,dual-conf;
+			qcom,bimc,bw = <300000>;
+			qcom,bimc,gp = <5>;
+			qcom,bimc,thmp = <50>;
+		};
+	};
+
+
+
+
+The bus scaling driver also provides the ability to configure
+bus performance parameters across the entire chip-set.
+Various clients use MSM scaling APIs to request bandwidth
+between multiple master-slave pairs. The bus driver then finds
+the optimal path between the master and the slave, and aggregates
+the bandwidth and clock requests for all master-slave pairs on
+that path, and programs hardware accordingly.
+
+The device-tree data required for bus-scaling can be embedded within
+the clients' device nodes. The clients can register with the bus driver
+using the following properties:
+
+- qcom,msm-bus,name:		String representing the client-name
+- qcom,msm-bus,num-cases:	Total number of usecases
+- qcom,msm-bus,active-only:	Boolean context flag for requests in active or
+				dual (active & sleep) context
+- qcom,msm-bus,num-paths:	Total number of master-slave pairs
+- qcom,msm-bus,vectors-KBps:	Arrays of unsigned integers representing:
+				master-id, slave-id, arbitrated bandwidth
+				in KBps, instantaneous bandwidth in KBps
+
+Example:
+
+	qcom,msm-bus,name = "client-name";
+	qcom,msm-bus,num-cases = <3>;
+	qcom,msm-bus,active-only;
+	qcom,msm-bus,num-paths = <2>;
+	qcom,msm-bus,vectors-KBps =
+			<22 512 0 0>, <26 512 0 0>,
+			<22 512 320000 3200000>, <26 512 3200000 3200000>,
+			<22 512 160000 1600000>, <26 512 1600000 1600000>;
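+
+The client data above is consumed from kernel code through the msm_bus_scale
+APIs declared in <linux/msm-bus.h>. The snippet below is only an illustrative
+sketch, assuming the standard msm_bus_cl_get_pdata()/
+msm_bus_scale_register_client()/msm_bus_scale_client_update_request() client
+API; the "my_client" names are made up for the example and error recovery is
+elided:
+
+	#include <linux/platform_device.h>
+	#include <linux/msm-bus.h>
+
+	static uint32_t my_client_bus_handle;
+
+	static int my_client_bus_init(struct platform_device *pdev)
+	{
+		struct msm_bus_scale_pdata *pdata;
+
+		/* Parse the qcom,msm-bus,* properties of this device node */
+		pdata = msm_bus_cl_get_pdata(pdev);
+		if (!pdata)
+			return -EINVAL;
+
+		/* Register the client; a handle of 0 indicates failure */
+		my_client_bus_handle = msm_bus_scale_register_client(pdata);
+		if (!my_client_bus_handle)
+			return -EINVAL;
+
+		/* Vote for usecase 1 of qcom,msm-bus,vectors-KBps */
+		return msm_bus_scale_client_update_request(
+				my_client_bus_handle, 1);
+	}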
+
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt
new file mode 100644
index 0000000..a4778ef7
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt
@@ -0,0 +1,270 @@
+MSM Bus Devices for adhoc bus topologies
+
+Buses are the interconnects between various devices. The devices are
+connected in different topologies. The bus scaling driver accepts
+bandwidth requests from clients and ensures that the bandwidth requests
+can be met between the source and destination for that client.
+In order to accept and honor bandwidth requests the bus scaling driver
+needs to know about the bus topology.
+This device tree binding represents the bus devices in the SOC, their
+connections to other bus devices and the resources associated with each
+node. The bus scaling driver uses this device tree to setup the bus
+topology in order to apply client bandwidth requests.
+
+The mandatory properties for the bus driver are:
+
+compatible:		"qcom,msm-bus-device"
+compatible:		"qcom,msm-bus-rsc"
+
+The register space associated with a bus device is represented with
+the following optional properties:
+reg:			Register space for a bus device.
+reg-names:		Name of the register space for the bus device.
+
+The child nodes represent the devices on the bus.
+
+The following properties are mandatory for a child node
+
+cell-id:		The unique device id of the child node.
+			For a master the ID is between 0 - 512
+			For a slave the ID is between 512 - 1024
+			For internal nodes the range is > 10000
+			The ranges of IDs for the different types of child
+			devices are chosen for convenience; the only
+			requirement is that the IDs be unique among the
+			child devices.
+label:			Unique name of the device.
+
+The following are optional properties for child nodes:
+
+
+qcom,fab-dev:		Optional boolean parameter that states if the device
+			is a fabric device or not.
+			Typically these optional properties are used for
+			devices that represent fabric devices.
+qcom,bypass-qos-prg:	Optional debug parameter to avoid programming the QoS
+			HW registers for a given fabric device.
+			Typically these optional properties are used for
+			devices that represent fabric devices.
+qcom,base-name:		Parameter that specifies the physical base address for
+			accessing registers associated with the child device.
+			Typically these optional properties are used for
+			devices that represent fabric devices.
+qcom,base-offset:	Parameter that gives the offset from the base address to access
+			the QoS registers.
+			Typically these optional properties are used for
+			devices that represent fabric devices.
+qcom,qos-off:		Parameter that represents the delta between QoS register address
+			space for different devices.
+			Typically these optional properties are used for
+			devices that represent fabric devices.
+qcom,agg-scheme:	Parameter that represents the aggregation scheme to be used for the
+			node. This parameter defaults to LEGACY scheme. The valid options
+			are LEGACY/SCHEME_1.
+qcom,util-fact:		Parameter that represents the DDR utilization factor to be used in
+			LEGACY scheme. It is represented as actual util-factor * 100.
+qcom,vrail-comp:	Parameter that represents the voltage rail compensation to push
+			the bus to the next level if needed in LEGACY and SCHEME 1 aggregation
+			schemes. It is represented as actual vrail-comp * 100.
+qcom,util-levels:	Array of tuples that represent a bandwidth threshold and util factor
+			to be used up to the given threshold.
+qcom,bus-type:		Parameter that represents the bus type such as BIMC or NOC.
+			Typically these optional properties are used for
+			devices that represent fabric devices.
+bus-gdsc-supply:	Optional fabric device parameter that is a reference to the dual
+			context GDSC supply that is needed before clock operations.
+bus-a-gdsc-supply:	Optional fabric device parameter that is a reference to an active
+			only context GDSC supply that is needed before clock operations.
+bus-qos-gdsc-supply:	Optional node or fabric device parameter that is a reference to a GDSC
+			supply that is needed before use of the clock needed to program
+			QoS registers.
+node-gdsc-supply:	Optional node device parameter that is a reference to a GDSC supply
+			that is needed before node-clock operations.
+qcom,enable-only-clk:   Optional property that indicates that the clock doesn't
+                        support the clk_set_rate API and should only be
+                        enabled/disabled.
+qcom,setrate-only-clk:	Optional property that indicates that the bus driver
+			should only set a rate on the clock handle and not call
+			the enable/disable clock APIs.
+clock-names:		Optional property that lists the clock names associated
+			with the device, e.g. "bus_clk", "bus_a_clk".
+clocks:			Property pair that represents the clock controller and
+			the clock id. This in combination with clock-names is
+			used to obtain the handle for the clock associated with
+			this device.
+qcom,virt-dev:		Parameter used for devices that represent virtual devices. Virtual
+			devices aren't real devices on the SOC but are used to aggregate
+			resources in some special cases.
+qcom,qport:		The offset index into the master's QoS register space.
+qcom,num-ports:		The number of ports that the device has.
+qcom,ap-owned:		Property that states if the device is "owned" by the Apps processor.
+			If true then the AP will program the QoS registers for the device
+			else it is done by RPM.
+qcom,connections:	An array of phandles that represent the devices this device is connected to.
+qcom,bus-dev:		Phandle that represents the fabric device that this child node belongs to.
+qcom,qos-mode:		QoS mode to be programmed for this device, only applicable for AP owned resource.
+qcom,prio-rd:		Read priority for a BIMC bus master (Can be 0/1/2)
+qcom,prio-wr:		Write priority for a BIMC bus master (Can be 0/1/2)
+qcom,prio0:		Priority low signal for a NoC bus master
+			(Can be 0/1/2).
+qcom,prio1:		Priority high signal for a NoC bus master
+			(Can be 0/1/2).
+qcom,reg-prio0:		Priority low signal for a NoC bus master when the
+			master port is in regulator QoS mode (Can be 0/1/2).
+qcom,reg-prio1:		Priority high signal for a NoC bus master when the
+			master port is in regulator QoS mode.
+qcom,bw_buffer:		Optional parameter in KBytes used to specify a buffer value that should be added to
+			the voted bandwidth value to figure out the limiting bandwidth for a master port.
+qcom,buswidth:		The buswidth at the device, default is 8 bytes.
+qcom,mas-rpm-id:	For non-AP owned device this is the RPM id for devices that are bus masters.
+			This is the id that is used when sending a message to RPM for this device.
+qcom,slv-rpm-id:	For non-AP owned device this is the RPM id for devices that are bus slaves.
+			This is the id that is used when sending a message to RPM for this device.
+qcom,blacklist:         An array of phandles that represent devices that this device
+			cannot connect to either directly or via any number of
+			intermediate nodes.
+qcom,agg-ports:		The number of aggregation ports on the bus.
+qcom,node-qos-bcms:	Optional property to target specific BCMs to toggle during QoS configuration;
+			this ensures the QoS register space is clocked and accessible. The array is
+			defined as follows: BCM node ID, VoteX, VoteY. The vectors must be defined in
+			sets of these three values.
+qcom,prio:		Default fixed priority for bus master.
+qcom,qos-lim-params:	Array containing QoS limiter configurations defined as: Bandwidth, Saturation.
+			Must define "qcom,qos-lim-en" for these settings to take effect.
+qcom,qos-lim-en:	Boolean to enable limiter settings, default is disabled.
+qcom,qos-reg-params:	Array containing QoS regulator configurations defined as: Low Priority, High
+			Priority, Bandwidth, Saturation. Must define "qcom,qos-reg-mode" for these
+			settings to take effect.
+qcom,qos-reg-mode:	Array containing QoS regulator mode enablement: Read Enable, Write Enable,
+			default is disabled.
+qcom,forwarding:	Boolean indicating Urgent Forwarding enablement.
+
+The following properties are optional, as collecting data via coresight might
+not be supported for every bus, and are present on child nodes that represent
+NOC devices. The documentation for coresight properties can be found in:
+Documentation/devicetree/bindings/coresight/coresight.txt
+
+coresight-id		Unique integer identifier for the bus.
+coresight-name		Unique descriptive name of the bus.
+coresight-nr-inports	Number of input ports on the bus.
+coresight-outports	List of output port numbers on the bus.
+coresight-child-list	List of phandles pointing to the children of this
+			component.
+coresight-child-ports	List of input port numbers of the children.
+
+The following sub-nodes are optional parameters:
+
+qcom,node-qos-clks:	Optional node listing all the clocks and regulators required for programming of
+			QoS registers. Usually these are associated with fabric nodes.
+	clock-names:	An array of clock names for QoS programming,
+	clocks:		An array of clock phandles corresponding to the clock names listed above.
+	clock-name-gdsc:
+			An optional property listing the regulator associated with a given clock name.
+
+Example:
+
+&ad_hoc_bus {
+        compatible = "msm-bus-device";
+        reg = <0x580000 0x62000>;
+        reg-names = "snoc-base";
+
+        fab_snoc: fab-snoc {
+                cell-id = <1024>;
+                label = "fab-snoc";
+                qcom,fab-dev;
+                qcom,bypass-qos-prg;
+		qcom,agg-scheme = <SCHEME_1>;
+		qcom,util-levels = <450000 133>,
+			<750000 154>;
+                qcom,base-name = "snoc-base";
+                qcom,base-offset = <0x7000>;
+                qcom,qos-off = <0x1000>;
+                qcom,bus-type = <1>;
+                clock-names = "bus_clk", "bus_a_clk";
+                clocks = <&clock_rpm  clk_snoc_msmbus_clk>,
+                      <&clock_rpm  clk_snoc_msmbus_a_clk>;
+		qcom,node-qos-clks {
+			clock-names = "q0-clk", "q1-clk";
+			clocks = <&clock_gcc clk_q0_clk>,
+				<&clock_gcc clk_q1_clk>;
+			q0-clk-supply = <&gdsc_q0_clk>;
+		};
+		qcom,node-qos-bcms = <0x7011 0 1>;
+		qcom,prio = <1>;
+		qcom,qos-lim-params = <1000 1000>;
+		qcom,qos-lim-en;
+		qcom,qos-reg-params = <1 2 1000 1000>;
+		qcom,qos-reg-mode = <1 1>;
+        };
+
+        mm_int_bimc: mm-int-bimc {
+                cell-id = <10003>;
+                label = "mm-int-bimc";
+		qcom,util-fact = <154>;
+		qcom,vrail-comp = <100>;
+                qcom,ap-owned;
+                qcom,connections = <&snoc_bimc_1_mas>;
+                qcom,bus-dev = <&fab_snoc>;
+                qcom,buswidth = <16>;
+        };
+
+        snoc_int_0: snoc-int-0 {
+                cell-id = <10004>;
+                label = "snoc-int-0";
+                qcom,connections = <&slv_qdss_stm &slv_imem &snoc_pnoc_mas>;
+                qcom,bus-dev = <&fab_snoc>;
+                qcom,mas-rpm-id = <99>;
+                qcom,slv-rpm-id = <130>;
+                qcom,buswidth = <8>;
+        };
+};
+
+
+The bus scaling driver also provides the ability to configure
+bus performance parameters across the entire chip-set.
+Various clients use MSM scaling APIs to request bandwidth
+between multiple master-slave pairs. The bus driver then finds
+the optimal path between the master and the slave, and aggregates
+the bandwidth and clock requests for all master-slave pairs on
+that path, and programs hardware accordingly.
+
+The device-tree data required for bus-scaling can be embedded within
+the clients' device nodes. The clients can register with the bus driver
+using the following properties:
+
+- qcom,msm-bus,name:		String representing the client-name
+- qcom,msm-bus,num-cases:	Total number of usecases
+- qcom,msm-bus,active-only:	Boolean context flag for requests in active or
+				dual (active & sleep) context
+- qcom,msm-bus,num-paths:	Total number of master-slave pairs
+- qcom,msm-bus,vectors-KBps:	Arrays of unsigned integers representing:
+				master-id, slave-id, arbitrated bandwidth
+				in KBps, instantaneous bandwidth in KBps
+
+The following are optional properties for client's device nodes:
+
+- qcom,msm-bus,alc-voter:	Boolean alc_voter flag to indicate that client
+				will vote as an Active Latency Client.
+- qcom,msm-bus,vectors-alc:	Arrays of unsigned integers representing:
+				first access latency, idle time in ns. This
+				property is required if qcom,msm-bus,alc-voter
+				is present.
+
+Example for default client:
+
+	qcom,msm-bus,name = "client-name";
+	qcom,msm-bus,num-cases = <3>;
+	qcom,msm-bus,active-only;
+	qcom,msm-bus,num-paths = <2>;
+	qcom,msm-bus,vectors-KBps =
+			<22 512 0 0>, <26 512 0 0>,
+			<22 512 320000 3200000>, <26 512 3200000 3200000>,
+			<22 512 160000 1600000>, <26 512 1600000 1600000>;
+
+Example for ALC client:
+
+	qcom,msm-bus,name = "client-name";
+	qcom,msm-bus,num-cases = <2>;
+	qcom,msm-bus,active-only;
+	qcom,msm-bus,alc-voter;
+	qcom,msm-bus,vectors-alc =
+			<0 0>,
+			<500 1600>;
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus_rules.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus_rules.txt
new file mode 100644
index 0000000..b68284c
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_bus_rules.txt
@@ -0,0 +1,62 @@
+MSM Bus static bandwidth rules for adhoc bus topologies
+
+Buses are the interconnects between various devices. The devices are
+connected in different topologies. The static bandwidth rules allow
+setting up SOC specific rules to monitor certain bandwidth requests
+at different bus nodes. When the conditions of the rule are met,
+the bus driver will be given a list of actions to be taken on specific
+bus master ports (throttle on/off, what bandwidth to throttle to, etc.).
+
+The mandatory properties for the bus driver are:
+
+compatible:	"qcom,msm-bus-static-bw-rules"
+
+The static_rules node can have numerous rules for the different bandwidth voting
+conditions to be monitored. The mandatory properties for the rules are:
+
+- qcom,src-nodes:		An array of phandles denoting the source nodes
+				whose bandwidth votes need to be monitored.
+- qcom,src-field:		This field represents the voted field of the
+				source node to be monitored. Possible values
+				are FLD_IB/FLD_AB/FLD_CLK
+- qcom,src-op:			The operator to be used when comparing a node's
+				bandwidth vote with a threshold. Possible values
+				are OP_LE/OP_LT/OP_GT/OP_GE.
+- qcom,thresh:			The threshold in Kbytes/s to be used in vote
+				evaluation.
+- qcom,mode:			The QoS mode to be applied when this rule's
+				criteria are satisfied. Possible values are
+				THROTTLE_ON/THROTTLE_OFF
+- qcom,dest-node:		An array of phandles representing the nodes to
+				which the QoS mode is to be applied.
+
+The optional properties for the rule node are:
+- qcom,dest-bw:			The destination bandwidth value in Kbytes/s to
+				be used toward the QoS mode for the destination
+				node.
+
+Example:
+	static-rules {
+		compatible = "qcom,msm-bus-static-bw-rules";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		rule@0 {
+			qcom,src-nodes = <&mas_apss>;
+			qcom,src-field = <FLD_IB>;
+			qcom,src-op = <OP_LE>;
+			qcom,thresh = <1599078>;
+			qcom,mode = <THROTTLE_ON>;
+			qcom,dest-node = <&mas_apss>;
+			qcom,dest-bw = <1599078>;
+		};
+
+		rule@1 {
+			qcom,src-nodes = <&mas_apss>;
+			qcom,src-field = <FLD_IB>;
+			qcom,src-op = <OP_GT>;
+			qcom,thresh = <1599078>;
+			qcom,mode = <THROTTLE_OFF>;
+			qcom,dest-node = <&mas_apss>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ion.txt b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
index 3b6cf9c..cc7d2ba 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
@@ -19,6 +19,7 @@
   the following:
     - "SYSTEM"
     - "CARVEOUT"
+    - "SECURE_CARVEOUT"
     - "DMA"
     - "HYP_CMA"
     - "SYSTEM_SECURE"
@@ -57,3 +58,32 @@
                  };
 
         };
+
+"SECURE_CARVEOUT"
+
+This heap type is expected to contain multiple child nodes. Each child node
+shall contain the following required properties:
+
+- memory-region:
+Refer to Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+
+- token:
+A u32 containing the set of secure domains which will be able to access the
+memory-region.
+
+Example:
+qcom,ion {
+	compatible = "qcom,msm-ion";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	qcom,ion-heap@14 {
+		reg = <14>;
+		qcom,ion-heap-type = "SECURE_CARVEOUT";
+
+		node1 {
+			memory-region = <&cp_region>;
+			token = <ION_FLAG_CP_TOUCH>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
index 74991a0..19a9d359 100644
--- a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
@@ -50,6 +50,11 @@
 			to enable.
  - qcom,reset-aon-logic: If present, the GPU DEMET cells need to be reset while
 			 enabling the GX GDSC.
+ - vdd_parent-supply:	phandle to the regulator that this GDSC gates. If
+			present, a vote for a minimum operational voltage
+			(LOW_SVS) is required on the GDSC parent regulator
+			prior to configuring the GDSC. The vote is removed once
+			the GDSC FSM has latched on to the new state.
  - resets: reset specifier pair consisting of phandle for the reset controller
 			and reset lines used by this controller. These can be
 			supplied only if we support qcom,skip-logic-collapse.
diff --git a/Documentation/devicetree/bindings/soc/qcom/fsa4480-i2c.txt b/Documentation/devicetree/bindings/soc/qcom/fsa4480-i2c.txt
new file mode 100644
index 0000000..ae128eb
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/fsa4480-i2c.txt
@@ -0,0 +1,18 @@
+Qualcomm Technologies, Inc.
+
+Fairchild FSA4480 Device
+
+This device is used for switching the orientation of the USB-C analog
+signals and for display. It uses I2C communication to set the registers
+that configure the switches inside the FSA4480 chip to change
+orientation and also to set the SBU1/SBU2 connections of USB-C.
+
+Required properties:
+ - compatible:          Should be "qcom,fsa4480-i2c".
+ - reg:                 I2C device address of the device
+
+Example:
+	fsa4480: fsa4480@43 {
+		compatible = "qcom,fsa4480-i2c";
+		reg = <0x43>;
+	};
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,glink-probe.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,glink-probe.txt
new file mode 100644
index 0000000..badb9f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,glink-probe.txt
@@ -0,0 +1,69 @@
+Qualcomm Technologies, Inc. GLINK Probe
+
+This binding describes the GLINK Probe driver, a device
+that initializes the GLINK edge pairs within the system.
+
+- compatible :
+	Usage: required
+	Value type: <stringlist>
+	Definition: must be "qcom,glink"
+
+= SUBNODES
+The GLINK probe node must contain subnodes that describe the
+edge-pairs. See qcom,glink.txt for details on how to describe them.
+
+In addition to the properties in qcom,glink.txt, the GLINK Probe driver
+requires the qcom,glink-label and transport type to be specified in the
+subnodes.
+
+- transport :
+	Usage: required
+	Value type: <stringlist>
+	Definition: must be "smem", "spss", or "spi"
+
+- qcom,glink-label :
+	Usage: required
+	Value type: <stringlist>
+	Definition: specifies the identifier of the remote proc of this edge.
+
+= GLINK_SSR
+The GLINK probe driver also initializes the GLINK_SSR channel for the edges
+that it brings up. The channel should be specified as a subnode to each edge. In
+addition to the properties in qcom,glink.txt to specify a channel device node,
+the qcom,notify-edges property must be defined.
+
+- qcom,notify-edges :
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: list of phandles that specify the subsystems this glink edge
+		    needs to receive ssr notifications about.
+
+= EXAMPLE
+qcom,glink {
+	compatible = "qcom,glink";
+	glink_modem: modem {
+		transport = "smem";
+		qcom,remote-pid = <0>;
+		mboxes = <&apcs_glb 8>;
+		mbox-names = "mpss_smem";
+		interrupts = <GIC_SPI 449 IRQ_TYPE_EDGE_RISING>;
+
+		qcom,modem_glink_ssr {
+			qcom,glink-channels = "glink_ssr";
+			qcom,notify-edges = <&glink_adsp>;
+		};
+	};
+
+	glink_adsp: adsp {
+		transport = "smem";
+		qcom,remote-pid = <2>;
+		mboxes = <&apcs_glb 4>;
+		mbox-names = "adsp_smem";
+		interrupts = <GIC_SPI 348 IRQ_TYPE_EDGE_RISING>;
+
+		qcom,modem_glink_ssr {
+			qcom,glink-channels = "glink_ssr";
+			qcom,notify-edges = <&glink_modem>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
index 880dc1e..33beda5 100644
--- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -69,6 +69,9 @@
 	  events.
 - qcom,num-gsi-evt-buffs: If present, specifies number of GSI based hardware accelerated
   event buffers. 1 event buffer is needed per h/w accelerated endpoint.
+- qcom,gsi-reg-offset: USB GSI wrapper register offsets. Providing this is mandatory
+  if the qcom,num-gsi-evt-buffs property is specified. Check the dwc3-msm driver for
+  the order and names of the register offsets that need to be provided.
 - qcom,pm-qos-latency: This represents max tolerable CPU latency in microsecs,
 	which is used as a vote by driver to get max performance in perf mode.
 - qcom,smmu-s1-bypass: If present, configure SMMU to bypass stage 1 translation.
diff --git a/arch/arm64/boot/dts/qcom/kona-ion.dtsi b/arch/arm64/boot/dts/qcom/kona-ion.dtsi
index 4a57f8f..b21d5e8 100644
--- a/arch/arm64/boot/dts/qcom/kona-ion.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-ion.dtsi
@@ -9,8 +9,8 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		system_heap: qcom,ion-heap@19 {
-			reg = <0x19>;
+		system_heap: qcom,ion-heap@25 {
+			reg = <0x25>;
 			qcom,ion-heap-type = "SYSTEM";
 		};
 
diff --git a/arch/arm64/boot/dts/qcom/kona-rumi.dtsi b/arch/arm64/boot/dts/qcom/kona-rumi.dtsi
index a077e1b..0c0a68f 100644
--- a/arch/arm64/boot/dts/qcom/kona-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-rumi.dtsi
@@ -26,7 +26,7 @@
 	limit-tx-hs-gear = <1>;
 	limit-rx-hs-gear = <1>;
 
-	vdd-hba-supply = <&pm8150_s4>;
+	vdd-hba-supply = <&ufs_phy_gdsc>;
 	vdd-hba-fixed-regulator;
 	vcc-supply = <&pm8150_l17>;
 	vccq2-supply = <&pm8150_s4>;
@@ -41,3 +41,36 @@
 	spm-level = <0>;
 	status = "ok";
 };
+
+&soc {
+	usb_emu_phy: usb_emu_phy@a720000 {
+		compatible = "qcom,usb-emu-phy";
+		reg = <0x0a720000 0x9500>,
+		      <0x0a6f8800 0x100>;
+		reg-names = "base", "qscratch_base";
+
+		qcom,emu-init-seq = <0xfff0 0x4
+				     0xfff3 0x4
+				     0x40 0x4
+				     0xfff3 0x4
+				     0xfff0 0x4
+				     0x100000 0x20
+				     0x0 0x20
+				     0x1a0 0x20
+				     0x100000 0x3c
+				     0x0 0x3c
+				     0x10060 0x3c
+				     0x0 0x4>;
+	};
+
+	usb_nop_phy: usb_nop_phy {
+		compatible = "usb-nop-xceiv";
+	};
+};
+
+&usb0 {
+	dwc3@a600000 {
+		usb-phy = <&usb_emu_phy>, <&usb_nop_phy>;
+		maximum-speed = "high-speed";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-usb.dtsi b/arch/arm64/boot/dts/qcom/kona-usb.dtsi
new file mode 100644
index 0000000..81089fe
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-usb.dtsi
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/clock/qcom,gcc-kona.h>
+
+&soc {
+	/* Primary USB port related controller */
+	usb0: ssusb@a600000 {
+		compatible = "qcom,dwc-usb3-msm";
+		reg = <0x0a600000 0x100000>;
+		reg-names = "core_base";
+
+		iommus = <&apps_smmu 0x0 0x0>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		interrupts = <0 494 0>, <0 130 0>, <0 497 0>, <0 495 0>;
+		interrupt-names = "dp_hs_phy_irq", "pwr_event_irq",
+				"ss_phy_irq", "dm_hs_phy_irq";
+		qcom,use-pdc-interrupts;
+
+		USB3_GDSC-supply = <&usb30_prim_gdsc>;
+		clocks = <&clock_gcc GCC_USB30_PRIM_MASTER_CLK>,
+			<&clock_gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
+			<&clock_gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
+			<&clock_gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+			<&clock_gcc GCC_USB30_PRIM_SLEEP_CLK>,
+			/*
+			 * GCC_USB3_SEC_CLKREF_EN provides ref_clk for both
+			 * USB instances.
+			 */
+			 <&clock_gcc GCC_USB3_SEC_CLKREF_EN>;
+		clock-names = "core_clk", "iface_clk", "bus_aggr_clk",
+					"utmi_clk", "sleep_clk", "xo";
+
+		resets = <&clock_gcc GCC_USB30_PRIM_BCR>;
+		reset-names = "core_reset";
+
+		qcom,core-clk-rate = <200000000>;
+		qcom,core-clk-rate-hs = <66666667>;
+		qcom,num-gsi-evt-buffs = <0x3>;
+		qcom,gsi-reg-offset =
+			<0x0fc /* GSI_GENERAL_CFG */
+			0x110 /* GSI_DBL_ADDR_L */
+			0x120 /* GSI_DBL_ADDR_H */
+			0x130 /* GSI_RING_BASE_ADDR_L */
+			0x144 /* GSI_RING_BASE_ADDR_H */
+			0x1a4>; /* GSI_IF_STS */
+		qcom,dwc-usb3-msm-tx-fifo-size = <27696>;
+
+		dwc3@a600000 {
+			compatible = "snps,dwc3";
+			reg = <0x0a600000 0xcd00>;
+			interrupts = <0 133 0>;
+			linux,sysdev_is_parent;
+			snps,disable-clk-gating;
+			snps,has-lpm-erratum;
+			snps,hird-threshold = /bits/ 8 <0x10>;
+			snps,ssp-u3-u0-quirk;
+			snps,usb3-u1u2-disable;
+			usb-core-id = <0>;
+			tx-fifo-resize;
+			maximum-speed = "super-speed-plus";
+			dr_mode = "drd";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/kona.dtsi b/arch/arm64/boot/dts/qcom/kona.dtsi
index 7362a6b..7e55958 100644
--- a/arch/arm64/boot/dts/qcom/kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona.dtsi
@@ -445,7 +445,7 @@
 		};
 	};
 
-qcom,msm-imem@146bf000 {
+	qcom,msm-imem@146bf000 {
 		compatible = "qcom,msm-imem";
 		reg = <0x146bf000 0x1000>;
 		ranges = <0x0 0x146bf000 0x1000>;
@@ -595,6 +595,244 @@
 		#clock-cells = <1>;
 	};
 
+	/* GCC GDSCs */
+	pcie_0_gdsc: qcom,gdsc@16b004 {
+		compatible = "qcom,gdsc";
+		reg = <0x16b004 0x4>;
+		regulator-name = "pcie_0_gdsc";
+	};
+
+	pcie_1_gdsc: qcom,gdsc@18d004 {
+		compatible = "qcom,gdsc";
+		reg = <0x18d004 0x4>;
+		regulator-name = "pcie_1_gdsc";
+	};
+
+	pcie_2_gdsc: qcom,gdsc@106004 {
+		compatible = "qcom,gdsc";
+		reg = <0x106004 0x4>;
+		regulator-name = "pcie_2_gdsc";
+	};
+
+	ufs_card_gdsc: qcom,gdsc@175004 {
+		compatible = "qcom,gdsc";
+		reg = <0x175004 0x4>;
+		regulator-name = "ufs_card_gdsc";
+	};
+
+	ufs_phy_gdsc: qcom,gdsc@177004 {
+		compatible = "qcom,gdsc";
+		reg = <0x177004 0x4>;
+		regulator-name = "ufs_phy_gdsc";
+	};
+
+	usb30_prim_gdsc: qcom,gdsc@10f004 {
+		compatible = "qcom,gdsc";
+		reg = <0x10f004 0x4>;
+		regulator-name = "usb30_prim_gdsc";
+	};
+
+	usb30_sec_gdsc: qcom,gdsc@110004 {
+		compatible = "qcom,gdsc";
+		reg = <0x110004 0x4>;
+		regulator-name = "usb30_sec_gdsc";
+	};
+
+	hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc: qcom,gdsc@17d050 {
+		compatible = "qcom,gdsc";
+		reg = <0x17d050 0x4>;
+		regulator-name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc";
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+	};
+
+	hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc: qcom,gdsc@17d058 {
+		compatible = "qcom,gdsc";
+		reg = <0x17d058 0x4>;
+		regulator-name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc";
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+	};
+
+	hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc: qcom,gdsc@17d054 {
+		compatible = "qcom,gdsc";
+		reg = <0x17d054 0x4>;
+		regulator-name = "hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc";
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+	};
+
+	hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc: qcom,gdsc@17d06c {
+		compatible = "qcom,gdsc";
+		reg = <0x17d06c 0x4>;
+		regulator-name = "hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc";
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+	};
+
+	/* CAM_CC GDSCs */
+	bps_gdsc: qcom,gdsc@ad07004 {
+		compatible = "qcom,gdsc";
+		reg = <0xad07004 0x4>;
+		regulator-name = "bps_gdsc";
+		clock-names = "ahb_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
+		parent-supply = <&VDD_MMCX_LEVEL>;
+		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+		qcom,support-hw-trigger;
+	};
+
+	ife_0_gdsc: qcom,gdsc@ad0a004 {
+		compatible = "qcom,gdsc";
+		reg = <0xad0a004 0x4>;
+		regulator-name = "ife_0_gdsc";
+		clock-names = "ahb_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
+		parent-supply = <&VDD_MMCX_LEVEL>;
+		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+	};
+
+	ife_1_gdsc: qcom,gdsc@ad0b004 {
+		compatible = "qcom,gdsc";
+		reg = <0xad0b004 0x4>;
+		regulator-name = "ife_1_gdsc";
+		clock-names = "ahb_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
+		parent-supply = <&VDD_MMCX_LEVEL>;
+		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+	};
+
+	ipe_0_gdsc: qcom,gdsc@ad08004 {
+		compatible = "qcom,gdsc";
+		reg = <0xad08004 0x4>;
+		regulator-name = "ipe_0_gdsc";
+		clock-names = "ahb_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
+		parent-supply = <&VDD_MMCX_LEVEL>;
+		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+		qcom,support-hw-trigger;
+	};
+
+	sbi_gdsc: qcom,gdsc@ad09004 {
+		compatible = "qcom,gdsc";
+		reg = <0xad09004 0x4>;
+		regulator-name = "sbi_gdsc";
+		clock-names = "ahb_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
+		parent-supply = <&VDD_MMCX_LEVEL>;
+		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+	};
+
+	titan_top_gdsc: qcom,gdsc@ad0c144 {
+		compatible = "qcom,gdsc";
+		reg = <0xad0c144 0x4>;
+		regulator-name = "titan_top_gdsc";
+		clock-names = "ahb_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
+		parent-supply = <&VDD_MMCX_LEVEL>;
+		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+	};
+
+	/* DISP_CC GDSC */
+	mdss_core_gdsc: qcom,gdsc@af03000 {
+		compatible = "qcom,gdsc";
+		reg = <0xaf03000 0x4>;
+		regulator-name = "mdss_core_gdsc";
+		clock-names = "ahb_clk";
+		clocks = <&clock_gcc GCC_DISP_AHB_CLK>;
+		parent-supply = <&VDD_MMCX_LEVEL>;
+		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+		qcom,support-hw-trigger;
+	};
+
+	/* GPU_CC GDSCs */
+	gpu_cx_hw_ctrl: syscon@3d91540 {
+		compatible = "syscon";
+		reg = <0x3d91540 0x4>;
+	};
+
+	gpu_cx_gdsc: qcom,gdsc@3d9106c {
+		compatible = "qcom,gdsc";
+		reg = <0x3d9106c 0x4>;
+		regulator-name = "gpu_cx_gdsc";
+		hw-ctrl-addr = <&gpu_cx_hw_ctrl>;
+		parent-supply = <&VDD_CX_LEVEL>;
+		qcom,no-status-check-on-disable;
+		qcom,clk-dis-wait-val = <8>;
+		qcom,gds-timeout = <500>;
+	};
+
+	gpu_gx_domain_addr: syscon@3d91508 {
+		compatible = "syscon";
+		reg = <0x3d91508 0x4>;
+	};
+
+	gpu_gx_sw_reset: syscon@3d91008 {
+		compatible = "syscon";
+		reg = <0x3d91008 0x4>;
+	};
+
+	gpu_gx_gdsc: qcom,gdsc@3d9100c {
+		compatible = "qcom,gdsc";
+		reg = <0x3d9100c 0x4>;
+		regulator-name = "gpu_gx_gdsc";
+		domain-addr = <&gpu_gx_domain_addr>;
+		sw-reset = <&gpu_gx_sw_reset>;
+		parent-supply = <&VDD_GFX_LEVEL>;
+		vdd_parent-supply = <&VDD_GFX_LEVEL>;
+		qcom,reset-aon-logic;
+	};
+
+	/* NPU GDSC */
+	npu_core_gdsc: qcom,gdsc@9981004 {
+		compatible = "qcom,gdsc";
+		reg = <0x9981004 0x4>;
+		regulator-name = "npu_core_gdsc";
+		clock-names = "ahb_clk";
+		clocks = <&clock_gcc GCC_NPU_CFG_AHB_CLK>;
+	};
+
+	/* VIDEO_CC GDSCs */
+	mvs0_gdsc: qcom,gdsc@abf0d18 {
+		compatible = "qcom,gdsc";
+		reg = <0xabf0d18 0x4>;
+		regulator-name = "mvs0_gdsc";
+		clock-names = "ahb_clk";
+		clocks = <&clock_gcc GCC_VIDEO_AHB_CLK>;
+		parent-supply = <&VDD_MMCX_LEVEL>;
+		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+	};
+
+	mvs0c_gdsc: qcom,gdsc@abf0bf8 {
+		compatible = "qcom,gdsc";
+		reg = <0xabf0bf8 0x4>;
+		regulator-name = "mvs0c_gdsc";
+		clock-names = "ahb_clk";
+		clocks = <&clock_gcc GCC_VIDEO_AHB_CLK>;
+		parent-supply = <&VDD_MMCX_LEVEL>;
+		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+	};
+
+	mvs1_gdsc: qcom,gdsc@abf0d98 {
+		compatible = "qcom,gdsc";
+		reg = <0xabf0d98 0x4>;
+		regulator-name = "mvs1_gdsc";
+		clock-names = "ahb_clk";
+		clocks = <&clock_gcc GCC_VIDEO_AHB_CLK>;
+		parent-supply = <&VDD_MMCX_LEVEL>;
+		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+	};
+
+	mvs1c_gdsc: qcom,gdsc@abf0c98 {
+		compatible = "qcom,gdsc";
+		reg = <0xabf0c98 0x4>;
+		regulator-name = "mvs1c_gdsc";
+		clock-names = "ahb_clk";
+		clocks = <&clock_gcc GCC_VIDEO_AHB_CLK>;
+		parent-supply = <&VDD_MMCX_LEVEL>;
+		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+	};
+
 	ufsphy_mem: ufsphy_mem@1d87000 {
 		reg = <0x1d87000 0xe00>; /* PHY regs */
 		reg-names = "phy_mem";
@@ -792,3 +1030,4 @@
 #include "msm-arm-smmu-kona.dtsi"
 #include "kona-pinctrl.dtsi"
 #include "kona-smp2p.dtsi"
+#include "kona-usb.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
index dbf96f7..530a1a6 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
@@ -19,6 +19,8 @@
 		#size-cells = <1>;
 		#address-cells = <1>;
 		ranges;
+		qcom,regulator-names = "vdd";
+		vdd-supply = <&gpu_cx_gdsc>;
 		interrupts =	<GIC_SPI 673 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 674 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 682 IRQ_TYPE_LEVEL_HIGH>,
@@ -34,7 +36,7 @@
 			compatible = "qcom,qsmmuv500-tbu";
 			reg = <0x3DC5000 0x1000>,
 				<0x3DC2200 0x8>;
-			reg-names = "base", "status";
+			reg-names = "base", "status-reg";
 			qcom,stream-id-range = <0x0 0x400>;
 		};
 
@@ -42,7 +44,7 @@
 			compatible = "qcom,qsmmuv500-tbu";
 			reg = <0x3DC9000 0x1000>,
 				<0x3DC2208 0x8>;
-			reg-names = "base", "status";
+			reg-names = "base", "status-reg";
 			qcom,stream-id-range = <0x400 0x400>;
 		};
 	};
@@ -180,6 +182,8 @@
 				<0x15182210 0x8>;
 			reg-names = "base", "status-reg";
 			qcom,stream-id-range = <0x800 0x400>;
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc>;
 		};
 
 		mnoc_hf_1_tbu: mnoc_hf_1_tbu@15191000 {
@@ -188,7 +192,8 @@
 				<0x15182218 0x8>;
 			reg-names = "base", "status-reg";
 			qcom,stream-id-range = <0xc00 0x400>;
-
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc>;
 		};
 
 		compute_dsp_1_tbu: compute_dsp_1_tbu@15195000 {
@@ -229,6 +234,8 @@
 				<0x15182240 0x8>;
 			reg-names = "base", "status-reg";
 			qcom,stream-id-range = <0x2000 0x400>;
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc>;
 		};
 
 		mnoc_sf_1_tbu: mnoc_sf_1_tbu@151a9000 {
@@ -237,6 +244,8 @@
 				<0x15182248 0x8>;
 			reg-names = "base", "status-reg";
 			qcom,stream-id-range = <0x2400 0x400>;
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc>;
 		};
 	};
 
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index 5e4bf51..b4c0e58 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -305,6 +305,7 @@
 CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
 CONFIG_USB_ISP1760=y
 CONFIG_USB_ISP1760_HOST_ROLE=y
 CONFIG_NOP_USB_XCEIV=y
@@ -344,10 +345,15 @@
 CONFIG_QCOM_KONA_LLCC=y
 CONFIG_QCOM_RPMH=y
 CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMP2P=y
 CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
 CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
 CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
 CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_DEVFREQ_GOV_PASSIVE=y
 CONFIG_IIO=y
@@ -392,6 +398,7 @@
 CONFIG_PANIC_TIMEOUT=5
 CONFIG_SCHEDSTATS=y
 # CONFIG_DEBUG_PREEMPT is not set
+CONFIG_IPC_LOGGING=y
 CONFIG_DEBUG_ALIGN_RODATA=y
 CONFIG_CORESIGHT=y
 CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 6c97f84..3f0e039 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -313,6 +313,7 @@
 CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
 CONFIG_USB_ISP1760=y
 CONFIG_USB_ISP1760_HOST_ROLE=y
 CONFIG_NOP_USB_XCEIV=y
@@ -353,10 +354,15 @@
 CONFIG_QCOM_KONA_LLCC=y
 CONFIG_QCOM_RPMH=y
 CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMP2P=y
 CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
 CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
 CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
 CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_MSM_CORE_HANG_DETECT=y
 CONFIG_DEVFREQ_GOV_PASSIVE=y
@@ -436,6 +442,7 @@
 CONFIG_FAIL_PAGE_ALLOC=y
 CONFIG_FAULT_INJECTION_DEBUG_FS=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
 CONFIG_QCOM_RTB=y
 CONFIG_QCOM_RTB_SEPARATE_CPUS=y
 CONFIG_FUNCTION_TRACER=y
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 04bbcd7..9a802c0 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1658,7 +1658,7 @@
 	struct device_node *of_node = dev_of_node(dev);
 	int error;
 
-	if (of_node) {
+	if (of_node && of_node_kobj(of_node)) {
 		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
 		if (error)
 			dev_warn(dev, "Error %d creating of_node link\n",error);
diff --git a/drivers/bus/mhi/devices/mhi_netdev.c b/drivers/bus/mhi/devices/mhi_netdev.c
index 6e65e46..8237de0 100644
--- a/drivers/bus/mhi/devices/mhi_netdev.c
+++ b/drivers/bus/mhi/devices/mhi_netdev.c
@@ -80,13 +80,6 @@
 	u32 alloc_failed;
 };
 
-/* important: do not exceed sk_buf->cb (48 bytes) */
-struct mhi_skb_priv {
-	void *buf;
-	size_t size;
-	struct mhi_netdev *mhi_netdev;
-};
-
 struct mhi_netdev {
 	int alias;
 	struct mhi_device *mhi_dev;
@@ -140,7 +133,6 @@
 {
 	u32 cur_mru = mhi_netdev->mru;
 	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
-	struct mhi_skb_priv *skb_priv;
 	int ret;
 	struct sk_buff *skb;
 	int no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
@@ -158,15 +150,11 @@
 			goto error_queue;
 		}
 
-		skb_priv = (struct mhi_skb_priv *)skb->cb;
-		skb_priv->buf = skb->data;
-		skb_priv->size = cur_mru;
-		skb_priv->mhi_netdev = mhi_netdev;
 		skb->dev = mhi_netdev->ndev;
 
 		spin_lock_bh(&mhi_netdev->rx_lock);
-		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, skb,
-					 skb_priv->size, MHI_EOT);
+		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, skb, cur_mru,
+					 MHI_EOT);
 		spin_unlock_bh(&mhi_netdev->rx_lock);
 
 		if (ret) {
@@ -301,12 +289,9 @@
 	struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
 	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
 	int res = 0;
-	struct mhi_skb_priv *tx_priv;
 
 	MSG_VERB("Entered\n");
 
-	tx_priv = (struct mhi_skb_priv *)(skb->cb);
-	tx_priv->mhi_netdev = mhi_netdev;
 	read_lock_bh(&mhi_netdev->pm_lock);
 
 	if (unlikely(!mhi_netdev->enabled)) {
diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c
index c524b0a..269ad77 100644
--- a/drivers/clk/qcom/gdsc-regulator.c
+++ b/drivers/clk/qcom/gdsc-regulator.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -20,6 +20,8 @@
 #include <linux/mfd/syscon.h>
 #include <linux/clk/qcom.h>
 
+#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
+
 /* GDSCR */
 #define PWR_ON_MASK		BIT(31)
 #define CLK_DIS_WAIT_MASK	(0xF << 12)
@@ -50,6 +52,7 @@
 	struct regmap           *hw_ctrl;
 	struct regmap           *sw_reset;
 	struct clk		**clocks;
+	struct regulator	*parent_regulator;
 	struct reset_control	**reset_clocks;
 	bool			toggle_mem;
 	bool			toggle_periph;
@@ -120,7 +123,7 @@
 		 * bit in the GDSCR to be set or reset after the GDSC state
 		 * changes. Hence, keep on checking for a reasonable number
 		 * of times until the bit is set with the least possible delay
-		 * between succeessive tries.
+		 * between successive tries.
 		 */
 		udelay(1);
 	}
@@ -136,6 +139,30 @@
 	if (!sc->toggle_logic)
 		return !sc->resets_asserted;
 
+	if (sc->parent_regulator) {
+		/*
+		 * The parent regulator for the GDSC is required to be on to
+		 * make any register accesses to the GDSC base. Return false
+		 * if the parent supply is disabled.
+		 */
+		if (regulator_is_enabled(sc->parent_regulator) <= 0)
+			return false;
+
+		/*
+		 * Place an explicit vote on the parent rail to cover cases when
+		 * it might be disabled between this point and reading the GDSC
+		 * registers.
+		 */
+		if (regulator_set_voltage(sc->parent_regulator,
+					RPMH_REGULATOR_LEVEL_LOW_SVS, INT_MAX))
+			return false;
+
+		if (regulator_enable(sc->parent_regulator)) {
+			regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
+			return false;
+		}
+	}
+
 	regmap_read(sc->regmap, REG_OFFSET, &regval);
 
 	if (regval & PWR_ON_MASK) {
@@ -144,10 +171,20 @@
 		 * votable GDS registers. Check the SW_COLLAPSE_MASK to
 		 * determine if HLOS has voted for it.
 		 */
-		if (!(regval & SW_COLLAPSE_MASK))
+		if (!(regval & SW_COLLAPSE_MASK)) {
+			if (sc->parent_regulator) {
+				regulator_disable(sc->parent_regulator);
+				regulator_set_voltage(sc->parent_regulator, 0,
+							INT_MAX);
+			}
 			return true;
+		}
 	}
 
+	if (sc->parent_regulator) {
+		regulator_disable(sc->parent_regulator);
+		regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
+	}
 	return false;
 }
 
@@ -159,6 +196,15 @@
 
 	mutex_lock(&gdsc_seq_lock);
 
+	if (sc->parent_regulator) {
+		ret = regulator_set_voltage(sc->parent_regulator,
+				RPMH_REGULATOR_LEVEL_LOW_SVS, INT_MAX);
+		if (ret) {
+			mutex_unlock(&gdsc_seq_lock);
+			return ret;
+		}
+	}
+
 	if (sc->root_en || sc->force_root_en)
 		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
 
@@ -166,8 +212,8 @@
 	if (regval & HW_CONTROL_MASK) {
 		dev_warn(&rdev->dev, "Invalid enable while %s is under HW control\n",
 				sc->rdesc.name);
-		mutex_unlock(&gdsc_seq_lock);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto end;
 	}
 
 	if (sc->toggle_logic) {
@@ -250,9 +296,7 @@
 					dev_err(&rdev->dev, "%s final state (after additional %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x\n",
 						sc->rdesc.name, sc->gds_timeout,
 						regval, hw_ctrl_regval);
-
-					mutex_unlock(&gdsc_seq_lock);
-					return ret;
+					goto end;
 				}
 			} else {
 				dev_err(&rdev->dev, "%s enable timed out: 0x%x\n",
@@ -264,10 +308,7 @@
 				dev_err(&rdev->dev, "%s final state: 0x%x (%d us after timeout)\n",
 					sc->rdesc.name, regval,
 					sc->gds_timeout);
-
-				mutex_unlock(&gdsc_seq_lock);
-
-				return ret;
+				goto end;
 			}
 		}
 	} else {
@@ -300,6 +341,9 @@
 		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
 
 	sc->is_gdsc_enabled = true;
+end:
+	if (sc->parent_regulator)
+		regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
 
 	mutex_unlock(&gdsc_seq_lock);
 
@@ -314,6 +358,15 @@
 
 	mutex_lock(&gdsc_seq_lock);
 
+	if (sc->parent_regulator) {
+		ret = regulator_set_voltage(sc->parent_regulator,
+				RPMH_REGULATOR_LEVEL_LOW_SVS, INT_MAX);
+		if (ret) {
+			mutex_unlock(&gdsc_seq_lock);
+			return ret;
+		}
+	}
+
 	if (sc->force_root_en)
 		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
 
@@ -371,6 +424,9 @@
 	if ((sc->is_gdsc_enabled && sc->root_en) || sc->force_root_en)
 		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
 
+	if (sc->parent_regulator)
+		regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
+
 	sc->is_gdsc_enabled = false;
 
 	mutex_unlock(&gdsc_seq_lock);
@@ -382,9 +438,33 @@
 {
 	struct gdsc *sc = rdev_get_drvdata(rdev);
 	uint32_t regval;
+	int ret;
 
 	mutex_lock(&gdsc_seq_lock);
+
+	if (sc->parent_regulator) {
+		ret = regulator_set_voltage(sc->parent_regulator,
+					RPMH_REGULATOR_LEVEL_LOW_SVS, INT_MAX);
+		if (ret) {
+			mutex_unlock(&gdsc_seq_lock);
+			return ret;
+		}
+
+		ret = regulator_enable(sc->parent_regulator);
+		if (ret) {
+			regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
+			mutex_unlock(&gdsc_seq_lock);
+			return ret;
+		}
+	}
+
 	regmap_read(sc->regmap, REG_OFFSET, &regval);
+
+	if (sc->parent_regulator) {
+		regulator_disable(sc->parent_regulator);
+		regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
+	}
+
 	mutex_unlock(&gdsc_seq_lock);
 
 	if (regval & HW_CONTROL_MASK)
@@ -401,6 +481,22 @@
 
 	mutex_lock(&gdsc_seq_lock);
 
+	if (sc->parent_regulator) {
+		ret = regulator_set_voltage(sc->parent_regulator,
+				RPMH_REGULATOR_LEVEL_LOW_SVS, INT_MAX);
+		if (ret) {
+			mutex_unlock(&gdsc_seq_lock);
+			return ret;
+		}
+
+		ret = regulator_enable(sc->parent_regulator);
+		if (ret) {
+			regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
+			mutex_unlock(&gdsc_seq_lock);
+			return ret;
+		}
+	}
+
 	regmap_read(sc->regmap, REG_OFFSET, &regval);
 
 	switch (mode) {
@@ -444,6 +540,11 @@
 		break;
 	}
 
+	if (sc->parent_regulator) {
+		regulator_disable(sc->parent_regulator);
+		regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
+	}
+
 	mutex_unlock(&gdsc_seq_lock);
 
 	return ret;
@@ -560,6 +661,19 @@
 	sc->force_root_en = of_property_read_bool(pdev->dev.of_node,
 						"qcom,force-enable-root-clk");
 
+	if (of_find_property(pdev->dev.of_node, "vdd_parent-supply", NULL)) {
+		sc->parent_regulator = devm_regulator_get(&pdev->dev,
+							"vdd_parent");
+		if (IS_ERR(sc->parent_regulator)) {
+			ret = PTR_ERR(sc->parent_regulator);
+			if (ret != -EPROBE_DEFER)
+				dev_err(&pdev->dev,
+				"Unable to get vdd_parent regulator, err: %d\n",
+					ret);
+			return ret;
+		}
+	}
+
 	for (i = 0; i < sc->clock_count; i++) {
 		const char *clock_name;
 
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index ad3fcad..2b83a41 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -49,7 +49,13 @@
 	bool
 
 config OF_KOBJ
+	bool "Display devicetree in sysfs"
 	def_bool SYSFS
+	help
+	  Some embedded platforms have no need to display the devicetree
+	  nodes and properties in sysfs. Disabling this option will save
+	  a small amount of memory, as well as decrease boot time. By
+	  default this option will be enabled if SYSFS is enabled.
 
 # Hardly any platforms need this.  It is safe to select, but only do so if you
 # need it.
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 895c83e..779d6228 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -20,6 +20,7 @@
 #include <linux/of_reserved_mem.h>
 #include <linux/sort.h>
 #include <linux/slab.h>
+#include <linux/kmemleak.h>
 
 #define MAX_RESERVED_REGIONS	32
 static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
@@ -50,8 +51,10 @@
 	}
 
 	*res_base = base;
-	if (nomap)
+	if (nomap) {
+		kmemleak_ignore_phys(base);
 		return memblock_remove(base, size);
+	}
 	return 0;
 }
 #else
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 4e18ba9..635dab9 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -43,7 +43,9 @@
 static const char * const power_supply_type_text[] = {
 	"Unknown", "Battery", "UPS", "Mains", "USB",
 	"USB_DCP", "USB_CDP", "USB_ACA", "USB_C",
-	"USB_PD", "USB_PD_DRP", "BrickID"
+	"USB_PD", "USB_PD_DRP", "BrickID",
+	"USB_HVDCP", "USB_HVDCP_3", "Wireless", "USB_FLOAT",
+	"BMS", "Parallel", "Main", "Wipower", "USB_C_UFP", "USB_C_DFP",
 };
 
 static const char * const power_supply_usb_type_text[] = {
@@ -56,13 +58,14 @@
 };
 
 static const char * const power_supply_charge_type_text[] = {
-	"Unknown", "N/A", "Trickle", "Fast"
+	"Unknown", "N/A", "Trickle", "Fast", "Taper"
 };
 
 static const char * const power_supply_health_text[] = {
 	"Unknown", "Good", "Overheat", "Dead", "Over voltage",
 	"Unspecified failure", "Cold", "Watchdog timer expire",
-	"Safety timer expire"
+	"Safety timer expire",
+	"Warm", "Cool", "Hot"
 };
 
 static const char * const power_supply_technology_text[] = {
@@ -78,6 +81,23 @@
 	"Unknown", "System", "Device"
 };
 
+static const char * const power_supply_usbc_text[] = {
+	"Nothing attached", "Sink attached", "Powered cable w/ sink",
+	"Debug Accessory", "Audio Adapter", "Powered cable w/o sink",
+	"Source attached (default current)",
+	"Source attached (medium current)",
+	"Source attached (high current)",
+	"Non compliant",
+};
+
+static const char * const power_supply_usbc_pr_text[] = {
+	"none", "dual power role", "sink", "source"
+};
+
+static const char * const power_supply_typec_src_rp_text[] = {
+	"Rp-Default", "Rp-1.5A", "Rp-3A"
+};
+
 static ssize_t power_supply_show_usb_type(struct device *dev,
 					  enum power_supply_usb_type *usb_types,
 					  ssize_t num_usb_types,
@@ -159,6 +179,7 @@
 			      power_supply_capacity_level_text[value.intval]);
 		break;
 	case POWER_SUPPLY_PROP_TYPE:
+	case POWER_SUPPLY_PROP_REAL_TYPE:
 		ret = sprintf(buf, "%s\n",
 			      power_supply_type_text[value.intval]);
 		break;
@@ -171,6 +192,23 @@
 		ret = sprintf(buf, "%s\n",
 			      power_supply_scope_text[value.intval]);
 		break;
+	case POWER_SUPPLY_PROP_TYPEC_MODE:
+		ret = sprintf(buf, "%s\n",
+			      power_supply_usbc_text[value.intval]);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+		ret = sprintf(buf, "%s\n",
+			      power_supply_usbc_pr_text[value.intval]);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_SRC_RP:
+		ret = sprintf(buf, "%s\n",
+			      power_supply_typec_src_rp_text[value.intval]);
+		break;
+	case POWER_SUPPLY_PROP_DIE_HEALTH:
+	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
+		ret = sprintf(buf, "%s\n",
+			      power_supply_health_text[value.intval]);
+		break;
 	case POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT:
 		ret = sprintf(buf, "%lld\n", value.int64val);
 		break;
@@ -309,12 +347,112 @@
 	POWER_SUPPLY_ATTR(usb_hc),
 	POWER_SUPPLY_ATTR(usb_otg),
 	POWER_SUPPLY_ATTR(charge_enabled),
+	POWER_SUPPLY_ATTR(set_ship_mode),
+	POWER_SUPPLY_ATTR(real_type),
+	POWER_SUPPLY_ATTR(charge_now_raw),
+	POWER_SUPPLY_ATTR(charge_now_error),
+	POWER_SUPPLY_ATTR(capacity_raw),
+	POWER_SUPPLY_ATTR(battery_charging_enabled),
+	POWER_SUPPLY_ATTR(charging_enabled),
+	POWER_SUPPLY_ATTR(step_charging_enabled),
+	POWER_SUPPLY_ATTR(step_charging_step),
+	POWER_SUPPLY_ATTR(pin_enabled),
+	POWER_SUPPLY_ATTR(input_suspend),
+	POWER_SUPPLY_ATTR(input_voltage_regulation),
+	POWER_SUPPLY_ATTR(input_current_max),
+	POWER_SUPPLY_ATTR(input_current_trim),
+	POWER_SUPPLY_ATTR(input_current_settled),
+	POWER_SUPPLY_ATTR(input_voltage_settled),
+	POWER_SUPPLY_ATTR(bypass_vchg_loop_debouncer),
+	POWER_SUPPLY_ATTR(charge_counter_shadow),
+	POWER_SUPPLY_ATTR(hi_power),
+	POWER_SUPPLY_ATTR(low_power),
+	POWER_SUPPLY_ATTR(temp_cool),
+	POWER_SUPPLY_ATTR(temp_warm),
+	POWER_SUPPLY_ATTR(temp_cold),
+	POWER_SUPPLY_ATTR(temp_hot),
+	POWER_SUPPLY_ATTR(system_temp_level),
+	POWER_SUPPLY_ATTR(resistance),
+	POWER_SUPPLY_ATTR(resistance_capacitive),
+	POWER_SUPPLY_ATTR(resistance_id),
+	POWER_SUPPLY_ATTR(resistance_now),
+	POWER_SUPPLY_ATTR(flash_current_max),
+	POWER_SUPPLY_ATTR(update_now),
+	POWER_SUPPLY_ATTR(esr_count),
+	POWER_SUPPLY_ATTR(buck_freq),
+	POWER_SUPPLY_ATTR(boost_current),
+	POWER_SUPPLY_ATTR(safety_timer_enabled),
+	POWER_SUPPLY_ATTR(charge_done),
+	POWER_SUPPLY_ATTR(flash_active),
+	POWER_SUPPLY_ATTR(flash_trigger),
+	POWER_SUPPLY_ATTR(force_tlim),
+	POWER_SUPPLY_ATTR(dp_dm),
+	POWER_SUPPLY_ATTR(input_current_limited),
+	POWER_SUPPLY_ATTR(input_current_now),
+	POWER_SUPPLY_ATTR(charge_qnovo_enable),
+	POWER_SUPPLY_ATTR(current_qnovo),
+	POWER_SUPPLY_ATTR(voltage_qnovo),
+	POWER_SUPPLY_ATTR(rerun_aicl),
+	POWER_SUPPLY_ATTR(cycle_count_id),
+	POWER_SUPPLY_ATTR(safety_timer_expired),
+	POWER_SUPPLY_ATTR(restricted_charging),
+	POWER_SUPPLY_ATTR(current_capability),
+	POWER_SUPPLY_ATTR(typec_mode),
+	POWER_SUPPLY_ATTR(typec_cc_orientation),
+	POWER_SUPPLY_ATTR(typec_power_role),
+	POWER_SUPPLY_ATTR(typec_src_rp),
+	POWER_SUPPLY_ATTR(pd_allowed),
+	POWER_SUPPLY_ATTR(pd_active),
+	POWER_SUPPLY_ATTR(pd_in_hard_reset),
+	POWER_SUPPLY_ATTR(pd_current_max),
+	POWER_SUPPLY_ATTR(pd_usb_suspend_supported),
+	POWER_SUPPLY_ATTR(charger_temp),
+	POWER_SUPPLY_ATTR(charger_temp_max),
+	POWER_SUPPLY_ATTR(parallel_disable),
+	POWER_SUPPLY_ATTR(pe_start),
+	POWER_SUPPLY_ATTR(soc_reporting_ready),
+	POWER_SUPPLY_ATTR(debug_battery),
+	POWER_SUPPLY_ATTR(fcc_delta),
+	POWER_SUPPLY_ATTR(icl_reduction),
+	POWER_SUPPLY_ATTR(parallel_mode),
+	POWER_SUPPLY_ATTR(die_health),
+	POWER_SUPPLY_ATTR(connector_health),
+	POWER_SUPPLY_ATTR(ctm_current_max),
+	POWER_SUPPLY_ATTR(hw_current_max),
+	POWER_SUPPLY_ATTR(pr_swap),
+	POWER_SUPPLY_ATTR(cc_step),
+	POWER_SUPPLY_ATTR(cc_step_sel),
+	POWER_SUPPLY_ATTR(sw_jeita_enabled),
+	POWER_SUPPLY_ATTR(pd_voltage_max),
+	POWER_SUPPLY_ATTR(pd_voltage_min),
+	POWER_SUPPLY_ATTR(sdp_current_max),
+	POWER_SUPPLY_ATTR(connector_type),
+	POWER_SUPPLY_ATTR(parallel_batfet_mode),
+	POWER_SUPPLY_ATTR(parallel_fcc_max),
+	POWER_SUPPLY_ATTR(min_icl),
+	POWER_SUPPLY_ATTR(moisture_detected),
+	POWER_SUPPLY_ATTR(batt_profile_version),
+	POWER_SUPPLY_ATTR(batt_full_current),
+	POWER_SUPPLY_ATTR(recharge_soc),
+	POWER_SUPPLY_ATTR(hvdcp_opti_allowed),
+	POWER_SUPPLY_ATTR(smb_en_mode),
+	POWER_SUPPLY_ATTR(smb_en_reason),
+	POWER_SUPPLY_ATTR(esr_actual),
+	POWER_SUPPLY_ATTR(esr_nominal),
+	POWER_SUPPLY_ATTR(soh),
+	POWER_SUPPLY_ATTR(clear_soh),
+	POWER_SUPPLY_ATTR(force_recharge),
+	POWER_SUPPLY_ATTR(fcc_stepper_enable),
+	POWER_SUPPLY_ATTR(toggle_stat),
+	POWER_SUPPLY_ATTR(main_fcc_max),
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
 	POWER_SUPPLY_ATTR(model_name),
 	POWER_SUPPLY_ATTR(manufacturer),
 	POWER_SUPPLY_ATTR(serial_number),
+	POWER_SUPPLY_ATTR(battery_type),
+	POWER_SUPPLY_ATTR(cycle_counts),
 };
 
 static struct attribute *
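
For illustration, a minimal sketch (assumed, not part of this change) of a supply driver publishing one of the newly handled properties, so the show path above can translate the integer into a power_supply_usbc_text[] string. The driver name, property set and returned index are example values only.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/power_supply.h>

static enum power_supply_property demo_props[] = {
	POWER_SUPPLY_PROP_TYPEC_MODE,
};

static int demo_get_property(struct power_supply *psy,
			     enum power_supply_property psp,
			     union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_TYPEC_MODE:
		/* Index 1 selects "Sink attached" in power_supply_usbc_text */
		val->intval = 1;
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct power_supply_desc demo_desc = {
	.name		= "demo-usb",
	.type		= POWER_SUPPLY_TYPE_USB,
	.properties	= demo_props,
	.num_properties	= ARRAY_SIZE(demo_props),
	.get_property	= demo_get_property,
};
/* Registered from the driver's probe with power_supply_register(). */
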
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 6e81fcc..0ea7a80 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -373,4 +373,32 @@
 	  watchdog times out. It allows for detection of cpu hangs and
 	  deadlocks. It does not run during the bootup process, so it will
 	  not catch any early lockups.
+
+config QCOM_RPMH
+	bool "Qualcomm Technologies, Inc. RPM-Hardened (RPMH) Communication"
+	depends on ARCH_QCOM && ARM64 && OF || COMPILE_TEST
+	help
+	  Support for communication with the hardened-RPM blocks in
+	  Qualcomm Technologies, Inc. (QTI) SoCs. RPMH communication uses an
+	  internal bus to transmit state requests for shared resources. A set
+	  of hardware components aggregate requests for these resources and
+	  help apply the aggregated state on the resource.
+
+config QCOM_BUS_SCALING
+	bool "Bus scaling driver"
+	help
+	  This option enables bus scaling on MSM devices. Bus scaling
+	  allows devices to request that the clocks be set to rates
+	  sufficient for the active devices' needs, without keeping the
+	  clocks at maximum frequency when a slower speed is sufficient.
+
+config QCOM_BUS_CONFIG_RPMH
+	bool "RPMH Bus scaling driver"
+	depends on QCOM_BUS_SCALING
+	help
+	  This option enables bus scaling using QCOM-specific hardware
+	  accelerators. It enables the translation of bandwidth requests
+	  from logical nodes to hardware nodes controlled by the BCM (Bus
+	  Clock Manager).
+
 endmenu
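
For context, a minimal sketch (illustrative, not taken from this patch) of how a kernel client might vote for bandwidth once QCOM_BUS_SCALING is enabled. The vector and usecase fields mirror the ones dereferenced by the arbitration code added below (src, dst, ab, ib, num_usecases, num_paths, active_only); the msm_bus_scale_* entry points and the endpoint IDs are assumptions about the client API exported via msm_bus_client_api.o.

#include <linux/kernel.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>

/* Endpoint IDs are placeholders; real IDs come from msm-bus-board.h / DT. */
#define DEMO_MASTER	1
#define DEMO_SLAVE	512

static struct msm_bus_vectors demo_vectors[] = {
	{ .src = DEMO_MASTER, .dst = DEMO_SLAVE, .ab = 0, .ib = 0 },
	{ .src = DEMO_MASTER, .dst = DEMO_SLAVE,
	  .ab = 100000000ULL, .ib = 200000000ULL },
};

static struct msm_bus_paths demo_usecases[] = {
	{ .num_paths = 1, .vectors = &demo_vectors[0] },	/* idle */
	{ .num_paths = 1, .vectors = &demo_vectors[1] },	/* active */
};

static struct msm_bus_scale_pdata demo_pdata = {
	.usecase = demo_usecases,
	.num_usecases = ARRAY_SIZE(demo_usecases),
	.name = "demo-client",
	.active_only = 0,
};

static u32 demo_bus_handle;

/* Vote usecase 1 (active) or 0 (idle); the arb code walks each vector path. */
static int demo_bus_vote(bool busy)
{
	if (!demo_bus_handle)
		demo_bus_handle = msm_bus_scale_register_client(&demo_pdata);
	if (!demo_bus_handle)
		return -ENODEV;
	return msm_bus_scale_client_update_request(demo_bus_handle,
						   busy ? 1 : 0);
}

In the code added below, register_client_adhoc() turns each (src, dst) pair into a path via getpath(), and update_request_adhoc() applies the selected usecase's ab/ib along that path before commit_data() pushes the aggregate to hardware.
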
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 7464fd4..12e2d93 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -46,3 +46,5 @@
 obj-$(CONFIG_SOC_BUS) += socinfo.o
 obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o
 obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
+CFLAGS_rpmh-rsc.o := -I$(src)
+obj-$(CONFIG_QCOM_BUS_SCALING) += msm_bus/
diff --git a/drivers/soc/qcom/msm_bus/Makefile b/drivers/soc/qcom/msm_bus/Makefile
new file mode 100644
index 0000000..d0d43d4
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for msm-bus driver specific files
+#
+obj-y +=  msm_bus_core.o msm_bus_client_api.o
+obj-$(CONFIG_OF) += msm_bus_of.o
+obj-$(CONFIG_MSM_RPM_SMD) += msm_bus_rpm_smd.o
+
+ifdef CONFIG_QCOM_BUS_CONFIG_RPMH
+	obj-y += msm_bus_fabric_rpmh.o msm_bus_arb_rpmh.o msm_bus_rules.o \
+		msm_bus_bimc_rpmh.o msm_bus_noc_rpmh.o
+	obj-$(CONFIG_OF) += msm_bus_of_rpmh.o
+else
+	obj-y += msm_bus_fabric_adhoc.o msm_bus_arb_adhoc.o msm_bus_rules.o \
+		msm_bus_bimc_adhoc.o msm_bus_noc_adhoc.o
+	obj-$(CONFIG_OF) += msm_bus_of_adhoc.o
+endif
+
+obj-$(CONFIG_DEBUG_FS) += msm_bus_dbg.o
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h b/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h
new file mode 100644
index 0000000..d5792ad
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
+#define _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+
+struct msm_bus_node_device_type;
+struct link_node {
+	uint64_t lnode_ib[NUM_CTX];
+	uint64_t lnode_ab[NUM_CTX];
+	int next;
+	struct device *next_dev;
+	struct list_head link;
+	uint32_t in_use;
+	const char *cl_name;
+};
+
+/* New types introduced for adhoc topology */
+struct msm_bus_noc_ops {
+	int (*qos_init)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq);
+	int (*set_bw)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq);
+	int (*limit_mport)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq, int enable_lim,
+			uint64_t lim_bw);
+	bool (*update_bw_reg)(int mode);
+};
+
+struct nodebw {
+	uint64_t sum_ab;
+	uint64_t last_sum_ab;
+	uint64_t max_ib;
+	uint64_t cur_clk_hz;
+	uint32_t util_used;
+	uint32_t vrail_used;
+};
+
+struct msm_bus_fab_device_type {
+	void __iomem *qos_base;
+	phys_addr_t pqos_base;
+	size_t qos_range;
+	uint32_t base_offset;
+	uint32_t qos_freq;
+	uint32_t qos_off;
+	struct msm_bus_noc_ops noc_ops;
+	enum msm_bus_hw_sel bus_type;
+	bool bypass_qos_prg;
+};
+
+struct qos_params_type {
+	int mode;
+	unsigned int prio_lvl;
+	unsigned int prio_rd;
+	unsigned int prio_wr;
+	unsigned int prio1;
+	unsigned int prio0;
+	unsigned int reg_prio1;
+	unsigned int reg_prio0;
+	unsigned int gp;
+	unsigned int thmp;
+	unsigned int ws;
+	u64 bw_buffer;
+};
+
+struct node_util_levels_type {
+	uint64_t threshold;
+	uint32_t util_fact;
+};
+
+struct node_agg_params_type {
+	uint32_t agg_scheme;
+	uint32_t num_aggports;
+	unsigned int buswidth;
+	uint32_t vrail_comp;
+	uint32_t num_util_levels;
+	struct node_util_levels_type *util_levels;
+};
+
+struct msm_bus_node_info_type {
+	const char *name;
+	unsigned int id;
+	int mas_rpm_id;
+	int slv_rpm_id;
+	int num_ports;
+	int num_qports;
+	int *qport;
+	struct qos_params_type qos_params;
+	unsigned int num_connections;
+	unsigned int num_blist;
+	bool is_fab_dev;
+	bool virt_dev;
+	bool is_traversed;
+	unsigned int *connections;
+	unsigned int *bl_cons;
+	struct device **dev_connections;
+	struct device **black_connections;
+	unsigned int bus_device_id;
+	struct device *bus_device;
+	struct rule_update_path_info rule;
+	uint64_t lim_bw;
+	bool defer_qos;
+	struct node_agg_params_type agg_params;
+};
+
+struct msm_bus_node_device_type {
+	struct msm_bus_node_info_type *node_info;
+	struct msm_bus_fab_device_type *fabdev;
+	int num_lnodes;
+	struct link_node *lnode_list;
+	struct nodebw node_bw[NUM_CTX];
+	struct list_head link;
+	unsigned int ap_owned;
+	struct nodeclk clk[NUM_CTX];
+	struct nodeclk bus_qos_clk;
+	uint32_t num_node_qos_clks;
+	struct nodeclk *node_qos_clks;
+	struct device_node *of_node;
+	struct device dev;
+	bool dirty;
+	struct list_head dev_link;
+	struct list_head devlist;
+};
+
+static inline struct msm_bus_node_device_type *to_msm_bus_node(struct device *d)
+{
+	return container_of(d, struct msm_bus_node_device_type, dev);
+}
+
+int msm_bus_enable_limiter(struct msm_bus_node_device_type *nodedev,
+				int throttle_en, uint64_t lim_bw);
+int msm_bus_commit_data(struct list_head *clist);
+void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
+					size_t new_size, gfp_t flags);
+
+extern struct msm_bus_device_node_registration
+	*msm_bus_of_to_pdata(struct platform_device *pdev);
+extern void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops);
+extern int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_of_get_static_rules(struct platform_device *pdev,
+					struct bus_rule_type **static_rule);
+extern int msm_rules_update_path(struct list_head *input_list,
+				struct list_head *output_list);
+extern void print_all_rules(void);
+#ifdef CONFIG_DEBUG_BUS_VOTER
+int msm_bus_floor_init(struct device *dev);
+#else
+static inline int msm_bus_floor_init(struct device *dev)
+{
+	return 0;
+}
+#endif /* CONFIG_DEBUG_BUS_VOTER */
+#endif /* _ARCH_ARM_MACH_MSM_BUS_ADHOC_H */
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c
new file mode 100644
index 0000000..4bd5273
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c
@@ -0,0 +1,1419 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rtmutex.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+#include <trace/events/trace_msm_bus.h>
+
+#define NUM_CL_HANDLES	50
+#define NUM_LNODES	3
+#define MAX_STR_CL	50
+
+struct bus_search_type {
+	struct list_head link;
+	struct list_head node_list;
+};
+
+struct handle_type {
+	int num_entries;
+	struct msm_bus_client **cl_list;
+};
+
+static struct handle_type handle_list;
+static LIST_HEAD(input_list);
+static LIST_HEAD(apply_list);
+static LIST_HEAD(commit_list);
+
+DEFINE_RT_MUTEX(msm_bus_adhoc_lock);
+
+static bool chk_bl_list(struct list_head *black_list, unsigned int id)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	list_for_each_entry(bus_node, black_list, link) {
+		if (bus_node->node_info->id == id)
+			return true;
+	}
+	return false;
+}
+
+static void copy_remaining_nodes(struct list_head *edge_list, struct list_head
+	*traverse_list, struct list_head *route_list)
+{
+	struct bus_search_type *search_node;
+
+	if (list_empty(edge_list) && list_empty(traverse_list))
+		return;
+
+	search_node = kzalloc(sizeof(struct bus_search_type), GFP_KERNEL);
+	INIT_LIST_HEAD(&search_node->node_list);
+	list_splice_init(edge_list, traverse_list);
+	list_splice_init(traverse_list, &search_node->node_list);
+	list_add_tail(&search_node->link, route_list);
+}
+
+/*
+ * Duplicate instantiation from msm_bus_arb.c. TODO: there needs to be a
+ * "util" file for these common functions/macros.
+ */
+uint64_t msm_bus_div64(uint64_t num, unsigned int base)
+{
+	uint64_t *n = &num;
+
+	if ((num > 0) && (num < base))
+		return 1;
+
+	switch (base) {
+	case 0:
+		WARN(1, "AXI: Divide by 0 attempted\n");
+	case 1: return num;
+	case 2: return (num >> 1);
+	case 4: return (num >> 2);
+	case 8: return (num >> 3);
+	case 16: return (num >> 4);
+	case 32: return (num >> 5);
+	}
+
+	do_div(*n, base);
+	return *n;
+}
+
+int msm_bus_device_match_adhoc(struct device *dev, void *id)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *bnode = to_msm_bus_node(dev);
+
+	if (bnode)
+		ret = (bnode->node_info->id == *(unsigned int *)id);
+	else
+		ret = 0;
+
+	return ret;
+}
+
+static int gen_lnode(struct device *dev,
+			int next_hop, int prev_idx, const char *cl_name)
+{
+	struct link_node *lnode;
+	struct msm_bus_node_device_type *cur_dev = NULL;
+	int lnode_idx = -1;
+
+	if (!dev)
+		goto exit_gen_lnode;
+
+	cur_dev = to_msm_bus_node(dev);
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		goto exit_gen_lnode;
+	}
+
+	if (!cur_dev->num_lnodes) {
+		cur_dev->lnode_list = devm_kzalloc(dev,
+				sizeof(struct link_node) * NUM_LNODES,
+								GFP_KERNEL);
+		if (!cur_dev->lnode_list)
+			goto exit_gen_lnode;
+
+		lnode = cur_dev->lnode_list;
+		cur_dev->num_lnodes = NUM_LNODES;
+		lnode_idx = 0;
+	} else {
+		int i;
+
+		for (i = 0; i < cur_dev->num_lnodes; i++) {
+			if (!cur_dev->lnode_list[i].in_use)
+				break;
+		}
+
+		if (i < cur_dev->num_lnodes) {
+			lnode = &cur_dev->lnode_list[i];
+			lnode_idx = i;
+		} else {
+			struct link_node *realloc_list;
+			size_t cur_size = sizeof(struct link_node) *
+					cur_dev->num_lnodes;
+
+			cur_dev->num_lnodes += NUM_LNODES;
+			realloc_list = msm_bus_realloc_devmem(
+					dev,
+					cur_dev->lnode_list,
+					cur_size,
+					sizeof(struct link_node) *
+					cur_dev->num_lnodes, GFP_KERNEL);
+
+			if (!realloc_list)
+				goto exit_gen_lnode;
+
+			cur_dev->lnode_list = realloc_list;
+			lnode = &cur_dev->lnode_list[i];
+			lnode_idx = i;
+		}
+	}
+
+	lnode->in_use = 1;
+	lnode->cl_name = cl_name;
+	if (next_hop == cur_dev->node_info->id) {
+		lnode->next = -1;
+		lnode->next_dev = NULL;
+	} else {
+		lnode->next = prev_idx;
+		lnode->next_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &next_hop,
+					msm_bus_device_match_adhoc);
+	}
+
+	memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX);
+	memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX);
+
+exit_gen_lnode:
+	return lnode_idx;
+}
+
+static int remove_lnode(struct msm_bus_node_device_type *cur_dev,
+				int lnode_idx)
+{
+	int ret = 0;
+
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		ret = -ENODEV;
+		goto exit_remove_lnode;
+	}
+
+	if (lnode_idx != -1) {
+		if (!cur_dev->num_lnodes ||
+				(lnode_idx > (cur_dev->num_lnodes - 1))) {
+			MSM_BUS_ERR("%s: Invalid Idx %d, num_lnodes %d",
+				__func__, lnode_idx, cur_dev->num_lnodes);
+			ret = -ENODEV;
+			goto exit_remove_lnode;
+		}
+
+		cur_dev->lnode_list[lnode_idx].next = -1;
+		cur_dev->lnode_list[lnode_idx].next_dev = NULL;
+		cur_dev->lnode_list[lnode_idx].in_use = 0;
+		cur_dev->lnode_list[lnode_idx].cl_name = NULL;
+	}
+
+exit_remove_lnode:
+	return ret;
+}
+
+static int prune_path(struct list_head *route_list, int dest, int src,
+				struct list_head *black_list, int found,
+				const char *cl_name)
+{
+	struct bus_search_type *search_node, *temp_search_node;
+	struct msm_bus_node_device_type *bus_node;
+	struct list_head *bl_list;
+	struct list_head *temp_bl_list;
+	int search_dev_id = dest;
+	struct device *dest_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &dest,
+					msm_bus_device_match_adhoc);
+	int lnode_hop = -1;
+
+	if (!found)
+		goto reset_links;
+
+	if (!dest_dev) {
+		MSM_BUS_ERR("%s: Can't find dest dev %d", __func__, dest);
+		goto exit_prune_path;
+	}
+
+	lnode_hop = gen_lnode(dest_dev, search_dev_id, lnode_hop, cl_name);
+
+	list_for_each_entry_reverse(search_node, route_list, link) {
+		list_for_each_entry(bus_node, &search_node->node_list, link) {
+			unsigned int i;
+
+			for (i = 0; i < bus_node->node_info->num_connections;
+									i++) {
+				if (bus_node->node_info->connections[i] ==
+								search_dev_id) {
+					dest_dev = bus_find_device(
+						&msm_bus_type,
+						NULL,
+						(void *)
+						&bus_node->node_info->id,
+						msm_bus_device_match_adhoc);
+
+					if (!dest_dev) {
+						lnode_hop = -1;
+						goto reset_links;
+					}
+
+					lnode_hop = gen_lnode(dest_dev,
+							search_dev_id,
+							lnode_hop, cl_name);
+					search_dev_id =
+						bus_node->node_info->id;
+					break;
+				}
+			}
+		}
+	}
+reset_links:
+	list_for_each_entry_safe(search_node, temp_search_node, route_list,
+									link) {
+		list_for_each_entry(bus_node, &search_node->node_list,
+								link)
+			bus_node->node_info->is_traversed = false;
+
+		list_del(&search_node->link);
+		kfree(search_node);
+	}
+
+	list_for_each_safe(bl_list, temp_bl_list, black_list)
+		list_del(bl_list);
+
+exit_prune_path:
+	return lnode_hop;
+}
+
+static void setup_bl_list(struct msm_bus_node_device_type *node,
+				struct list_head *black_list)
+{
+	unsigned int i;
+
+	for (i = 0; i < node->node_info->num_blist; i++) {
+		struct msm_bus_node_device_type *bdev;
+
+		bdev = to_msm_bus_node(node->node_info->black_connections[i]);
+		list_add_tail(&bdev->link, black_list);
+	}
+}
+
+static int getpath(struct device *src_dev, int dest, const char *cl_name)
+{
+	struct list_head traverse_list;
+	struct list_head edge_list;
+	struct list_head route_list;
+	struct list_head black_list;
+	struct msm_bus_node_device_type *src_node;
+	struct bus_search_type *search_node;
+	int found = 0;
+	int depth_index = 0;
+	int first_hop = -1;
+	int src;
+
+	INIT_LIST_HEAD(&traverse_list);
+	INIT_LIST_HEAD(&edge_list);
+	INIT_LIST_HEAD(&route_list);
+	INIT_LIST_HEAD(&black_list);
+
+	if (!src_dev) {
+		MSM_BUS_ERR("%s: Cannot locate src dev ", __func__);
+		goto exit_getpath;
+	}
+
+	src_node = to_msm_bus_node(src_dev);
+	if (!src_node) {
+		MSM_BUS_ERR("%s:Fatal, Source node not found", __func__);
+		goto exit_getpath;
+	}
+	src = src_node->node_info->id;
+	list_add_tail(&src_node->link, &traverse_list);
+
+	while ((!found && !list_empty(&traverse_list))) {
+		struct msm_bus_node_device_type *bus_node = NULL;
+		/* Locate dest_id in the traverse list */
+		list_for_each_entry(bus_node, &traverse_list, link) {
+			if (bus_node->node_info->id == dest) {
+				found = 1;
+				break;
+			}
+		}
+
+		if (!found) {
+			unsigned int i;
+			/* Setup the new edge list */
+			list_for_each_entry(bus_node, &traverse_list, link) {
+				/* Setup list of black-listed nodes */
+				setup_bl_list(bus_node, &black_list);
+
+				for (i = 0; i <
+				bus_node->node_info->num_connections; i++) {
+					bool skip;
+					struct msm_bus_node_device_type
+							*node_conn;
+					node_conn = to_msm_bus_node(
+				bus_node->node_info->dev_connections[i]);
+					if (
+					node_conn->node_info->is_traversed) {
+						MSM_BUS_ERR("Circ Path %d\n",
+						node_conn->node_info->id);
+						goto reset_traversed;
+					}
+					skip = chk_bl_list(&black_list,
+					bus_node->node_info->connections[i]);
+					if (!skip) {
+						list_add_tail(
+						&node_conn->link, &edge_list);
+						node_conn->node_info->
+							is_traversed = true;
+					}
+				}
+			}
+
+			/* Keep tabs of the previous search list */
+			search_node = kzalloc(sizeof(struct bus_search_type),
+					 GFP_KERNEL);
+			INIT_LIST_HEAD(&search_node->node_list);
+			list_splice_init(&traverse_list,
+					&search_node->node_list);
+			/* Add the previous search list to a route list */
+			list_add_tail(&search_node->link, &route_list);
+			/* Advancing the list depth */
+			depth_index++;
+			list_splice_init(&edge_list, &traverse_list);
+		}
+	}
+reset_traversed:
+	copy_remaining_nodes(&edge_list, &traverse_list, &route_list);
+	first_hop = prune_path(&route_list, dest, src, &black_list, found,
+								cl_name);
+
+exit_getpath:
+	return first_hop;
+}
+
+static uint64_t scheme1_agg_scheme(struct msm_bus_node_device_type *bus_dev,
+			struct msm_bus_node_device_type *fab_dev, int ctx)
+{
+	uint64_t max_ib;
+	uint64_t sum_ab;
+	uint64_t bw_max_hz;
+	uint32_t util_fact = 0;
+	uint32_t vrail_comp = 0;
+	struct node_util_levels_type *utils;
+	int i;
+	int num_util_levels;
+
+	/*
+	 *  Account for Util factor and vrail comp.
+	 *  Util factor is picked according to the current sum(AB) for this
+	 *  node and for this context.
+	 *  Vrail comp is fixed for the entire performance range.
+	 *  They default to 100 if absent.
+	 *
+	 *  The aggregated clock is computed as:
+	 *  Freq_hz = max((sum(ab) * util_fact)/num_chan, max(ib)/vrail_comp)
+	 *				/ bus-width
+	 */
+	if (bus_dev->node_info->agg_params.num_util_levels) {
+		utils = bus_dev->node_info->agg_params.util_levels;
+		num_util_levels =
+			bus_dev->node_info->agg_params.num_util_levels;
+	} else {
+		utils = fab_dev->node_info->agg_params.util_levels;
+		num_util_levels =
+			fab_dev->node_info->agg_params.num_util_levels;
+	}
+
+	sum_ab = bus_dev->node_bw[ctx].sum_ab;
+	max_ib = bus_dev->node_bw[ctx].max_ib;
+
+	for (i = 0; i < num_util_levels; i++) {
+		if (sum_ab < utils[i].threshold) {
+			util_fact = utils[i].util_fact;
+			break;
+		}
+	}
+	if (i == num_util_levels)
+		util_fact = utils[(num_util_levels - 1)].util_fact;
+
+	vrail_comp = bus_dev->node_info->agg_params.vrail_comp ?
+			bus_dev->node_info->agg_params.vrail_comp :
+			fab_dev->node_info->agg_params.vrail_comp;
+
+	bus_dev->node_bw[ctx].vrail_used = vrail_comp;
+	bus_dev->node_bw[ctx].util_used = util_fact;
+
+	if (util_fact && (util_fact != 100)) {
+		sum_ab *= util_fact;
+		sum_ab = msm_bus_div64(sum_ab, 100);
+	}
+
+	if (vrail_comp && (vrail_comp != 100)) {
+		max_ib *= 100;
+		max_ib = msm_bus_div64(max_ib, vrail_comp);
+	}
+
+	/* Account for multiple channels if any */
+	if (bus_dev->node_info->agg_params.num_aggports > 1)
+		sum_ab = msm_bus_div64(sum_ab,
+				bus_dev->node_info->agg_params.num_aggports);
+
+	if (!bus_dev->node_info->agg_params.buswidth) {
+		MSM_BUS_WARN("No bus width found for %d. Using default\n",
+					bus_dev->node_info->id);
+		bus_dev->node_info->agg_params.buswidth = 8;
+	}
+
+	bw_max_hz = max(max_ib, sum_ab);
+	bw_max_hz = msm_bus_div64(bw_max_hz,
+				bus_dev->node_info->agg_params.buswidth);
+
+	return bw_max_hz;
+}
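
As a purely illustrative walk-through of the formula in the comment above (all numbers hypothetical): with sum(ab) = 6400 MB/s, max(ib) = 3000 MB/s, util_fact = 125, vrail_comp = 80, num_aggports = 2 and buswidth = 8 bytes, the code scales the AB term to 6400 * 125 / 100 = 8000 MB/s and then halves it across the two aggregation ports to 4000 MB/s, scales the IB term to 3000 * 100 / 80 = 3750 MB/s, and returns max(4000, 3750) / 8 = 500 MHz as the requested bus clock. legacy_agg_scheme() below applies the same arithmetic but always takes util_fact from the first (index 0) utilization level instead of selecting one by sum(ab) threshold.
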
+
+static uint64_t legacy_agg_scheme(struct msm_bus_node_device_type *bus_dev,
+			struct msm_bus_node_device_type *fab_dev, int ctx)
+{
+	uint64_t max_ib;
+	uint64_t sum_ab;
+	uint64_t bw_max_hz;
+	uint32_t util_fact = 0;
+	uint32_t vrail_comp = 0;
+
+	/*
+	 *  Util_fact and vrail comp are obtained from fabric/Node's dts
+	 *  properties and are fixed for the entire performance range.
+	 *  They default to 100 if absent.
+	 *
+	 *  The clock frequency is computed as:
+	 *  Freq_hz = max((sum(ab) * util_fact)/num_chan, max(ib)/vrail_comp)
+	 *				/ bus-width
+	 */
+	util_fact = fab_dev->node_info->agg_params.util_levels[0].util_fact;
+	vrail_comp = fab_dev->node_info->agg_params.vrail_comp;
+
+	if (bus_dev->node_info->agg_params.num_util_levels)
+		util_fact =
+		bus_dev->node_info->agg_params.util_levels[0].util_fact ?
+		bus_dev->node_info->agg_params.util_levels[0].util_fact :
+		util_fact;
+
+	vrail_comp = bus_dev->node_info->agg_params.vrail_comp ?
+			bus_dev->node_info->agg_params.vrail_comp :
+			vrail_comp;
+
+	bus_dev->node_bw[ctx].vrail_used = vrail_comp;
+	bus_dev->node_bw[ctx].util_used = util_fact;
+	sum_ab = bus_dev->node_bw[ctx].sum_ab;
+	max_ib = bus_dev->node_bw[ctx].max_ib;
+
+	if (util_fact && (util_fact != 100)) {
+		sum_ab *= util_fact;
+		sum_ab = msm_bus_div64(sum_ab, 100);
+	}
+
+	if (vrail_comp && (vrail_comp != 100)) {
+		max_ib *= 100;
+		max_ib = msm_bus_div64(max_ib, vrail_comp);
+	}
+
+	/* Account for multiple channels if any */
+	if (bus_dev->node_info->agg_params.num_aggports > 1)
+		sum_ab = msm_bus_div64(sum_ab,
+				bus_dev->node_info->agg_params.num_aggports);
+
+	if (!bus_dev->node_info->agg_params.buswidth) {
+		MSM_BUS_WARN("No bus width found for %d. Using default\n",
+					bus_dev->node_info->id);
+		bus_dev->node_info->agg_params.buswidth = 8;
+	}
+
+	bw_max_hz = max(max_ib, sum_ab);
+	bw_max_hz = msm_bus_div64(bw_max_hz,
+				bus_dev->node_info->agg_params.buswidth);
+
+	return bw_max_hz;
+}
+
+static uint64_t aggregate_bus_req(struct msm_bus_node_device_type *bus_dev,
+									int ctx)
+{
+	uint64_t bw_hz = 0;
+	int i;
+	struct msm_bus_node_device_type *fab_dev = NULL;
+	uint32_t agg_scheme;
+	uint64_t max_ib = 0;
+	uint64_t sum_ab = 0;
+
+	if (!bus_dev || !to_msm_bus_node(bus_dev->node_info->bus_device)) {
+		MSM_BUS_ERR("Bus node pointer is Invalid");
+		goto exit_agg_bus_req;
+	}
+
+	fab_dev = to_msm_bus_node(bus_dev->node_info->bus_device);
+	for (i = 0; i < bus_dev->num_lnodes; i++) {
+		max_ib = max(max_ib, bus_dev->lnode_list[i].lnode_ib[ctx]);
+		sum_ab += bus_dev->lnode_list[i].lnode_ab[ctx];
+	}
+
+	bus_dev->node_bw[ctx].sum_ab = sum_ab;
+	bus_dev->node_bw[ctx].max_ib = max_ib;
+
+	if (bus_dev->node_info->agg_params.agg_scheme != AGG_SCHEME_NONE)
+		agg_scheme = bus_dev->node_info->agg_params.agg_scheme;
+	else
+		agg_scheme = fab_dev->node_info->agg_params.agg_scheme;
+
+	switch (agg_scheme) {
+	case AGG_SCHEME_1:
+		bw_hz = scheme1_agg_scheme(bus_dev, fab_dev, ctx);
+		break;
+	case AGG_SCHEME_LEG:
+		bw_hz = legacy_agg_scheme(bus_dev, fab_dev, ctx);
+		break;
+	}
+
+exit_agg_bus_req:
+	return bw_hz;
+}
+
+static void del_inp_list(struct list_head *list)
+{
+	struct rule_update_path_info *rule_node;
+	struct rule_update_path_info *rule_node_tmp;
+
+	list_for_each_entry_safe(rule_node, rule_node_tmp, list, link) {
+		list_del(&rule_node->link);
+		rule_node->added = false;
+	}
+}
+
+static void del_op_list(struct list_head *list)
+{
+	struct rule_apply_rcm_info *rule;
+	struct rule_apply_rcm_info *rule_tmp;
+
+	list_for_each_entry_safe(rule, rule_tmp, list, link)
+		list_del(&rule->link);
+}
+
+static int msm_bus_apply_rules(struct list_head *list, bool after_clk_commit)
+{
+	struct rule_apply_rcm_info *rule;
+	struct device *dev = NULL;
+	struct msm_bus_node_device_type *dev_info = NULL;
+	int ret = 0;
+
+	list_for_each_entry(rule, list, link) {
+		if (rule->after_clk_commit != after_clk_commit)
+			continue;
+
+		dev = bus_find_device(&msm_bus_type, NULL,
+				(void *) &rule->id,
+				msm_bus_device_match_adhoc);
+
+		if (!dev) {
+			MSM_BUS_ERR("Can't find dev node for %d", rule->id);
+			continue;
+		}
+		dev_info = to_msm_bus_node(dev);
+
+		ret = msm_bus_enable_limiter(dev_info, rule->throttle,
+							rule->lim_bw);
+		if (ret)
+			MSM_BUS_ERR("Failed to set limiter for %d", rule->id);
+	}
+
+	return ret;
+}
+
+static void commit_data(void)
+{
+	bool rules_registered = msm_rule_are_rules_registered();
+
+	if (rules_registered) {
+		msm_rules_update_path(&input_list, &apply_list);
+		msm_bus_apply_rules(&apply_list, false);
+	}
+
+	msm_bus_commit_data(&commit_list);
+
+	if (rules_registered) {
+		msm_bus_apply_rules(&apply_list, true);
+		del_inp_list(&input_list);
+		del_op_list(&apply_list);
+	}
+	INIT_LIST_HEAD(&input_list);
+	INIT_LIST_HEAD(&apply_list);
+	INIT_LIST_HEAD(&commit_list);
+}
+
+static void add_node_to_clist(struct msm_bus_node_device_type *node)
+{
+	struct msm_bus_node_device_type *node_parent =
+			to_msm_bus_node(node->node_info->bus_device);
+
+	if (!node->dirty) {
+		list_add_tail(&node->link, &commit_list);
+		node->dirty = true;
+	}
+
+	if (!node_parent->dirty) {
+		list_add_tail(&node_parent->link, &commit_list);
+		node_parent->dirty = true;
+	}
+}
+
+static int update_path(struct device *src_dev, int dest, uint64_t act_req_ib,
+			uint64_t act_req_bw, uint64_t slp_req_ib,
+			uint64_t slp_req_bw, uint64_t cur_ib, uint64_t cur_bw,
+			int src_idx, int ctx)
+{
+	struct device *next_dev = NULL;
+	struct link_node *lnode = NULL;
+	struct msm_bus_node_device_type *dev_info = NULL;
+	int curr_idx;
+	int ret = 0;
+	struct rule_update_path_info *rule_node;
+	bool rules_registered = msm_rule_are_rules_registered();
+
+	if (IS_ERR_OR_NULL(src_dev)) {
+		MSM_BUS_ERR("%s: No source device", __func__);
+		ret = -ENODEV;
+		goto exit_update_path;
+	}
+
+	next_dev = src_dev;
+
+	if (src_idx < 0) {
+		MSM_BUS_ERR("%s: Invalid lnode idx %d", __func__, src_idx);
+		ret = -ENXIO;
+		goto exit_update_path;
+	}
+	curr_idx = src_idx;
+
+	while (next_dev) {
+		int i;
+
+		dev_info = to_msm_bus_node(next_dev);
+
+		if (curr_idx >= dev_info->num_lnodes) {
+			MSM_BUS_ERR("%s: Invalid lnode Idx %d num lnodes %d",
+			 __func__, curr_idx, dev_info->num_lnodes);
+			ret = -ENXIO;
+			goto exit_update_path;
+		}
+
+		lnode = &dev_info->lnode_list[curr_idx];
+		if (!lnode) {
+			MSM_BUS_ERR("%s: Invalid lnode ptr lnode %d",
+				 __func__, curr_idx);
+			ret = -ENXIO;
+			goto exit_update_path;
+		}
+		lnode->lnode_ib[ACTIVE_CTX] = act_req_ib;
+		lnode->lnode_ab[ACTIVE_CTX] = act_req_bw;
+		lnode->lnode_ib[DUAL_CTX] = slp_req_ib;
+		lnode->lnode_ab[DUAL_CTX] = slp_req_bw;
+
+		for (i = 0; i < NUM_CTX; i++)
+			dev_info->node_bw[i].cur_clk_hz =
+					aggregate_bus_req(dev_info, i);
+
+		add_node_to_clist(dev_info);
+
+		if (rules_registered) {
+			rule_node = &dev_info->node_info->rule;
+			rule_node->id = dev_info->node_info->id;
+			rule_node->ib = dev_info->node_bw[ACTIVE_CTX].max_ib;
+			rule_node->ab = dev_info->node_bw[ACTIVE_CTX].sum_ab;
+			rule_node->clk =
+				dev_info->node_bw[ACTIVE_CTX].cur_clk_hz;
+			if (!rule_node->added) {
+				list_add_tail(&rule_node->link, &input_list);
+				rule_node->added = true;
+			}
+		}
+
+		next_dev = lnode->next_dev;
+		curr_idx = lnode->next;
+	}
+
+exit_update_path:
+	return ret;
+}
+
+static int remove_path(struct device *src_dev, int dst, uint64_t cur_ib,
+			uint64_t cur_ab, int src_idx, int active_only)
+{
+	struct device *next_dev = NULL;
+	struct link_node *lnode = NULL;
+	struct msm_bus_node_device_type *dev_info = NULL;
+	int ret = 0;
+	int cur_idx = src_idx;
+	int next_idx;
+
+	/*
+	 * Update the current path to zero out all requests from
+	 * this client on all paths.
+	 */
+	if (!src_dev) {
+		MSM_BUS_ERR("%s: Can't find source device", __func__);
+		ret = -ENODEV;
+		goto exit_remove_path;
+	}
+
+	ret = update_path(src_dev, dst, 0, 0, 0, 0, cur_ib, cur_ab, src_idx,
+							active_only);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error zeroing out path ctx %d",
+					__func__, ACTIVE_CTX);
+		goto exit_remove_path;
+	}
+
+	next_dev = src_dev;
+
+	while (next_dev) {
+		dev_info = to_msm_bus_node(next_dev);
+		lnode = &dev_info->lnode_list[cur_idx];
+		next_idx = lnode->next;
+		next_dev = lnode->next_dev;
+		remove_lnode(dev_info, cur_idx);
+		cur_idx = next_idx;
+	}
+
+exit_remove_path:
+	return ret;
+}
+
+static void getpath_debug(int src, int curr, int active_only)
+{
+	struct device *dev_node;
+	struct device *dev_it;
+	unsigned int hop = 1;
+	int idx;
+	struct msm_bus_node_device_type *devinfo;
+	int i;
+
+	dev_node = bus_find_device(&msm_bus_type, NULL,
+				(void *) &src,
+				msm_bus_device_match_adhoc);
+
+	if (!dev_node) {
+		MSM_BUS_ERR("SRC NOT FOUND %d", src);
+		return;
+	}
+
+	idx = curr;
+	devinfo = to_msm_bus_node(dev_node);
+	dev_it = dev_node;
+
+	MSM_BUS_ERR("Route list Src %d", src);
+	while (dev_it) {
+		struct msm_bus_node_device_type *busdev =
+			to_msm_bus_node(devinfo->node_info->bus_device);
+
+		MSM_BUS_ERR("Hop[%d] at Device %d ctx %d", hop,
+					devinfo->node_info->id, active_only);
+
+		for (i = 0; i < NUM_CTX; i++) {
+			MSM_BUS_ERR("dev info sel ib %llu",
+						devinfo->node_bw[i].cur_clk_hz);
+			MSM_BUS_ERR("dev info sel ab %llu",
+						devinfo->node_bw[i].sum_ab);
+		}
+
+		dev_it = devinfo->lnode_list[idx].next_dev;
+		idx = devinfo->lnode_list[idx].next;
+		if (dev_it)
+			devinfo = to_msm_bus_node(dev_it);
+
+		MSM_BUS_ERR("Bus Device %d", busdev->node_info->id);
+		MSM_BUS_ERR("Bus Clock %llu", busdev->clk[active_only].rate);
+
+		if (idx < 0)
+			break;
+		hop++;
+	}
+}
+
+static void unregister_client_adhoc(uint32_t cl)
+{
+	int i;
+	struct msm_bus_scale_pdata *pdata;
+	int lnode, src, curr, dest;
+	uint64_t  cur_clk, cur_bw;
+	struct msm_bus_client *client;
+	struct device *src_dev;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("%s: Null cl handle passed unregister\n",
+				__func__);
+		goto exit_unregister_client;
+	}
+	client = handle_list.cl_list[cl];
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("%s: Null pdata passed to unregister\n",
+				__func__);
+		goto exit_unregister_client;
+	}
+
+	curr = client->curr;
+	if ((curr < 0) || (curr >= pdata->num_usecases)) {
+		MSM_BUS_ERR("Invalid index Defaulting curr to 0");
+		curr = 0;
+	}
+
+	MSM_BUS_DBG("%s: Unregistering client %p", __func__, client);
+
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = client->pdata->usecase[curr].vectors[i].src;
+		dest = client->pdata->usecase[curr].vectors[i].dst;
+
+		lnode = client->src_pnode[i];
+		src_dev = client->src_devs[i];
+		cur_clk = client->pdata->usecase[curr].vectors[i].ib;
+		cur_bw = client->pdata->usecase[curr].vectors[i].ab;
+		remove_path(src_dev, dest, cur_clk, cur_bw, lnode,
+						pdata->active_only);
+	}
+	commit_data();
+	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl);
+	kfree(client->src_pnode);
+	kfree(client->src_devs);
+	kfree(client);
+	handle_list.cl_list[cl] = NULL;
+exit_unregister_client:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+}
+
+static int alloc_handle_lst(int size)
+{
+	int ret = 0;
+	struct msm_bus_client **t_cl_list;
+
+	if (!handle_list.num_entries) {
+		t_cl_list = kzalloc(sizeof(struct msm_bus_client *)
+			* NUM_CL_HANDLES, GFP_KERNEL);
+		if (ZERO_OR_NULL_PTR(t_cl_list)) {
+			ret = -ENOMEM;
+			MSM_BUS_ERR("%s: Failed to allocate handles list",
+								__func__);
+			goto exit_alloc_handle_lst;
+		}
+		handle_list.cl_list = t_cl_list;
+		handle_list.num_entries += NUM_CL_HANDLES;
+	} else {
+		t_cl_list = krealloc(handle_list.cl_list,
+				sizeof(struct msm_bus_client *) *
+				(handle_list.num_entries + NUM_CL_HANDLES),
+				GFP_KERNEL);
+		if (ZERO_OR_NULL_PTR(t_cl_list)) {
+			ret = -ENOMEM;
+			MSM_BUS_ERR("%s: Failed to allocate handles list",
+								__func__);
+			goto exit_alloc_handle_lst;
+		}
+
+		handle_list.cl_list = t_cl_list;
+		memset(&handle_list.cl_list[handle_list.num_entries], 0,
+			NUM_CL_HANDLES * sizeof(struct msm_bus_client *));
+		handle_list.num_entries += NUM_CL_HANDLES;
+	}
+exit_alloc_handle_lst:
+	return ret;
+}
+
+static uint32_t gen_handle(struct msm_bus_client *client)
+{
+	uint32_t handle = 0;
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < handle_list.num_entries; i++) {
+		if (i && !handle_list.cl_list[i]) {
+			handle = i;
+			break;
+		}
+	}
+
+	if (!handle) {
+		ret = alloc_handle_lst(NUM_CL_HANDLES);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: Failed to allocate handle list",
+							__func__);
+			goto exit_gen_handle;
+		}
+		handle = i + 1;
+	}
+	handle_list.cl_list[handle] = client;
+exit_gen_handle:
+	return handle;
+}
+
+static uint32_t register_client_adhoc(struct msm_bus_scale_pdata *pdata)
+{
+	int src, dest;
+	int i;
+	struct msm_bus_client *client = NULL;
+	int *lnode;
+	struct device *dev;
+	uint32_t handle = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL);
+	if (!client) {
+		MSM_BUS_ERR("%s: Error allocating client data", __func__);
+		goto exit_register_client;
+	}
+	client->pdata = pdata;
+
+	lnode = kcalloc(pdata->usecase->num_paths, sizeof(int), GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(lnode)) {
+		MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__);
+		goto exit_lnode_malloc_fail;
+	}
+	client->src_pnode = lnode;
+
+	client->src_devs = kcalloc(pdata->usecase->num_paths,
+					sizeof(struct device *), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(client->src_devs)) {
+		MSM_BUS_ERR("%s: Error allocating src dev ptr!", __func__);
+		goto exit_src_dev_malloc_fail;
+	}
+	client->curr = -1;
+
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = pdata->usecase->vectors[i].src;
+		dest = pdata->usecase->vectors[i].dst;
+
+		if ((src < 0) || (dest < 0)) {
+			MSM_BUS_ERR("%s:Invalid src/dst.src %d dest %d",
+				__func__, src, dest);
+			goto exit_invalid_data;
+		}
+		dev = bus_find_device(&msm_bus_type, NULL,
+				(void *) &src,
+				msm_bus_device_match_adhoc);
+		if (IS_ERR_OR_NULL(dev)) {
+			MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+				__func__, src, dest);
+			goto exit_invalid_data;
+		}
+		client->src_devs[i] = dev;
+
+		lnode[i] = getpath(dev, dest, client->pdata->name);
+		if (lnode[i] < 0) {
+			MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+				__func__, src, dest);
+			goto exit_invalid_data;
+		}
+	}
+
+	handle = gen_handle(client);
+	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER,
+					handle);
+	MSM_BUS_DBG("%s:Client handle %d %s", __func__, handle,
+						client->pdata->name);
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return handle;
+exit_invalid_data:
+	kfree(client->src_devs);
+exit_src_dev_malloc_fail:
+	kfree(lnode);
+exit_lnode_malloc_fail:
+	kfree(client);
+exit_register_client:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return handle;
+}
+
+static int update_client_paths(struct msm_bus_client *client, bool log_trns,
+							unsigned int idx)
+{
+	int lnode, src, dest, cur_idx;
+	uint64_t req_clk, req_bw, curr_clk, curr_bw, slp_clk, slp_bw;
+	int i, ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct device *src_dev;
+
+	if (!client) {
+		MSM_BUS_ERR("Client handle  Null");
+		ret = -ENXIO;
+		goto exit_update_client_paths;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("Client pdata Null");
+		ret = -ENXIO;
+		goto exit_update_client_paths;
+	}
+
+	cur_idx = client->curr;
+	client->curr = idx;
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = pdata->usecase[idx].vectors[i].src;
+		dest = pdata->usecase[idx].vectors[i].dst;
+
+		lnode = client->src_pnode[i];
+		src_dev = client->src_devs[i];
+		req_clk = client->pdata->usecase[idx].vectors[i].ib;
+		req_bw = client->pdata->usecase[idx].vectors[i].ab;
+		if (cur_idx < 0) {
+			curr_clk = 0;
+			curr_bw = 0;
+		} else {
+			curr_clk =
+				client->pdata->usecase[cur_idx].vectors[i].ib;
+			curr_bw = client->pdata->usecase[cur_idx].vectors[i].ab;
+			MSM_BUS_DBG("%s:ab: %llu ib: %llu\n", __func__,
+					curr_bw, curr_clk);
+		}
+
+		if (pdata->active_only) {
+			slp_clk = 0;
+			slp_bw = 0;
+		} else {
+			slp_clk = req_clk;
+			slp_bw = req_bw;
+		}
+
+		ret = update_path(src_dev, dest, req_clk, req_bw, slp_clk,
+			slp_bw, curr_clk, curr_bw, lnode, pdata->active_only);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: Update path failed! %d ctx %d\n",
+					__func__, ret, pdata->active_only);
+			goto exit_update_client_paths;
+		}
+
+		if (log_trns)
+			getpath_debug(src, lnode, pdata->active_only);
+	}
+	commit_data();
+exit_update_client_paths:
+	return ret;
+}
+
+static int update_context(uint32_t cl, bool active_only,
+					unsigned int ctx_idx)
+{
+	int ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct msm_bus_client *client;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	client = handle_list.cl_list[cl];
+	if (!client) {
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+	if (pdata->active_only == active_only) {
+		MSM_BUS_ERR("No change in context(%d==%d), skip\n",
+					pdata->active_only, active_only);
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	if (ctx_idx >= pdata->num_usecases) {
+		MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+			cl, ctx_idx);
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	pdata->active_only = active_only;
+
+	msm_bus_dbg_client_data(client->pdata, ctx_idx, cl);
+	ret = update_client_paths(client, false, ctx_idx);
+	if (ret) {
+		pr_err("%s: Err updating path\n", __func__);
+		goto exit_update_context;
+	}
+
+	trace_bus_update_request_end(pdata->name);
+
+exit_update_context:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static int update_request_adhoc(uint32_t cl, unsigned int index)
+{
+	int ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct msm_bus_client *client;
+	const char *test_cl = "Null";
+	bool log_transaction = false;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	client = handle_list.cl_list[cl];
+	if (!client) {
+		MSM_BUS_ERR("%s: Invalid client pointer ", __func__);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("%s: Client data Null.[client didn't register]",
+				__func__);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	if (index >= pdata->num_usecases) {
+		MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+			cl, index);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	if (client->curr == index) {
+		MSM_BUS_DBG("%s: Not updating client request idx %d unchanged",
+				__func__, index);
+		goto exit_update_request;
+	}
+
+	if (!strcmp(test_cl, pdata->name))
+		log_transaction = true;
+
+	MSM_BUS_DBG("%s: cl: %u index: %d curr: %d num_paths: %d\n", __func__,
+		cl, index, client->curr, client->pdata->usecase->num_paths);
+	msm_bus_dbg_client_data(client->pdata, index, cl);
+	ret = update_client_paths(client, log_transaction, index);
+	if (ret) {
+		pr_err("%s: Err updating path\n", __func__);
+		goto exit_update_request;
+	}
+
+	trace_bus_update_request_end(pdata->name);
+
+exit_update_request:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static void free_cl_mem(struct msm_bus_client_handle *cl)
+{
+	if (cl) {
+		kfree(cl->name);
+		kfree(cl);
+		cl = NULL;
+	}
+}
+
+static int update_bw_adhoc(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+	int ret = 0;
+	char *test_cl = "test-client";
+	bool log_transaction = false;
+	u64 slp_ib, slp_ab;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %p", __func__, cl);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	if (!strcmp(test_cl, cl->name))
+		log_transaction = true;
+
+	msm_bus_dbg_rec_transaction(cl, ab, ib);
+
+	if ((cl->cur_act_ib == ib) && (cl->cur_act_ab == ab)) {
+		MSM_BUS_DBG("%s:no change in request", cl->name);
+		goto exit_update_request;
+	}
+
+	if (cl->active_only) {
+		slp_ib = 0;
+		slp_ab = 0;
+	} else {
+		slp_ib = ib;
+		slp_ab = ab;
+	}
+
+	ret = update_path(cl->mas_dev, cl->slv, ib, ab, slp_ib, slp_ab,
+		cl->cur_act_ib, cl->cur_act_ab, cl->first_hop, cl->active_only);
+
+	if (ret) {
+		MSM_BUS_ERR("%s: Update path failed! %d active_only %d\n",
+				__func__, ret, cl->active_only);
+		goto exit_update_request;
+	}
+
+	commit_data();
+	cl->cur_act_ib = ib;
+	cl->cur_act_ab = ab;
+	cl->cur_dual_ib = slp_ib;
+	cl->cur_dual_ab = slp_ab;
+
+	if (log_transaction)
+		getpath_debug(cl->mas, cl->first_hop, cl->active_only);
+	trace_bus_update_request_end(cl->name);
+exit_update_request:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+
+	return ret;
+}
+
+static int update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab,
+				u64 act_ib, u64 slp_ib, u64 slp_ab)
+{
+	int ret = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("Invalid client handle %p", cl);
+		ret = -ENXIO;
+		goto exit_change_context;
+	}
+
+	if ((cl->cur_act_ib == act_ib) &&
+		(cl->cur_act_ab == act_ab) &&
+		(cl->cur_dual_ib == slp_ib) &&
+		(cl->cur_dual_ab == slp_ab)) {
+		MSM_BUS_ERR("No change in vote");
+		goto exit_change_context;
+	}
+
+	if (!slp_ab && !slp_ib)
+		cl->active_only = true;
+	msm_bus_dbg_rec_transaction(cl, cl->cur_act_ab, cl->cur_dual_ib);
+	ret = update_path(cl->mas_dev, cl->slv, act_ib, act_ab, slp_ib,
+				slp_ab, cl->cur_act_ib, cl->cur_act_ab,
+				cl->first_hop, cl->active_only);
+	if (ret) {
+		MSM_BUS_ERR("%s: Update path failed! %d active_only %d\n",
+				__func__, ret, cl->active_only);
+		goto exit_change_context;
+	}
+	commit_data();
+	cl->cur_act_ib = act_ib;
+	cl->cur_act_ab = act_ab;
+	cl->cur_dual_ib = slp_ib;
+	cl->cur_dual_ab = slp_ab;
+	trace_bus_update_request_end(cl->name);
+exit_change_context:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static void unregister_adhoc(struct msm_bus_client_handle *cl)
+{
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("%s: Null cl handle passed unregister\n",
+				__func__);
+		goto exit_unregister_client;
+	}
+
+	MSM_BUS_DBG("%s: Unregistering client %p", __func__, cl);
+
+	remove_path(cl->mas_dev, cl->slv, cl->cur_act_ib, cl->cur_act_ab,
+				cl->first_hop, cl->active_only);
+	commit_data();
+	msm_bus_dbg_remove_client(cl);
+	kfree(cl->name);
+	kfree(cl);
+exit_unregister_client:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+}
+
+static struct msm_bus_client_handle*
+register_adhoc(uint32_t mas, uint32_t slv, char *name, bool active_only)
+{
+	struct msm_bus_client_handle *client = NULL;
+	int len = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!(mas && slv && name)) {
+		pr_err("%s: Error: src, dst and name are required\n",
+								 __func__);
+		goto exit_register;
+	}
+
+	client = kzalloc(sizeof(struct msm_bus_client_handle), GFP_KERNEL);
+	if (!client) {
+		MSM_BUS_ERR("%s: Error allocating client data", __func__);
+		goto exit_register;
+	}
+
+	len = strnlen(name, MAX_STR_CL);
+	client->name = kzalloc((len + 1), GFP_KERNEL);
+	if (!client->name) {
+		MSM_BUS_ERR("%s: Error allocating client name buf", __func__);
+		free_cl_mem(client);
+		goto exit_register;
+	}
+	strlcpy(client->name, name, MAX_STR_CL);
+	client->active_only = active_only;
+
+	client->mas = mas;
+	client->slv = slv;
+
+	client->mas_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &mas,
+					msm_bus_device_match_adhoc);
+	if (IS_ERR_OR_NULL(client->mas_dev)) {
+		MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+			__func__, client->mas, client->slv);
+		free_cl_mem(client);
+		goto exit_register;
+	}
+
+	client->first_hop = getpath(client->mas_dev, client->slv, client->name);
+	if (client->first_hop < 0) {
+		MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+			__func__, client->mas, client->slv);
+		free_cl_mem(client);
+		goto exit_register;
+	}
+
+	MSM_BUS_DBG("%s:Client handle %p %s", __func__, client,
+						client->name);
+	msm_bus_dbg_add_client(client);
+exit_register:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return client;
+}
+
+/**
+ * msm_bus_arb_setops_adhoc() - Setup the bus arbitration ops
+ * @arb_ops: pointer to the arb ops.
+ */
+void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops)
+{
+	arb_ops->register_client = register_client_adhoc;
+	arb_ops->update_request = update_request_adhoc;
+	arb_ops->unregister_client = unregister_client_adhoc;
+	arb_ops->update_context = update_context;
+
+	arb_ops->register_cl = register_adhoc;
+	arb_ops->unregister = unregister_adhoc;
+	arb_ops->update_bw = update_bw_adhoc;
+	arb_ops->update_bw_context = update_bw_context;
+}
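
For the newer per-path interface installed above (register_cl/update_bw/unregister), a minimal sketch of a caller. The msm_bus_scale_register(), msm_bus_scale_update_bw() and msm_bus_scale_unregister() wrapper names are assumed to be the msm_bus_client_api.c entry points that dispatch through these arb_ops; the endpoint IDs are placeholders.

#include <linux/err.h>
#include <linux/msm-bus.h>

static struct msm_bus_client_handle *demo_path;

static int demo_path_vote(u64 ab, u64 ib)
{
	if (!demo_path) {
		/* 1 and 512 are placeholder master/slave IDs. */
		demo_path = msm_bus_scale_register(1, 512, "demo-path", false);
		if (IS_ERR_OR_NULL(demo_path))
			return -ENODEV;
	}
	/* ab/ib are in bytes/s; update_bw_adhoc() aggregates and commits. */
	return msm_bus_scale_update_bw(demo_path, ab, ib);
}

static void demo_path_release(void)
{
	if (!IS_ERR_OR_NULL(demo_path)) {
		msm_bus_scale_unregister(demo_path);
		demo_path = NULL;
	}
}
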
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
new file mode 100644
index 0000000..f78bcd4
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
@@ -0,0 +1,1976 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rtmutex.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+#include "msm_bus_rpmh.h"
+
+#define NUM_CL_HANDLES	50
+#define NUM_LNODES	3
+#define MAX_STR_CL	50
+
+#define MSM_BUS_MAS_ALC			144
+#define MSM_BUS_RSC_APPS		8000
+#define MSM_BUS_RSC_DISP		8001
+#define BCM_TCS_CMD_ACV_APPS		0x8
+
+struct bus_search_type {
+	struct list_head link;
+	struct list_head node_list;
+};
+
+struct handle_type {
+	int num_entries;
+	struct msm_bus_client **cl_list;
+};
+
+static struct handle_type handle_list;
+static LIST_HEAD(commit_list);
+static LIST_HEAD(late_init_clist);
+static LIST_HEAD(query_list);
+
+DEFINE_RT_MUTEX(msm_bus_adhoc_lock);
+
+static bool chk_bl_list(struct list_head *black_list, unsigned int id)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	list_for_each_entry(bus_node, black_list, link) {
+		if (bus_node->node_info->id == id)
+			return true;
+	}
+	return false;
+}
+
+static void copy_remaining_nodes(struct list_head *edge_list, struct list_head
+	*traverse_list, struct list_head *route_list)
+{
+	struct bus_search_type *search_node;
+
+	if (list_empty(edge_list) && list_empty(traverse_list))
+		return;
+
+	search_node = kzalloc(sizeof(struct bus_search_type), GFP_KERNEL);
+	INIT_LIST_HEAD(&search_node->node_list);
+	list_splice_init(edge_list, traverse_list);
+	list_splice_init(traverse_list, &search_node->node_list);
+	list_add_tail(&search_node->link, route_list);
+}
+
+/*
+ * Duplicate instantiation from msm_bus_arb.c. TODO: there needs to be a
+ * "util" file for these common functions/macros.
+ */
+uint64_t msm_bus_div64(uint64_t num, unsigned int base)
+{
+	uint64_t *n = &num;
+
+	if ((num > 0) && (num < base))
+		return 1;
+
+	switch (base) {
+	case 0:
+		WARN(1, "AXI: Divide by 0 attempted\n");
+	case 1: return num;
+	case 2: return (num >> 1);
+	case 4: return (num >> 2);
+	case 8: return (num >> 3);
+	case 16: return (num >> 4);
+	case 32: return (num >> 5);
+	}
+
+	do_div(*n, base);
+	return *n;
+}
+
+int msm_bus_device_match_adhoc(struct device *dev, void *id)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *bnode = to_msm_bus_node(dev);
+
+	if (bnode)
+		ret = (bnode->node_info->id == *(unsigned int *)id);
+	else
+		ret = 0;
+
+	return ret;
+}
+
+static void bcm_add_bus_req(struct device *dev)
+{
+	struct msm_bus_node_device_type *cur_dev = NULL;
+	struct msm_bus_node_device_type *bcm_dev = NULL;
+	struct link_node *lnode;
+	int lnode_idx = -1;
+	int max_num_lnodes = 0;
+	int i;
+
+	cur_dev = to_msm_bus_node(dev);
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		goto exit_bcm_add_bus_req;
+	}
+
+	if (!cur_dev->node_info->num_bcm_devs)
+		goto exit_bcm_add_bus_req;
+
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		if (cur_dev->node_info->bcm_req_idx[i] != -1)
+			continue;
+		bcm_dev = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+		max_num_lnodes = bcm_dev->bcmdev->num_bus_devs;
+		if (!bcm_dev->num_lnodes) {
+			bcm_dev->lnode_list = devm_kzalloc(dev,
+				sizeof(struct link_node) * max_num_lnodes,
+								GFP_KERNEL);
+			if (!bcm_dev->lnode_list)
+				goto exit_bcm_add_bus_req;
+
+			lnode = bcm_dev->lnode_list;
+			bcm_dev->num_lnodes = max_num_lnodes;
+			lnode_idx = 0;
+		} else {
+			int i;
+
+			for (i = 0; i < bcm_dev->num_lnodes; i++) {
+				if (!bcm_dev->lnode_list[i].in_use)
+					break;
+			}
+
+			if (i < bcm_dev->num_lnodes) {
+				lnode = &bcm_dev->lnode_list[i];
+				lnode_idx = i;
+			} else {
+				struct link_node *realloc_list;
+				size_t cur_size = sizeof(struct link_node) *
+						bcm_dev->num_lnodes;
+
+				bcm_dev->num_lnodes += NUM_LNODES;
+				realloc_list = msm_bus_realloc_devmem(
+						dev,
+						bcm_dev->lnode_list,
+						cur_size,
+						sizeof(struct link_node) *
+						bcm_dev->num_lnodes,
+								GFP_KERNEL);
+
+				if (!realloc_list)
+					goto exit_bcm_add_bus_req;
+
+				bcm_dev->lnode_list = realloc_list;
+				lnode = &bcm_dev->lnode_list[i];
+				lnode_idx = i;
+			}
+		}
+
+		lnode->in_use = 1;
+		lnode->bus_dev_id = cur_dev->node_info->id;
+		cur_dev->node_info->bcm_req_idx[i] = lnode_idx;
+		memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX);
+		memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX);
+	}
+
+exit_bcm_add_bus_req:
+	return;
+}
+
+static int gen_lnode(struct device *dev,
+			int next_hop, int prev_idx, const char *cl_name)
+{
+	struct link_node *lnode;
+	struct msm_bus_node_device_type *cur_dev = NULL;
+	int lnode_idx = -1;
+
+	if (!dev)
+		goto exit_gen_lnode;
+
+	cur_dev = to_msm_bus_node(dev);
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		goto exit_gen_lnode;
+	}
+
+	if (!cur_dev->num_lnodes) {
+		cur_dev->lnode_list = devm_kzalloc(dev,
+				sizeof(struct link_node) * NUM_LNODES,
+								GFP_KERNEL);
+		if (!cur_dev->lnode_list)
+			goto exit_gen_lnode;
+
+		lnode = cur_dev->lnode_list;
+		cur_dev->num_lnodes = NUM_LNODES;
+		lnode_idx = 0;
+	} else {
+		int i;
+
+		for (i = 0; i < cur_dev->num_lnodes; i++) {
+			if (!cur_dev->lnode_list[i].in_use)
+				break;
+		}
+
+		if (i < cur_dev->num_lnodes) {
+			lnode = &cur_dev->lnode_list[i];
+			lnode_idx = i;
+		} else {
+			struct link_node *realloc_list;
+			size_t cur_size = sizeof(struct link_node) *
+					cur_dev->num_lnodes;
+
+			cur_dev->num_lnodes += NUM_LNODES;
+			realloc_list = msm_bus_realloc_devmem(
+					dev,
+					cur_dev->lnode_list,
+					cur_size,
+					sizeof(struct link_node) *
+					cur_dev->num_lnodes, GFP_KERNEL);
+
+			if (!realloc_list)
+				goto exit_gen_lnode;
+
+			cur_dev->lnode_list = realloc_list;
+			lnode = &cur_dev->lnode_list[i];
+			lnode_idx = i;
+		}
+	}
+
+	lnode->in_use = 1;
+	lnode->cl_name = cl_name;
+	if (next_hop == cur_dev->node_info->id) {
+		lnode->next = -1;
+		lnode->next_dev = NULL;
+	} else {
+		lnode->next = prev_idx;
+		lnode->next_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &next_hop,
+					msm_bus_device_match_adhoc);
+	}
+
+	memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX);
+	memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX);
+
+exit_gen_lnode:
+	return lnode_idx;
+}
+
+static int remove_lnode(struct msm_bus_node_device_type *cur_dev,
+				int lnode_idx)
+{
+	int ret = 0;
+
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		ret = -ENODEV;
+		goto exit_remove_lnode;
+	}
+
+	if (lnode_idx != -1) {
+		if (!cur_dev->num_lnodes ||
+				(lnode_idx > (cur_dev->num_lnodes - 1))) {
+			MSM_BUS_ERR("%s: Invalid Idx %d, num_lnodes %d",
+				__func__, lnode_idx, cur_dev->num_lnodes);
+			ret = -ENODEV;
+			goto exit_remove_lnode;
+		}
+
+		cur_dev->lnode_list[lnode_idx].next = -1;
+		cur_dev->lnode_list[lnode_idx].next_dev = NULL;
+		cur_dev->lnode_list[lnode_idx].in_use = 0;
+		cur_dev->lnode_list[lnode_idx].cl_name = NULL;
+	}
+
+exit_remove_lnode:
+	return ret;
+}
+
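+/*
+ * Walk the BFS levels on route_list backwards from the destination,
+ * generating link_nodes along the discovered route, then clear the
+ * is_traversed markers and empty the black list. Returns the lnode
+ * index on the source device (the first hop), or -1 if no route exists.
+ */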
+static int prune_path(struct list_head *route_list, int dest, int src,
+				struct list_head *black_list, int found,
+				const char *cl_name)
+{
+	struct bus_search_type *search_node, *temp_search_node;
+	struct msm_bus_node_device_type *bus_node;
+	struct list_head *bl_list;
+	struct list_head *temp_bl_list;
+	int search_dev_id = dest;
+	struct device *dest_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &dest,
+					msm_bus_device_match_adhoc);
+	int lnode_hop = -1;
+
+	if (!found)
+		goto reset_links;
+
+	if (!dest_dev) {
+		MSM_BUS_ERR("%s: Can't find dest dev %d", __func__, dest);
+		goto exit_prune_path;
+	}
+
+	lnode_hop = gen_lnode(dest_dev, search_dev_id, lnode_hop, cl_name);
+	bcm_add_bus_req(dest_dev);
+
+	list_for_each_entry_reverse(search_node, route_list, link) {
+		list_for_each_entry(bus_node, &search_node->node_list, link) {
+			unsigned int i;
+
+			for (i = 0; i < bus_node->node_info->num_connections;
+									i++) {
+				if (bus_node->node_info->connections[i] ==
+								search_dev_id) {
+					dest_dev = bus_find_device(
+						&msm_bus_type,
+						NULL,
+						(void *)
+						&bus_node->node_info->id,
+						msm_bus_device_match_adhoc);
+
+					if (!dest_dev) {
+						lnode_hop = -1;
+						goto reset_links;
+					}
+
+					lnode_hop = gen_lnode(dest_dev,
+							search_dev_id,
+							lnode_hop, cl_name);
+					bcm_add_bus_req(dest_dev);
+					search_dev_id =
+						bus_node->node_info->id;
+					break;
+				}
+			}
+		}
+	}
+reset_links:
+	list_for_each_entry_safe(search_node, temp_search_node, route_list,
+									link) {
+		list_for_each_entry(bus_node, &search_node->node_list, link)
+			bus_node->node_info->is_traversed = false;
+
+		list_del(&search_node->link);
+		kfree(search_node);
+	}
+
+	list_for_each_safe(bl_list, temp_bl_list, black_list)
+		list_del(bl_list);
+
+exit_prune_path:
+	return lnode_hop;
+}
+
+static void setup_bl_list(struct msm_bus_node_device_type *node,
+				struct list_head *black_list)
+{
+	unsigned int i;
+
+	for (i = 0; i < node->node_info->num_blist; i++) {
+		struct msm_bus_node_device_type *bdev;
+
+		bdev = to_msm_bus_node(node->node_info->black_connections[i]);
+		list_add_tail(&bdev->link, black_list);
+	}
+}
+
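+/*
+ * Breadth-first search from @src_dev to @dest over each node's
+ * dev_connections, skipping black-listed nodes. Returns the first-hop
+ * lnode index on the source device, or -1 if no path is found.
+ */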
+static int getpath(struct device *src_dev, int dest, const char *cl_name)
+{
+	struct list_head traverse_list;
+	struct list_head edge_list;
+	struct list_head route_list;
+	struct list_head black_list;
+	struct msm_bus_node_device_type *src_node;
+	struct bus_search_type *search_node;
+	int found = 0;
+	int depth_index = 0;
+	int first_hop = -1;
+	int src;
+
+	INIT_LIST_HEAD(&traverse_list);
+	INIT_LIST_HEAD(&edge_list);
+	INIT_LIST_HEAD(&route_list);
+	INIT_LIST_HEAD(&black_list);
+
+	if (!src_dev) {
+		MSM_BUS_ERR("%s: Cannot locate src dev ", __func__);
+		goto exit_getpath;
+	}
+
+	src_node = to_msm_bus_node(src_dev);
+	if (!src_node) {
+		MSM_BUS_ERR("%s:Fatal, Source node not found", __func__);
+		goto exit_getpath;
+	}
+	src = src_node->node_info->id;
+	list_add_tail(&src_node->link, &traverse_list);
+
+	while ((!found && !list_empty(&traverse_list))) {
+		struct msm_bus_node_device_type *bus_node = NULL;
+		unsigned int i;
+		/* Locate dest_id in the traverse list */
+		list_for_each_entry(bus_node, &traverse_list, link) {
+			if (bus_node->node_info->id == dest) {
+				found = 1;
+				break;
+			}
+		}
+
+		/* Setup the new edge list */
+		list_for_each_entry(bus_node, &traverse_list, link) {
+			/* Setup list of black-listed nodes */
+			setup_bl_list(bus_node, &black_list);
+
+			for (i = 0; i < bus_node->node_info->num_connections;
+									i++) {
+				bool skip;
+				struct msm_bus_node_device_type *node_conn;
+
+				node_conn = to_msm_bus_node(
+				bus_node->node_info->dev_connections[i]);
+				if (node_conn->node_info->is_traversed) {
+					MSM_BUS_ERR("Circ Path %d\n",
+					node_conn->node_info->id);
+					goto reset_traversed;
+				}
+				skip = chk_bl_list(&black_list,
+					bus_node->node_info->connections[i]);
+				if (!skip) {
+					list_add_tail(&node_conn->link,
+						&edge_list);
+					node_conn->node_info->is_traversed =
+									true;
+				}
+			}
+		}
+		/* Keep track of the previous search list */
+		search_node = kzalloc(sizeof(struct bus_search_type),
+				 GFP_KERNEL);
+		if (!search_node)
+			goto reset_traversed;
+
+		INIT_LIST_HEAD(&search_node->node_list);
+		list_splice_init(&traverse_list,
+				 &search_node->node_list);
+		/* Add the previous search list to a route list */
+		list_add_tail(&search_node->link, &route_list);
+		/* Advancing the list depth */
+		depth_index++;
+		list_splice_init(&edge_list, &traverse_list);
+	}
+reset_traversed:
+	copy_remaining_nodes(&edge_list, &traverse_list, &route_list);
+	first_hop = prune_path(&route_list, dest, src, &black_list, found,
+								cl_name);
+
+exit_getpath:
+	return first_hop;
+}
+
+static void bcm_update_acv_req(struct msm_bus_node_device_type *cur_rsc,
+				uint64_t max_ab, uint64_t max_ib,
+				uint64_t *vec_a, uint64_t *vec_b,
+				uint32_t *acv, int ctx)
+{
+	uint32_t acv_bmsk = 0;
+	/*
+	 * Base ACV voting on current RSC until mapping is set up in commanddb
+	 * that allows us to vote ACV based on master.
+	 */
+
+	if (cur_rsc->node_info->id == MSM_BUS_RSC_APPS)
+		acv_bmsk = BCM_TCS_CMD_ACV_APPS;
+
+	if (max_ab == 0 && max_ib == 0)
+		*acv = *acv & ~acv_bmsk;
+	else
+		*acv = *acv | acv_bmsk;
+	*vec_a = 0;
+	*vec_b = *acv;
+}
+
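+/*
+ * Propagate this node's aggregated bandwidth to its BCMs: scale ib/ab by
+ * the BCM width over the node bus width, take the max across every lnode
+ * voting on the BCM, convert to BCM units and store the result in
+ * vec_a/vec_b (or fold it into the ACV bitmask for the ACV BCM).
+ */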
+static void bcm_update_bus_req(struct device *dev, int ctx)
+{
+	struct msm_bus_node_device_type *cur_dev = NULL;
+	struct msm_bus_node_device_type *bcm_dev = NULL;
+	struct msm_bus_node_device_type *cur_rsc = NULL;
+
+	int i, j;
+	uint64_t max_ib = 0;
+	uint64_t max_ab = 0;
+	int lnode_idx = 0;
+
+	cur_dev = to_msm_bus_node(dev);
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		goto exit_bcm_update_bus_req;
+	}
+
+	if (!cur_dev->node_info->num_bcm_devs)
+		goto exit_bcm_update_bus_req;
+
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		bcm_dev = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+
+		if (!bcm_dev)
+			goto exit_bcm_update_bus_req;
+
+		lnode_idx = cur_dev->node_info->bcm_req_idx[i];
+		bcm_dev->lnode_list[lnode_idx].lnode_ib[ctx] =
+			msm_bus_div64(cur_dev->node_bw[ctx].max_ib *
+					(uint64_t)bcm_dev->bcmdev->width,
+				cur_dev->node_info->agg_params.buswidth);
+
+		bcm_dev->lnode_list[lnode_idx].lnode_ab[ctx] =
+			msm_bus_div64(cur_dev->node_bw[ctx].sum_ab *
+					(uint64_t)bcm_dev->bcmdev->width,
+				cur_dev->node_info->agg_params.buswidth *
+				cur_dev->node_info->agg_params.num_aggports);
+
+		for (j = 0; j < bcm_dev->num_lnodes; j++) {
+			if (ctx == ACTIVE_CTX) {
+				max_ib = max(max_ib,
+				max(bcm_dev->lnode_list[j].lnode_ib[ACTIVE_CTX],
+				bcm_dev->lnode_list[j].lnode_ib[DUAL_CTX]));
+				max_ab = max(max_ab,
+				bcm_dev->lnode_list[j].lnode_ab[ACTIVE_CTX] +
+				bcm_dev->lnode_list[j].lnode_ab[DUAL_CTX]);
+			} else {
+				max_ib = max(max_ib,
+					bcm_dev->lnode_list[j].lnode_ib[ctx]);
+				max_ab = max(max_ab,
+					bcm_dev->lnode_list[j].lnode_ab[ctx]);
+			}
+		}
+		bcm_dev->node_bw[ctx].max_ab = max_ab;
+		bcm_dev->node_bw[ctx].max_ib = max_ib;
+
+		max_ab = msm_bus_div64(max_ab, bcm_dev->bcmdev->unit_size);
+		max_ib = msm_bus_div64(max_ib, bcm_dev->bcmdev->unit_size);
+
+		if (bcm_dev->node_info->id == MSM_BUS_BCM_ACV) {
+			cur_rsc =
+			to_msm_bus_node(bcm_dev->node_info->rsc_devs[0]);
+			bcm_update_acv_req(cur_rsc, max_ab, max_ib,
+					&bcm_dev->node_vec[ctx].vec_a,
+					&bcm_dev->node_vec[ctx].vec_b,
+					&cur_rsc->rscdev->acv[ctx], ctx);
+
+		} else {
+			bcm_dev->node_vec[ctx].vec_a = max_ab;
+			bcm_dev->node_vec[ctx].vec_b = max_ib;
+		}
+	}
+exit_bcm_update_bus_req:
+	return;
+}
+
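+/*
+ * Query-only mirror of bcm_update_bus_req(): aggregates into the
+ * query_vec_a/query_vec_b shadow fields without touching the committed
+ * vote.
+ */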
+static void bcm_query_bus_req(struct device *dev, int ctx)
+{
+	struct msm_bus_node_device_type *cur_dev = NULL;
+	struct msm_bus_node_device_type *bcm_dev = NULL;
+	struct msm_bus_node_device_type *cur_rsc = NULL;
+	int i, j;
+	uint64_t max_query_ib = 0;
+	uint64_t max_query_ab = 0;
+	int lnode_idx = 0;
+
+	cur_dev = to_msm_bus_node(dev);
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		goto exit_bcm_query_bus_req;
+	}
+
+	if (!cur_dev->node_info->num_bcm_devs)
+		goto exit_bcm_query_bus_req;
+
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		bcm_dev = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+
+		if (!bcm_dev)
+			goto exit_bcm_query_bus_req;
+
+		lnode_idx = cur_dev->node_info->bcm_req_idx[i];
+		bcm_dev->lnode_list[lnode_idx].query_ib[ctx] =
+			msm_bus_div64(cur_dev->node_bw[ctx].max_query_ib *
+					(uint64_t)bcm_dev->bcmdev->width,
+				cur_dev->node_info->agg_params.buswidth);
+
+		bcm_dev->lnode_list[lnode_idx].query_ab[ctx] =
+			msm_bus_div64(cur_dev->node_bw[ctx].sum_query_ab *
+					(uint64_t)bcm_dev->bcmdev->width,
+				cur_dev->node_info->agg_params.num_aggports *
+				cur_dev->node_info->agg_params.buswidth);
+
+		for (j = 0; j < bcm_dev->num_lnodes; j++) {
+			if (ctx == ACTIVE_CTX) {
+				max_query_ib = max(max_query_ib,
+				max(bcm_dev->lnode_list[j].query_ib[ACTIVE_CTX],
+				bcm_dev->lnode_list[j].query_ib[DUAL_CTX]));
+
+				max_query_ab = max(max_query_ab,
+				bcm_dev->lnode_list[j].query_ab[ACTIVE_CTX] +
+				bcm_dev->lnode_list[j].query_ab[DUAL_CTX]);
+			} else {
+				max_query_ib = max(max_query_ib,
+					bcm_dev->lnode_list[j].query_ib[ctx]);
+				max_query_ab = max(max_query_ab,
+					bcm_dev->lnode_list[j].query_ab[ctx]);
+			}
+		}
+
+		max_query_ab = msm_bus_div64(max_query_ab,
+						bcm_dev->bcmdev->unit_size);
+		max_query_ib = msm_bus_div64(max_query_ib,
+						bcm_dev->bcmdev->unit_size);
+
+		if (bcm_dev->node_info->id == MSM_BUS_BCM_ACV) {
+			cur_rsc =
+			to_msm_bus_node(bcm_dev->node_info->rsc_devs[0]);
+			bcm_update_acv_req(cur_rsc, max_query_ab, max_query_ib,
+					&bcm_dev->node_vec[ctx].query_vec_a,
+					&bcm_dev->node_vec[ctx].query_vec_b,
+					&cur_rsc->rscdev->query_acv[ctx], ctx);
+		} else {
+			bcm_dev->node_vec[ctx].query_vec_a = max_query_ab;
+			bcm_dev->node_vec[ctx].query_vec_b = max_query_ib;
+		}
+
+		bcm_dev->node_bw[ctx].max_query_ab = max_query_ab;
+		bcm_dev->node_bw[ctx].max_query_ib = max_query_ib;
+	}
+exit_bcm_query_bus_req:
+	return;
+}
+
+static void bcm_update_alc_req(struct msm_bus_node_device_type *dev, int ctx)
+{
+	struct msm_bus_node_device_type *bcm_dev = NULL;
+	int i;
+	uint64_t max_alc = 0;
+
+	if (!dev || !to_msm_bus_node(dev->node_info->bus_device)) {
+		MSM_BUS_ERR("Bus node pointer is Invalid");
+		goto exit_bcm_update_alc_req;
+	}
+
+	for (i = 0; i < dev->num_lnodes; i++)
+		max_alc = max(max_alc, dev->lnode_list[i].alc_idx[ctx]);
+
+	dev->node_bw[ctx].max_alc = max_alc;
+
+	bcm_dev = to_msm_bus_node(dev->node_info->bcm_devs[0]);
+
+	if (ctx == ACTIVE_CTX) {
+		max_alc = max(max_alc,
+				max(dev->node_bw[ACTIVE_CTX].max_alc,
+				dev->node_bw[DUAL_CTX].max_alc));
+	} else {
+		max_alc = dev->node_bw[ctx].max_alc;
+	}
+
+	bcm_dev->node_bw[ctx].max_alc = max_alc;
+	bcm_dev->node_vec[ctx].vec_a = max_alc;
+	bcm_dev->node_vec[ctx].vec_b = 0;
+
+exit_bcm_update_alc_req:
+	return;
+}
+
+int bcm_remove_handoff_req(struct device *dev, void *data)
+{
+	struct msm_bus_node_device_type *bus_dev = NULL;
+	struct msm_bus_node_device_type *cur_bcm = NULL;
+	struct msm_bus_node_device_type *cur_rsc = NULL;
+	int ret = 0;
+
+	bus_dev = to_msm_bus_node(dev);
+	if (bus_dev->node_info->is_bcm_dev ||
+		bus_dev->node_info->is_fab_dev ||
+		bus_dev->node_info->is_rsc_dev)
+		goto exit_bcm_remove_handoff_req;
+
+	if (bus_dev->node_info->num_bcm_devs) {
+		cur_bcm = to_msm_bus_node(bus_dev->node_info->bcm_devs[0]);
+		if (cur_bcm->node_info->num_rsc_devs) {
+			cur_rsc =
+			to_msm_bus_node(cur_bcm->node_info->rsc_devs[0]);
+			if (cur_rsc->node_info->id != MSM_BUS_RSC_APPS)
+				goto exit_bcm_remove_handoff_req;
+		}
+	}
+
+	if (!bus_dev->dirty) {
+		list_add_tail(&bus_dev->link, &late_init_clist);
+		bus_dev->dirty = true;
+	}
+
+exit_bcm_remove_handoff_req:
+	return ret;
+}
+
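+/*
+ * Aggregate all client votes on this node for the given context: ib is
+ * the max across the node's link nodes, ab is their sum.
+ */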
+static void aggregate_bus_req(struct msm_bus_node_device_type *bus_dev,
+									int ctx)
+{
+	int i;
+	uint64_t max_ib = 0;
+	uint64_t sum_ab = 0;
+
+	if (!bus_dev || !to_msm_bus_node(bus_dev->node_info->bus_device)) {
+		MSM_BUS_ERR("Bus node pointer is Invalid");
+		goto exit_agg_bus_req;
+	}
+
+	for (i = 0; i < bus_dev->num_lnodes; i++) {
+		max_ib = max(max_ib, bus_dev->lnode_list[i].lnode_ib[ctx]);
+		sum_ab += bus_dev->lnode_list[i].lnode_ab[ctx];
+	}
+
+	bus_dev->node_bw[ctx].sum_ab = sum_ab;
+	bus_dev->node_bw[ctx].max_ib = max_ib;
+
+exit_agg_bus_req:
+	return;
+}
+
+static void aggregate_bus_query_req(struct msm_bus_node_device_type *bus_dev,
+									int ctx)
+{
+	int i;
+	uint64_t max_ib = 0;
+	uint64_t sum_ab = 0;
+
+	if (!bus_dev || !to_msm_bus_node(bus_dev->node_info->bus_device)) {
+		MSM_BUS_ERR("Bus node pointer is Invalid");
+		goto exit_agg_bus_req;
+	}
+
+	for (i = 0; i < bus_dev->num_lnodes; i++) {
+		max_ib = max(max_ib,
+				bus_dev->lnode_list[i].query_ib[ctx]);
+		sum_ab += bus_dev->lnode_list[i].query_ab[ctx];
+	}
+
+	bus_dev->node_bw[ctx].sum_query_ab = sum_ab;
+	bus_dev->node_bw[ctx].max_query_ib = max_ib;
+
+exit_agg_bus_req:
+	return;
+}
+
+static void commit_data(void)
+{
+	msm_bus_commit_data(&commit_list);
+	INIT_LIST_HEAD(&commit_list);
+}
+
+int commit_late_init_data(bool lock)
+{
+	int rc;
+
+	if (lock) {
+		rt_mutex_lock(&msm_bus_adhoc_lock);
+		return 0;
+	}
+
+	rc = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+						bcm_remove_handoff_req);
+
+	msm_bus_commit_data(&late_init_clist);
+	INIT_LIST_HEAD(&late_init_clist);
+
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return rc;
+}
+
+
+
+static void add_node_to_clist(struct msm_bus_node_device_type *node)
+{
+	struct msm_bus_node_device_type *node_parent =
+			to_msm_bus_node(node->node_info->bus_device);
+
+	if (!node->dirty) {
+		list_add_tail(&node->link, &commit_list);
+		node->dirty = true;
+	}
+
+	if (!node_parent->dirty) {
+		list_add_tail(&node_parent->link, &commit_list);
+		node_parent->dirty = true;
+	}
+}
+
+static void add_node_to_query_list(struct msm_bus_node_device_type *node)
+{
+	if (!node->query_dirty) {
+		list_add_tail(&node->query_link, &query_list);
+		node->query_dirty = true;
+	}
+}
+
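+/*
+ * Apply a client's active/sleep vote along the path starting at @src_dev
+ * and lnode @src_idx: update each hop's link node, re-aggregate the node
+ * and its BCMs for both contexts, and queue the node (and its parent bus
+ * device) on the commit list.
+ */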
+static int update_path(struct device *src_dev, int dest, uint64_t act_req_ib,
+			uint64_t act_req_bw, uint64_t slp_req_ib,
+			uint64_t slp_req_bw, uint64_t cur_ib, uint64_t cur_bw,
+			int src_idx, int ctx)
+{
+	struct device *next_dev = NULL;
+	struct link_node *lnode = NULL;
+	struct msm_bus_node_device_type *dev_info = NULL;
+	int curr_idx;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(src_dev)) {
+		MSM_BUS_ERR("%s: No source device", __func__);
+		ret = -ENODEV;
+		goto exit_update_path;
+	}
+
+	next_dev = src_dev;
+
+	if (src_idx < 0) {
+		MSM_BUS_ERR("%s: Invalid lnode idx %d", __func__, src_idx);
+		ret = -ENXIO;
+		goto exit_update_path;
+	}
+	curr_idx = src_idx;
+
+	while (next_dev) {
+		int i;
+
+		dev_info = to_msm_bus_node(next_dev);
+
+		if (curr_idx >= dev_info->num_lnodes) {
+			MSM_BUS_ERR("%s: Invalid lnode Idx %d num lnodes %d",
+			 __func__, curr_idx, dev_info->num_lnodes);
+			ret = -ENXIO;
+			goto exit_update_path;
+		}
+
+		if (!dev_info->lnode_list) {
+			MSM_BUS_ERR("%s: Invalid lnode ptr lnode %d",
+				 __func__, curr_idx);
+			ret = -ENXIO;
+			goto exit_update_path;
+		}
+		lnode = &dev_info->lnode_list[curr_idx];
+		lnode->lnode_ib[ACTIVE_CTX] = act_req_ib;
+		lnode->lnode_ab[ACTIVE_CTX] = act_req_bw;
+		lnode->lnode_ib[DUAL_CTX] = slp_req_ib;
+		lnode->lnode_ab[DUAL_CTX] = slp_req_bw;
+
+		for (i = 0; i < NUM_CTX; i++) {
+			aggregate_bus_req(dev_info, i);
+			bcm_update_bus_req(next_dev, i);
+		}
+
+		add_node_to_clist(dev_info);
+
+		next_dev = lnode->next_dev;
+		curr_idx = lnode->next;
+	}
+
+exit_update_path:
+	return ret;
+}
+
+static int update_alc_vote(struct device *alc_dev, uint64_t act_req_fa_lat,
+			uint64_t act_req_idle_time, uint64_t slp_req_fa_lat,
+			uint64_t slp_req_idle_time, uint64_t cur_fa_lat,
+			uint64_t cur_idle_time, int idx, int ctx)
+{
+	struct link_node *lnode = NULL;
+	struct msm_bus_node_device_type *dev_info = NULL;
+	int curr_idx, i;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(alc_dev)) {
+		MSM_BUS_ERR("%s: No source device", __func__);
+		ret = -ENODEV;
+		goto exit_update_alc_vote;
+	}
+
+	if (idx < 0) {
+		MSM_BUS_ERR("%s: Invalid lnode idx %d", __func__, idx);
+		ret = -ENXIO;
+		goto exit_update_alc_vote;
+	}
+
+	dev_info = to_msm_bus_node(alc_dev);
+	curr_idx = idx;
+
+	if (curr_idx >= dev_info->num_lnodes) {
+		MSM_BUS_ERR("%s: Invalid lnode Idx %d num lnodes %d",
+				 __func__, curr_idx, dev_info->num_lnodes);
+		ret = -ENXIO;
+		goto exit_update_alc_vote;
+	}
+
+	if (!dev_info->lnode_list) {
+		MSM_BUS_ERR("%s: Invalid lnode ptr lnode %d",
+			 __func__, curr_idx);
+		ret = -ENXIO;
+		goto exit_update_alc_vote;
+	}
+	lnode = &dev_info->lnode_list[curr_idx];
+
+	/*
+	 * Add aggregation and mapping logic once the LUT is available.
+	 * Use default values for the time being.
+	 */
+	lnode->alc_idx[ACTIVE_CTX] = 12;
+	lnode->alc_idx[DUAL_CTX] = 0;
+
+	for (i = 0; i < NUM_CTX; i++)
+		bcm_update_alc_req(dev_info, i);
+
+	add_node_to_clist(dev_info);
+
+exit_update_alc_vote:
+	return ret;
+}
+
+
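+/*
+ * Same walk as update_path(), but on the query_ib/query_ab shadow votes:
+ * nothing is committed, the touched nodes are only queued on query_list.
+ */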
+static int query_path(struct device *src_dev, int dest, uint64_t act_req_ib,
+			uint64_t act_req_bw, uint64_t slp_req_ib,
+			uint64_t slp_req_bw, uint64_t cur_ib, uint64_t cur_bw,
+			int src_idx)
+{
+	struct device *next_dev = NULL;
+	struct link_node *lnode = NULL;
+	struct msm_bus_node_device_type *dev_info = NULL;
+	int curr_idx;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(src_dev)) {
+		MSM_BUS_ERR("%s: No source device", __func__);
+		ret = -ENODEV;
+		goto exit_query_path;
+	}
+
+	next_dev = src_dev;
+
+	if (src_idx < 0) {
+		MSM_BUS_ERR("%s: Invalid lnode idx %d", __func__, src_idx);
+		ret = -ENXIO;
+		goto exit_query_path;
+	}
+	curr_idx = src_idx;
+
+	while (next_dev) {
+		int i;
+
+		dev_info = to_msm_bus_node(next_dev);
+
+		if (curr_idx >= dev_info->num_lnodes) {
+			MSM_BUS_ERR("%s: Invalid lnode Idx %d num lnodes %d",
+			 __func__, curr_idx, dev_info->num_lnodes);
+			ret = -ENXIO;
+			goto exit_query_path;
+		}
+
+		if (!dev_info->lnode_list) {
+			MSM_BUS_ERR("%s: Invalid lnode ptr lnode %d",
+				 __func__, curr_idx);
+			ret = -ENXIO;
+			goto exit_query_path;
+		}
+		lnode = &dev_info->lnode_list[curr_idx];
+		lnode->query_ib[ACTIVE_CTX] = act_req_ib;
+		lnode->query_ab[ACTIVE_CTX] = act_req_bw;
+		lnode->query_ib[DUAL_CTX] = slp_req_ib;
+		lnode->query_ab[DUAL_CTX] = slp_req_bw;
+
+		for (i = 0; i < NUM_CTX; i++) {
+			aggregate_bus_query_req(dev_info, i);
+			bcm_query_bus_req(next_dev, i);
+		}
+
+		add_node_to_query_list(dev_info);
+
+		next_dev = lnode->next_dev;
+		curr_idx = lnode->next;
+	}
+
+exit_query_path:
+	return ret;
+}
+
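+/*
+ * Tear down a client's path: zero out its votes via update_path(), then
+ * walk the lnode chain and release every link node reserved by getpath().
+ */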
+static int remove_path(struct device *src_dev, int dst, uint64_t cur_ib,
+			uint64_t cur_ab, int src_idx, int active_only)
+{
+	struct device *next_dev = NULL;
+	struct link_node *lnode = NULL;
+	struct msm_bus_node_device_type *dev_info = NULL;
+	int ret = 0;
+	int cur_idx = src_idx;
+	int next_idx;
+
+	/* Update the current path to zero out all requests from
+	 * this client on all paths
+	 */
+	if (!src_dev) {
+		MSM_BUS_ERR("%s: Can't find source device", __func__);
+		ret = -ENODEV;
+		goto exit_remove_path;
+	}
+
+	ret = update_path(src_dev, dst, 0, 0, 0, 0, cur_ib, cur_ab, src_idx,
+							active_only);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error zeroing out path ctx %d",
+					__func__, ACTIVE_CTX);
+		goto exit_remove_path;
+	}
+
+	next_dev = src_dev;
+
+	while (next_dev) {
+		dev_info = to_msm_bus_node(next_dev);
+		lnode = &dev_info->lnode_list[cur_idx];
+		next_idx = lnode->next;
+		next_dev = lnode->next_dev;
+		remove_lnode(dev_info, cur_idx);
+		cur_idx = next_idx;
+	}
+
+exit_remove_path:
+	return ret;
+}
+
+static void getpath_debug(int src, int curr, int active_only)
+{
+	struct device *dev_node;
+	struct device *dev_it;
+	unsigned int hop = 1;
+	int idx;
+	struct msm_bus_node_device_type *devinfo;
+	int i;
+
+	dev_node = bus_find_device(&msm_bus_type, NULL,
+				(void *) &src,
+				msm_bus_device_match_adhoc);
+
+	if (!dev_node) {
+		MSM_BUS_ERR("SRC NOT FOUND %d", src);
+		return;
+	}
+
+	idx = curr;
+	devinfo = to_msm_bus_node(dev_node);
+	dev_it = dev_node;
+
+	MSM_BUS_ERR("Route list Src %d", src);
+	while (dev_it) {
+		struct msm_bus_node_device_type *busdev =
+			to_msm_bus_node(devinfo->node_info->bus_device);
+
+		MSM_BUS_ERR("Hop[%d] at Device %d ctx %d", hop,
+					devinfo->node_info->id, active_only);
+
+		for (i = 0; i < NUM_CTX; i++) {
+			MSM_BUS_ERR("dev info sel ib %llu",
+						devinfo->node_bw[i].cur_clk_hz);
+			MSM_BUS_ERR("dev info sel ab %llu",
+						devinfo->node_bw[i].sum_ab);
+		}
+
+		dev_it = devinfo->lnode_list[idx].next_dev;
+		idx = devinfo->lnode_list[idx].next;
+		if (dev_it)
+			devinfo = to_msm_bus_node(dev_it);
+
+		MSM_BUS_ERR("Bus Device %d", busdev->node_info->id);
+		MSM_BUS_ERR("Bus Clock %llu", busdev->clk[active_only].rate);
+
+		if (idx < 0)
+			break;
+		hop++;
+	}
+}
+
+static void unregister_client_adhoc(uint32_t cl)
+{
+	int i;
+	struct msm_bus_scale_pdata *pdata;
+	int lnode, src, curr, dest;
+	uint64_t  cur_clk, cur_bw;
+	struct msm_bus_client *client;
+	struct device *src_dev;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("%s: Null cl handle passed unregister\n",
+				__func__);
+		goto exit_unregister_client;
+	}
+	client = handle_list.cl_list[cl];
+	if (!client) {
+		MSM_BUS_ERR("%s: Invalid client pointer ", __func__);
+		goto exit_unregister_client;
+	}
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("%s: Null pdata passed to unregister\n",
+				__func__);
+		goto exit_unregister_client;
+	}
+
+	curr = client->curr;
+	if (curr >= pdata->num_usecases) {
+		MSM_BUS_ERR("Invalid index Defaulting curr to 0");
+		curr = 0;
+	}
+
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = client->pdata->usecase[curr].vectors[i].src;
+		dest = client->pdata->usecase[curr].vectors[i].dst;
+
+		lnode = client->src_pnode[i];
+		src_dev = client->src_devs[i];
+		cur_clk = client->pdata->usecase[curr].vectors[i].ib;
+		cur_bw = client->pdata->usecase[curr].vectors[i].ab;
+		remove_path(src_dev, dest, cur_clk, cur_bw, lnode,
+						pdata->active_only);
+	}
+	commit_data();
+	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl);
+	kfree(client->src_pnode);
+	kfree(client->src_devs);
+	kfree(client);
+	handle_list.cl_list[cl] = NULL;
+exit_unregister_client:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+}
+
+static int alloc_handle_lst(int size)
+{
+	int ret = 0;
+	struct msm_bus_client **t_cl_list;
+
+	if (!handle_list.num_entries) {
+		t_cl_list = kzalloc(sizeof(struct msm_bus_client *)
+			* NUM_CL_HANDLES, GFP_KERNEL);
+		if (ZERO_OR_NULL_PTR(t_cl_list)) {
+			ret = -ENOMEM;
+			MSM_BUS_ERR("%s: Failed to allocate handles list",
+								__func__);
+			goto exit_alloc_handle_lst;
+		}
+		handle_list.cl_list = t_cl_list;
+		handle_list.num_entries += NUM_CL_HANDLES;
+	} else {
+		t_cl_list = krealloc(handle_list.cl_list,
+				sizeof(struct msm_bus_client *) *
+				(handle_list.num_entries + NUM_CL_HANDLES),
+				GFP_KERNEL);
+		if (ZERO_OR_NULL_PTR(t_cl_list)) {
+			ret = -ENOMEM;
+			MSM_BUS_ERR("%s: Failed to allocate handles list",
+								__func__);
+			goto exit_alloc_handle_lst;
+		}
+
+		handle_list.cl_list = t_cl_list;
+		memset(&handle_list.cl_list[handle_list.num_entries], 0,
+			NUM_CL_HANDLES * sizeof(struct msm_bus_client *));
+		handle_list.num_entries += NUM_CL_HANDLES;
+	}
+exit_alloc_handle_lst:
+	return ret;
+}
+
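+/*
+ * Hand out the first free slot in handle_list; slot 0 is never used so a
+ * zero handle always means "invalid". The list grows in NUM_CL_HANDLES
+ * chunks when no free slot is left.
+ */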
+static uint32_t gen_handle(struct msm_bus_client *client)
+{
+	uint32_t handle = 0;
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < handle_list.num_entries; i++) {
+		if (i && !handle_list.cl_list[i]) {
+			handle = i;
+			break;
+		}
+	}
+
+	if (!handle) {
+		ret = alloc_handle_lst(NUM_CL_HANDLES);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: Failed to allocate handle list",
+							__func__);
+			goto exit_gen_handle;
+		}
+		handle = i + 1;
+	}
+	handle_list.cl_list[handle] = client;
+exit_gen_handle:
+	return handle;
+}
+
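+/*
+ * Entry point behind arb_ops->register_client: build a path for each
+ * usecase vector (src -> dst), reserve link nodes along the way and
+ * return a non-zero client handle on success, 0 on failure.
+ */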
+static uint32_t register_client_adhoc(struct msm_bus_scale_pdata *pdata)
+{
+	int src, dest;
+	int i;
+	struct msm_bus_client *client = NULL;
+	int *lnode;
+	struct device *dev;
+	uint32_t handle = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL);
+	if (!client) {
+		MSM_BUS_ERR("%s: Error allocating client data", __func__);
+		goto exit_register_client;
+	}
+	client->pdata = pdata;
+
+	if (pdata->alc) {
+		client->curr = -1;
+		lnode = kzalloc(sizeof(int), GFP_KERNEL);
+
+		if (ZERO_OR_NULL_PTR(lnode)) {
+			MSM_BUS_ERR("%s: Error allocating lnode!", __func__);
+			goto exit_lnode_malloc_fail;
+		}
+		client->src_pnode = lnode;
+
+		client->src_devs = kzalloc(sizeof(struct device *),
+							GFP_KERNEL);
+		if (IS_ERR_OR_NULL(client->src_devs)) {
+			MSM_BUS_ERR("%s: Error allocating src_dev!", __func__);
+			goto exit_src_dev_malloc_fail;
+		}
+		src = MSM_BUS_MAS_ALC;
+		dev = bus_find_device(&msm_bus_type, NULL,
+				(void *) &src,
+				msm_bus_device_match_adhoc);
+		if (IS_ERR_OR_NULL(dev)) {
+			MSM_BUS_ERR("%s:Failed to find alc device",
+				__func__);
+			goto exit_invalid_data;
+		}
+		lnode[0] = gen_lnode(dev, MSM_BUS_MAS_ALC, 0, pdata->name);
+		bcm_add_bus_req(dev);
+
+		client->src_devs[0] = dev;
+
+		handle = gen_handle(client);
+		goto exit_register_client;
+	}
+
+	lnode = kcalloc(pdata->usecase->num_paths, sizeof(int), GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(lnode)) {
+		MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__);
+		goto exit_lnode_malloc_fail;
+	}
+	client->src_pnode = lnode;
+
+	client->src_devs = kcalloc(pdata->usecase->num_paths,
+					sizeof(struct device *), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(client->src_devs)) {
+		MSM_BUS_ERR("%s: Error allocating src_devs ptr!", __func__);
+		goto exit_src_dev_malloc_fail;
+	}
+	client->curr = -1;
+
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = pdata->usecase->vectors[i].src;
+		dest = pdata->usecase->vectors[i].dst;
+
+		if ((src < 0) || (dest < 0) || (src == dest)) {
+			MSM_BUS_ERR("%s:Invalid src/dst.src %d dest %d",
+				__func__, src, dest);
+			goto exit_invalid_data;
+		}
+		dev = bus_find_device(&msm_bus_type, NULL,
+				(void *) &src,
+				msm_bus_device_match_adhoc);
+		if (IS_ERR_OR_NULL(dev)) {
+			MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+				__func__, src, dest);
+			goto exit_invalid_data;
+		}
+		client->src_devs[i] = dev;
+
+		MSM_BUS_DBG("%s:find path.src %d dest %d",
+				__func__, src, dest);
+
+		lnode[i] = getpath(dev, dest, client->pdata->name);
+		if (lnode[i] < 0) {
+			MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+				__func__, src, dest);
+			goto exit_invalid_data;
+		}
+	}
+
+	handle = gen_handle(client);
+	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER,
+					handle);
+	MSM_BUS_DBG("%s:Client handle %d %s", __func__, handle,
+						client->pdata->name);
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return handle;
+exit_invalid_data:
+	kfree(client->src_devs);
+exit_src_dev_malloc_fail:
+	kfree(lnode);
+exit_lnode_malloc_fail:
+	kfree(client);
+exit_register_client:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return handle;
+}
+
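+/*
+ * Re-vote every path of a registered client for usecase @idx. Active-only
+ * clients vote into the active context; all other clients vote into the
+ * sleep (dual) context.
+ */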
+static int update_client_paths(struct msm_bus_client *client, bool log_trns,
+							unsigned int idx)
+{
+	int lnode, src, dest, cur_idx;
+	uint64_t req_clk, req_bw, curr_clk, curr_bw, slp_clk, slp_bw;
+	int i, ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct device *src_dev;
+
+	if (!client) {
+		MSM_BUS_ERR("Client handle  Null");
+		ret = -ENXIO;
+		goto exit_update_client_paths;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("Client pdata Null");
+		ret = -ENXIO;
+		goto exit_update_client_paths;
+	}
+
+	cur_idx = client->curr;
+	client->curr = idx;
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = pdata->usecase[idx].vectors[i].src;
+		dest = pdata->usecase[idx].vectors[i].dst;
+
+		lnode = client->src_pnode[i];
+		src_dev = client->src_devs[i];
+		req_clk = client->pdata->usecase[idx].vectors[i].ib;
+		req_bw = client->pdata->usecase[idx].vectors[i].ab;
+		if (cur_idx < 0) {
+			curr_clk = 0;
+			curr_bw = 0;
+		} else {
+			curr_clk =
+				client->pdata->usecase[cur_idx].vectors[i].ib;
+			curr_bw = client->pdata->usecase[cur_idx].vectors[i].ab;
+			MSM_BUS_DBG("%s:ab: %llu ib: %llu\n", __func__,
+					curr_bw, curr_clk);
+		}
+
+		if (pdata->active_only) {
+			slp_clk = 0;
+			slp_bw = 0;
+		} else {
+			slp_clk = req_clk;
+			slp_bw = req_bw;
+			req_clk = 0;
+			req_bw = 0;
+		}
+
+		ret = update_path(src_dev, dest, req_clk, req_bw, slp_clk,
+			slp_bw, curr_clk, curr_bw, lnode, pdata->active_only);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: Update path failed! %d ctx %d\n",
+					__func__, ret, pdata->active_only);
+			goto exit_update_client_paths;
+		}
+
+		if (log_trns)
+			getpath_debug(src, lnode, pdata->active_only);
+	}
+	commit_data();
+exit_update_client_paths:
+	return ret;
+}
+
+static int update_client_alc(struct msm_bus_client *client, bool log_trns,
+							unsigned int idx)
+{
+	int lnode, cur_idx;
+	uint64_t req_idle_time, req_fal, dual_idle_time, dual_fal;
+	/* current fal/idle values are not tracked yet; pass zero */
+	uint64_t cur_idle_time = 0, cur_fal = 0;
+	int ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct device *src_dev;
+
+	if (!client) {
+		MSM_BUS_ERR("Client handle  Null");
+		ret = -ENXIO;
+		goto exit_update_client_alc;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("Client pdata Null");
+		ret = -ENXIO;
+		goto exit_update_client_alc;
+	}
+
+	cur_idx = client->curr;
+	client->curr = idx;
+	req_fal = pdata->usecase_lat[idx].fal_ns;
+	req_idle_time = pdata->usecase_lat[idx].idle_t_ns;
+	lnode = client->src_pnode[0];
+	src_dev = client->src_devs[0];
+
+	if (pdata->active_only) {
+		dual_fal = 0;
+		dual_idle_time = 0;
+	} else {
+		dual_fal = req_fal;
+		dual_idle_time = req_idle_time;
+	}
+
+	ret = update_alc_vote(src_dev, req_fal, req_idle_time, dual_fal,
+		dual_idle_time, cur_fal, cur_idle_time, lnode,
+		pdata->active_only);
+
+	if (ret) {
+		MSM_BUS_ERR("%s: Update path failed! %d ctx %d\n",
+				__func__, ret, pdata->active_only);
+		goto exit_update_client_alc;
+	}
+	commit_data();
+exit_update_client_alc:
+	return ret;
+}
+
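+/*
+ * Run a shadow vote for usecase @idx, generate the TCS commands via
+ * msm_bus_query_gen(), then clear the shadow vote again so the committed
+ * state is left untouched.
+ */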
+static int query_usecase(struct msm_bus_client *client, bool log_trns,
+					unsigned int idx,
+					struct msm_bus_tcs_usecase *tcs_usecase)
+{
+	int lnode, src, dest, cur_idx;
+	uint64_t req_clk, req_bw, curr_clk, curr_bw;
+	int i, ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct device *src_dev;
+	struct msm_bus_node_device_type *node = NULL;
+	struct msm_bus_node_device_type *node_tmp = NULL;
+
+	if (!client) {
+		MSM_BUS_ERR("Client handle  Null");
+		ret = -ENXIO;
+		goto exit_query_usecase;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("Client pdata Null");
+		ret = -ENXIO;
+		goto exit_query_usecase;
+	}
+
+	cur_idx = client->curr;
+	client->curr = idx;
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = pdata->usecase[idx].vectors[i].src;
+		dest = pdata->usecase[idx].vectors[i].dst;
+
+		lnode = client->src_pnode[i];
+		src_dev = client->src_devs[i];
+		req_clk = client->pdata->usecase[idx].vectors[i].ib;
+		req_bw = client->pdata->usecase[idx].vectors[i].ab;
+		if (cur_idx < 0) {
+			curr_clk = 0;
+			curr_bw = 0;
+		} else {
+			curr_clk =
+				client->pdata->usecase[cur_idx].vectors[i].ib;
+			curr_bw = client->pdata->usecase[cur_idx].vectors[i].ab;
+			MSM_BUS_DBG("%s:ab: %llu ib: %llu\n", __func__,
+					curr_bw, curr_clk);
+		}
+
+		ret = query_path(src_dev, dest, req_clk, req_bw, 0,
+			0, curr_clk, curr_bw, lnode);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: Query path failed! %d ctx %d\n",
+					__func__, ret, pdata->active_only);
+			goto exit_query_usecase;
+		}
+	}
+	msm_bus_query_gen(&query_list, tcs_usecase);
+	INIT_LIST_HEAD(&query_list);
+
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = pdata->usecase[idx].vectors[i].src;
+		dest = pdata->usecase[idx].vectors[i].dst;
+
+		lnode = client->src_pnode[i];
+		src_dev = client->src_devs[i];
+
+		ret = query_path(src_dev, dest, 0, 0, 0, 0,
+						curr_clk, curr_bw, lnode);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: Clear query path failed! %d ctx %d\n",
+					__func__, ret, pdata->active_only);
+			goto exit_query_usecase;
+		}
+	}
+
+	list_for_each_entry_safe(node, node_tmp, &query_list, query_link) {
+		node->query_dirty = false;
+		list_del_init(&node->query_link);
+	}
+
+	INIT_LIST_HEAD(&query_list);
+
+exit_query_usecase:
+	return ret;
+}
+
+static int update_context(uint32_t cl, bool active_only,
+					unsigned int ctx_idx)
+{
+	int ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct msm_bus_client *client;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	client = handle_list.cl_list[cl];
+	if (!client) {
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+	if (pdata->active_only == active_only) {
+		MSM_BUS_ERR("No change in context(%d==%d), skip\n",
+					pdata->active_only, active_only);
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	if (ctx_idx >= pdata->num_usecases) {
+		MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+			cl, ctx_idx);
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	pdata->active_only = active_only;
+
+	msm_bus_dbg_client_data(client->pdata, ctx_idx, cl);
+	ret = update_client_paths(client, false, ctx_idx);
+	if (ret) {
+		pr_err("%s: Err updating path\n", __func__);
+		goto exit_update_context;
+	}
+
+//	trace_bus_update_request_end(pdata->name);
+
+exit_update_context:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static int update_request_adhoc(uint32_t cl, unsigned int index)
+{
+	int ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct msm_bus_client *client;
+	const char *test_cl = "Null";
+	bool log_transaction = false;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	client = handle_list.cl_list[cl];
+	if (!client) {
+		MSM_BUS_ERR("%s: Invalid client pointer ", __func__);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("%s: Client data Null.[client didn't register]",
+				__func__);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	if (index >= pdata->num_usecases) {
+		MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+			cl, index);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	if (client->curr == index) {
+		MSM_BUS_DBG("%s: Not updating client request idx %d unchanged",
+				__func__, index);
+		goto exit_update_request;
+	}
+
+	if (!strcmp(test_cl, pdata->name))
+		log_transaction = true;
+
+	MSM_BUS_DBG("%s: cl: %u index: %d curr: %d num_paths: %d\n", __func__,
+		cl, index, client->curr, client->pdata->usecase->num_paths);
+
+	if (pdata->alc)
+		ret = update_client_alc(client, log_transaction, index);
+	else {
+		msm_bus_dbg_client_data(client->pdata, index, cl);
+		ret = update_client_paths(client, log_transaction, index);
+	}
+	if (ret) {
+		pr_err("%s: Err updating path\n", __func__);
+		goto exit_update_request;
+	}
+
+//	trace_bus_update_request_end(pdata->name);
+
+exit_update_request:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static int query_client_usecase(struct msm_bus_tcs_usecase *tcs_usecase,
+					uint32_t cl, unsigned int index)
+{
+	int ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct msm_bus_client *client;
+	const char *test_cl = "Null";
+	bool log_transaction = false;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+		ret = -ENXIO;
+		goto exit_query_client_usecase;
+	}
+
+	client = handle_list.cl_list[cl];
+	if (!client) {
+		MSM_BUS_ERR("%s: Invalid client pointer ", __func__);
+		ret = -ENXIO;
+		goto exit_query_client_usecase;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("%s: Client data Null.[client didn't register]",
+				__func__);
+		ret = -ENXIO;
+		goto exit_query_client_usecase;
+	}
+
+	if (index >= pdata->num_usecases) {
+		MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+			cl, index);
+		ret = -ENXIO;
+		goto exit_query_client_usecase;
+	}
+
+	if (!strcmp(test_cl, pdata->name))
+		log_transaction = true;
+
+	MSM_BUS_DBG("%s: cl: %u index: %d curr: %d num_paths: %d\n", __func__,
+		cl, index, client->curr, client->pdata->usecase->num_paths);
+	ret = query_usecase(client, log_transaction, index, tcs_usecase);
+	if (ret) {
+		pr_err("%s: Err updating path\n", __func__);
+		goto exit_query_client_usecase;
+	}
+
+//	trace_bus_update_request_end(pdata->name);
+
+exit_query_client_usecase:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static int query_client_usecase_all(struct msm_bus_tcs_handle *tcs_handle,
+					uint32_t cl)
+{
+	int ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct msm_bus_client *client;
+	const char *test_cl = "Null";
+	bool log_transaction = false;
+	int i = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+		ret = -ENXIO;
+		goto exit_query_client_usecase_all;
+	}
+
+	client = handle_list.cl_list[cl];
+	if (!client) {
+		MSM_BUS_ERR("%s: Invalid client pointer ", __func__);
+		ret = -ENXIO;
+		goto exit_query_client_usecase_all;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("%s: Client data Null.[client didn't register]",
+				__func__);
+		ret = -ENXIO;
+		goto exit_query_client_usecase_all;
+	}
+
+	if (!strcmp(test_cl, pdata->name))
+		log_transaction = true;
+
+	MSM_BUS_ERR("%s: query_start", __func__);
+	for (i = 0; i < pdata->num_usecases; i++)
+		query_usecase(client, log_transaction, i,
+						&tcs_handle->usecases[i]);
+	tcs_handle->num_usecases = pdata->num_usecases;
+
+	if (ret) {
+		pr_err("%s: Err updating path\n", __func__);
+		goto exit_query_client_usecase_all;
+	}
+
+//	trace_bus_update_request_end(pdata->name);
+
+exit_query_client_usecase_all:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static void free_cl_mem(struct msm_bus_client_handle *cl)
+{
+	if (cl) {
+		kfree(cl->name);
+		kfree(cl);
+		cl = NULL;
+	}
+}
+
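+/*
+ * Simple-client (register_adhoc) vote update: the whole ab/ib request
+ * goes to either the active or the sleep context, depending on how the
+ * client was registered.
+ */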
+static int update_bw_adhoc(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+	int ret = 0;
+	char *test_cl = "test-client";
+	bool log_transaction = false;
+	u64 dual_ib, dual_ab, act_ib, act_ab;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %p", __func__, cl);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	if (!strcmp(test_cl, cl->name))
+		log_transaction = true;
+
+	msm_bus_dbg_rec_transaction(cl, ab, ib);
+
+	if (cl->active_only) {
+		if ((cl->cur_act_ib == ib) && (cl->cur_act_ab == ab)) {
+			MSM_BUS_DBG("%s:no change in request", cl->name);
+			goto exit_update_request;
+		}
+		act_ib = ib;
+		act_ab = ab;
+		dual_ib = 0;
+		dual_ab = 0;
+	} else {
+		if ((cl->cur_dual_ib == ib) && (cl->cur_dual_ab == ab)) {
+			MSM_BUS_DBG("%s:no change in request", cl->name);
+			goto exit_update_request;
+		}
+		dual_ib = ib;
+		dual_ab = ab;
+		act_ib = 0;
+		act_ab = 0;
+	}
+
+	ret = update_path(cl->mas_dev, cl->slv, act_ib, act_ab, dual_ib,
+		dual_ab, cl->cur_act_ib, cl->cur_act_ab, cl->first_hop,
+							cl->active_only);
+
+	if (ret) {
+		MSM_BUS_ERR("%s: Update path failed! %d active_only %d\n",
+				__func__, ret, cl->active_only);
+		goto exit_update_request;
+	}
+
+	commit_data();
+	cl->cur_act_ib = act_ib;
+	cl->cur_act_ab = act_ab;
+	cl->cur_dual_ib = dual_ib;
+	cl->cur_dual_ab = dual_ab;
+
+	if (log_transaction)
+		getpath_debug(cl->mas, cl->first_hop, cl->active_only);
+//	trace_bus_update_request_end(cl->name);
+exit_update_request:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+
+	return ret;
+}
+
+static int update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab,
+				u64 act_ib, u64 dual_ib, u64 dual_ab)
+{
+	int ret = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("Invalid client handle %p", cl);
+		ret = -ENXIO;
+		goto exit_change_context;
+	}
+
+	if ((cl->cur_act_ib == act_ib) &&
+		(cl->cur_act_ab == act_ab) &&
+		(cl->cur_dual_ib == dual_ib) &&
+		(cl->cur_dual_ab == dual_ab)) {
+		MSM_BUS_ERR("No change in vote");
+		goto exit_change_context;
+	}
+
+	if (!dual_ab && !dual_ib)
+		cl->active_only = true;
+	msm_bus_dbg_rec_transaction(cl, cl->cur_act_ab, cl->cur_dual_ib);
+	ret = update_path(cl->mas_dev, cl->slv, act_ib, act_ab, dual_ib,
+				dual_ab, cl->cur_act_ib, cl->cur_act_ab,
+				cl->first_hop, cl->active_only);
+	if (ret) {
+		MSM_BUS_ERR("%s: Update path failed! %d active_only %d\n",
+				__func__, ret, cl->active_only);
+		goto exit_change_context;
+	}
+	commit_data();
+	cl->cur_act_ib = act_ib;
+	cl->cur_act_ab = act_ab;
+	cl->cur_dual_ib = dual_ib;
+	cl->cur_dual_ab = dual_ab;
+//	trace_bus_update_request_end(cl->name);
+exit_change_context:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static void unregister_adhoc(struct msm_bus_client_handle *cl)
+{
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("%s: Null cl handle passed unregister\n",
+				__func__);
+		goto exit_unregister_client;
+	}
+
+	MSM_BUS_DBG("%s: Unregistering client %p", __func__, cl);
+
+	remove_path(cl->mas_dev, cl->slv, cl->cur_act_ib, cl->cur_act_ab,
+				cl->first_hop, cl->active_only);
+	commit_data();
+	msm_bus_dbg_remove_client(cl);
+	kfree(cl);
+	MSM_BUS_DBG("%s: Unregistered client", __func__);
+exit_unregister_client:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+}
+
+static struct msm_bus_client_handle*
+register_adhoc(uint32_t mas, uint32_t slv, char *name, bool active_only)
+{
+	struct msm_bus_client_handle *client = NULL;
+	int len = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!(mas && slv && name)) {
+		pr_err("%s: Error: src, dst and name are required\n",
+								 __func__);
+		goto exit_register;
+	}
+
+	client = kzalloc(sizeof(struct msm_bus_client_handle), GFP_KERNEL);
+	if (!client) {
+		MSM_BUS_ERR("%s: Error allocating client data", __func__);
+		goto exit_register;
+	}
+
+	len = strnlen(name, MAX_STR_CL);
+	client->name = kzalloc((len + 1), GFP_KERNEL);
+	if (!client->name) {
+		MSM_BUS_ERR("%s: Error allocating client name buf", __func__);
+		free_cl_mem(client);
+		goto exit_register;
+	}
+	strlcpy(client->name, name, MAX_STR_CL);
+	client->active_only = active_only;
+
+	client->mas = mas;
+	client->slv = slv;
+
+	client->mas_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &mas,
+					msm_bus_device_match_adhoc);
+	if (IS_ERR_OR_NULL(client->mas_dev)) {
+		MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+			__func__, client->mas, client->slv);
+		free_cl_mem(client);
+		goto exit_register;
+	}
+
+	client->first_hop = getpath(client->mas_dev, client->slv, client->name);
+	if (client->first_hop < 0) {
+		MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+			__func__, client->mas, client->slv);
+		free_cl_mem(client);
+		goto exit_register;
+	}
+
+	MSM_BUS_DBG("%s:Client handle %p %s", __func__, client,
+						client->name);
+	msm_bus_dbg_add_client(client);
+exit_register:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return client;
+}
+/**
+ *  msm_bus_arb_setops_adhoc() : Setup the bus arbitration ops
+ *  @ arb_ops: pointer to the arb ops.
+ */
+void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops)
+{
+	arb_ops->register_client = register_client_adhoc;
+	arb_ops->update_request = update_request_adhoc;
+	arb_ops->unregister_client = unregister_client_adhoc;
+	arb_ops->update_context = update_context;
+
+	arb_ops->register_cl = register_adhoc;
+	arb_ops->unregister = unregister_adhoc;
+	arb_ops->update_bw = update_bw_adhoc;
+	arb_ops->update_bw_context = update_bw_context;
+	arb_ops->query_usecase = query_client_usecase;
+	arb_ops->query_usecase_all = query_client_usecase_all;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc.h b/drivers/soc/qcom/msm_bus/msm_bus_bimc.h
new file mode 100644
index 0000000..15b61c1
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_BIMC_H
+#define _ARCH_ARM_MACH_MSM_BUS_BIMC_H
+
+struct msm_bus_bimc_params {
+	uint32_t bus_id;
+	uint32_t addr_width;
+	uint32_t data_width;
+	uint32_t nmasters;
+	uint32_t nslaves;
+};
+
+struct msm_bus_bimc_commit {
+	struct msm_bus_node_hw_info *mas;
+	struct msm_bus_node_hw_info *slv;
+};
+
+struct msm_bus_bimc_info {
+	void __iomem *base;
+	uint32_t base_addr;
+	uint32_t qos_freq;
+	struct msm_bus_bimc_params params;
+	struct msm_bus_bimc_commit cdata[NUM_CTX];
+};
+
+struct msm_bus_bimc_node {
+	uint32_t conn_mask;
+	uint32_t data_width;
+	uint8_t slv_arb_mode;
+};
+
+enum msm_bus_bimc_arb_mode {
+	BIMC_ARB_MODE_RR = 0,
+	BIMC_ARB_MODE_PRIORITY_RR,
+	BIMC_ARB_MODE_TIERED_RR,
+};
+
+
+enum msm_bus_bimc_interleave {
+	BIMC_INTERLEAVE_NONE = 0,
+	BIMC_INTERLEAVE_ODD,
+	BIMC_INTERLEAVE_EVEN,
+};
+
+struct msm_bus_bimc_slave_seg {
+	bool enable;
+	uint64_t start_addr;
+	uint64_t seg_size;
+	uint8_t interleave;
+};
+
+enum msm_bus_bimc_qos_mode_type {
+	BIMC_QOS_MODE_FIXED = 0,
+	BIMC_QOS_MODE_LIMITER,
+	BIMC_QOS_MODE_BYPASS,
+	BIMC_QOS_MODE_REGULATOR,
+};
+
+struct msm_bus_bimc_qos_health {
+	bool limit_commands;
+	uint32_t areq_prio;
+	uint32_t prio_level;
+};
+
+struct msm_bus_bimc_mode_fixed {
+	uint32_t prio_level;
+	uint32_t areq_prio_rd;
+	uint32_t areq_prio_wr;
+};
+
+struct msm_bus_bimc_mode_rl {
+	uint8_t qhealthnum;
+	struct msm_bus_bimc_qos_health qhealth[4];
+};
+
+struct msm_bus_bimc_qos_mode {
+	uint8_t mode;
+	struct msm_bus_bimc_mode_fixed fixed;
+	struct msm_bus_bimc_mode_rl rl;
+};
+
+struct msm_bus_bimc_qos_bw {
+	uint64_t bw;	/* bw is in Bytes/sec */
+	uint32_t ws;	/* Window size in nanoseconds */
+	int64_t thh;	/* Threshold high, bytes per second */
+	int64_t thm;	/* Threshold medium, bytes per second */
+	int64_t thl;	/* Threshold low, bytes per second */
+	u32 gp;	/* Grant Period in microseconds */
+	u32 thmp; /* Threshold medium in percentage */
+};
+
+struct msm_bus_bimc_clk_gate {
+	bool core_clk_gate_en;
+	bool arb_clk_gate_en;	/* For arbiter */
+	bool port_clk_gate_en;	/* For regs on BIMC core clock */
+};
+
+void msm_bus_bimc_set_slave_seg(struct msm_bus_bimc_info *binfo,
+	uint32_t slv_index, uint32_t seg_index,
+	struct msm_bus_bimc_slave_seg *bsseg);
+void msm_bus_bimc_set_slave_clk_gate(struct msm_bus_bimc_info *binfo,
+	uint32_t slv_index, struct msm_bus_bimc_clk_gate *bgate);
+void msm_bus_bimc_set_mas_clk_gate(struct msm_bus_bimc_info *binfo,
+	uint32_t mas_index, struct msm_bus_bimc_clk_gate *bgate);
+void msm_bus_bimc_arb_en(struct msm_bus_bimc_info *binfo,
+	uint32_t slv_index, bool en);
+void msm_bus_bimc_get_params(struct msm_bus_bimc_info *binfo,
+	struct msm_bus_bimc_params *params);
+void msm_bus_bimc_get_mas_params(struct msm_bus_bimc_info *binfo,
+	uint32_t mas_index, struct msm_bus_bimc_node *mparams);
+void msm_bus_bimc_get_slv_params(struct msm_bus_bimc_info *binfo,
+	uint32_t slv_index, struct msm_bus_bimc_node *sparams);
+bool msm_bus_bimc_get_arb_en(struct msm_bus_bimc_info *binfo,
+	uint32_t slv_index);
+
+#endif /*_ARCH_ARM_MACH_MSM_BUS_BIMC_H*/
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c
new file mode 100644
index 0000000..f180781
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: BIMC: %s(): " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_bimc.h"
+#include "msm_bus_adhoc.h"
+#include <trace/events/trace_msm_bus.h>
+
+/* M_Generic */
+
+enum bke_sw {
+	BKE_OFF = 0,
+	BKE_ON = 1,
+};
+
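+/*
+ * Per-master QoS/BKE register block: master blocks start at offset 0x8000
+ * from the BIMC base and are laid out at a 0x4000 stride, so each macro
+ * below takes the BIMC base address and the master port index.
+ */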
+#define M_REG_BASE(b)		((b) + 0x00008000)
+
+#define M_MODE_ADDR(b, n) \
+		(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000210)
+enum bimc_m_mode {
+	M_MODE_RMSK				= 0xf0000011,
+	M_MODE_WR_GATHER_BEATS_BMSK		= 0xf0000000,
+	M_MODE_WR_GATHER_BEATS_SHFT		= 0x1c,
+	M_MODE_NARROW_WR_BMSK			= 0x10,
+	M_MODE_NARROW_WR_SHFT			= 0x4,
+	M_MODE_ORDERING_MODEL_BMSK		= 0x1,
+	M_MODE_ORDERING_MODEL_SHFT		= 0x0,
+};
+
+#define M_PRIOLVL_OVERRIDE_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
+enum bimc_m_priolvl_override {
+	M_PRIOLVL_OVERRIDE_RMSK			= 0x301,
+	M_PRIOLVL_OVERRIDE_BMSK			= 0x300,
+	M_PRIOLVL_OVERRIDE_SHFT			= 0x8,
+	M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK	= 0x1,
+	M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT	= 0x0,
+};
+
+#define M_RD_CMD_OVERRIDE_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
+enum bimc_m_read_command_override {
+	M_RD_CMD_OVERRIDE_RMSK			= 0x3071f7f,
+	M_RD_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x3000000,
+	M_RD_CMD_OVERRIDE_AREQPRIO_SHFT		= 0x18,
+	M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK		= 0x70000,
+	M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT		= 0x10,
+	M_RD_CMD_OVERRIDE_ATRANSIENT_BMSK		= 0x1000,
+	M_RD_CMD_OVERRIDE_ATRANSIENT_SHFT		= 0xc,
+	M_RD_CMD_OVERRIDE_ASHARED_BMSK		= 0x800,
+	M_RD_CMD_OVERRIDE_ASHARED_SHFT		= 0xb,
+	M_RD_CMD_OVERRIDE_AREDIRECT_BMSK		= 0x400,
+	M_RD_CMD_OVERRIDE_AREDIRECT_SHFT		= 0xa,
+	M_RD_CMD_OVERRIDE_AOOO_BMSK			= 0x200,
+	M_RD_CMD_OVERRIDE_AOOO_SHFT			= 0x9,
+	M_RD_CMD_OVERRIDE_AINNERSHARED_BMSK		= 0x100,
+	M_RD_CMD_OVERRIDE_AINNERSHARED_SHFT		= 0x8,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK	= 0x40,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT	= 0x6,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK	= 0x20,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT	= 0x5,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK	= 0x10,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT	= 0x4,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK	= 0x8,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT	= 0x3,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK	= 0x4,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT	= 0x2,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK		= 0x2,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT		= 0x1,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK	= 0x1,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT	= 0x0,
+};
+
+#define M_WR_CMD_OVERRIDE_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250)
+enum bimc_m_write_command_override {
+	M_WR_CMD_OVERRIDE_RMSK			= 0x3071f7f,
+	M_WR_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x3000000,
+	M_WR_CMD_OVERRIDE_AREQPRIO_SHFT		= 0x18,
+	M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK		= 0x70000,
+	M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT		= 0x10,
+	M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK	= 0x1000,
+	M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT	= 0xc,
+	M_WR_CMD_OVERRIDE_ASHARED_BMSK		= 0x800,
+	M_WR_CMD_OVERRIDE_ASHARED_SHFT		= 0xb,
+	M_WR_CMD_OVERRIDE_AREDIRECT_BMSK		= 0x400,
+	M_WR_CMD_OVERRIDE_AREDIRECT_SHFT		= 0xa,
+	M_WR_CMD_OVERRIDE_AOOO_BMSK			= 0x200,
+	M_WR_CMD_OVERRIDE_AOOO_SHFT			= 0x9,
+	M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK		= 0x100,
+	M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT		= 0x8,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK	= 0x40,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT	= 0x6,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK	= 0x20,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT	= 0x5,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK	= 0x10,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT	= 0x4,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK	= 0x8,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT	= 0x3,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK	= 0x4,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT	= 0x2,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK	= 0x2,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT	= 0x1,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK	= 0x1,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT	= 0x0,
+};
+
+#define M_BKE_EN_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000300)
+enum bimc_m_bke_en {
+	M_BKE_EN_RMSK			= 0x1,
+	M_BKE_EN_EN_BMSK		= 0x1,
+	M_BKE_EN_EN_SHFT		= 0x0,
+};
+
+/* Grant Period registers */
+#define M_BKE_GP_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000304)
+enum bimc_m_bke_grant_period {
+	M_BKE_GP_RMSK		= 0x3ff,
+	M_BKE_GP_GP_BMSK	= 0x3ff,
+	M_BKE_GP_GP_SHFT	= 0x0,
+};
+
+/* Grant count register.
+ * This is a signed 16-bit value; only the range 0-0x7fff is used.
+ */
+#define M_BKE_GC_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000308)
+enum bimc_m_bke_grant_count {
+	M_BKE_GC_RMSK			= 0xffff,
+	M_BKE_GC_GC_BMSK		= 0xffff,
+	M_BKE_GC_GC_SHFT		= 0x0,
+};
+
+/* Threshold High Registers */
+#define M_BKE_THH_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000320)
+enum bimc_m_bke_thresh_high {
+	M_BKE_THH_RMSK		= 0xffff,
+	M_BKE_THH_THRESH_BMSK	= 0xffff,
+	M_BKE_THH_THRESH_SHFT	= 0x0,
+};
+
+/* Threshold Medium Registers */
+#define M_BKE_THM_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000324)
+enum bimc_m_bke_thresh_medium {
+	M_BKE_THM_RMSK		= 0xffff,
+	M_BKE_THM_THRESH_BMSK	= 0xffff,
+	M_BKE_THM_THRESH_SHFT	= 0x0,
+};
+
+/* Threshold Low Registers */
+#define M_BKE_THL_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000328)
+enum bimc_m_bke_thresh_low {
+	M_BKE_THL_RMSK			= 0xffff,
+	M_BKE_THL_THRESH_BMSK		= 0xffff,
+	M_BKE_THL_THRESH_SHFT		= 0x0,
+};
+
+#define NUM_HEALTH_LEVEL	(4)
+#define M_BKE_HEALTH_0_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000340)
+enum bimc_m_bke_health_0 {
+	M_BKE_HEALTH_0_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
+	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT		= 0x8,
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define M_BKE_HEALTH_1_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000344)
+enum bimc_m_bke_health_1 {
+	M_BKE_HEALTH_1_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
+	M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
+	M_BKE_HEALTH_1_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_1_CONFIG_AREQPRIO_SHFT		= 0x8,
+	M_BKE_HEALTH_1_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_1_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define M_BKE_HEALTH_2_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000348)
+enum bimc_m_bke_health_2 {
+	M_BKE_HEALTH_2_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
+	M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
+	M_BKE_HEALTH_2_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_2_CONFIG_AREQPRIO_SHFT		= 0x8,
+	M_BKE_HEALTH_2_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_2_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define M_BKE_HEALTH_3_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x0000034c)
+enum bimc_m_bke_health_3 {
+	M_BKE_HEALTH_3_CONFIG_RMSK			= 0x303,
+	M_BKE_HEALTH_3_CONFIG_AREQPRIO_BMSK	= 0x300,
+	M_BKE_HEALTH_3_CONFIG_AREQPRIO_SHFT	= 0x8,
+	M_BKE_HEALTH_3_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_3_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define BKE_HEALTH_MASK \
+	(M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK |\
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK |\
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK)
+
+#define BKE_HEALTH_VAL(limit, areq, plvl) \
+	((((limit) << M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT) & \
+	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK) | \
+	(((areq) << M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT) & \
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK) | \
+	(((plvl) << M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT) & \
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK))
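+
+/*
+ * Illustrative example (values are not from any specific target):
+ * BKE_HEALTH_VAL(1, 2, 3) packs to 0x80000203, i.e. LIMIT_CMDS = 1
+ * (bit 31), AREQPRIO = 2 (bits 9:8) and PRIOLVL = 3 (bits 1:0).
+ */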
+
+#define MAX_GRANT_PERIOD \
+	(M_BKE_GP_GP_BMSK >> \
+	M_BKE_GP_GP_SHFT)
+
+#define MAX_GC \
+	(M_BKE_GC_GC_BMSK >> \
+	(M_BKE_GC_GC_SHFT + 1))
+
+static int bimc_div(uint64_t *a, uint32_t b)
+{
+	if ((*a > 0) && (*a < b)) {
+		*a = 0;
+		return 1;
+	} else {
+		return do_div(*a, b);
+	}
+}
+
+static void set_bke_en(void __iomem *addr, uint32_t index,
+		bool req)
+{
+	uint32_t old_val, new_val;
+
+	old_val = readl_relaxed(M_BKE_EN_ADDR(addr, index));
+	new_val = req << M_BKE_EN_EN_SHFT;
+	if ((old_val & M_BKE_EN_RMSK) == (new_val))
+		return;
+	writel_relaxed(((old_val & ~(M_BKE_EN_EN_BMSK)) | (new_val &
+				M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(addr, index));
+	/* Ensure that the BKE enable register is programmed before returning */
+	wmb();
+}
+
+static void set_health_reg(void __iomem *addr, uint32_t rmsk,
+	uint8_t index, struct msm_bus_bimc_qos_mode *qmode)
+{
+	uint32_t reg_val, val0, val;
+
+	/* Note, addr is already passed with right mas_index */
+	reg_val = readl_relaxed(addr) & rmsk;
+	val0 = BKE_HEALTH_VAL(qmode->rl.qhealth[index].limit_commands,
+		qmode->rl.qhealth[index].areq_prio,
+		qmode->rl.qhealth[index].prio_level);
+	val = ((reg_val & (~(BKE_HEALTH_MASK))) | (val0 & BKE_HEALTH_MASK));
+	writel_relaxed(val, addr);
+	/*
+	 * Ensure that the priorities for regulator/limiter modes are
+	 * set before returning
+	 */
+	wmb();
+}
+
+static void msm_bus_bimc_set_qos_prio(void __iomem *base,
+	uint32_t mas_index, uint8_t qmode_sel,
+	struct msm_bus_bimc_qos_mode *qmode)
+{
+
+	switch (qmode_sel) {
+	case BIMC_QOS_MODE_FIXED:
+	case BIMC_QOS_MODE_REGULATOR:
+	case BIMC_QOS_MODE_LIMITER:
+		set_health_reg(M_BKE_HEALTH_3_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_3_CONFIG_RMSK, 3, qmode);
+		set_health_reg(M_BKE_HEALTH_2_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_2_CONFIG_RMSK, 2, qmode);
+		set_health_reg(M_BKE_HEALTH_1_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_1_CONFIG_RMSK, 1, qmode);
+		set_health_reg(M_BKE_HEALTH_0_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_0_CONFIG_RMSK, 0, qmode);
+		set_bke_en(base, mas_index, true);
+		break;
+	case BIMC_QOS_MODE_BYPASS:
+		set_bke_en(base, mas_index, false);
+		break;
+	default:
+		break;
+	}
+}
+
+static void set_qos_bw_regs(void __iomem *baddr, uint32_t mas_index,
+	int32_t th, int32_t tm, int32_t tl, uint32_t gp,
+	uint32_t gc)
+{
+	int32_t reg_val, val;
+	int32_t bke_reg_val;
+	int16_t val2;
+
+	/* Disable BKE before writing to registers as per spec */
+	bke_reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index));
+	writel_relaxed((bke_reg_val & ~(M_BKE_EN_EN_BMSK)),
+		M_BKE_EN_ADDR(baddr, mas_index));
+
+	/* Write values of registers calculated */
+	reg_val = readl_relaxed(M_BKE_GP_ADDR(baddr, mas_index))
+		& M_BKE_GP_RMSK;
+	val =  gp << M_BKE_GP_GP_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_GP_GP_BMSK)) | (val &
+		M_BKE_GP_GP_BMSK)), M_BKE_GP_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_GC_ADDR(baddr, mas_index)) &
+		M_BKE_GC_RMSK;
+	val =  gc << M_BKE_GC_GC_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_GC_GC_BMSK)) | (val &
+		M_BKE_GC_GC_BMSK)), M_BKE_GC_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_THH_ADDR(baddr, mas_index)) &
+		M_BKE_THH_RMSK;
+	val =  th << M_BKE_THH_THRESH_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_THH_THRESH_BMSK)) | (val &
+		M_BKE_THH_THRESH_BMSK)), M_BKE_THH_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_THM_ADDR(baddr, mas_index)) &
+		M_BKE_THM_RMSK;
+	val2 =	tm << M_BKE_THM_THRESH_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_THM_THRESH_BMSK)) | (val2 &
+		M_BKE_THM_THRESH_BMSK)), M_BKE_THM_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_THL_ADDR(baddr, mas_index)) &
+		M_BKE_THL_RMSK;
+	val2 =	tl << M_BKE_THL_THRESH_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_THL_THRESH_BMSK)) |
+		(val2 & M_BKE_THL_THRESH_BMSK)), M_BKE_THL_ADDR(baddr,
+		mas_index));
+
+	/* Ensure that all bandwidth register writes have completed
+	 * before returning
+	 */
+	wmb();
+}
+
+static void bimc_set_static_qos_bw(void __iomem *base, unsigned int qos_freq,
+	int mport, struct msm_bus_bimc_qos_bw *qbw)
+{
+	int32_t bw_mbps, thh = 0, thm, thl, gc;
+	int32_t gp;
+	u64 temp;
+
+	if (qos_freq == 0) {
+		MSM_BUS_DBG("No QoS Frequency.\n");
+		return;
+	}
+
+	if (!(qbw->bw && qbw->gp)) {
+		MSM_BUS_DBG("No QoS Bandwidth or Window size\n");
+		return;
+	}
+
+	/* Convert bandwidth to MBPS */
+	temp = qbw->bw;
+	bimc_div(&temp, 1000000);
+	bw_mbps = temp;
+
+	/* Grant period in clock cycles.
+	 * The grant period in the bandwidth structure is in
+	 * nanoseconds and the QoS frequency is in KHz, so divide
+	 * the product by (1000 * NSEC_PER_USEC) to get clock cycles.
+	 */
+	gp = (qos_freq * qbw->gp) / (1000 * NSEC_PER_USEC);
+
+	/* Grant count = BW in MBps * Grant period
+	 * in microseconds
+	 */
+	gc = bw_mbps * (qbw->gp / NSEC_PER_USEC);
+	gc = min(gc, MAX_GC);
+
+	/* Medium threshold = -((Medium Threshold percentage *
+	 * Grant count) / 100)
+	 */
+	thm = -((qbw->thmp * gc) / 100);
+	qbw->thm = thm;
+
+	/* Low threshold = -(Grant count) */
+	thl = -gc;
+	qbw->thl = thl;
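+
+	/*
+	 * Worked example with illustrative values (not from any specific
+	 * target): qos_freq = 200000 KHz, qbw->gp = 5000 ns, bw_mbps = 400
+	 * and thmp = 50 give
+	 *	gp  = (200000 * 5000) / 1000000 = 1000 clock cycles,
+	 *	gc  = 400 * (5000 / 1000)       = 2000,
+	 *	thm = -((50 * 2000) / 100)      = -1000,
+	 *	thl = -2000 and thh = 0.
+	 */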
+
+	MSM_BUS_DBG("%s: BKE parameters: gp %d, gc %d, thm %d thl %d thh %d",
+			__func__, gp, gc, thm, thl, thh);
+
+	trace_bus_bke_params(gc, gp, thl, thm, thl);
+	set_qos_bw_regs(base, mport, thh, thm, thl, gp, gc);
+}
+
+static int msm_bus_bimc_limit_mport(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				int enable_lim, u64 lim_bw)
+{
+	int mode;
+	int i;
+	struct msm_bus_bimc_qos_mode qmode = {0};
+
+	if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
+		MSM_BUS_DBG("No QoS Ports to limit\n");
+		return 0;
+	}
+
+	if ((enable_lim == THROTTLE_ON) && lim_bw) {
+		mode =  BIMC_QOS_MODE_LIMITER;
+
+		qmode.rl.qhealth[0].limit_commands = true;
+		qmode.rl.qhealth[1].limit_commands = false;
+		qmode.rl.qhealth[2].limit_commands = false;
+		qmode.rl.qhealth[3].limit_commands = false;
+		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+			qmode.rl.qhealth[i].prio_level =
+					info->node_info->qos_params.prio_lvl;
+			qmode.rl.qhealth[i].areq_prio =
+					info->node_info->qos_params.prio_rd;
+		}
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			struct msm_bus_bimc_qos_bw qbw;
+			/* If not in fixed mode, update bandwidth */
+			if (info->node_info->lim_bw != lim_bw) {
+				qbw.ws = info->node_info->qos_params.ws;
+				qbw.bw = lim_bw;
+				qbw.gp = info->node_info->qos_params.gp;
+				qbw.thmp = info->node_info->qos_params.thmp;
+				bimc_set_static_qos_bw(qos_base, qos_freq,
+					info->node_info->qport[i], &qbw);
+			}
+		}
+		info->node_info->lim_bw = lim_bw;
+	} else {
+		mode = info->node_info->qos_params.mode;
+		if (mode != BIMC_QOS_MODE_BYPASS) {
+			for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+				qmode.rl.qhealth[i].prio_level =
+					info->node_info->qos_params.prio_lvl;
+				qmode.rl.qhealth[i].areq_prio =
+					info->node_info->qos_params.prio_rd;
+			}
+		}
+	}
+
+	for (i = 0; i < info->node_info->num_qports; i++)
+		msm_bus_bimc_set_qos_prio(qos_base, info->node_info->qport[i],
+				mode, &qmode);
+	return 0;
+}
+
+static bool msm_bus_bimc_update_bw_reg(int mode)
+{
+	bool ret = false;
+
+	if ((mode == BIMC_QOS_MODE_LIMITER)
+		|| (mode == BIMC_QOS_MODE_REGULATOR))
+		ret = true;
+
+	return ret;
+}
+
+static int msm_bus_bimc_qos_init(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	int i;
+	struct msm_bus_bimc_qos_mode qmode = {0};
+
+	if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
+		MSM_BUS_DBG("No QoS Ports to init\n");
+		return 0;
+	}
+
+	switch (info->node_info->qos_params.mode) {
+		/* For now, Fixed and Regulator modes are handled the same way. */
+	case BIMC_QOS_MODE_FIXED:
+	case BIMC_QOS_MODE_REGULATOR:
+		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+			qmode.rl.qhealth[i].prio_level =
+				info->node_info->qos_params.prio_lvl;
+			qmode.rl.qhealth[i].areq_prio =
+				info->node_info->qos_params.prio_rd;
+		}
+		break;
+	case BIMC_QOS_MODE_LIMITER:
+		qmode.rl.qhealth[0].limit_commands = true;
+		qmode.rl.qhealth[1].limit_commands = false;
+		qmode.rl.qhealth[2].limit_commands = false;
+		qmode.rl.qhealth[3].limit_commands = false;
+		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+			qmode.rl.qhealth[i].prio_level =
+				info->node_info->qos_params.prio_lvl;
+			qmode.rl.qhealth[i].areq_prio =
+				info->node_info->qos_params.prio_rd;
+		}
+		break;
+	default:
+		break;
+	}
+
+
+	for (i = 0; i < info->node_info->num_qports; i++)
+		msm_bus_bimc_set_qos_prio(qos_base, info->node_info->qport[i],
+			info->node_info->qos_params.mode, &qmode);
+
+	return 0;
+}
+
+static int msm_bus_bimc_set_bw(struct msm_bus_node_device_type *dev,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq)
+{
+	struct msm_bus_bimc_qos_bw qbw;
+	struct msm_bus_bimc_qos_mode qmode = {0};
+	int i;
+	int64_t bw = 0;
+	int ret = 0;
+	struct msm_bus_node_info_type *info = dev->node_info;
+	int mode;
+
+	if (info && info->num_qports &&
+		((info->qos_params.mode == BIMC_QOS_MODE_LIMITER))) {
+		bw = msm_bus_div64(dev->node_bw[ACTIVE_CTX].sum_ab,
+				info->num_qports);
+
+		MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n",
+				info->id, bw);
+
+		if (!info->qport) {
+			MSM_BUS_DBG("No qos ports to update!\n");
+			goto exit_set_bw;
+		}
+
+		qbw.bw = bw + info->qos_params.bw_buffer;
+		trace_bus_bimc_config_limiter(info->id, bw);
+
+		/* Default to gp of 5us */
+		qbw.gp = (info->qos_params.gp ?
+				info->qos_params.gp : 5000);
+		/* Default to thmp of 50% */
+		qbw.thmp = (info->qos_params.thmp ?
+				info->qos_params.thmp : 50);
+		/*
+		 * If the BW vote is 0 then set the QoS mode to
+		 * Fixed/0/0.
+		 */
+		if (bw) {
+			qmode.rl.qhealth[0].limit_commands = true;
+			qmode.rl.qhealth[1].limit_commands = false;
+			qmode.rl.qhealth[2].limit_commands = false;
+			qmode.rl.qhealth[3].limit_commands = false;
+			mode = info->qos_params.mode;
+		} else {
+			mode =	BIMC_QOS_MODE_FIXED;
+		}
+
+		for (i = 0; i < info->num_qports; i++) {
+			msm_bus_bimc_set_qos_prio(qos_base,
+				info->qport[i], mode, &qmode);
+			if (bw)
+				bimc_set_static_qos_bw(qos_base, qos_freq,
+					info->qport[i], &qbw);
+		}
+	}
+exit_set_bw:
+	return ret;
+}
+
+int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	if (!bus_dev)
+		return -ENODEV;
+	bus_dev->fabdev->noc_ops.qos_init = msm_bus_bimc_qos_init;
+	bus_dev->fabdev->noc_ops.set_bw = msm_bus_bimc_set_bw;
+	bus_dev->fabdev->noc_ops.limit_mport = msm_bus_bimc_limit_mport;
+	bus_dev->fabdev->noc_ops.update_bw_reg =
+					msm_bus_bimc_update_bw_reg;
+	return 0;
+}
+EXPORT_SYMBOL(msm_bus_bimc_set_ops);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_bimc_rpmh.c
new file mode 100644
index 0000000..1c4546a
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc_rpmh.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: BIMC: %s(): " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_bimc.h"
+#include "msm_bus_adhoc.h"
+
+/* M_Generic */
+
+enum bke_sw {
+	BKE_OFF = 0,
+	BKE_ON = 1,
+};
+
+#define M_REG_BASE(b)		((b) + 0x00008000)
+
+#define M_MODE_ADDR(b, n) \
+		(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000210)
+enum bimc_m_mode {
+	M_MODE_RMSK				= 0xf0000011,
+	M_MODE_WR_GATHER_BEATS_BMSK		= 0xf0000000,
+	M_MODE_WR_GATHER_BEATS_SHFT		= 0x1c,
+	M_MODE_NARROW_WR_BMSK			= 0x10,
+	M_MODE_NARROW_WR_SHFT			= 0x4,
+	M_MODE_ORDERING_MODEL_BMSK		= 0x1,
+	M_MODE_ORDERING_MODEL_SHFT		= 0x0,
+};
+
+#define M_PRIOLVL_OVERRIDE_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
+enum bimc_m_priolvl_override {
+	M_PRIOLVL_OVERRIDE_RMSK			= 0x301,
+	M_PRIOLVL_OVERRIDE_BMSK			= 0x300,
+	M_PRIOLVL_OVERRIDE_SHFT			= 0x8,
+	M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK	= 0x1,
+	M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT	= 0x0,
+};
+
+#define M_RD_CMD_OVERRIDE_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
+enum bimc_m_read_command_override {
+	M_RD_CMD_OVERRIDE_RMSK			= 0x3071f7f,
+	M_RD_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x3000000,
+	M_RD_CMD_OVERRIDE_AREQPRIO_SHFT		= 0x18,
+	M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK		= 0x70000,
+	M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT		= 0x10,
+	M_RD_CMD_OVERRIDE_ATRANSIENT_BMSK		= 0x1000,
+	M_RD_CMD_OVERRIDE_ATRANSIENT_SHFT		= 0xc,
+	M_RD_CMD_OVERRIDE_ASHARED_BMSK		= 0x800,
+	M_RD_CMD_OVERRIDE_ASHARED_SHFT		= 0xb,
+	M_RD_CMD_OVERRIDE_AREDIRECT_BMSK		= 0x400,
+	M_RD_CMD_OVERRIDE_AREDIRECT_SHFT		= 0xa,
+	M_RD_CMD_OVERRIDE_AOOO_BMSK			= 0x200,
+	M_RD_CMD_OVERRIDE_AOOO_SHFT			= 0x9,
+	M_RD_CMD_OVERRIDE_AINNERSHARED_BMSK		= 0x100,
+	M_RD_CMD_OVERRIDE_AINNERSHARED_SHFT		= 0x8,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK	= 0x40,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT	= 0x6,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK	= 0x20,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT	= 0x5,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK	= 0x10,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT	= 0x4,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK	= 0x8,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT	= 0x3,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK	= 0x4,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT	= 0x2,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK		= 0x2,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT		= 0x1,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK	= 0x1,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT	= 0x0,
+};
+
+#define M_WR_CMD_OVERRIDE_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250)
+enum bimc_m_write_command_override {
+	M_WR_CMD_OVERRIDE_RMSK			= 0x3071f7f,
+	M_WR_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x3000000,
+	M_WR_CMD_OVERRIDE_AREQPRIO_SHFT		= 0x18,
+	M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK		= 0x70000,
+	M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT		= 0x10,
+	M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK	= 0x1000,
+	M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT	= 0xc,
+	M_WR_CMD_OVERRIDE_ASHARED_BMSK		= 0x800,
+	M_WR_CMD_OVERRIDE_ASHARED_SHFT		= 0xb,
+	M_WR_CMD_OVERRIDE_AREDIRECT_BMSK		= 0x400,
+	M_WR_CMD_OVERRIDE_AREDIRECT_SHFT		= 0xa,
+	M_WR_CMD_OVERRIDE_AOOO_BMSK			= 0x200,
+	M_WR_CMD_OVERRIDE_AOOO_SHFT			= 0x9,
+	M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK		= 0x100,
+	M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT		= 0x8,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK	= 0x40,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT	= 0x6,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK	= 0x20,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT	= 0x5,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK	= 0x10,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT	= 0x4,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK	= 0x8,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT	= 0x3,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK	= 0x4,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT	= 0x2,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK	= 0x2,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT	= 0x1,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK	= 0x1,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT	= 0x0,
+};
+
+#define M_BKE_EN_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000300)
+enum bimc_m_bke_en {
+	M_BKE_EN_RMSK			= 0x1,
+	M_BKE_EN_EN_BMSK		= 0x1,
+	M_BKE_EN_EN_SHFT		= 0x0,
+};
+
+/* Grant Period registers */
+#define M_BKE_GP_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000304)
+enum bimc_m_bke_grant_period {
+	M_BKE_GP_RMSK		= 0x3ff,
+	M_BKE_GP_GP_BMSK	= 0x3ff,
+	M_BKE_GP_GP_SHFT	= 0x0,
+};
+
+/* Grant count register.
+ * The grant count is a signed 16-bit value; only the
+ * non-negative range 0-0x7fff is used.
+ */
+#define M_BKE_GC_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000308)
+enum bimc_m_bke_grant_count {
+	M_BKE_GC_RMSK			= 0xffff,
+	M_BKE_GC_GC_BMSK		= 0xffff,
+	M_BKE_GC_GC_SHFT		= 0x0,
+};
+
+/* Threshold High Registers */
+#define M_BKE_THH_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000320)
+enum bimc_m_bke_thresh_high {
+	M_BKE_THH_RMSK		= 0xffff,
+	M_BKE_THH_THRESH_BMSK	= 0xffff,
+	M_BKE_THH_THRESH_SHFT	= 0x0,
+};
+
+/* Threshold Medium Registers */
+#define M_BKE_THM_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000324)
+enum bimc_m_bke_thresh_medium {
+	M_BKE_THM_RMSK		= 0xffff,
+	M_BKE_THM_THRESH_BMSK	= 0xffff,
+	M_BKE_THM_THRESH_SHFT	= 0x0,
+};
+
+/* Threshold Low Registers */
+#define M_BKE_THL_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000328)
+enum bimc_m_bke_thresh_low {
+	M_BKE_THL_RMSK			= 0xffff,
+	M_BKE_THL_THRESH_BMSK		= 0xffff,
+	M_BKE_THL_THRESH_SHFT		= 0x0,
+};
+
+#define NUM_HEALTH_LEVEL	(4)
+#define M_BKE_HEALTH_0_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000340)
+enum bimc_m_bke_health_0 {
+	M_BKE_HEALTH_0_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
+	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT		= 0x8,
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define M_BKE_HEALTH_1_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000344)
+enum bimc_m_bke_health_1 {
+	M_BKE_HEALTH_1_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
+	M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
+	M_BKE_HEALTH_1_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_1_CONFIG_AREQPRIO_SHFT		= 0x8,
+	M_BKE_HEALTH_1_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_1_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define M_BKE_HEALTH_2_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000348)
+enum bimc_m_bke_health_2 {
+	M_BKE_HEALTH_2_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
+	M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
+	M_BKE_HEALTH_2_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_2_CONFIG_AREQPRIO_SHFT		= 0x8,
+	M_BKE_HEALTH_2_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_2_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define M_BKE_HEALTH_3_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x0000034c)
+enum bimc_m_bke_health_3 {
+	M_BKE_HEALTH_3_CONFIG_RMSK			= 0x303,
+	M_BKE_HEALTH_3_CONFIG_AREQPRIO_BMSK	= 0x300,
+	M_BKE_HEALTH_3_CONFIG_AREQPRIO_SHFT	= 0x8,
+	M_BKE_HEALTH_3_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_3_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define BKE_HEALTH_MASK \
+	(M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK |\
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK |\
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK)
+
+#define BKE_HEALTH_VAL(limit, areq, plvl) \
+	((((limit) << M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT) & \
+	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK) | \
+	(((areq) << M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT) & \
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK) | \
+	(((plvl) << M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT) & \
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK))
+
+#define MAX_GRANT_PERIOD \
+	(M_BKE_GP_GP_BMSK >> \
+	M_BKE_GP_GP_SHFT)
+
+#define MAX_GC \
+	(M_BKE_GC_GC_BMSK >> \
+	(M_BKE_GC_GC_SHFT + 1))
+
+static int bimc_div(uint64_t *a, uint32_t b)
+{
+	if ((*a > 0) && (*a < b)) {
+		*a = 0;
+		return 1;
+	} else {
+		return do_div(*a, b);
+	}
+}
+
+static void set_bke_en(void __iomem *addr, uint32_t index,
+		bool req)
+{
+	uint32_t old_val, new_val;
+
+	old_val = readl_relaxed(M_BKE_EN_ADDR(addr, index));
+	new_val = req << M_BKE_EN_EN_SHFT;
+	if ((old_val & M_BKE_EN_RMSK) == (new_val))
+		return;
+	writel_relaxed(((old_val & ~(M_BKE_EN_EN_BMSK)) | (new_val &
+				M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(addr, index));
+	/* Ensure that the BKE enable register is programmed before returning */
+	wmb();
+}
+
+static void set_health_reg(void __iomem *addr, uint32_t rmsk,
+	uint8_t index, struct msm_bus_bimc_qos_mode *qmode)
+{
+	uint32_t reg_val, val0, val;
+
+	/* Note, addr is already passed with right mas_index */
+	reg_val = readl_relaxed(addr) & rmsk;
+	val0 = BKE_HEALTH_VAL(qmode->rl.qhealth[index].limit_commands,
+		qmode->rl.qhealth[index].areq_prio,
+		qmode->rl.qhealth[index].prio_level);
+	val = ((reg_val & (~(BKE_HEALTH_MASK))) | (val0 & BKE_HEALTH_MASK));
+	writel_relaxed(val, addr);
+	/*
+	 * Ensure that the priorities for regulator/limiter modes are
+	 * set before returning
+	 */
+	wmb();
+}
+
+static void msm_bus_bimc_set_qos_prio(void __iomem *base,
+	uint32_t mas_index, uint8_t qmode_sel,
+	struct msm_bus_bimc_qos_mode *qmode)
+{
+
+	switch (qmode_sel) {
+	case BIMC_QOS_MODE_FIXED:
+	case BIMC_QOS_MODE_REGULATOR:
+	case BIMC_QOS_MODE_LIMITER:
+		set_health_reg(M_BKE_HEALTH_3_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_3_CONFIG_RMSK, 3, qmode);
+		set_health_reg(M_BKE_HEALTH_2_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_2_CONFIG_RMSK, 2, qmode);
+		set_health_reg(M_BKE_HEALTH_1_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_1_CONFIG_RMSK, 1, qmode);
+		set_health_reg(M_BKE_HEALTH_0_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_0_CONFIG_RMSK, 0, qmode);
+		set_bke_en(base, mas_index, true);
+		break;
+	case BIMC_QOS_MODE_BYPASS:
+		set_bke_en(base, mas_index, false);
+		break;
+	default:
+		break;
+	}
+}
+
+static void set_qos_bw_regs(void __iomem *baddr, uint32_t mas_index,
+	int32_t th, int32_t tm, int32_t tl, uint32_t gp,
+	uint32_t gc)
+{
+	int32_t reg_val, val;
+	int32_t bke_reg_val;
+	int16_t val2;
+
+	/* Disable BKE before writing to registers as per spec */
+	bke_reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index));
+	writel_relaxed((bke_reg_val & ~(M_BKE_EN_EN_BMSK)),
+		M_BKE_EN_ADDR(baddr, mas_index));
+
+	/* Write values of registers calculated */
+	reg_val = readl_relaxed(M_BKE_GP_ADDR(baddr, mas_index))
+		& M_BKE_GP_RMSK;
+	val =  gp << M_BKE_GP_GP_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_GP_GP_BMSK)) | (val &
+		M_BKE_GP_GP_BMSK)), M_BKE_GP_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_GC_ADDR(baddr, mas_index)) &
+		M_BKE_GC_RMSK;
+	val =  gc << M_BKE_GC_GC_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_GC_GC_BMSK)) | (val &
+		M_BKE_GC_GC_BMSK)), M_BKE_GC_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_THH_ADDR(baddr, mas_index)) &
+		M_BKE_THH_RMSK;
+	val =  th << M_BKE_THH_THRESH_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_THH_THRESH_BMSK)) | (val &
+		M_BKE_THH_THRESH_BMSK)), M_BKE_THH_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_THM_ADDR(baddr, mas_index)) &
+		M_BKE_THM_RMSK;
+	val2 =	tm << M_BKE_THM_THRESH_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_THM_THRESH_BMSK)) | (val2 &
+		M_BKE_THM_THRESH_BMSK)), M_BKE_THM_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_THL_ADDR(baddr, mas_index)) &
+		M_BKE_THL_RMSK;
+	val2 =	tl << M_BKE_THL_THRESH_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_THL_THRESH_BMSK)) |
+		(val2 & M_BKE_THL_THRESH_BMSK)), M_BKE_THL_ADDR(baddr,
+		mas_index));
+
+	/* Ensure that all bandwidth register writes have completed
+	 * before returning
+	 */
+	wmb();
+}
+
+static void bimc_set_static_qos_bw(void __iomem *base, unsigned int qos_freq,
+	int mport, struct msm_bus_bimc_qos_bw *qbw)
+{
+	int32_t bw_mbps, thh = 0, thm, thl, gc;
+	int32_t gp;
+	u64 temp;
+
+	if (qos_freq == 0) {
+		MSM_BUS_DBG("No QoS Frequency.\n");
+		return;
+	}
+
+	if (!(qbw->bw && qbw->gp)) {
+		MSM_BUS_DBG("No QoS Bandwidth or Window size\n");
+		return;
+	}
+
+	/* Convert bandwidth to MBPS */
+	temp = qbw->bw;
+	bimc_div(&temp, 1000000);
+	bw_mbps = temp;
+
+	/* Grant period in clock cycles.
+	 * The grant period in the bandwidth structure is in
+	 * nanoseconds and the QoS frequency is in KHz, so divide
+	 * the product by (1000 * NSEC_PER_USEC) to get clock cycles.
+	 */
+	gp = (qos_freq * qbw->gp) / (1000 * NSEC_PER_USEC);
+
+	/* Grant count = BW in MBps * Grant period
+	 * in microseconds
+	 */
+	gc = bw_mbps * (qbw->gp / NSEC_PER_USEC);
+	gc = min(gc, MAX_GC);
+
+	/* Medium threshold = -((Medium Threshold percentage *
+	 * Grant count) / 100)
+	 */
+	thm = -((qbw->thmp * gc) / 100);
+	qbw->thm = thm;
+
+	/* Low threshold = -(Grant count) */
+	thl = -gc;
+	qbw->thl = thl;
+
+	MSM_BUS_DBG("%s: BKE parameters: gp %d, gc %d, thm %d thl %d thh %d",
+			__func__, gp, gc, thm, thl, thh);
+
+	set_qos_bw_regs(base, mport, thh, thm, thl, gp, gc);
+}
+
+static int msm_bus_bimc_limit_mport(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				int enable_lim, u64 lim_bw)
+{
+	int mode;
+	int i;
+	struct msm_bus_bimc_qos_mode qmode = {0};
+
+	if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
+		MSM_BUS_DBG("No QoS Ports to limit\n");
+		return 0;
+	}
+
+	if ((enable_lim == THROTTLE_ON) && lim_bw) {
+		mode =  BIMC_QOS_MODE_LIMITER;
+
+		qmode.rl.qhealth[0].limit_commands = true;
+		qmode.rl.qhealth[1].limit_commands = false;
+		qmode.rl.qhealth[2].limit_commands = false;
+		qmode.rl.qhealth[3].limit_commands = false;
+		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+			qmode.rl.qhealth[i].prio_level =
+					info->node_info->qos_params.prio_lvl;
+			qmode.rl.qhealth[i].areq_prio =
+					info->node_info->qos_params.prio_rd;
+		}
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			struct msm_bus_bimc_qos_bw qbw;
+			/* If not in fixed mode, update bandwidth */
+			if (info->node_info->lim_bw != lim_bw) {
+				qbw.ws = info->node_info->qos_params.ws;
+				qbw.bw = lim_bw;
+				qbw.gp = info->node_info->qos_params.gp;
+				qbw.thmp = info->node_info->qos_params.thmp;
+				bimc_set_static_qos_bw(qos_base, qos_freq,
+					info->node_info->qport[i], &qbw);
+			}
+		}
+		info->node_info->lim_bw = lim_bw;
+	} else {
+		mode = info->node_info->qos_params.mode;
+		if (mode != BIMC_QOS_MODE_BYPASS) {
+			for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+				qmode.rl.qhealth[i].prio_level =
+					info->node_info->qos_params.prio_lvl;
+				qmode.rl.qhealth[i].areq_prio =
+					info->node_info->qos_params.prio_rd;
+			}
+		}
+	}
+
+	for (i = 0; i < info->node_info->num_qports; i++)
+		msm_bus_bimc_set_qos_prio(qos_base, info->node_info->qport[i],
+				mode, &qmode);
+	return 0;
+}
+
+static bool msm_bus_bimc_update_bw_reg(int mode)
+{
+	bool ret = false;
+
+	if ((mode == BIMC_QOS_MODE_LIMITER)
+		|| (mode == BIMC_QOS_MODE_REGULATOR))
+		ret = true;
+
+	return ret;
+}
+
+static int msm_bus_bimc_qos_init(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	int i;
+	struct msm_bus_bimc_qos_mode qmode = {0};
+
+	if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
+		MSM_BUS_DBG("No QoS Ports to init\n");
+		return 0;
+	}
+
+	switch (info->node_info->qos_params.mode) {
+		/* For now, Fixed and Regulator modes are handled the same way. */
+	case BIMC_QOS_MODE_FIXED:
+	case BIMC_QOS_MODE_REGULATOR:
+		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+			qmode.rl.qhealth[i].prio_level =
+				info->node_info->qos_params.prio_lvl;
+			qmode.rl.qhealth[i].areq_prio =
+				info->node_info->qos_params.prio_rd;
+		}
+		break;
+	case BIMC_QOS_MODE_LIMITER:
+		qmode.rl.qhealth[0].limit_commands = true;
+		qmode.rl.qhealth[1].limit_commands = false;
+		qmode.rl.qhealth[2].limit_commands = false;
+		qmode.rl.qhealth[3].limit_commands = false;
+		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+			qmode.rl.qhealth[i].prio_level =
+				info->node_info->qos_params.prio_lvl;
+			qmode.rl.qhealth[i].areq_prio =
+				info->node_info->qos_params.prio_rd;
+		}
+		break;
+	default:
+		break;
+	}
+
+
+	for (i = 0; i < info->node_info->num_qports; i++)
+		msm_bus_bimc_set_qos_prio(qos_base, info->node_info->qport[i],
+			info->node_info->qos_params.mode, &qmode);
+
+	return 0;
+}
+
+static int msm_bus_bimc_set_bw(struct msm_bus_node_device_type *dev,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq)
+{
+	struct msm_bus_bimc_qos_bw qbw;
+	struct msm_bus_bimc_qos_mode qmode = {0};
+	int i;
+	int64_t bw = 0;
+	int ret = 0;
+	struct msm_bus_node_info_type *info = dev->node_info;
+	int mode;
+
+	if (info && info->num_qports &&
+		((info->qos_params.mode == BIMC_QOS_MODE_LIMITER))) {
+		bw = msm_bus_div64(info->num_qports,
+				dev->node_bw[ACTIVE_CTX].sum_ab);
+
+		MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n",
+				info->id, bw);
+
+		if (!info->qport) {
+			MSM_BUS_DBG("No qos ports to update!\n");
+			goto exit_set_bw;
+		}
+
+		qbw.bw = bw + info->qos_params.bw_buffer;
+
+		/* Default to gp of 5us */
+		qbw.gp = (info->qos_params.gp ?
+				info->qos_params.gp : 5000);
+		/* Default to thmp of 50% */
+		qbw.thmp = (info->qos_params.thmp ?
+				info->qos_params.thmp : 50);
+		/*
+		 * If the BW vote is 0 then set the QoS mode to
+		 * Fixed/0/0.
+		 */
+		if (bw) {
+			qmode.rl.qhealth[0].limit_commands = true;
+			qmode.rl.qhealth[1].limit_commands = false;
+			qmode.rl.qhealth[2].limit_commands = false;
+			qmode.rl.qhealth[3].limit_commands = false;
+			mode = info->qos_params.mode;
+		} else {
+			mode =	BIMC_QOS_MODE_FIXED;
+		}
+
+		for (i = 0; i < info->num_qports; i++) {
+			msm_bus_bimc_set_qos_prio(qos_base,
+				info->qport[i], mode, &qmode);
+			if (bw)
+				bimc_set_static_qos_bw(qos_base, qos_freq,
+					info->qport[i], &qbw);
+		}
+	}
+exit_set_bw:
+	return ret;
+}
+
+int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	if (!bus_dev)
+		return -ENODEV;
+	bus_dev->fabdev->noc_ops.qos_init = msm_bus_bimc_qos_init;
+	bus_dev->fabdev->noc_ops.set_bw = msm_bus_bimc_set_bw;
+	bus_dev->fabdev->noc_ops.limit_mport = msm_bus_bimc_limit_mport;
+	bus_dev->fabdev->noc_ops.update_bw_reg =
+					msm_bus_bimc_update_bw_reg;
+	return 0;
+}
+EXPORT_SYMBOL(msm_bus_bimc_set_ops);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_client_api.c b/drivers/soc/qcom/msm_bus/msm_bus_client_api.c
new file mode 100644
index 0000000..d4443f3
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_client_api.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+
+struct msm_bus_arb_ops arb_ops;
+
+/**
+ * msm_bus_scale_register_client() - Register a client with the msm bus
+ * driver
+ * @pdata: Platform data of the client, containing src, dest, ab, ib.
+ * Return non-zero value in case of success, 0 in case of failure.
+ *
+ * Client data contains the vectors specifying arbitrated bandwidth (ab)
+ * and instantaneous bandwidth (ib) requested between a particular
+ * src and dest.
+ */
+uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
+{
+	if (arb_ops.register_client)
+		return arb_ops.register_client(pdata);
+	pr_err("%s: Bus driver not ready.\n",
+			__func__);
+	return 0;
+}
+EXPORT_SYMBOL(msm_bus_scale_register_client);
+
+/**
+ * msm_bus_scale_client_update_request() - Update the request for bandwidth
+ * from a particular client
+ *
+ * @cl: Handle to the client
+ * @index: Index into the vector, to which the bw and clock values need to be
+ * updated
+ */
+int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index)
+{
+	if (arb_ops.update_request)
+		return arb_ops.update_request(cl, index);
+	pr_err("%s: Bus driver not ready.\n",
+			__func__);
+	return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_client_update_request);
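+
+/*
+ * Usage sketch (hypothetical client; the master/slave IDs and bandwidth
+ * values below are illustrative placeholders, not taken from any target):
+ *
+ *	static struct msm_bus_vectors vecs[] = {
+ *		{ .src = MSM_BUS_MASTER_AMPSS_M0,
+ *		  .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0 },
+ *		{ .src = MSM_BUS_MASTER_AMPSS_M0,
+ *		  .dst = MSM_BUS_SLAVE_EBI_CH0,
+ *		  .ab = 100000000ULL, .ib = 200000000ULL },
+ *	};
+ *	static struct msm_bus_paths usecases[] = {
+ *		{ .num_paths = 1, .vectors = &vecs[0] },
+ *		{ .num_paths = 1, .vectors = &vecs[1] },
+ *	};
+ *	static struct msm_bus_scale_pdata pdata = {
+ *		.usecase = usecases,
+ *		.num_usecases = ARRAY_SIZE(usecases),
+ *		.name = "example-client",
+ *	};
+ *
+ *	uint32_t cl = msm_bus_scale_register_client(&pdata);
+ *
+ *	if (cl) {
+ *		msm_bus_scale_client_update_request(cl, 1);
+ *		...
+ *		msm_bus_scale_unregister_client(cl);
+ *	}
+ */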
+
+/**
+ * msm_bus_scale_client_update_context() - Update the context for a client
+ * @cl: Handle to the client
+ * @active_only: Bool to indicate dual context or active-only context.
+ * @ctx_idx: Voting index to be used when switching contexts.
+ */
+int msm_bus_scale_client_update_context(uint32_t cl, bool active_only,
+							unsigned int ctx_idx)
+{
+	if (arb_ops.update_context)
+		return arb_ops.update_context(cl, active_only, ctx_idx);
+
+	return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_client_update_context);
+
+/**
+ * msm_bus_scale_unregister_client() - Unregister the client from the bus driver
+ * @cl: Handle to the client
+ */
+void msm_bus_scale_unregister_client(uint32_t cl)
+{
+	if (arb_ops.unregister_client)
+		arb_ops.unregister_client(cl);
+	else {
+		pr_err("%s: Bus driver not ready.\n",
+				__func__);
+	}
+}
+EXPORT_SYMBOL(msm_bus_scale_unregister_client);
+
+/**
+ * msm_bus_scale_register() - Register the clients with the msm bus
+ * driver
+ *
+ * @mas: Master ID
+ * @slv: Slave ID
+ * @name: descriptive name for this client
+ * @active_only: Whether or not this bandwidth vote should only be
+ *               effective while the application processor is active.
+ *
+ * Client data contains the vectors specifying arbitrated bandwidth (ab)
+ * and instantaneous bandwidth (ib) requested between a particular
+ * src and dest.
+ */
+struct msm_bus_client_handle*
+msm_bus_scale_register(uint32_t mas, uint32_t slv, char *name, bool active_only)
+{
+	if (arb_ops.register_cl)
+		return arb_ops.register_cl(mas, slv, name, active_only);
+	pr_err("%s: Bus driver not ready.\n",
+			__func__);
+	return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(msm_bus_scale_register);
+
+/**
+ * msm_bus_scale_update_bw() - Update the request for bandwidth
+ * from a particular client
+ *
+ * @cl: Handle to the client
+ * @ab: Arbitrated bandwidth being requested
+ * @ib: Instantaneous bandwidth being requested
+ */
+int msm_bus_scale_update_bw(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+	if (arb_ops.update_bw)
+		return arb_ops.update_bw(cl, ab, ib);
+	pr_err("%s: Bus driver not ready.\n", __func__);
+	return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_update_bw);
+
+/**
+ * msm_bus_scale_update_bw_context() - Update the bandwidth votes for both
+ * contexts of a particular client
+ * @cl: Handle to the client
+ * @act_ab: The average bandwidth (AB) in Bytes/s to be used in active context.
+ * @act_ib: The instantaneous bandwidth (IB) in Bytes/s to be used in active
+ *          context.
+ * @dual_ib: The instantaneous bandwidth (IB) in Bytes/s to be used in dual
+ *           context.
+ * @dual_ab: The average bandwidth (AB) in Bytes/s to be used in dual context.
+ */
+int
+msm_bus_scale_update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab,
+				u64 act_ib, u64 dual_ib, u64 dual_ab)
+{
+	if (arb_ops.update_bw_context)
+		return arb_ops.update_bw_context(cl, act_ab, act_ib,
+							dual_ab, dual_ib);
+
+	return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_update_bw_context);
+
+/**
+ * msm_bus_scale_unregister() - Unregister a client handle obtained from
+ * msm_bus_scale_register()
+ *
+ * @cl: Handle to the client
+ */
+void msm_bus_scale_unregister(struct msm_bus_client_handle *cl)
+{
+	if (arb_ops.unregister)
+		arb_ops.unregister(cl);
+	else
+		pr_err("%s: Bus driver not ready.\n",
+				__func__);
+}
+EXPORT_SYMBOL(msm_bus_scale_unregister);
+
+/**
+ * msm_bus_scale_query_tcs_cmd() - Query for a list of TCS commands for
+ * an aggregated vote of paths from a single usecase.
+ *
+ * @tcs_usecase: pointer to client allocated memory blob
+ * @cl: Handle to the client
+ * @index: Index into the vector, to which the bw and clock values need to be
+ * updated
+ */
+int msm_bus_scale_query_tcs_cmd(struct msm_bus_tcs_usecase *tcs_usecase,
+					uint32_t cl, unsigned int index)
+{
+	if (arb_ops.query_usecase)
+		return arb_ops.query_usecase(tcs_usecase, cl, index);
+	pr_err("%s: Bus driver not ready.\n",
+			__func__);
+	return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_query_tcs_cmd);
+
+/**
+ * msm_bus_scale_query_tcs_cmd_all() - Query for a list of TCS commands for
+ * an aggregated vote of paths for all usecases registered by client
+ *
+ * @tcs_handle: pointer to client allocated memory blob
+ * @cl: Handle to the client
+ */
+int msm_bus_scale_query_tcs_cmd_all(struct msm_bus_tcs_handle *tcs_handle,
+					uint32_t cl)
+{
+	if (arb_ops.query_usecase_all)
+		return arb_ops.query_usecase_all(tcs_handle, cl);
+	pr_err("%s: Bus driver not ready.\n",
+			__func__);
+	return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_query_tcs_cmd_all);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_core.c b/drivers/soc/qcom/msm_bus/msm_bus_core.c
new file mode 100644
index 0000000..6dfb3a0
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_core.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/clk.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+
+static atomic_t num_fab = ATOMIC_INIT(0);
+
+int msm_bus_get_num_fab(void)
+{
+	return atomic_read(&num_fab);
+}
+
+int msm_bus_device_match(struct device *dev, void *id)
+{
+	struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev);
+
+	if (!fabdev) {
+		MSM_BUS_WARN("Fabric %p returning 0\n", fabdev);
+		return 0;
+	}
+	return fabdev->id == *(int *)id;
+}
+
+static void msm_bus_release(struct device *device)
+{
+}
+
+struct bus_type msm_bus_type = {
+	.name      = "msm-bus-type",
+};
+EXPORT_SYMBOL(msm_bus_type);
+
+/**
+ * msm_bus_get_fabric_device() - Search for the fabric device with the
+ * given id on the msm bus
+ * @fabid: Fabric id
+ * Return: Pointer to the fabric device, or NULL if it is not found
+ */
+struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid)
+{
+	struct device *dev;
+	struct msm_bus_fabric_device *fabric;
+
+	dev = bus_find_device(&msm_bus_type, NULL, (void *)&fabid,
+		msm_bus_device_match);
+	if (!dev)
+		return NULL;
+	fabric = to_msm_bus_fabric_device(dev);
+	return fabric;
+}
+
+/**
+ * msm_bus_fabric_device_register() - Registers a fabric on msm bus
+ * @fabdev: Fabric device to be registered
+ */
+int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabdev)
+{
+	int ret = 0;
+
+	fabdev->dev.bus = &msm_bus_type;
+	fabdev->dev.release = msm_bus_release;
+	ret = dev_set_name(&fabdev->dev, "%s", fabdev->name);
+	if (ret) {
+		MSM_BUS_ERR("error setting dev name\n");
+		goto err;
+	}
+
+	ret = device_register(&fabdev->dev);
+	if (ret < 0) {
+		MSM_BUS_ERR("error registering device %d %s\n",
+				ret, fabdev->name);
+		goto err;
+	}
+	atomic_inc(&num_fab);
+err:
+	return ret;
+}
+
+/**
+ * msm_bus_fabric_device_unregister() - Unregisters the fabric
+ * devices from the msm bus
+ */
+void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabdev)
+{
+	device_unregister(&fabdev->dev);
+	atomic_dec(&num_fab);
+}
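+
+/*
+ * Registration sketch (hypothetical fabric; the id, name and algorithm
+ * pointer below are illustrative placeholders):
+ *
+ *	static struct msm_bus_fabric_device fabdev = {
+ *		.id = 1024,
+ *		.name = "example-fabric",
+ *		.algo = &example_fab_algorithm,
+ *	};
+ *
+ *	int ret = msm_bus_fabric_device_register(&fabdev);
+ *	...
+ *	msm_bus_fabric_device_unregister(&fabdev);
+ */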
+
+static void __exit msm_bus_exit(void)
+{
+	bus_unregister(&msm_bus_type);
+}
+
+static int __init msm_bus_init(void)
+{
+	int retval = 0;
+
+	retval = bus_register(&msm_bus_type);
+	if (retval)
+		MSM_BUS_ERR("bus_register error! %d\n",
+			retval);
+	return retval;
+}
+postcore_initcall(msm_bus_init);
+module_exit(msm_bus_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:msm_bus");
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_core.h b/drivers/soc/qcom/msm_bus/msm_bus_core.h
new file mode 100644
index 0000000..0d58c76
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_core.h
@@ -0,0 +1,414 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_CORE_H
+#define _ARCH_ARM_MACH_MSM_BUS_CORE_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/radix-tree.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+
+#define MSM_BUS_DBG(msg, ...) \
+	pr_debug(msg, ## __VA_ARGS__)
+#define MSM_BUS_ERR(msg, ...) \
+	pr_err(msg, ## __VA_ARGS__)
+#define MSM_BUS_WARN(msg, ...) \
+	pr_warn(msg, ## __VA_ARGS__)
+#define MSM_FAB_ERR(msg, ...) \
+	dev_err(&fabric->fabdev.dev, msg, ## __VA_ARGS__)
+
+#define IS_MASTER_VALID(mas) \
+	(((mas >= MSM_BUS_MASTER_FIRST) && (mas <= MSM_BUS_MASTER_LAST)) \
+	 ? 1 : 0)
+#define IS_SLAVE_VALID(slv) \
+	(((slv >= MSM_BUS_SLAVE_FIRST) && (slv <= MSM_BUS_SLAVE_LAST)) ? 1 : 0)
+
+#define INTERLEAVED_BW(fab_pdata, bw, ports) \
+	((fab_pdata->il_flag) ? ((bw < 0) \
+	? -msm_bus_div64((ports), (-bw)) : msm_bus_div64((ports), (bw))) : (bw))
+#define INTERLEAVED_VAL(fab_pdata, n) \
+	((fab_pdata->il_flag) ? (n) : 1)
+#define KBTOB(a) (a * 1000ULL)
+#define MAX_REG_NAME	(50)
+
+enum msm_bus_dbg_op_type {
+	MSM_BUS_DBG_UNREGISTER = -2,
+	MSM_BUS_DBG_REGISTER,
+	MSM_BUS_DBG_OP = 1,
+};
+
+enum msm_bus_hw_sel {
+	MSM_BUS_RPM = 0,
+	MSM_BUS_NOC,
+	MSM_BUS_BIMC,
+};
+
+struct msm_bus_arb_ops {
+	uint32_t (*register_client)(struct msm_bus_scale_pdata *pdata);
+	int (*update_request)(uint32_t cl, unsigned int index);
+	int (*update_context)(uint32_t cl, bool active_only,
+						unsigned int ctx_idx);
+	void (*unregister_client)(uint32_t cl);
+	struct msm_bus_client_handle*
+		(*register_cl)(uint32_t mas, uint32_t slv, char *name,
+						bool active_only);
+	int (*update_bw)(struct msm_bus_client_handle *cl, u64 ab, u64 ib);
+	void (*unregister)(struct msm_bus_client_handle *cl);
+	int (*update_bw_context)(struct msm_bus_client_handle *cl, u64 act_ab,
+				u64 act_ib, u64 dual_ib, u64 dual_ab);
+	int (*query_usecase)(struct msm_bus_tcs_usecase *tcs_usecase,
+				uint32_t cl, unsigned int index);
+	int (*query_usecase_all)(struct msm_bus_tcs_handle *tcs_handle,
+				uint32_t cl);
+
+};
+
+enum {
+	SLAVE_NODE,
+	MASTER_NODE,
+	CLK_NODE,
+	NR_LIM_NODE,
+};
+
+
+extern struct bus_type msm_bus_type;
+extern struct msm_bus_arb_ops arb_ops;
+extern void msm_bus_arb_setops_legacy(struct msm_bus_arb_ops *arb_ops);
+
+struct msm_bus_node_info {
+	unsigned int id;
+	unsigned int priv_id;
+	unsigned int mas_hw_id;
+	unsigned int slv_hw_id;
+	int gateway;
+	int *masterp;
+	int *qport;
+	int num_mports;
+	int *slavep;
+	int num_sports;
+	int *tier;
+	int num_tiers;
+	int ahb;
+	int hw_sel;
+	const char *slaveclk[NUM_CTX];
+	const char *memclk[NUM_CTX];
+	const char *iface_clk_node;
+	unsigned int buswidth;
+	unsigned int ws;
+	unsigned int mode;
+	unsigned int perm_mode;
+	unsigned int prio_lvl;
+	unsigned int prio_rd;
+	unsigned int prio_wr;
+	unsigned int prio1;
+	unsigned int prio0;
+	unsigned int num_thresh;
+	u64 *th;
+	u64 cur_lim_bw;
+	unsigned int mode_thresh;
+	bool dual_conf;
+	u64 *bimc_bw;
+	bool nr_lim;
+	u32 ff;
+	bool rt_mas;
+	u32 bimc_gp;
+	u32 bimc_thmp;
+	u64 floor_bw;
+	const char *name;
+};
+
+struct path_node {
+	uint64_t clk[NUM_CTX];
+	uint64_t bw[NUM_CTX];
+	uint64_t *sel_clk;
+	uint64_t *sel_bw;
+	int next;
+};
+
+struct msm_bus_link_info {
+	uint64_t clk[NUM_CTX];
+	uint64_t *sel_clk;
+	uint64_t memclk;
+	int64_t bw[NUM_CTX];
+	int64_t *sel_bw;
+	int *tier;
+	int num_tiers;
+};
+
+struct nodeclk {
+	struct clk *clk;
+	struct regulator *reg;
+	uint64_t rate;
+	bool dirty;
+	bool enable_only_clk;
+	bool setrate_only_clk;
+	bool enable;
+	char reg_name[MAX_REG_NAME];
+};
+
+struct msm_bus_inode_info {
+	struct msm_bus_node_info *node_info;
+	uint64_t max_bw;
+	uint64_t max_clk;
+	uint64_t cur_lim_bw;
+	uint64_t cur_prg_bw;
+	struct msm_bus_link_info link_info;
+	int num_pnodes;
+	struct path_node *pnode;
+	int commit_index;
+	struct nodeclk nodeclk[NUM_CTX];
+	struct nodeclk memclk[NUM_CTX];
+	struct nodeclk iface_clk;
+	void *hw_data;
+};
+
+struct msm_bus_node_hw_info {
+	bool dirty;
+	unsigned int hw_id;
+	uint64_t bw;
+};
+
+struct msm_bus_hw_algorithm {
+	int (*allocate_commit_data)(struct msm_bus_fabric_registration
+		*fab_pdata, void **cdata, int ctx);
+	void *(*allocate_hw_data)(struct platform_device *pdev,
+		struct msm_bus_fabric_registration *fab_pdata);
+	void (*node_init)(void *hw_data, struct msm_bus_inode_info *info);
+	void (*free_commit_data)(void *cdata);
+	void (*update_bw)(struct msm_bus_inode_info *hop,
+		struct msm_bus_inode_info *info,
+		struct msm_bus_fabric_registration *fab_pdata,
+		void *sel_cdata, int *master_tiers,
+		int64_t add_bw);
+	void (*fill_cdata_buffer)(int *curr, char *buf, const int max_size,
+		void *cdata, int nmasters, int nslaves, int ntslaves);
+	int (*commit)(struct msm_bus_fabric_registration
+		*fab_pdata, void *hw_data, void **cdata);
+	int (*port_unhalt)(uint32_t haltid, uint8_t mport);
+	int (*port_halt)(uint32_t haltid, uint8_t mport);
+	void (*config_master)(struct msm_bus_fabric_registration *fab_pdata,
+		struct msm_bus_inode_info *info,
+		uint64_t req_clk, uint64_t req_bw);
+	void (*config_limiter)(struct msm_bus_fabric_registration *fab_pdata,
+		struct msm_bus_inode_info *info);
+	bool (*update_bw_reg)(int mode);
+};
+
+struct msm_bus_fabric_device {
+	int id;
+	const char *name;
+	struct device dev;
+	const struct msm_bus_fab_algorithm *algo;
+	const struct msm_bus_board_algorithm *board_algo;
+	struct msm_bus_hw_algorithm hw_algo;
+	int visited;
+	int num_nr_lim;
+	u64 nr_lim_thresh;
+	u32 eff_fact;
+};
+#define to_msm_bus_fabric_device(d) container_of(d, \
+		struct msm_bus_fabric_device, dev)
+
+struct msm_bus_fabric {
+	struct msm_bus_fabric_device fabdev;
+	int ahb;
+	void *cdata[NUM_CTX];
+	bool arb_dirty;
+	bool clk_dirty;
+	struct radix_tree_root fab_tree;
+	int num_nodes;
+	struct list_head gateways;
+	struct msm_bus_inode_info info;
+	struct msm_bus_fabric_registration *pdata;
+	void *hw_data;
+};
+#define to_msm_bus_fabric(d) container_of(d, \
+	struct msm_bus_fabric, fabdev)
+
+
+struct msm_bus_fab_algorithm {
+	int (*update_clks)(struct msm_bus_fabric_device *fabdev,
+		struct msm_bus_inode_info *pme, int index,
+		uint64_t curr_clk, uint64_t req_clk,
+		uint64_t bwsum, int flag, int ctx,
+		unsigned int cl_active_flag);
+	int (*port_halt)(struct msm_bus_fabric_device *fabdev, int portid);
+	int (*port_unhalt)(struct msm_bus_fabric_device *fabdev, int portid);
+	int (*commit)(struct msm_bus_fabric_device *fabdev);
+	struct msm_bus_inode_info *(*find_node)(struct msm_bus_fabric_device
+		*fabdev, int id);
+	struct msm_bus_inode_info *(*find_gw_node)(struct msm_bus_fabric_device
+		*fabdev, int id);
+	struct list_head *(*get_gw_list)(struct msm_bus_fabric_device *fabdev);
+	void (*update_bw)(struct msm_bus_fabric_device *fabdev, struct
+		msm_bus_inode_info * hop, struct msm_bus_inode_info *info,
+		int64_t add_bw, int *master_tiers, int ctx);
+	void (*config_master)(struct msm_bus_fabric_device *fabdev,
+		struct msm_bus_inode_info *info, uint64_t req_clk,
+		uint64_t req_bw);
+	void (*config_limiter)(struct msm_bus_fabric_device *fabdev,
+		struct msm_bus_inode_info *info);
+};
+
+struct msm_bus_board_algorithm {
+	int board_nfab;
+	void (*assign_iids)(struct msm_bus_fabric_registration *fabreg,
+		int fabid);
+	int (*get_iid)(int id);
+};
+
+/**
+ * Used to store the list of fabrics and other info to be
+ * maintained outside the fabric structure.
+ * Used while calculating paths, and to find fabric pointers.
+ */
+struct msm_bus_fabnodeinfo {
+	struct list_head list;
+	struct msm_bus_inode_info *info;
+};
+
+struct msm_bus_client {
+	int id;
+	struct msm_bus_scale_pdata *pdata;
+	int *src_pnode;
+	int curr;
+	struct device **src_devs;
+};
+
+uint64_t msm_bus_div64(uint64_t num, unsigned int base);
+int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabric);
+void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabric);
+struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid);
+int msm_bus_get_num_fab(void);
+
+
+int msm_bus_hw_fab_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+void msm_bus_board_init(struct msm_bus_fabric_registration *pdata);
+#if defined(CONFIG_MSM_RPM_SMD)
+int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+	*fab_pdata, void *hw_data, void **cdata);
+void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size,
+	void *cdata, int nmasters, int nslaves, int ntslaves);
+#else
+static inline int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo)
+{
+	return 0;
+}
+static inline int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+	*fab_pdata, void *hw_data, void **cdata)
+{
+	return 0;
+}
+static inline void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf,
+	const int max_size, void *cdata, int nmasters, int nslaves,
+	int ntslaves)
+{
+}
+#endif
+
+int msm_bus_noc_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+int msm_bus_bimc_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_QCOM_BUS_SCALING)
+void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index,
+	uint32_t cl);
+void msm_bus_dbg_commit_data(const char *fabname, void *cdata,
+	int nmasters, int nslaves, int ntslaves, int op);
+int msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata);
+int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+						u64 ab, u64 ib);
+void msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata);
+
+#else
+static inline void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata,
+	int index, uint32_t cl)
+{
+}
+static inline void msm_bus_dbg_commit_data(const char *fabname,
+	void *cdata, int nmasters, int nslaves, int ntslaves,
+	int op)
+{
+}
+static inline void
+msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata)
+{
+}
+
+static inline int
+msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+						u64 ab, u64 ib)
+{
+	return 0;
+}
+
+static inline int
+msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_CORESIGHT
+int msmbus_coresight_init(struct platform_device *pdev);
+void msmbus_coresight_remove(struct platform_device *pdev);
+int msmbus_coresight_init_adhoc(struct platform_device *pdev,
+		struct device_node *of_node);
+void msmbus_coresight_remove_adhoc(struct platform_device *pdev);
+#else
+static inline int msmbus_coresight_init(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static inline void msmbus_coresight_remove(struct platform_device *pdev)
+{
+}
+
+static inline int msmbus_coresight_init_adhoc(struct platform_device *pdev,
+		struct device_node *of_node)
+{
+	return 0;
+}
+
+static inline void msmbus_coresight_remove_adhoc(struct platform_device *pdev)
+{
+}
+#endif
+
+
+#ifdef CONFIG_OF
+void msm_bus_of_get_nfab(struct platform_device *pdev,
+		struct msm_bus_fabric_registration *pdata);
+struct msm_bus_fabric_registration
+	*msm_bus_of_get_fab_data(struct platform_device *pdev);
+static inline void msm_bus_board_set_nfab(struct msm_bus_fabric_registration
+		*pdata,	int nfab)
+{
+}
+#else
+void msm_bus_board_set_nfab(struct msm_bus_fabric_registration *pdata,
+	int nfab);
+static inline void msm_bus_of_get_nfab(struct platform_device *pdev,
+		struct msm_bus_fabric_registration *pdata)
+{
+}
+
+static inline struct msm_bus_fabric_registration
+	*msm_bus_of_get_fab_data(struct platform_device *pdev)
+{
+	return NULL;
+}
+#endif
+
+#endif /*_ARCH_ARM_MACH_MSM_BUS_CORE_H*/
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
new file mode 100644
index 0000000..b6abc56
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
@@ -0,0 +1,974 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2010-2012, 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/rtmutex.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/hrtimer.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_msm_bus.h>
+
+#define MAX_BUFF_SIZE 4096
+#define FILL_LIMIT 128
+
+static struct dentry *clients;
+static struct dentry *dir;
+static DEFINE_MUTEX(msm_bus_dbg_fablist_lock);
+static DEFINE_RT_MUTEX(msm_bus_dbg_cllist_lock);
+struct msm_bus_dbg_state {
+	uint32_t cl;
+	uint8_t enable;
+	uint8_t current_index;
+} clstate;
+
+struct msm_bus_cldata {
+	const struct msm_bus_scale_pdata *pdata;
+	const struct msm_bus_client_handle *handle;
+	int index;
+	uint32_t clid;
+	int size;
+	struct dentry *file;
+	struct list_head list;
+	char buffer[MAX_BUFF_SIZE];
+};
+
+struct msm_bus_fab_list {
+	const char *name;
+	int size;
+	struct dentry *file;
+	struct list_head list;
+	char buffer[MAX_BUFF_SIZE];
+};
+
+static char *rules_buf;
+
+LIST_HEAD(fabdata_list);
+LIST_HEAD(cl_list);
+
+/**
+ * The following structures and functions are used for
+ * the test-client which can be created at run-time.
+ */
+
+static struct msm_bus_vectors init_vectors[1];
+static struct msm_bus_vectors current_vectors[1];
+static struct msm_bus_vectors requested_vectors[1];
+
+static struct msm_bus_paths shell_client_usecases[] = {
+	{
+		.num_paths = ARRAY_SIZE(init_vectors),
+		.vectors = init_vectors,
+	},
+	{
+		.num_paths = ARRAY_SIZE(current_vectors),
+		.vectors = current_vectors,
+	},
+	{
+		.num_paths = ARRAY_SIZE(requested_vectors),
+		.vectors = requested_vectors,
+	},
+};
+
+static struct msm_bus_scale_pdata shell_client = {
+	.usecase = shell_client_usecases,
+	.num_usecases = ARRAY_SIZE(shell_client_usecases),
+	.name = "test-client",
+};
+
+static void msm_bus_dbg_init_vectors(void)
+{
+	init_vectors[0].src = -1;
+	init_vectors[0].dst = -1;
+	init_vectors[0].ab = 0;
+	init_vectors[0].ib = 0;
+	current_vectors[0].src = -1;
+	current_vectors[0].dst = -1;
+	current_vectors[0].ab = 0;
+	current_vectors[0].ib = 0;
+	requested_vectors[0].src = -1;
+	requested_vectors[0].dst = -1;
+	requested_vectors[0].ab = 0;
+	requested_vectors[0].ib = 0;
+	clstate.enable = 0;
+	clstate.current_index = 0;
+}
+
+static int msm_bus_dbg_update_cl_request(uint32_t cl)
+{
+	int ret = 0;
+
+	if (clstate.current_index < 2)
+		clstate.current_index = 2;
+	else {
+		clstate.current_index = 1;
+		current_vectors[0].ab = requested_vectors[0].ab;
+		current_vectors[0].ib = requested_vectors[0].ib;
+	}
+
+	if (clstate.enable) {
+		MSM_BUS_DBG("Updating request for shell client, index: %d\n",
+			clstate.current_index);
+		ret = msm_bus_scale_client_update_request(clstate.cl,
+			clstate.current_index);
+	} else
+		MSM_BUS_DBG("Enable bit not set. Skipping update request\n");
+
+	return ret;
+}
+
+static void msm_bus_dbg_unregister_client(uint32_t cl)
+{
+	MSM_BUS_DBG("Unregistering shell client\n");
+	msm_bus_scale_unregister_client(clstate.cl);
+	clstate.cl = 0;
+}
+
+static uint32_t msm_bus_dbg_register_client(void)
+{
+	int ret = 0;
+
+	if (init_vectors[0].src != requested_vectors[0].src) {
+		MSM_BUS_DBG("Shell client master changed. Unregistering\n");
+		msm_bus_dbg_unregister_client(clstate.cl);
+	}
+	if (init_vectors[0].dst != requested_vectors[0].dst) {
+		MSM_BUS_DBG("Shell client slave changed. Unregistering\n");
+		msm_bus_dbg_unregister_client(clstate.cl);
+	}
+
+	current_vectors[0].src = init_vectors[0].src;
+	requested_vectors[0].src = init_vectors[0].src;
+	current_vectors[0].dst = init_vectors[0].dst;
+	requested_vectors[0].dst = init_vectors[0].dst;
+
+	if (!clstate.enable) {
+		MSM_BUS_DBG("Enable bit not set, skipping registration: cl %u\n",
+			clstate.cl);
+		return 0;
+	}
+
+	if (clstate.cl) {
+		MSM_BUS_DBG("Client already registered, skipping registration\n");
+		return clstate.cl;
+	}
+
+	MSM_BUS_DBG("Registering shell client\n");
+	ret = msm_bus_scale_register_client(&shell_client);
+	return ret;
+}
+
+static int msm_bus_dbg_mas_get(void  *data, u64 *val)
+{
+	*val = init_vectors[0].src;
+	MSM_BUS_DBG("Get master: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_mas_set(void  *data, u64 val)
+{
+	init_vectors[0].src = val;
+	MSM_BUS_DBG("Set master: %llu\n", val);
+	clstate.cl = msm_bus_dbg_register_client();
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(shell_client_mas_fops, msm_bus_dbg_mas_get,
+	msm_bus_dbg_mas_set, "%llu\n");
+
+static int msm_bus_dbg_slv_get(void *data, u64 *val)
+{
+	*val = init_vectors[0].dst;
+	MSM_BUS_DBG("Get slave: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_slv_set(void *data, u64 val)
+{
+	init_vectors[0].dst = val;
+	MSM_BUS_DBG("Set slave: %llu\n", val);
+	clstate.cl = msm_bus_dbg_register_client();
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(shell_client_slv_fops, msm_bus_dbg_slv_get,
+	msm_bus_dbg_slv_set, "%llu\n");
+
+static int msm_bus_dbg_ab_get(void *data, u64 *val)
+{
+	*val = requested_vectors[0].ab;
+	MSM_BUS_DBG("Get ab: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_ab_set(void *data, u64 val)
+{
+	requested_vectors[0].ab = val;
+	MSM_BUS_DBG("Set ab: %llu\n", val);
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(shell_client_ab_fops, msm_bus_dbg_ab_get,
+	msm_bus_dbg_ab_set, "%llu\n");
+
+static int msm_bus_dbg_ib_get(void *data, u64 *val)
+{
+	*val = requested_vectors[0].ib;
+	MSM_BUS_DBG("Get ib: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_ib_set(void *data, u64 val)
+{
+	requested_vectors[0].ib = val;
+	MSM_BUS_DBG("Set ib: %llu\n", val);
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(shell_client_ib_fops, msm_bus_dbg_ib_get,
+	msm_bus_dbg_ib_set, "%llu\n");
+
+static int msm_bus_dbg_en_get(void *data, u64 *val)
+{
+	*val = clstate.enable;
+	MSM_BUS_DBG("Get enable: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_en_set(void *data, u64 val)
+{
+	int ret = 0;
+
+	clstate.enable = val;
+	if (clstate.enable) {
+		if (!clstate.cl) {
+			MSM_BUS_DBG("client: %u\n", clstate.cl);
+			clstate.cl = msm_bus_dbg_register_client();
+			if (clstate.cl)
+				ret = msm_bus_dbg_update_cl_request(clstate.cl);
+		} else {
+			MSM_BUS_DBG("update request for cl: %u\n", clstate.cl);
+			ret = msm_bus_dbg_update_cl_request(clstate.cl);
+		}
+	}
+
+	MSM_BUS_DBG("Set enable: %llu\n", val);
+	return ret;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(shell_client_en_fops, msm_bus_dbg_en_get,
+	msm_bus_dbg_en_set, "%llu\n");
+
+/*
+ * The following functions are used for viewing the client data
+ * and changing the client request at run-time.
+ */
+
+static ssize_t client_data_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	int bsize = 0;
+	uint32_t cl = (uint32_t)(uintptr_t)file->private_data;
+	struct msm_bus_cldata *cldata = NULL;
+	const struct msm_bus_client_handle *handle = file->private_data;
+	int found = 0;
+	ssize_t ret;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if ((cldata->clid == cl) ||
+			(cldata->handle && (cldata->handle == handle))) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found) {
+		rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+		return 0;
+	}
+
+	bsize = cldata->size;
+	ret = simple_read_from_buffer(buf, count, ppos,
+		cldata->buffer, bsize);
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	return ret;
+}
+
+static const struct file_operations client_data_fops = {
+	.open		= simple_open,
+	.read		= client_data_read,
+};
+
+struct dentry *msm_bus_dbg_create(const char *name, mode_t mode,
+	struct dentry *dent, uint32_t clid)
+{
+	if (dent == NULL) {
+		MSM_BUS_DBG("debugfs not ready yet\n");
+		return NULL;
+	}
+	return debugfs_create_file(name, mode, dent, (void *)(uintptr_t)clid,
+		&client_data_fops);
+}
+
+int msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata)
+{
+	struct msm_bus_cldata *cldata;
+
+	cldata = kzalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL);
+	if (!cldata) {
+		MSM_BUS_DBG("Failed to allocate memory for client data\n");
+		return -ENOMEM;
+	}
+	cldata->handle = pdata;
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_add_tail(&cldata->list, &cl_list);
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+	return 0;
+}
+
+int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+						u64 ab, u64 ib)
+{
+	struct msm_bus_cldata *cldata;
+	int i;
+	struct timespec ts;
+	bool found = false;
+	char *buf = NULL;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->handle == pdata) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+		return -ENOENT;
+	}
+
+	if (cldata->file == NULL) {
+		if (pdata->name == NULL) {
+			MSM_BUS_DBG("Client doesn't have a name\n");
+			rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+			return -EINVAL;
+		}
+		cldata->file = debugfs_create_file(pdata->name, 0444,
+				clients, (void *)pdata, &client_data_fops);
+	}
+
+	if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT))
+		i = cldata->size;
+	else {
+		i = 0;
+		cldata->size = 0;
+	}
+	buf = cldata->buffer;
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%ld.%09ld\n",
+		ts.tv_sec, ts.tv_nsec);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "master: ");
+
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ", pdata->mas);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslave : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ", pdata->slv);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab     : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ", ab);
+
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib     : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ", ib);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+	cldata->size = i;
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec,
+		pdata->name, pdata->mas, pdata->slv, ab, ib);
+
+	return i;
+}
+
+void msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata)
+{
+	struct msm_bus_cldata *cldata = NULL;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->handle == pdata) {
+			debugfs_remove(cldata->file);
+			list_del(&cldata->list);
+			kfree(cldata);
+			break;
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+}
+
+static int msm_bus_dbg_record_client(const struct msm_bus_scale_pdata *pdata,
+	int index, uint32_t clid, struct dentry *file)
+{
+	struct msm_bus_cldata *cldata;
+
+	cldata = kzalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL);
+	if (!cldata) {
+		MSM_BUS_DBG("Failed to allocate memory for client data\n");
+		return -ENOMEM;
+	}
+	cldata->pdata = pdata;
+	cldata->index = index;
+	cldata->clid = clid;
+	cldata->file = file;
+	cldata->size = 0;
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_add_tail(&cldata->list, &cl_list);
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+	return 0;
+}
+
+static void msm_bus_dbg_free_client(uint32_t clid)
+{
+	struct msm_bus_cldata *cldata = NULL;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->clid == clid) {
+			debugfs_remove(cldata->file);
+			list_del(&cldata->list);
+			kfree(cldata);
+			break;
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+}
+
+static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata,
+	int index, uint32_t clid)
+{
+	int i = 0, j;
+	char *buf = NULL;
+	struct msm_bus_cldata *cldata = NULL;
+	struct timespec ts;
+	int found = 0;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->clid == clid) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found) {
+		rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+		return -ENOENT;
+	}
+
+	if (cldata->file == NULL) {
+		if (pdata->name == NULL) {
+			rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+			MSM_BUS_DBG("Client doesn't have a name\n");
+			return -EINVAL;
+		}
+		cldata->file = msm_bus_dbg_create(pdata->name, 0444,
+			clients, clid);
+	}
+
+	if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT))
+		i = cldata->size;
+	else {
+		i = 0;
+		cldata->size = 0;
+	}
+	buf = cldata->buffer;
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%ld.%09ld\n",
+		ts.tv_sec, ts.tv_nsec);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "curr   : %d\n", index);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "masters: ");
+
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ",
+			pdata->usecase[index].vectors[j].src);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslaves : ");
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ",
+			pdata->usecase[index].vectors[j].dst);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab     : ");
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ",
+			pdata->usecase[index].vectors[j].ab);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib     : ");
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ",
+			pdata->usecase[index].vectors[j].ib);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec,
+		pdata->name,
+		pdata->usecase[index].vectors[j].src,
+		pdata->usecase[index].vectors[j].dst,
+		pdata->usecase[index].vectors[j].ab,
+		pdata->usecase[index].vectors[j].ib);
+
+	cldata->index = index;
+	cldata->size = i;
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	return i;
+}
+
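+/*
+ * Writes to the client-data/update-request debugfs file are expected in
+ * the form "<client-name> <usecase-index>", e.g. "gfx3d 1" (client name
+ * purely illustrative); the named client is looked up in cl_list and
+ * switched to the requested usecase.
+ */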
+static ssize_t  msm_bus_dbg_update_request_write(struct file *file,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct msm_bus_cldata *cldata;
+	unsigned long index = 0;
+	int ret = 0;
+	char *chid;
+	char *buf = kmalloc((sizeof(char) * (cnt + 1)), GFP_KERNEL);
+	int found = 0;
+	uint32_t clid;
+	ssize_t res = cnt;
+
+	if (!buf)
+		return -ENOMEM;
+
+	if (cnt == 0) {
+		res = 0;
+		goto out;
+	}
+	if (copy_from_user(buf, ubuf, cnt)) {
+		res = -EFAULT;
+		goto out;
+	}
+	buf[cnt] = '\0';
+	chid = buf;
+	MSM_BUS_DBG("buffer: %s\n size: %zu\n", buf, cnt);
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->pdata && cldata->pdata->name &&
+				strnstr(chid, cldata->pdata->name, cnt)) {
+			found = 1;
+			strsep(&chid, " ");
+			if (chid) {
+				ret = kstrtoul(chid, 10, &index);
+				if (ret) {
+					MSM_BUS_DBG(
+						"Index conversion failed\n");
+					rt_mutex_unlock(
+						&msm_bus_dbg_cllist_lock);
+					res = -EFAULT;
+					goto out;
+				}
+			} else {
+				MSM_BUS_DBG(
+				"Error parsing input. Index not found\n");
+				found = 0;
+			}
+			if (index >= cldata->pdata->num_usecases) {
+				MSM_BUS_DBG("Invalid index!\n");
+				rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+				res = -EINVAL;
+				goto out;
+			}
+			clid = cldata->clid;
+			break;
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	if (found)
+		msm_bus_scale_client_update_request(clid, index);
+
+out:
+	kfree(buf);
+	return res;
+}
+
+/*
+ * The following functions are used for viewing the commit data
+ * for each fabric.
+ */
+static ssize_t fabric_data_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct msm_bus_fab_list *fablist = NULL;
+	int bsize = 0;
+	ssize_t ret;
+	const char *name = file->private_data;
+	int found = 0;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		if (strcmp(fablist->name, name) == 0) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -ENOENT;
+	}
+	bsize = fablist->size;
+	ret = simple_read_from_buffer(buf, count, ppos,
+		fablist->buffer, bsize);
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return ret;
+}
+
+static const struct file_operations fabric_data_fops = {
+	.open		= simple_open,
+	.read		= fabric_data_read,
+};
+
+static ssize_t rules_dbg_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	ssize_t ret;
+
+	memset(rules_buf, 0, MAX_BUFF_SIZE);
+	print_rules_buf(rules_buf, MAX_BUFF_SIZE);
+	ret = simple_read_from_buffer(buf, count, ppos,
+		rules_buf, MAX_BUFF_SIZE);
+	return ret;
+}
+
+static int rules_dbg_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations rules_dbg_fops = {
+	.open		= rules_dbg_open,
+	.read		= rules_dbg_read,
+};
+
+static int msm_bus_dbg_record_fabric(const char *fabname, struct dentry *file)
+{
+	struct msm_bus_fab_list *fablist;
+	int ret = 0;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	fablist = kzalloc(sizeof(struct msm_bus_fab_list), GFP_KERNEL);
+	if (!fablist) {
+		MSM_BUS_DBG("Failed to allocate memory for commit data\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	fablist->name = fabname;
+	fablist->size = 0;
+	list_add_tail(&fablist->list, &fabdata_list);
+err:
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return ret;
+}
+
+static void msm_bus_dbg_free_fabric(const char *fabname)
+{
+	struct msm_bus_fab_list *fablist = NULL;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		if (strcmp(fablist->name, fabname) == 0) {
+			debugfs_remove(fablist->file);
+			list_del(&fablist->list);
+			kfree(fablist);
+			break;
+		}
+	}
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+}
+
+static int msm_bus_dbg_fill_fab_buffer(const char *fabname,
+	void *cdata, int nmasters, int nslaves,
+	int ntslaves)
+{
+	int i;
+	char *buf = NULL;
+	struct msm_bus_fab_list *fablist = NULL;
+	struct timespec ts;
+	int found = 0;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		if (strcmp(fablist->name, fabname) == 0) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -ENOENT;
+	}
+
+	if (fablist->file == NULL) {
+		MSM_BUS_DBG("Fabric dbg entry does not exist\n");
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -EFAULT;
+	}
+
+	if (fablist->size < MAX_BUFF_SIZE - 256)
+		i = fablist->size;
+	else {
+		i = 0;
+		fablist->size = 0;
+	}
+	buf = fablist->buffer;
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%ld.%09ld\n",
+		ts.tv_sec, ts.tv_nsec);
+
+	msm_bus_rpm_fill_cdata_buffer(&i, buf, MAX_BUFF_SIZE, cdata,
+		nmasters, nslaves, ntslaves);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	fablist->size = i;
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return 0;
+}
+
+static const struct file_operations msm_bus_dbg_update_request_fops = {
+	.open = simple_open,
+	.write = msm_bus_dbg_update_request_write,
+};
+
+static ssize_t msm_bus_dbg_dump_clients_read(struct file *file,
+	char __user *buf, size_t count, loff_t *ppos)
+{
+	int j, cnt;
+	char msg[50];
+	struct msm_bus_cldata *cldata = NULL;
+
+	cnt = scnprintf(msg, 50,
+		"\nDumping current client votes to trace log\n");
+	if (*ppos)
+		goto exit_dump_clients_read;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (IS_ERR_OR_NULL(cldata->pdata))
+			continue;
+		for (j = 0; j < cldata->pdata->usecase->num_paths; j++) {
+			if (cldata->index == -1)
+				continue;
+			trace_bus_client_status(
+			cldata->pdata->name,
+			cldata->pdata->usecase[cldata->index].vectors[j].src,
+			cldata->pdata->usecase[cldata->index].vectors[j].dst,
+			cldata->pdata->usecase[cldata->index].vectors[j].ab,
+			cldata->pdata->usecase[cldata->index].vectors[j].ib,
+			cldata->pdata->active_only);
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+exit_dump_clients_read:
+	return simple_read_from_buffer(buf, count, ppos, msg, cnt);
+}
+
+static const struct file_operations msm_bus_dbg_dump_clients_fops = {
+	.open		= simple_open,
+	.read		= msm_bus_dbg_dump_clients_read,
+};
+
+/**
+ * msm_bus_dbg_client_data() - Add debug data for clients
+ * @pdata: Platform data of the client
+ * @index: The current index or operation to be performed
+ * @clid: Client handle obtained during registration
+ */
+void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index,
+	uint32_t clid)
+{
+	struct dentry *file = NULL;
+
+	if (index == MSM_BUS_DBG_REGISTER) {
+		msm_bus_dbg_record_client(pdata, index, clid, file);
+		if (!pdata->name) {
+			MSM_BUS_DBG("Cannot create debugfs entry. Null name\n");
+			return;
+		}
+	} else if (index == MSM_BUS_DBG_UNREGISTER) {
+		msm_bus_dbg_free_client(clid);
+		MSM_BUS_DBG("Client %d unregistered\n", clid);
+	} else
+		msm_bus_dbg_fill_cl_buffer(pdata, index, clid);
+}
+EXPORT_SYMBOL(msm_bus_dbg_client_data);
+
+/**
+ * msm_bus_dbg_commit_data() - Add commit data from fabrics
+ * @fabname: Fabric name specified in platform data
+ * @cdata: Commit Data
+ * @nmasters: Number of masters attached to fabric
+ * @nslaves: Number of slaves attached to fabric
+ * @ntslaves: Number of tiered slaves attached to fabric
+ * @op: Operation to be performed
+ */
+void msm_bus_dbg_commit_data(const char *fabname, void *cdata,
+	int nmasters, int nslaves, int ntslaves, int op)
+{
+	struct dentry *file = NULL;
+
+	if (op == MSM_BUS_DBG_REGISTER)
+		msm_bus_dbg_record_fabric(fabname, file);
+	else if (op == MSM_BUS_DBG_UNREGISTER)
+		msm_bus_dbg_free_fabric(fabname);
+	else
+		msm_bus_dbg_fill_fab_buffer(fabname, cdata, nmasters,
+			nslaves, ntslaves);
+}
+EXPORT_SYMBOL(msm_bus_dbg_commit_data);
+
+static int __init msm_bus_debugfs_init(void)
+{
+	struct dentry *commit, *shell_client, *rules_dbg;
+	struct msm_bus_fab_list *fablist;
+	struct msm_bus_cldata *cldata = NULL;
+	uint64_t val = 0;
+
+	dir = debugfs_create_dir("msm-bus-dbg", NULL);
+	if ((!dir) || IS_ERR(dir)) {
+		MSM_BUS_ERR("Couldn't create msm-bus-dbg\n");
+		goto err;
+	}
+
+	clients = debugfs_create_dir("client-data", dir);
+	if ((!clients) || IS_ERR(clients)) {
+		MSM_BUS_ERR("Couldn't create clients\n");
+		goto err;
+	}
+
+	shell_client = debugfs_create_dir("shell-client", dir);
+	if ((!shell_client) || IS_ERR(shell_client)) {
+		MSM_BUS_ERR("Couldn't create shell-client\n");
+		goto err;
+	}
+
+	commit = debugfs_create_dir("commit-data", dir);
+	if ((!commit) || IS_ERR(commit)) {
+		MSM_BUS_ERR("Couldn't create commit\n");
+		goto err;
+	}
+
+	rules_dbg = debugfs_create_dir("rules-dbg", dir);
+	if ((!rules_dbg) || IS_ERR(rules_dbg)) {
+		MSM_BUS_ERR("Couldn't create rules-dbg\n");
+		goto err;
+	}
+
+	if (debugfs_create_file("print_rules", 0644,
+		rules_dbg, &val, &rules_dbg_fops) == NULL)
+		goto err;
+
+	if (debugfs_create_file("update_request", 0644,
+		shell_client, &val, &shell_client_en_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("ib", 0644, shell_client, &val,
+		&shell_client_ib_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("ab", 0644, shell_client, &val,
+		&shell_client_ab_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("slv", 0644, shell_client,
+		&val, &shell_client_slv_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("mas", 0644, shell_client,
+		&val, &shell_client_mas_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("update-request", 0644,
+		clients, NULL, &msm_bus_dbg_update_request_fops) == NULL)
+		goto err;
+
+	rules_buf = kzalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+	if (!rules_buf) {
+		MSM_BUS_ERR("Failed to alloc rules_buf");
+		goto err;
+	}
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->pdata) {
+			if (cldata->pdata->name == NULL) {
+				MSM_BUS_DBG("Client name not found\n");
+				continue;
+			}
+			cldata->file = msm_bus_dbg_create(cldata->pdata->name,
+					0444, clients, cldata->clid);
+		} else if (cldata->handle) {
+			if (cldata->handle->name == NULL) {
+				MSM_BUS_DBG("Client doesn't have a name\n");
+				continue;
+			}
+			cldata->file = debugfs_create_file(cldata->handle->name,
+							0444, clients,
+							(void *)cldata->handle,
+							&client_data_fops);
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	if (debugfs_create_file("dump_clients", 0644,
+		clients, NULL, &msm_bus_dbg_dump_clients_fops) == NULL)
+		goto err;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		fablist->file = debugfs_create_file(fablist->name, 0444,
+			commit, (void *)fablist->name, &fabric_data_fops);
+		if (fablist->file == NULL) {
+			MSM_BUS_DBG("Cannot create files for commit data\n");
+			kfree(rules_buf);
+			mutex_unlock(&msm_bus_dbg_fablist_lock);
+			goto err;
+		}
+	}
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+
+	msm_bus_dbg_init_vectors();
+	return 0;
+err:
+	debugfs_remove_recursive(dir);
+	return -ENODEV;
+}
+late_initcall(msm_bus_debugfs_init);
+
+static void __exit msm_bus_dbg_teardown(void)
+{
+	struct msm_bus_fab_list *fablist = NULL, *fablist_temp;
+	struct msm_bus_cldata *cldata = NULL, *cldata_temp;
+
+	debugfs_remove_recursive(dir);
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry_safe(cldata, cldata_temp, &cl_list, list) {
+		list_del(&cldata->list);
+		kfree(cldata);
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry_safe(fablist, fablist_temp, &fabdata_list, list) {
+		list_del(&fablist->list);
+		kfree(fablist);
+	}
+	kfree(rules_buf);
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+}
+module_exit(msm_bus_dbg_teardown);
+MODULE_DESCRIPTION("Debugfs for msm bus scaling client");
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
new file mode 100644
index 0000000..f7513b0
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
@@ -0,0 +1,1255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2016, 2018, Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <soc/qcom/rpm-smd.h>
+#include <trace/events/trace_msm_bus.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_bimc.h"
+
+static int msm_bus_dev_init_qos(struct device *dev, void *data);
+
+ssize_t bw_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct msm_bus_node_info_type *node_info = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int i;
+	int off = 0;
+
+	bus_node = to_msm_bus_node(dev);
+	if (!bus_node)
+		return -EINVAL;
+
+	node_info = bus_node->node_info;
+
+	for (i = 0; i < bus_node->num_lnodes; i++) {
+		if (!bus_node->lnode_list[i].in_use)
+			continue;
+		off += scnprintf((buf + off), PAGE_SIZE,
+		"[%d]:%s:Act_IB %llu Act_AB %llu Slp_IB %llu Slp_AB %llu\n",
+			i, bus_node->lnode_list[i].cl_name,
+			bus_node->lnode_list[i].lnode_ib[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ab[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ib[DUAL_CTX],
+			bus_node->lnode_list[i].lnode_ab[DUAL_CTX]);
+	}
+	off += scnprintf((buf + off), PAGE_SIZE,
+	"Max_Act_IB %llu Sum_Act_AB %llu Act_Util_fact %d Act_Vrail_comp %d\n",
+		bus_node->node_bw[ACTIVE_CTX].max_ib,
+		bus_node->node_bw[ACTIVE_CTX].sum_ab,
+		bus_node->node_bw[ACTIVE_CTX].util_used,
+		bus_node->node_bw[ACTIVE_CTX].vrail_used);
+	off += scnprintf((buf + off), PAGE_SIZE,
+	"Max_Slp_IB %llu Sum_Slp_AB %llu Slp_Util_fact %d Slp_Vrail_comp %d\n",
+		bus_node->node_bw[DUAL_CTX].max_ib,
+		bus_node->node_bw[DUAL_CTX].sum_ab,
+		bus_node->node_bw[DUAL_CTX].util_used,
+		bus_node->node_bw[DUAL_CTX].vrail_used);
+	return off;
+}
+
+ssize_t bw_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	return count;
+}
+
+DEVICE_ATTR_RW(bw);
+
+struct static_rules_type {
+	int num_rules;
+	struct bus_rule_type *rules;
+};
+
+static struct static_rules_type static_rules;
+
+static int bus_get_reg(struct nodeclk *nclk, struct device *dev)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *node_dev;
+
+	if (!(dev && nclk))
+		return -ENXIO;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!strlen(nclk->reg_name)) {
+		dev_dbg(dev, "No regulator exist for node %d\n",
+						node_dev->node_info->id);
+		goto exit_of_get_reg;
+	} else {
+		if (!(IS_ERR_OR_NULL(nclk->reg)))
+			goto exit_of_get_reg;
+
+		nclk->reg = devm_regulator_get(dev, nclk->reg_name);
+		if (IS_ERR_OR_NULL(nclk->reg)) {
+			ret =
+			(IS_ERR(nclk->reg) ? PTR_ERR(nclk->reg) : -ENXIO);
+			dev_err(dev, "Error: Failed to get regulator %s:%d\n",
+							nclk->reg_name, ret);
+		} else {
+			dev_dbg(dev, "Successfully got regulator for %d\n",
+				node_dev->node_info->id);
+		}
+	}
+
+exit_of_get_reg:
+	return ret;
+}
+
+static int bus_enable_reg(struct nodeclk *nclk)
+{
+	int ret = 0;
+
+	if (!nclk) {
+		ret = -ENXIO;
+		goto exit_bus_enable_reg;
+	}
+
+	if ((IS_ERR_OR_NULL(nclk->reg))) {
+		ret = -ENXIO;
+		goto exit_bus_enable_reg;
+	}
+
+	ret = regulator_enable(nclk->reg);
+	if (ret) {
+		MSM_BUS_ERR("Failed to enable regulator for %s\n",
+							nclk->reg_name);
+		goto exit_bus_enable_reg;
+	}
+	pr_debug("%s: Enabled Reg\n", __func__);
+exit_bus_enable_reg:
+	return ret;
+}
+
+static int bus_disable_reg(struct nodeclk *nclk)
+{
+	int ret = 0;
+
+	if (!nclk) {
+		ret = -ENXIO;
+		goto exit_bus_disable_reg;
+	}
+
+	if ((IS_ERR_OR_NULL(nclk->reg))) {
+		ret = -ENXIO;
+		goto exit_bus_disable_reg;
+	}
+
+	regulator_disable(nclk->reg);
+	pr_debug("%s: Disabled Reg\n", __func__);
+exit_bus_disable_reg:
+	return ret;
+}
+
+static int enable_nodeclk(struct nodeclk *nclk, struct device *dev)
+{
+	int ret = 0;
+
+	if (!nclk->enable && !nclk->setrate_only_clk) {
+		if (dev && strlen(nclk->reg_name)) {
+			if (IS_ERR_OR_NULL(nclk->reg)) {
+				ret = bus_get_reg(nclk, dev);
+				if (ret) {
+					dev_dbg(dev,
+						"Failed to get reg.Err %d\n",
+									ret);
+					goto exit_enable_nodeclk;
+				}
+			}
+
+			ret = bus_enable_reg(nclk);
+			if (ret) {
+				dev_dbg(dev, "Failed to enable reg. Err %d\n",
+									ret);
+				goto exit_enable_nodeclk;
+			}
+		}
+		ret = clk_prepare_enable(nclk->clk);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: failed to enable clk ", __func__);
+			nclk->enable = false;
+		} else
+			nclk->enable = true;
+	}
+exit_enable_nodeclk:
+	return ret;
+}
+
+static int disable_nodeclk(struct nodeclk *nclk)
+{
+	int ret = 0;
+
+	if (nclk->enable && !nclk->setrate_only_clk) {
+		clk_disable_unprepare(nclk->clk);
+		nclk->enable = false;
+		bus_disable_reg(nclk);
+	}
+	return ret;
+}
+
+static int setrate_nodeclk(struct nodeclk *nclk, long rate)
+{
+	int ret = 0;
+
+	if (!nclk->enable_only_clk)
+		ret = clk_set_rate(nclk->clk, rate);
+
+	if (ret)
+		MSM_BUS_ERR("%s: failed to setrate clk", __func__);
+	return ret;
+}
+
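+/*
+ * Vote the aggregated AB bandwidth for a node with the RPM: the value
+ * is sent for the sleep set (DUAL_CTX) or the active set, once against
+ * the node's master RPM id and once against its slave RPM id, for
+ * whichever of the two is valid (!= -1).
+ */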
+static int send_rpm_msg(struct msm_bus_node_device_type *ndev, int ctx)
+{
+	int ret = 0;
+	int rsc_type;
+	struct msm_rpm_kvp rpm_kvp;
+	int rpm_ctx;
+
+	if (!ndev) {
+		MSM_BUS_ERR("%s: Error getting node info.", __func__);
+		ret = -ENODEV;
+		goto exit_send_rpm_msg;
+	}
+
+	rpm_kvp.length = sizeof(uint64_t);
+	rpm_kvp.key = RPM_MASTER_FIELD_BW;
+
+	if (ctx == DUAL_CTX)
+		rpm_ctx = MSM_RPM_CTX_SLEEP_SET;
+	else
+		rpm_ctx = MSM_RPM_CTX_ACTIVE_SET;
+
+	rpm_kvp.data = (uint8_t *)&ndev->node_bw[ctx].sum_ab;
+
+	if (ndev->node_info->mas_rpm_id != -1) {
+		rsc_type = RPM_BUS_MASTER_REQ;
+		ret = msm_rpm_send_message(rpm_ctx, rsc_type,
+			ndev->node_info->mas_rpm_id, &rpm_kvp, 1);
+		if (ret) {
+			MSM_BUS_ERR("%s: Failed to send RPM message:",
+					__func__);
+			MSM_BUS_ERR("%s:Node Id %d RPM id %d",
+			__func__, ndev->node_info->id,
+				 ndev->node_info->mas_rpm_id);
+			goto exit_send_rpm_msg;
+		}
+		trace_bus_agg_bw(ndev->node_info->id,
+			ndev->node_info->mas_rpm_id, rpm_ctx,
+			ndev->node_bw[ctx].sum_ab);
+	}
+
+	if (ndev->node_info->slv_rpm_id != -1) {
+		rsc_type = RPM_BUS_SLAVE_REQ;
+		ret = msm_rpm_send_message(rpm_ctx, rsc_type,
+			ndev->node_info->slv_rpm_id, &rpm_kvp, 1);
+		if (ret) {
+			MSM_BUS_ERR("%s: Failed to send RPM message:",
+						__func__);
+			MSM_BUS_ERR("%s: Node Id %d RPM id %d",
+			__func__, ndev->node_info->id,
+				ndev->node_info->slv_rpm_id);
+			goto exit_send_rpm_msg;
+		}
+		trace_bus_agg_bw(ndev->node_info->id,
+			ndev->node_info->slv_rpm_id, rpm_ctx,
+			ndev->node_bw[ctx].sum_ab);
+	}
+exit_send_rpm_msg:
+	return ret;
+}
+
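+/*
+ * Flush the aggregated bandwidth for a node when it has changed since
+ * the last commit: AP-owned nodes program their QoS bandwidth registers
+ * directly (active context only), everything else is voted via the RPM.
+ */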
+static int flush_bw_data(struct msm_bus_node_device_type *node_info, int ctx)
+{
+	int ret = 0;
+
+	if (!node_info) {
+		MSM_BUS_ERR("%s: Unable to find bus device for device",
+			__func__);
+		ret = -ENODEV;
+		goto exit_flush_bw_data;
+	}
+
+	if (node_info->node_bw[ctx].last_sum_ab !=
+				node_info->node_bw[ctx].sum_ab) {
+		if (node_info->ap_owned) {
+			struct msm_bus_node_device_type *bus_device =
+			to_msm_bus_node(node_info->node_info->bus_device);
+			struct msm_bus_fab_device_type *fabdev =
+							bus_device->fabdev;
+
+			/*
+			 * For AP owned ports, only care about the Active
+			 * context bandwidth.
+			 */
+			if (fabdev && (ctx == ACTIVE_CTX) &&
+				fabdev->noc_ops.update_bw_reg &&
+				fabdev->noc_ops.update_bw_reg
+					(node_info->node_info->qos_params.mode))
+				ret = fabdev->noc_ops.set_bw(node_info,
+							fabdev->qos_base,
+							fabdev->base_offset,
+							fabdev->qos_off,
+							fabdev->qos_freq);
+		} else {
+			ret = send_rpm_msg(node_info, ctx);
+
+			if (ret)
+				MSM_BUS_ERR("%s: Failed to send RPM msg for %d",
+				__func__, node_info->node_info->id);
+		}
+		node_info->node_bw[ctx].last_sum_ab =
+					node_info->node_bw[ctx].sum_ab;
+	}
+
+exit_flush_bw_data:
+	return ret;
+
+}
+
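+/*
+ * Flush the aggregated clock request for a node: round and set the new
+ * rate and enable the clock for a non-zero request, or disable it when
+ * the request drops to zero. Fabric devices also gate their QoS clock.
+ */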
+static int flush_clk_data(struct msm_bus_node_device_type *node, int ctx)
+{
+	struct nodeclk *nodeclk = NULL;
+	int ret = 0;
+
+	if (!node) {
+		MSM_BUS_ERR("Unable to find bus device");
+		ret = -ENODEV;
+		goto exit_flush_clk_data;
+	}
+
+	nodeclk = &node->clk[ctx];
+
+	if (IS_ERR_OR_NULL(nodeclk) || IS_ERR_OR_NULL(nodeclk->clk))
+		goto exit_flush_clk_data;
+
+	if (nodeclk->rate != node->node_bw[ctx].cur_clk_hz) {
+		long rounded_rate;
+
+		nodeclk->rate = node->node_bw[ctx].cur_clk_hz;
+		nodeclk->dirty = true;
+
+		if (nodeclk->rate) {
+			rounded_rate = clk_round_rate(nodeclk->clk,
+							nodeclk->rate);
+			ret = setrate_nodeclk(nodeclk, rounded_rate);
+
+			if (ret) {
+				MSM_BUS_ERR("%s: Failed to set_rate %lu for %d",
+					__func__, rounded_rate,
+						node->node_info->id);
+				ret = -ENODEV;
+				goto exit_flush_clk_data;
+			}
+
+			ret = enable_nodeclk(nodeclk, &node->dev);
+
+			if ((node->node_info->is_fab_dev) &&
+				!IS_ERR_OR_NULL(node->bus_qos_clk.clk))
+				ret = enable_nodeclk(&node->bus_qos_clk,
+							&node->dev);
+		} else {
+			if ((node->node_info->is_fab_dev) &&
+				!IS_ERR_OR_NULL(node->bus_qos_clk.clk))
+				ret = disable_nodeclk(&node->bus_qos_clk);
+
+			ret = disable_nodeclk(nodeclk);
+		}
+
+		if (ret) {
+			MSM_BUS_ERR("%s: Failed to enable for %d", __func__,
+						node->node_info->id);
+			ret = -ENODEV;
+			goto exit_flush_clk_data;
+		}
+		MSM_BUS_DBG("%s: Updated %d clk to %llu", __func__,
+				node->node_info->id, nodeclk->rate);
+	}
+exit_flush_clk_data:
+	/* Reset the aggregated clock rate for fab devices */
+	if (node && node->node_info->is_fab_dev)
+		node->node_bw[ctx].cur_clk_hz = 0;
+
+	if (nodeclk)
+		nodeclk->dirty = false;
+	return ret;
+}
+
+static int msm_bus_agg_fab_clks(struct msm_bus_node_device_type *bus_dev)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *node;
+	int ctx;
+
+	list_for_each_entry(node, &bus_dev->devlist, dev_link) {
+		for (ctx = 0; ctx < NUM_CTX; ctx++) {
+			if (node->node_bw[ctx].cur_clk_hz >=
+					bus_dev->node_bw[ctx].cur_clk_hz)
+				bus_dev->node_bw[ctx].cur_clk_hz =
+						node->node_bw[ctx].cur_clk_hz;
+		}
+	}
+	return ret;
+}
+
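+/*
+ * Commit all pending requests on the given list: fabric devices first
+ * aggregate the clock requests of their child nodes, then every node
+ * has its clock and bandwidth votes flushed for both contexts before
+ * being taken off the commit list.
+ */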
+int msm_bus_commit_data(struct list_head *clist)
+{
+	int ret = 0;
+	int ctx;
+	struct msm_bus_node_device_type *node;
+	struct msm_bus_node_device_type *node_tmp;
+
+	list_for_each_entry(node, clist, link) {
+		/* Aggregate the bus clocks */
+		if (node->node_info->is_fab_dev)
+			msm_bus_agg_fab_clks(node);
+	}
+
+	list_for_each_entry_safe(node, node_tmp, clist, link) {
+		if (unlikely(node->node_info->defer_qos))
+			msm_bus_dev_init_qos(&node->dev, NULL);
+
+		for (ctx = 0; ctx < NUM_CTX; ctx++) {
+			ret = flush_clk_data(node, ctx);
+			if (ret)
+				MSM_BUS_ERR("%s: Err flushing clk data for:%d",
+						__func__, node->node_info->id);
+			ret = flush_bw_data(node, ctx);
+			if (ret)
+				MSM_BUS_ERR("%s: Error flushing bw data for %d",
+					__func__, node->node_info->id);
+		}
+		node->dirty = false;
+		list_del_init(&node->link);
+	}
+	return ret;
+}
+
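+/*
+ * devm-managed realloc: allocate a new buffer of new_size, copy over
+ * min(old_size, new_size) bytes and free the old buffer; a new_size of
+ * zero just frees the buffer and returns ZERO_SIZE_PTR.
+ */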
+void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
+					size_t new_size, gfp_t flags)
+{
+	void *ret;
+	size_t copy_size = old_size;
+
+	if (!new_size) {
+		devm_kfree(dev, p);
+		return ZERO_SIZE_PTR;
+	}
+
+	if (new_size < old_size)
+		copy_size = new_size;
+
+	ret = devm_kzalloc(dev, new_size, flags);
+	if (!ret)
+		goto exit_realloc_devmem;
+
+	memcpy(ret, p, copy_size);
+	devm_kfree(dev, p);
+exit_realloc_devmem:
+	return ret;
+}
+
+static void msm_bus_fab_init_noc_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	switch (bus_dev->fabdev->bus_type) {
+	case MSM_BUS_NOC:
+		msm_bus_noc_set_ops(bus_dev);
+		break;
+	case MSM_BUS_BIMC:
+		msm_bus_bimc_set_ops(bus_dev);
+		break;
+	default:
+		MSM_BUS_ERR("%s: Invalid Bus type", __func__);
+	}
+}
+
+static int msm_bus_disable_node_qos_clk(struct msm_bus_node_device_type *node)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int i;
+	int ret = 0;
+
+	if (!node || (!to_msm_bus_node(node->node_info->bus_device))) {
+		ret = -ENXIO;
+		goto exit_disable_node_qos_clk;
+	}
+
+	for (i = 0; i < node->num_node_qos_clks; i++)
+		ret = disable_nodeclk(&node->node_qos_clks[i]);
+
+	bus_node = to_msm_bus_node(node->node_info->bus_device);
+
+	for (i = 0; i < bus_node->num_node_qos_clks; i++)
+		ret = disable_nodeclk(&bus_node->node_qos_clks[i]);
+
+exit_disable_node_qos_clk:
+	return ret;
+}
+
+static int msm_bus_enable_node_qos_clk(struct msm_bus_node_device_type *node)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int i;
+	int ret = 0;
+	long rounded_rate;
+
+	if (!node || (!to_msm_bus_node(node->node_info->bus_device))) {
+		ret = -ENXIO;
+		goto exit_enable_node_qos_clk;
+	}
+	bus_node = to_msm_bus_node(node->node_info->bus_device);
+
+	for (i = 0; i < node->num_node_qos_clks; i++) {
+		if (!node->node_qos_clks[i].enable_only_clk) {
+			rounded_rate =
+				clk_round_rate(
+					node->node_qos_clks[i].clk, 1);
+			ret = setrate_nodeclk(&node->node_qos_clks[i],
+								rounded_rate);
+			if (ret)
+				MSM_BUS_DBG("%s: Failed set rate clk,node %d\n",
+					__func__, node->node_info->id);
+		}
+		ret = enable_nodeclk(&node->node_qos_clks[i],
+					node->node_info->bus_device);
+		if (ret) {
+			MSM_BUS_DBG("%s: Failed to set Qos Clks ret %d\n",
+				__func__, ret);
+			msm_bus_disable_node_qos_clk(node);
+			goto exit_enable_node_qos_clk;
+		}
+
+	}
+
+	for (i = 0; i < bus_node->num_node_qos_clks; i++) {
+		if (!bus_node->node_qos_clks[i].enable_only_clk) {
+			rounded_rate =
+				clk_round_rate(
+					bus_node->node_qos_clks[i].clk, 1);
+			ret = setrate_nodeclk(&bus_node->node_qos_clks[i],
+								rounded_rate);
+			if (ret)
+				MSM_BUS_DBG("%s: Failed set rate clk,node %d\n",
+					__func__, node->node_info->id);
+		}
+		ret = enable_nodeclk(&bus_node->node_qos_clks[i],
+					node->node_info->bus_device);
+		if (ret) {
+			MSM_BUS_DBG("%s: Failed to set Qos Clks ret %d\n",
+				__func__, ret);
+			msm_bus_disable_node_qos_clk(node);
+			goto exit_enable_node_qos_clk;
+		}
+
+	}
+exit_enable_node_qos_clk:
+	return ret;
+}
+
+int msm_bus_enable_limiter(struct msm_bus_node_device_type *node_dev,
+				int enable, uint64_t lim_bw)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *bus_node_dev;
+
+	if (!node_dev) {
+		MSM_BUS_ERR("No device specified");
+		ret = -ENXIO;
+		goto exit_enable_limiter;
+	}
+
+	if (!node_dev->ap_owned) {
+		MSM_BUS_ERR("Device is not AP owned %d",
+						node_dev->node_info->id);
+		ret = -ENXIO;
+		goto exit_enable_limiter;
+	}
+
+	bus_node_dev = to_msm_bus_node(node_dev->node_info->bus_device);
+	if (!bus_node_dev) {
+		MSM_BUS_ERR("Unable to get bus device info for %d",
+			node_dev->node_info->id);
+		ret = -ENXIO;
+		goto exit_enable_limiter;
+	}
+	if (bus_node_dev->fabdev &&
+		bus_node_dev->fabdev->noc_ops.limit_mport) {
+		if (ret < 0) {
+			MSM_BUS_ERR("Can't Enable QoS clk %d",
+				node_dev->node_info->id);
+			goto exit_enable_limiter;
+		}
+		bus_node_dev->fabdev->noc_ops.limit_mport(
+				node_dev,
+				bus_node_dev->fabdev->qos_base,
+				bus_node_dev->fabdev->base_offset,
+				bus_node_dev->fabdev->qos_off,
+				bus_node_dev->fabdev->qos_freq,
+				enable, lim_bw);
+	}
+
+exit_enable_limiter:
+	return ret;
+}
+
+static int msm_bus_dev_init_qos(struct device *dev, void *data)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *node_dev = NULL;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!node_dev) {
+		MSM_BUS_ERR("%s: Unable to get node device info", __func__);
+		ret = -ENXIO;
+		goto exit_init_qos;
+	}
+
+	MSM_BUS_DBG("Device = %d", node_dev->node_info->id);
+
+	if (node_dev->ap_owned) {
+		struct msm_bus_node_device_type *bus_node_info;
+
+		bus_node_info =
+			to_msm_bus_node(node_dev->node_info->bus_device);
+
+		if (!bus_node_info) {
+			MSM_BUS_ERR("%s: Unable to get bus device info for %d",
+				__func__,
+				node_dev->node_info->id);
+			ret = -ENXIO;
+			goto exit_init_qos;
+		}
+
+		if (bus_node_info->fabdev &&
+			bus_node_info->fabdev->noc_ops.qos_init) {
+			int ret = 0;
+
+			if (node_dev->ap_owned &&
+				(node_dev->node_info->qos_params.mode) != -1) {
+
+				if (bus_node_info->fabdev->bypass_qos_prg)
+					goto exit_init_qos;
+
+				ret = msm_bus_enable_node_qos_clk(node_dev);
+				if (ret < 0) {
+					MSM_BUS_DBG("Can't Enable QoS clk %d\n",
+					node_dev->node_info->id);
+					node_dev->node_info->defer_qos = true;
+					goto exit_init_qos;
+				}
+
+				bus_node_info->fabdev->noc_ops.qos_init(
+					node_dev,
+					bus_node_info->fabdev->qos_base,
+					bus_node_info->fabdev->base_offset,
+					bus_node_info->fabdev->qos_off,
+					bus_node_info->fabdev->qos_freq);
+				ret = msm_bus_disable_node_qos_clk(node_dev);
+				node_dev->node_info->defer_qos = false;
+			}
+		} else
+			MSM_BUS_ERR("%s: Skipping QOS init for %d",
+				__func__, node_dev->node_info->id);
+	}
+exit_init_qos:
+	return ret;
+}
+
+static int msm_bus_fabric_init(struct device *dev,
+			struct msm_bus_node_device_type *pdata)
+{
+	struct msm_bus_fab_device_type *fabdev;
+	struct msm_bus_node_device_type *node_dev = NULL;
+	int ret = 0;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!node_dev) {
+		MSM_BUS_ERR("%s: Unable to get bus device info", __func__);
+		ret = -ENXIO;
+		goto exit_fabric_init;
+	}
+
+	if (node_dev->node_info->virt_dev) {
+		MSM_BUS_ERR("%s: Skip Fab init for virtual device %d", __func__,
+						node_dev->node_info->id);
+		goto exit_fabric_init;
+	}
+
+	fabdev = devm_kzalloc(dev, sizeof(struct msm_bus_fab_device_type),
+								GFP_KERNEL);
+	if (!fabdev) {
+		ret = -ENOMEM;
+		goto exit_fabric_init;
+	}
+
+	node_dev->fabdev = fabdev;
+	fabdev->pqos_base = pdata->fabdev->pqos_base;
+	fabdev->qos_range = pdata->fabdev->qos_range;
+	fabdev->base_offset = pdata->fabdev->base_offset;
+	fabdev->qos_off = pdata->fabdev->qos_off;
+	fabdev->qos_freq = pdata->fabdev->qos_freq;
+	fabdev->bus_type = pdata->fabdev->bus_type;
+	fabdev->bypass_qos_prg = pdata->fabdev->bypass_qos_prg;
+	msm_bus_fab_init_noc_ops(node_dev);
+
+	fabdev->qos_base = devm_ioremap(dev,
+				fabdev->pqos_base, fabdev->qos_range);
+	if (!fabdev->qos_base) {
+		MSM_BUS_ERR("%s: Error remapping address 0x%zx: bus device %d",
+			__func__,
+			 (size_t)fabdev->pqos_base, node_dev->node_info->id);
+		ret = -ENOMEM;
+		goto exit_fabric_init;
+	}
+
+exit_fabric_init:
+	return ret;
+}
+
+static int msm_bus_init_clk(struct device *bus_dev,
+				struct msm_bus_node_device_type *pdata)
+{
+	unsigned int ctx;
+	struct msm_bus_node_device_type *node_dev = to_msm_bus_node(bus_dev);
+	int i;
+
+	for (ctx = 0; ctx < NUM_CTX; ctx++) {
+		if (!IS_ERR_OR_NULL(pdata->clk[ctx].clk)) {
+			node_dev->clk[ctx].clk = pdata->clk[ctx].clk;
+			node_dev->clk[ctx].enable_only_clk =
+					pdata->clk[ctx].enable_only_clk;
+			node_dev->clk[ctx].setrate_only_clk =
+					pdata->clk[ctx].setrate_only_clk;
+			node_dev->clk[ctx].enable = false;
+			node_dev->clk[ctx].dirty = false;
+			strlcpy(node_dev->clk[ctx].reg_name,
+				pdata->clk[ctx].reg_name, MAX_REG_NAME);
+			node_dev->clk[ctx].reg = NULL;
+			bus_get_reg(&node_dev->clk[ctx], bus_dev);
+			MSM_BUS_DBG("%s: Valid node clk node %d ctx %d\n",
+				__func__, node_dev->node_info->id, ctx);
+		}
+	}
+
+	if (!IS_ERR_OR_NULL(pdata->bus_qos_clk.clk)) {
+		node_dev->bus_qos_clk.clk = pdata->bus_qos_clk.clk;
+		node_dev->bus_qos_clk.enable_only_clk =
+					pdata->bus_qos_clk.enable_only_clk;
+		node_dev->bus_qos_clk.setrate_only_clk =
+					pdata->bus_qos_clk.setrate_only_clk;
+		node_dev->bus_qos_clk.enable = false;
+		strlcpy(node_dev->bus_qos_clk.reg_name,
+			pdata->bus_qos_clk.reg_name, MAX_REG_NAME);
+		node_dev->bus_qos_clk.reg = NULL;
+		MSM_BUS_DBG("%s: Valid bus qos clk node %d\n", __func__,
+						node_dev->node_info->id);
+	}
+
+	if (pdata->num_node_qos_clks) {
+		node_dev->num_node_qos_clks = pdata->num_node_qos_clks;
+		node_dev->node_qos_clks = devm_kzalloc(bus_dev,
+			(node_dev->num_node_qos_clks * sizeof(struct nodeclk)),
+			GFP_KERNEL);
+		if (!node_dev->node_qos_clks)
+			return -ENOMEM;
+
+		for (i = 0; i < pdata->num_node_qos_clks; i++) {
+			node_dev->node_qos_clks[i].clk =
+					pdata->node_qos_clks[i].clk;
+			node_dev->node_qos_clks[i].enable_only_clk =
+					pdata->node_qos_clks[i].enable_only_clk;
+			node_dev->node_qos_clks[i].setrate_only_clk =
+				pdata->node_qos_clks[i].setrate_only_clk;
+			node_dev->node_qos_clks[i].enable = false;
+			strlcpy(node_dev->node_qos_clks[i].reg_name,
+				pdata->node_qos_clks[i].reg_name, MAX_REG_NAME);
+			node_dev->node_qos_clks[i].reg = NULL;
+			MSM_BUS_DBG("%s: Valid qos clk[%d] node %d %d Reg%s\n",
+					__func__, i,
+					node_dev->node_info->id,
+					node_dev->num_node_qos_clks,
+					node_dev->node_qos_clks[i].reg_name);
+		}
+	}
+
+	return 0;
+}
+
+static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata,
+				struct device *bus_dev)
+{
+	int ret = 0;
+	struct msm_bus_node_info_type *node_info = NULL;
+	struct msm_bus_node_info_type *pdata_node_info = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	bus_node = to_msm_bus_node(bus_dev);
+
+	if (!bus_node || !pdata) {
+		ret = -ENXIO;
+		MSM_BUS_ERR("%s: NULL pointers for pdata or bus_node",
+			__func__);
+		goto exit_copy_node_info;
+	}
+
+	node_info = bus_node->node_info;
+	pdata_node_info = pdata->node_info;
+
+	node_info->name = pdata_node_info->name;
+	node_info->id = pdata_node_info->id;
+	node_info->bus_device_id = pdata_node_info->bus_device_id;
+	node_info->mas_rpm_id = pdata_node_info->mas_rpm_id;
+	node_info->slv_rpm_id = pdata_node_info->slv_rpm_id;
+	node_info->num_connections = pdata_node_info->num_connections;
+	node_info->num_blist = pdata_node_info->num_blist;
+	node_info->num_qports = pdata_node_info->num_qports;
+	node_info->virt_dev = pdata_node_info->virt_dev;
+	node_info->is_fab_dev = pdata_node_info->is_fab_dev;
+	node_info->qos_params.mode = pdata_node_info->qos_params.mode;
+	node_info->qos_params.prio1 = pdata_node_info->qos_params.prio1;
+	node_info->qos_params.prio0 = pdata_node_info->qos_params.prio0;
+	node_info->qos_params.reg_prio1 = pdata_node_info->qos_params.reg_prio1;
+	node_info->qos_params.reg_prio0 = pdata_node_info->qos_params.reg_prio0;
+	node_info->qos_params.prio_lvl = pdata_node_info->qos_params.prio_lvl;
+	node_info->qos_params.prio_rd = pdata_node_info->qos_params.prio_rd;
+	node_info->qos_params.prio_wr = pdata_node_info->qos_params.prio_wr;
+	node_info->qos_params.gp = pdata_node_info->qos_params.gp;
+	node_info->qos_params.thmp = pdata_node_info->qos_params.thmp;
+	node_info->qos_params.ws = pdata_node_info->qos_params.ws;
+	node_info->qos_params.bw_buffer = pdata_node_info->qos_params.bw_buffer;
+	node_info->agg_params.buswidth = pdata_node_info->agg_params.buswidth;
+	node_info->agg_params.agg_scheme =
+					pdata_node_info->agg_params.agg_scheme;
+	node_info->agg_params.vrail_comp =
+					pdata_node_info->agg_params.vrail_comp;
+	node_info->agg_params.num_aggports =
+				pdata_node_info->agg_params.num_aggports;
+	node_info->agg_params.num_util_levels =
+				pdata_node_info->agg_params.num_util_levels;
+	node_info->agg_params.util_levels = devm_kzalloc(bus_dev,
+			sizeof(struct node_util_levels_type) *
+			node_info->agg_params.num_util_levels,
+			GFP_KERNEL);
+	if (!node_info->agg_params.util_levels) {
+		MSM_BUS_ERR("%s: Agg util level alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+	memcpy(node_info->agg_params.util_levels,
+		pdata_node_info->agg_params.util_levels,
+		sizeof(struct node_util_levels_type) *
+			pdata_node_info->agg_params.num_util_levels);
+
+	node_info->dev_connections = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_connections,
+			GFP_KERNEL);
+	if (!node_info->dev_connections) {
+		MSM_BUS_ERR("%s:Bus dev connections alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->connections = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_connections,
+			GFP_KERNEL);
+	if (!node_info->connections) {
+		MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->connections,
+		pdata_node_info->connections,
+		sizeof(int) * pdata_node_info->num_connections);
+
+	node_info->black_connections = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_blist,
+			GFP_KERNEL);
+	if (!node_info->black_connections) {
+		MSM_BUS_ERR("%s: Bus black connections alloc failed\n",
+			__func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->bl_cons = devm_kzalloc(bus_dev,
+			pdata_node_info->num_blist * sizeof(int),
+			GFP_KERNEL);
+	if (!node_info->bl_cons) {
+		MSM_BUS_ERR("%s:Bus black list connections alloc failed\n",
+					__func__);
+		devm_kfree(bus_dev, node_info->black_connections);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->bl_cons,
+		pdata_node_info->bl_cons,
+		sizeof(int) * pdata_node_info->num_blist);
+
+	node_info->qport = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_qports,
+			GFP_KERNEL);
+	if (!node_info->qport) {
+		MSM_BUS_ERR("%s:Bus qport allocation failed\n", __func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		devm_kfree(bus_dev, node_info->bl_cons);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->qport,
+		pdata_node_info->qport,
+		sizeof(int) * pdata_node_info->num_qports);
+
+exit_copy_node_info:
+	return ret;
+}
+
+static struct device *msm_bus_device_init(
+			struct msm_bus_node_device_type *pdata)
+{
+	struct device *bus_dev = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+	struct msm_bus_node_info_type *node_info = NULL;
+	int ret = 0;
+
+	/* Init here so we can use devm calls */
+
+	bus_node = kzalloc(sizeof(struct msm_bus_node_device_type), GFP_KERNEL);
+	if (!bus_node) {
+		ret = -ENOMEM;
+		goto err_device_init;
+	}
+	bus_dev = &bus_node->dev;
+	device_initialize(bus_dev);
+
+	node_info = devm_kzalloc(bus_dev,
+			sizeof(struct msm_bus_node_info_type), GFP_KERNEL);
+	if (!node_info) {
+		ret = -ENOMEM;
+		goto err_put_device;
+	}
+
+	bus_node->node_info = node_info;
+	bus_node->ap_owned = pdata->ap_owned;
+	bus_dev->of_node = pdata->of_node;
+
+	ret = msm_bus_copy_node_info(pdata, bus_dev);
+	if (ret)
+		goto err_put_device;
+
+	bus_dev->bus = &msm_bus_type;
+	dev_set_name(bus_dev, "%s", bus_node->node_info->name);
+
+	ret = device_add(bus_dev);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error registering device %d",
+				__func__, pdata->node_info->id);
+		goto err_put_device;
+	}
+	device_create_file(bus_dev, &dev_attr_bw);
+	INIT_LIST_HEAD(&bus_node->devlist);
+	return bus_dev;
+
+err_put_device:
+	put_device(bus_dev);
+	bus_dev = NULL;
+	kfree(bus_node);
+err_device_init:
+	return ERR_PTR(ret);
+}
+
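+/*
+ * Resolve the integer node ids captured from platform data into device
+ * pointers: hook the node onto its parent fabric's device list and look
+ * up every connection and blacklist entry with bus_find_device().
+ */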
+static int msm_bus_setup_dev_conn(struct device *bus_dev, void *data)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int ret = 0;
+	int j;
+	struct msm_bus_node_device_type *fab;
+
+	bus_node = to_msm_bus_node(bus_dev);
+	if (!bus_node) {
+		MSM_BUS_ERR("%s: Can't get device info", __func__);
+		ret = -ENODEV;
+		goto exit_setup_dev_conn;
+	}
+
+	/* Setup parent bus device for this node */
+	if (!bus_node->node_info->is_fab_dev) {
+		struct device *bus_parent_device =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->bus_device_id,
+				msm_bus_device_match_adhoc);
+
+		if (!bus_parent_device) {
+			MSM_BUS_ERR("%s: Error finding parentdev %d parent %d",
+				__func__,
+				bus_node->node_info->id,
+				bus_node->node_info->bus_device_id);
+			ret = -ENXIO;
+			goto exit_setup_dev_conn;
+		}
+		bus_node->node_info->bus_device = bus_parent_device;
+		fab = to_msm_bus_node(bus_parent_device);
+		list_add_tail(&bus_node->dev_link, &fab->devlist);
+	}
+
+	bus_node->node_info->is_traversed = false;
+
+	for (j = 0; j < bus_node->node_info->num_connections; j++) {
+		bus_node->node_info->dev_connections[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->connections[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->dev_connections[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d",
+				__func__, bus_node->node_info->connections[j],
+				 bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+	}
+
+	for (j = 0; j < bus_node->node_info->num_blist; j++) {
+		bus_node->node_info->black_connections[j] =
+			bus_find_device(&msm_bus_type, NULL,
+			(void *)
+			&bus_node->node_info->bl_cons[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->black_connections[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d\n",
+			__func__,
+			bus_node->node_info->bl_cons[j],
+			bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+	}
+
+exit_setup_dev_conn:
+	return ret;
+}
+
+static int msm_bus_node_debug(struct device *bus_dev, void *data)
+{
+	int j;
+	int ret = 0;
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	bus_node = to_msm_bus_node(bus_dev);
+	if (!bus_node) {
+		MSM_BUS_ERR("%s: Can't get device info", __func__);
+		ret = -ENODEV;
+		goto exit_node_debug;
+	}
+
+	MSM_BUS_DBG("Device = %d buswidth %u", bus_node->node_info->id,
+				bus_node->node_info->agg_params.buswidth);
+	for (j = 0; j < bus_node->node_info->num_connections; j++) {
+		struct msm_bus_node_device_type *bdev =
+		to_msm_bus_node(bus_node->node_info->dev_connections[j]);
+		MSM_BUS_DBG("\n\t Connection[%d] %d", j, bdev->node_info->id);
+	}
+
+	if (bus_node->node_info->is_fab_dev)
+		msm_bus_floor_init(bus_dev);
+
+exit_node_debug:
+	return ret;
+}
+
+static int msm_bus_free_dev(struct device *dev, void *data)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	bus_node = to_msm_bus_node(dev);
+
+	if (bus_node)
+		MSM_BUS_ERR("\n%s: Removing device %d", __func__,
+						bus_node->node_info->id);
+	device_unregister(dev);
+	kfree(bus_node);
+	return 0;
+}
+
+int msm_bus_device_remove(struct platform_device *pdev)
+{
+	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_free_dev);
+	return 0;
+}
+
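+/*
+ * Probe flow: create a device for every node in the platform data
+ * (device tree or legacy platform data), set up its clocks, initialize
+ * fabric devices, resolve inter-node connections, program QoS where
+ * possible (deferred to the first transaction on failure) and register
+ * the ad-hoc arbitration ops.
+ */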
+static int msm_bus_device_probe(struct platform_device *pdev)
+{
+	unsigned int i;
+	int ret;
+	struct msm_bus_device_node_registration *pdata;
+
+	/* If possible, get pdata from device-tree */
+	if (pdev->dev.of_node)
+		pdata = msm_bus_of_to_pdata(pdev);
+	else {
+		pdata =
+		(struct msm_bus_device_node_registration *)
+		pdev->dev.platform_data;
+	}
+
+	if (IS_ERR_OR_NULL(pdata)) {
+		MSM_BUS_ERR("No platform data found");
+		ret = -ENODATA;
+		goto exit_device_probe;
+	}
+
+	for (i = 0; i < pdata->num_devices; i++) {
+		struct device *node_dev = NULL;
+
+		node_dev = msm_bus_device_init(&pdata->info[i]);
+
+		if (IS_ERR(node_dev)) {
+			MSM_BUS_ERR("%s: Error during dev init for %d",
+				__func__, pdata->info[i].node_info->id);
+			ret = PTR_ERR(node_dev);
+			goto exit_device_probe;
+		}
+
+		ret = msm_bus_init_clk(node_dev, &pdata->info[i]);
+		if (ret) {
+			MSM_BUS_ERR("\n Failed to init bus clk. ret %d", ret);
+			msm_bus_device_remove(pdev);
+			goto exit_device_probe;
+		}
+		/* Is this a fabric device? */
+		if (pdata->info[i].node_info->is_fab_dev) {
+			MSM_BUS_DBG("%s: %d is a fab", __func__,
+						pdata->info[i].node_info->id);
+			ret = msm_bus_fabric_init(node_dev, &pdata->info[i]);
+			if (ret) {
+				MSM_BUS_ERR("%s: Error initializing fab %d",
+					__func__, pdata->info[i].node_info->id);
+				goto exit_device_probe;
+			}
+		}
+	}
+
+	ret = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+						msm_bus_setup_dev_conn);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error setting up dev connections", __func__);
+		goto exit_device_probe;
+	}
+
+	/*
+	 * Setup the QoS for the nodes, don't check the error codes as we
+	 * defer QoS programming to the first transaction in cases of failure
+	 * and we want to continue the probe.
+	 */
+	ret = bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_dev_init_qos);
+
+	/* Register the arb layer ops */
+	msm_bus_arb_setops_adhoc(&arb_ops);
+	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_node_debug);
+
+	devm_kfree(&pdev->dev, pdata->info);
+	devm_kfree(&pdev->dev, pdata);
+
+	dev_info(&pdev->dev, "Bus scaling driver probe successful\n");
+
+exit_device_probe:
+	return ret;
+}
+
+static int msm_bus_device_rules_probe(struct platform_device *pdev)
+{
+	struct bus_rule_type *rule_data = NULL;
+	int num_rules = 0;
+
+	num_rules = msm_bus_of_get_static_rules(pdev, &rule_data);
+
+	if (!rule_data)
+		goto exit_rules_probe;
+
+	msm_rule_register(num_rules, rule_data, NULL);
+	static_rules.num_rules = num_rules;
+	static_rules.rules = rule_data;
+	pdev->dev.platform_data = &static_rules;
+
+exit_rules_probe:
+	return 0;
+}
+
+int msm_bus_device_rules_remove(struct platform_device *pdev)
+{
+	struct static_rules_type *static_rules = NULL;
+
+	static_rules = pdev->dev.platform_data;
+	if (static_rules)
+		msm_rule_unregister(static_rules->num_rules,
+					static_rules->rules, NULL);
+	return 0;
+}
+
+
+static const struct of_device_id rules_match[] = {
+	{.compatible = "qcom,msm-bus-static-bw-rules"},
+	{}
+};
+
+static struct platform_driver msm_bus_rules_driver = {
+	.probe = msm_bus_device_rules_probe,
+	.remove = msm_bus_device_rules_remove,
+	.driver = {
+		.name = "msm_bus_rules_device",
+		.of_match_table = rules_match,
+	},
+};
+
+static const struct of_device_id fabric_match[] = {
+	{.compatible = "qcom,msm-bus-device"},
+	{}
+};
+
+static struct platform_driver msm_bus_device_driver = {
+	.probe = msm_bus_device_probe,
+	.remove = msm_bus_device_remove,
+	.driver = {
+		.name = "msm_bus_device",
+		.of_match_table = fabric_match,
+	},
+};
+
+int __init msm_bus_device_init_driver(void)
+{
+	int rc;
+
+	MSM_BUS_ERR("msm_bus_device_init_driver\n");
+	rc = platform_driver_register(&msm_bus_device_driver);
+
+	if (rc) {
+		MSM_BUS_ERR("Failed to register bus device driver");
+		return rc;
+	}
+	return platform_driver_register(&msm_bus_rules_driver);
+}
+fs_initcall(msm_bus_device_init_driver);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
new file mode 100644
index 0000000..1665104
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -0,0 +1,2032 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+#include <soc/qcom/tcs.h>
+#include <trace/events/trace_msm_bus.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
+#include "msm_bus_core.h"
+#include "msm_bus_rpmh.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_bimc.h"
+
+#define MSM_BUS_RSC_COUNT		(MSM_BUS_RSC_LAST-MSM_BUS_RSC_FIRST+1)
+
+#define BCM_TCS_CMD_COMMIT_SHFT		30
+#define BCM_TCS_CMD_COMMIT_MASK		0x40000000
+#define BCM_TCS_CMD_VALID_SHFT		29
+#define BCM_TCS_CMD_VALID_MASK		0x20000000
+#define BCM_TCS_CMD_VOTE_X_SHFT		14
+#define BCM_TCS_CMD_VOTE_MASK		0x3FFF
+#define BCM_TCS_CMD_VOTE_Y_SHFT		0
+#define BCM_TCS_CMD_VOTE_Y_MASK		0xFFFC000
+
+#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
+	(((commit & 0x1) << BCM_TCS_CMD_COMMIT_SHFT) |\
+	((valid & 0x1) << BCM_TCS_CMD_VALID_SHFT) |\
+	((vote_x & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) |\
+	((vote_y & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT))
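+
+/*
+ * Example encoding (illustrative values): a committed, valid command with
+ * vote_x = 0x1 and vote_y = 0x2 packs as
+ * (1 << 30) | (1 << 29) | (0x1 << 14) | 0x2 = 0x60004002, i.e. commit in
+ * bit 30, valid in bit 29, vote_x in bits 27:14 and vote_y in bits 13:0,
+ * each vote masked to the 14-bit BCM_TCS_CMD_VOTE_MASK.
+ */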
+
+static int msm_bus_dev_init_qos(struct device *dev, void *data);
+static int msm_bus_dev_sbm_config(struct device *dev, bool enable);
+
+static struct list_head bcm_query_list_inorder[VCD_MAX_CNT];
+static struct msm_bus_node_device_type *cur_rsc;
+static bool init_time = true;
+
+struct msm_bus_rsc_client {
+	uint32_t rsc_id;
+	struct device *client;
+};
+
+struct msm_bus_rsc_client rsc_clients[MSM_BUS_RSC_COUNT];
+
+struct bcm_db {
+	uint32_t unit_size;
+	uint16_t width;
+	uint8_t clk_domain;
+	uint8_t reserved;
+};
+
+ssize_t bw_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct msm_bus_node_info_type *node_info = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int i;
+	int off = 0;
+
+	bus_node = to_msm_bus_node(dev);
+	if (!bus_node)
+		return -EINVAL;
+
+	node_info = bus_node->node_info;
+
+	for (i = 0; i < bus_node->num_lnodes; i++) {
+		if (!bus_node->lnode_list[i].in_use)
+			continue;
+		off += scnprintf((buf + off), PAGE_SIZE,
+		"[%d]:%s:Act_IB %llu Act_AB %llu Slp_IB %llu Slp_AB %llu\n",
+			i, bus_node->lnode_list[i].cl_name,
+			bus_node->lnode_list[i].lnode_ib[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ab[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ib[DUAL_CTX],
+			bus_node->lnode_list[i].lnode_ab[DUAL_CTX]);
+	}
+	off += scnprintf((buf + off), PAGE_SIZE,
+	"Max_Act_IB %llu Sum_Act_AB %llu Act_Util_fact %d Act_Vrail_comp %d\n",
+		bus_node->node_bw[ACTIVE_CTX].max_ib,
+		bus_node->node_bw[ACTIVE_CTX].sum_ab,
+		bus_node->node_bw[ACTIVE_CTX].util_used,
+		bus_node->node_bw[ACTIVE_CTX].vrail_used);
+	off += scnprintf((buf + off), PAGE_SIZE,
+	"Max_Slp_IB %llu Sum_Slp_AB %llu Slp_Util_fact %d Slp_Vrail_comp %d\n",
+		bus_node->node_bw[DUAL_CTX].max_ib,
+		bus_node->node_bw[DUAL_CTX].sum_ab,
+		bus_node->node_bw[DUAL_CTX].util_used,
+		bus_node->node_bw[DUAL_CTX].vrail_used);
+	return off;
+}
+
+ssize_t bw_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	return count;
+}
+
+DEVICE_ATTR_RW(bw);
+
+struct static_rules_type {
+	int num_rules;
+	struct bus_rule_type *rules;
+};
+
+static struct static_rules_type static_rules;
+
+static int bus_get_reg(struct nodeclk *nclk, struct device *dev)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *node_dev;
+
+	if (!(dev && nclk))
+		return -ENXIO;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!strlen(nclk->reg_name)) {
+		dev_dbg(dev, "No regulator exists for node %d\n",
+						node_dev->node_info->id);
+		goto exit_of_get_reg;
+	} else {
+		if (!(IS_ERR_OR_NULL(nclk->reg)))
+			goto exit_of_get_reg;
+
+		nclk->reg = devm_regulator_get(dev, nclk->reg_name);
+		if (IS_ERR_OR_NULL(nclk->reg)) {
+			ret =
+			(IS_ERR(nclk->reg) ? PTR_ERR(nclk->reg) : -ENXIO);
+			dev_err(dev, "Error: Failed to get regulator %s:%d\n",
+							nclk->reg_name, ret);
+		} else {
+			dev_dbg(dev, "Successfully got regulator for %d\n",
+				node_dev->node_info->id);
+		}
+	}
+
+exit_of_get_reg:
+	return ret;
+}
+
+static int bus_enable_reg(struct nodeclk *nclk)
+{
+	int ret = 0;
+
+	if (!nclk) {
+		ret = -ENXIO;
+		goto exit_bus_enable_reg;
+	}
+
+	if ((IS_ERR_OR_NULL(nclk->reg))) {
+		ret = -ENXIO;
+		goto exit_bus_enable_reg;
+	}
+
+	ret = regulator_enable(nclk->reg);
+	if (ret) {
+		MSM_BUS_ERR("Failed to enable regulator for %s\n",
+							nclk->reg_name);
+		goto exit_bus_enable_reg;
+	}
+	pr_debug("%s: Enabled Reg\n", __func__);
+exit_bus_enable_reg:
+	return ret;
+}
+
+static int bus_disable_reg(struct nodeclk *nclk)
+{
+	int ret = 0;
+
+	if (!nclk) {
+		ret = -ENXIO;
+		goto exit_bus_disable_reg;
+	}
+
+	if ((IS_ERR_OR_NULL(nclk->reg))) {
+		ret = -ENXIO;
+		goto exit_bus_disable_reg;
+	}
+
+	regulator_disable(nclk->reg);
+	pr_debug("%s: Disabled Reg\n", __func__);
+exit_bus_disable_reg:
+	return ret;
+}
+
+static int enable_nodeclk(struct nodeclk *nclk, struct device *dev)
+{
+	int ret = 0;
+
+	if (!nclk->enable && !nclk->setrate_only_clk) {
+		if (dev && strlen(nclk->reg_name)) {
+			if (IS_ERR_OR_NULL(nclk->reg)) {
+				ret = bus_get_reg(nclk, dev);
+				if (ret) {
+					dev_dbg(dev,
+						"Failed to get reg. Err %d\n",
+									ret);
+					goto exit_enable_nodeclk;
+				}
+			}
+
+			ret = bus_enable_reg(nclk);
+			if (ret) {
+				dev_dbg(dev, "Failed to enable reg. Err %d\n",
+									ret);
+				goto exit_enable_nodeclk;
+			}
+		}
+		ret = clk_prepare_enable(nclk->clk);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: failed to enable clk ", __func__);
+			nclk->enable = false;
+		} else
+			nclk->enable = true;
+	}
+exit_enable_nodeclk:
+	return ret;
+}
+
+static int disable_nodeclk(struct nodeclk *nclk)
+{
+	int ret = 0;
+
+	if (nclk->enable && !nclk->setrate_only_clk) {
+		clk_disable_unprepare(nclk->clk);
+		nclk->enable = false;
+		bus_disable_reg(nclk);
+	}
+	return ret;
+}
+
+static int setrate_nodeclk(struct nodeclk *nclk, long rate)
+{
+	int ret = 0;
+
+	if (!nclk->enable_only_clk)
+		ret = clk_set_rate(nclk->clk, rate);
+
+	if (ret)
+		MSM_BUS_ERR("%s: failed to setrate clk", __func__);
+	return ret;
+}
+
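+/*
+ * tcs_cmd_gen() encodes a single BCM vote into one TCS command. A command
+ * whose x and y votes are both zero is encoded without the valid bit so
+ * RPMh drops the vote for that resource, votes larger than the 14-bit
+ * field are clamped to BCM_TCS_CMD_VOTE_MASK, and the commit flag both
+ * sets the commit bit and requests completion (wait) on the command.
+ */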
+static int tcs_cmd_gen(struct msm_bus_node_device_type *cur_bcm,
+				struct tcs_cmd *cmd, uint64_t vec_a,
+					uint64_t vec_b, bool commit)
+{
+	int ret = 0;
+	bool valid = true;
+
+	if (!cmd)
+		return ret;
+
+	if (vec_a == 0 && vec_b == 0)
+		valid = false;
+
+	if (vec_a > BCM_TCS_CMD_VOTE_MASK)
+		vec_a = BCM_TCS_CMD_VOTE_MASK;
+
+	if (vec_b > BCM_TCS_CMD_VOTE_MASK)
+		vec_b = BCM_TCS_CMD_VOTE_MASK;
+
+	cmd->addr = cur_bcm->bcmdev->addr;
+	cmd->data = BCM_TCS_CMD(commit, valid, vec_a, vec_b);
+	cmd->wait = commit;
+
+	return ret;
+}
+
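+/*
+ * tcs_cmd_list_gen() walks the per-VCD BCM commit lists and emits one TCS
+ * command per BCM that still needs an update. n_active[] counts the
+ * active-set commands per VCD group, with the last command of each group
+ * carrying the commit/wait flags. Wake and sleep commands are generated
+ * only for BCMs whose dual-context vote differs from the active-context
+ * vote, counted in n_wake[]/n_sleep[].
+ */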
+static int tcs_cmd_list_gen(int *n_active,
+				int *n_wake,
+				int *n_sleep,
+				struct tcs_cmd *cmdlist_active,
+				struct tcs_cmd *cmdlist_wake,
+				struct tcs_cmd *cmdlist_sleep,
+				struct list_head *cur_bcm_clist)
+{
+	struct msm_bus_node_device_type *cur_bcm = NULL;
+	int i = 0;
+	int k = 0;
+	int idx = 0;
+	int last_tcs = -1;
+	bool commit = false;
+
+	if (!cmdlist_active)
+		goto exit_tcs_cmd_list_gen;
+
+	for (i = 0; i < VCD_MAX_CNT; i++) {
+		last_tcs = -1;
+		if (list_empty(&cur_bcm_clist[i]))
+			continue;
+		list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
+			if (cur_bcm->updated ||
+				(cur_bcm->node_vec[DUAL_CTX].vec_a == 0 &&
+				cur_bcm->node_vec[ACTIVE_CTX].vec_a == 0 &&
+				cur_bcm->node_vec[DUAL_CTX].vec_b == 0 &&
+				cur_bcm->node_vec[ACTIVE_CTX].vec_b == 0 &&
+				init_time)) {
+				if (last_tcs != -1 &&
+					list_is_last(&cur_bcm->link,
+						&cur_bcm_clist[i])) {
+					cmdlist_active[last_tcs].data |=
+						BCM_TCS_CMD_COMMIT_MASK;
+					cmdlist_active[last_tcs].wait
+								= true;
+				}
+				continue;
+			}
+			n_active[idx]++;
+			commit = false;
+			if (list_is_last(&cur_bcm->link,
+						&cur_bcm_clist[i])) {
+				commit = true;
+				idx++;
+			}
+			tcs_cmd_gen(cur_bcm, &cmdlist_active[k],
+				cur_bcm->node_vec[ACTIVE_CTX].vec_a,
+				cur_bcm->node_vec[ACTIVE_CTX].vec_b, commit);
+			last_tcs = k;
+			k++;
+			cur_bcm->updated = true;
+		}
+	}
+
+	if (!cmdlist_wake || !cmdlist_sleep)
+		goto exit_tcs_cmd_list_gen;
+
+	k = 0;
+	idx = 0;
+	for (i = 0; i < VCD_MAX_CNT; i++) {
+		last_tcs = -1;
+		if (list_empty(&cur_bcm_clist[i]))
+			continue;
+		list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
+			commit = false;
+			if ((cur_bcm->node_vec[DUAL_CTX].vec_a ==
+				cur_bcm->node_vec[ACTIVE_CTX].vec_a) &&
+				(cur_bcm->node_vec[DUAL_CTX].vec_b ==
+				cur_bcm->node_vec[ACTIVE_CTX].vec_b)) {
+				if (last_tcs != -1 &&
+					list_is_last(&cur_bcm->link,
+					&cur_bcm_clist[i])) {
+					cmdlist_wake[last_tcs].data |=
+						BCM_TCS_CMD_COMMIT_MASK;
+					cmdlist_sleep[last_tcs].data |=
+						BCM_TCS_CMD_COMMIT_MASK;
+					cmdlist_wake[last_tcs].wait = true;
+					cmdlist_sleep[last_tcs].wait = true;
+					idx++;
+				}
+				continue;
+			}
+			last_tcs = k;
+			n_sleep[idx]++;
+			n_wake[idx]++;
+			if (list_is_last(&cur_bcm->link,
+						&cur_bcm_clist[i])) {
+				commit = true;
+				idx++;
+			}
+
+			tcs_cmd_gen(cur_bcm, &cmdlist_wake[k],
+				cur_bcm->node_vec[ACTIVE_CTX].vec_a,
+				cur_bcm->node_vec[ACTIVE_CTX].vec_b, commit);
+
+			tcs_cmd_gen(cur_bcm, &cmdlist_sleep[k],
+				cur_bcm->node_vec[DUAL_CTX].vec_a,
+				cur_bcm->node_vec[DUAL_CTX].vec_b, commit);
+			k++;
+		}
+	}
+
+exit_tcs_cmd_list_gen:
+	return k;
+}
+
+static int tcs_cmd_query_list_gen(struct tcs_cmd *cmdlist_active)
+{
+	struct msm_bus_node_device_type *cur_bcm = NULL;
+	struct list_head *bcm_list_inorder = NULL;
+	int i = 0;
+	int k = 0;
+	bool commit = false;
+	int ret = 0;
+
+	if (!cmdlist_active)
+		goto exit_tcs_cmd_list_gen;
+
+	bcm_list_inorder = bcm_query_list_inorder;
+
+	for (i = 0; i < VCD_MAX_CNT; i++) {
+		if (list_empty(&bcm_list_inorder[i]))
+			continue;
+		list_for_each_entry(cur_bcm, &bcm_list_inorder[i], query_link) {
+			commit = false;
+			if (list_is_last(&cur_bcm->query_link,
+						&bcm_list_inorder[i])) {
+				commit = true;
+			}
+			tcs_cmd_gen(cur_bcm, &cmdlist_active[k],
+				cur_bcm->node_vec[ACTIVE_CTX].query_vec_a,
+				cur_bcm->node_vec[ACTIVE_CTX].query_vec_b,
+								commit);
+			k++;
+		}
+	}
+
+exit_tcs_cmd_list_gen:
+	return ret;
+}
+
+static int bcm_clist_add(struct msm_bus_node_device_type *cur_dev)
+{
+	int ret = 0;
+	int cur_vcd = 0;
+	int i = 0;
+	struct msm_bus_node_device_type *cur_bcm = NULL;
+
+	if (!cur_dev->node_info->num_bcm_devs)
+		goto exit_bcm_clist_add;
+
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+		cur_vcd = cur_bcm->bcmdev->clk_domain;
+
+		if (!cur_bcm->node_info->num_rsc_devs)
+			goto exit_bcm_clist_add;
+
+		if (!cur_rsc)
+			cur_rsc = to_msm_bus_node(
+					cur_bcm->node_info->rsc_devs[0]);
+
+		if (!cur_bcm->dirty) {
+			list_add_tail(&cur_bcm->link,
+					&cur_rsc->rscdev->bcm_clist[cur_vcd]);
+			cur_bcm->dirty = true;
+		}
+		cur_bcm->updated = false;
+	}
+
+exit_bcm_clist_add:
+	return ret;
+}
+
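+/*
+ * tcs_cmd_n_shrink() coalesces the zero-terminated per-VCD command counts
+ * in n[] into batch sizes that each fit in one TCS payload. For example,
+ * assuming a payload limit of 16 commands, counts {10, 9, 3, 0} shrink to
+ * {10, 12, 0}: adding 9 to 10 would overflow the payload, so 10 is flushed
+ * as its own batch and 9 + 3 form the next one.
+ */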
+static void tcs_cmd_n_shrink(int *n)
+{
+	int i = 0, j = 0, sum = 0;
+
+	do {
+		if (sum + n[i] > MAX_RPMH_PAYLOAD) {
+			n[j] = sum;
+			sum = 0;
+			j++;
+		}
+		sum += n[i];
+	} while (n[i++]);
+
+	n[j] = sum;
+	n[j+1] = 0;
+}
+
+static int bcm_query_list_add(struct msm_bus_node_device_type *cur_dev)
+{
+	int ret = 0;
+	int cur_vcd = 0;
+	int i = 0;
+	struct msm_bus_node_device_type *cur_bcm = NULL;
+
+	if (!cur_dev->node_info->num_bcm_devs)
+		goto exit_bcm_query_list_add;
+
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+		cur_vcd = cur_bcm->bcmdev->clk_domain;
+
+		if (!cur_bcm->query_dirty) {
+			list_add_tail(&cur_bcm->query_link,
+					&bcm_query_list_inorder[cur_vcd]);
+			cur_bcm->query_dirty = true;
+		}
+	}
+
+exit_bcm_query_list_add:
+	return ret;
+}
+
+static int bcm_clist_clean(struct msm_bus_node_device_type *cur_dev)
+{
+	int ret = 0;
+	int i = 0;
+	struct msm_bus_node_device_type *cur_bcm = NULL;
+
+	if (!cur_dev->node_info->num_bcm_devs)
+		goto exit_bcm_clist_clean;
+
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+
+		if (cur_bcm->node_vec[DUAL_CTX].vec_a == 0 &&
+			cur_bcm->node_vec[ACTIVE_CTX].vec_a == 0 &&
+			cur_bcm->node_vec[DUAL_CTX].vec_b == 0 &&
+			cur_bcm->node_vec[ACTIVE_CTX].vec_b == 0 &&
+			!init_time) {
+			cur_bcm->dirty = false;
+			list_del_init(&cur_bcm->link);
+		}
+	}
+
+exit_bcm_clist_clean:
+	return ret;
+}
+
+static int bcm_query_list_clean(struct msm_bus_node_device_type *cur_dev)
+{
+	int ret = 0;
+	int i = 0;
+	struct msm_bus_node_device_type *cur_bcm = NULL;
+
+	if (!cur_dev->node_info->num_bcm_devs)
+		goto exit_bcm_clist_add;
+
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+
+		cur_bcm->query_dirty = false;
+		list_del_init(&cur_bcm->query_link);
+	}
+
+exit_bcm_clist_add:
+	return ret;
+}
+
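+/*
+ * msm_bus_commit_data() folds each dirty node's BCM votes into the owning
+ * RSC's per-VCD commit lists, generates the active/wake/sleep TCS command
+ * sets, shrinks the per-VCD counts to fit the TCS payload size, then
+ * invalidates the previously queued batches and sends the new ones through
+ * rpmh_write_batch(). Wake and sleep sets are sent only when at least one
+ * BCM's dual-context vote differs from its active-context vote.
+ */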
+int msm_bus_commit_data(struct list_head *clist)
+{
+	int ret = 0;
+	int bcm_cnt;
+	struct msm_bus_node_device_type *node = NULL;
+	struct msm_bus_node_device_type *node_tmp = NULL;
+	struct msm_bus_node_device_type *cur_bcm = NULL;
+	struct tcs_cmd *cmdlist_active = NULL;
+	struct tcs_cmd *cmdlist_wake = NULL;
+	struct tcs_cmd *cmdlist_sleep = NULL;
+	struct device *cur_mbox = NULL;
+	struct list_head *cur_bcm_clist = NULL;
+	int n_active[VCD_MAX_CNT];
+	int n_wake[VCD_MAX_CNT];
+	int n_sleep[VCD_MAX_CNT];
+	int cnt_vcd = 0;
+	int cnt_active = 0;
+	int cnt_wake = 0;
+	int cnt_sleep = 0;
+	int i = 0;
+
+	if (!clist)
+		return ret;
+
+	list_for_each_entry_safe(node, node_tmp, clist, link) {
+		bcm_clist_add(node);
+		msm_bus_dev_sbm_config(&node->dev, false);
+	}
+
+	if (!cur_rsc) {
+		MSM_BUS_ERR("%s: cur_rsc is NULL\n", __func__);
+		return ret;
+	}
+
+	cur_mbox = cur_rsc->rscdev->mbox;
+	cur_bcm_clist = cur_rsc->rscdev->bcm_clist;
+	cmdlist_active = cur_rsc->rscdev->cmdlist_active;
+	cmdlist_wake = cur_rsc->rscdev->cmdlist_wake;
+	cmdlist_sleep = cur_rsc->rscdev->cmdlist_sleep;
+
+	for (i = 0; i < VCD_MAX_CNT; i++) {
+		n_active[i] = 0;
+		n_wake[i] = 0;
+		n_sleep[i] = 0;
+
+		if (list_empty(&cur_bcm_clist[i]))
+			continue;
+		list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
+			if ((cur_bcm->node_vec[DUAL_CTX].vec_a !=
+				cur_bcm->node_vec[ACTIVE_CTX].vec_a) ||
+				(cur_bcm->node_vec[DUAL_CTX].vec_b !=
+				cur_bcm->node_vec[ACTIVE_CTX].vec_b)) {
+				cnt_sleep++;
+				cnt_wake++;
+			}
+			if (cur_bcm->updated ||
+				(cur_bcm->node_vec[DUAL_CTX].vec_a == 0 &&
+				cur_bcm->node_vec[ACTIVE_CTX].vec_a == 0 &&
+				cur_bcm->node_vec[DUAL_CTX].vec_b == 0 &&
+				cur_bcm->node_vec[ACTIVE_CTX].vec_b == 0 &&
+				init_time))
+				continue;
+			cnt_active++;
+		}
+		cnt_vcd++;
+	}
+
+	if (!cnt_active)
+		goto exit_msm_bus_commit_data;
+
+	bcm_cnt = tcs_cmd_list_gen(n_active, n_wake, n_sleep, cmdlist_active,
+				cmdlist_wake, cmdlist_sleep, cur_bcm_clist);
+
+	tcs_cmd_n_shrink(n_active);
+	tcs_cmd_n_shrink(n_wake);
+	tcs_cmd_n_shrink(n_sleep);
+
+	ret = rpmh_invalidate(cur_mbox);
+	if (ret)
+		MSM_BUS_ERR("%s: Error invalidating mbox: %d\n",
+						__func__, ret);
+
+	if (cur_rsc->node_info->id == MSM_BUS_RSC_DISP) {
+		ret = rpmh_write_batch(cur_mbox, cur_rsc->rscdev->req_state,
+						cmdlist_active, n_active);
+		/*
+		 * Ignore -EBUSY from rpmh_write_batch for AMC requests to
+		 * the display RSC; such requests are invalid while the
+		 * display RSC is in solver mode, and the bus driver does
+		 * not know the display RSC's current state.
+		 */
+		if (ret && ret != -EBUSY)
+			MSM_BUS_ERR("%s: error sending active/awake sets: %d\n",
+						__func__, ret);
+	} else {
+		ret = rpmh_write_batch(cur_mbox, cur_rsc->rscdev->req_state,
+						cmdlist_active, n_active);
+		if (ret)
+			MSM_BUS_ERR("%s: error sending active/awake sets: %d\n",
+						__func__, ret);
+	}
+	if (cnt_wake) {
+		ret = rpmh_write_batch(cur_mbox, RPMH_WAKE_ONLY_STATE,
+							cmdlist_wake, n_wake);
+		if (ret)
+			MSM_BUS_ERR("%s: error sending wake sets: %d\n",
+							__func__, ret);
+	}
+	if (cnt_sleep) {
+		ret = rpmh_write_batch(cur_mbox, RPMH_SLEEP_STATE,
+							cmdlist_sleep, n_sleep);
+		if (ret)
+			MSM_BUS_ERR("%s: error sending sleep sets: %d\n",
+							__func__, ret);
+	}
+
+	list_for_each_entry_safe(node, node_tmp, clist, link) {
+		if (unlikely(node->node_info->defer_qos))
+			msm_bus_dev_init_qos(&node->dev, NULL);
+		msm_bus_dev_sbm_config(&node->dev, true);
+	}
+
+exit_msm_bus_commit_data:
+	list_for_each_entry_safe(node, node_tmp, clist, link) {
+		bcm_clist_clean(node);
+		node->dirty = false;
+		list_del_init(&node->link);
+	}
+	cur_rsc = NULL;
+	return ret;
+}
+
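+/*
+ * msm_bus_query_gen() builds a query-only TCS command list from the nodes'
+ * query votes without committing anything to RPMh; the command count is
+ * returned in tcs_usecase->num_cmds and the encoded commands in
+ * tcs_usecase->cmds.
+ */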
+int msm_bus_query_gen(struct list_head *query_list,
+				struct msm_bus_tcs_usecase *tcs_usecase)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *node = NULL;
+	struct msm_bus_node_device_type *node_tmp = NULL;
+	struct msm_bus_node_device_type *cur_bcm = NULL;
+	int *n_active = NULL;
+	int cnt_vcd = 0;
+	int cnt_active = 0;
+	int i = 0;
+
+	list_for_each_entry_safe(node, node_tmp, query_list, query_link)
+		bcm_query_list_add(node);
+
+	for (i = 0; i < VCD_MAX_CNT; i++) {
+		if (list_empty(&bcm_query_list_inorder[i]))
+			continue;
+		list_for_each_entry(cur_bcm, &bcm_query_list_inorder[i],
+							query_link) {
+			cnt_active++;
+		}
+		cnt_vcd++;
+	}
+
+	tcs_usecase->num_cmds = cnt_active;
+	ret = tcs_cmd_query_list_gen(tcs_usecase->cmds);
+
+	list_for_each_entry_safe(node, node_tmp, query_list, query_link) {
+		bcm_query_list_clean(node);
+		node->query_dirty = false;
+		list_del_init(&node->query_link);
+	}
+
+	kfree(n_active);
+	return ret;
+}
+
+static void bcm_commit_single_req(struct msm_bus_node_device_type *cur_bcm,
+					uint64_t vec_a, uint64_t vec_b)
+{
+	struct msm_bus_node_device_type *cur_rsc = NULL;
+	struct device *cur_mbox = NULL;
+	struct tcs_cmd cmd_active;
+
+	if (!cur_bcm->node_info->num_rsc_devs)
+		return;
+
+	cur_rsc = to_msm_bus_node(cur_bcm->node_info->rsc_devs[0]);
+	cur_mbox = cur_rsc->rscdev->mbox;
+
+	tcs_cmd_gen(cur_bcm, &cmd_active, vec_a, vec_b, true);
+	rpmh_write(cur_mbox, RPMH_ACTIVE_ONLY_STATE, &cmd_active, 1);
+}
+
+void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
+					size_t new_size, gfp_t flags)
+{
+	void *ret;
+	size_t copy_size = old_size;
+
+	if (!new_size) {
+		devm_kfree(dev, p);
+		return ZERO_SIZE_PTR;
+	}
+
+	if (new_size < old_size)
+		copy_size = new_size;
+
+	ret = devm_kzalloc(dev, new_size, flags);
+	if (!ret) {
+		MSM_BUS_ERR("%s: Error reallocating memory", __func__);
+		goto exit_realloc_devmem;
+	}
+
+	memcpy(ret, p, copy_size);
+	devm_kfree(dev, p);
+exit_realloc_devmem:
+	return ret;
+}
+
+static void msm_bus_fab_init_noc_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	switch (bus_dev->fabdev->bus_type) {
+	case MSM_BUS_NOC:
+		msm_bus_noc_set_ops(bus_dev);
+		break;
+	case MSM_BUS_BIMC:
+		msm_bus_bimc_set_ops(bus_dev);
+		break;
+	default:
+		MSM_BUS_ERR("%s: Invalid Bus type", __func__);
+	}
+}
+
+static int msm_bus_disable_node_qos_clk(struct msm_bus_node_device_type *node)
+{
+	int i;
+	int ret = 0;
+
+	if (!node) {
+		ret = -ENXIO;
+		goto exit_disable_node_qos_clk;
+	}
+
+	for (i = 0; i < node->num_node_qos_clks; i++)
+		ret = disable_nodeclk(&node->node_qos_clks[i]);
+
+exit_disable_node_qos_clk:
+	return ret;
+}
+
+static int msm_bus_enable_node_qos_clk(struct msm_bus_node_device_type *node)
+{
+	int i;
+	int ret = 0;
+	long rounded_rate;
+
+	for (i = 0; i < node->num_node_qos_clks; i++) {
+		if (!node->node_qos_clks[i].enable_only_clk) {
+			rounded_rate =
+				clk_round_rate(
+					node->node_qos_clks[i].clk, 1);
+			ret = setrate_nodeclk(&node->node_qos_clks[i],
+								rounded_rate);
+			if (ret)
+				MSM_BUS_DBG("%s: Failed to set rate clk, node %d\n",
+					__func__, node->node_info->id);
+		}
+		ret = enable_nodeclk(&node->node_qos_clks[i],
+					node->node_info->bus_device);
+		if (ret) {
+			MSM_BUS_DBG("%s: Failed to set Qos Clks ret %d\n",
+				__func__, ret);
+			msm_bus_disable_node_qos_clk(node);
+			goto exit_enable_node_qos_clk;
+		}
+	}
+exit_enable_node_qos_clk:
+	return ret;
+}
+
+static int msm_bus_vote_qos_bcms(struct msm_bus_node_device_type *node)
+{
+	struct msm_bus_node_device_type *cur_dev = NULL;
+	struct msm_bus_node_device_type *cur_bcm = NULL;
+	int i;
+	struct device *dev = NULL;
+
+	if (!node || (!to_msm_bus_node(node->node_info->bus_device)))
+		return -ENXIO;
+
+	cur_dev = node;
+
+	for (i = 0; i < cur_dev->num_qos_bcms; i++) {
+		dev = bus_find_device(&msm_bus_type, NULL,
+				(void *) &cur_dev->qos_bcms[i].qos_bcm_id,
+					msm_bus_device_match_adhoc);
+
+		if (!dev) {
+			MSM_BUS_ERR("Can't find dev node for %d",
+					cur_dev->qos_bcms[i].qos_bcm_id);
+			return -ENODEV;
+		}
+
+		cur_bcm = to_msm_bus_node(dev);
+		if (cur_bcm->node_vec[ACTIVE_CTX].vec_a != 0 ||
+			cur_bcm->node_vec[ACTIVE_CTX].vec_b != 0 ||
+			cur_bcm->node_vec[DUAL_CTX].vec_a != 0 ||
+			cur_bcm->node_vec[DUAL_CTX].vec_b != 0)
+			return 0;
+
+		bcm_commit_single_req(cur_bcm,
+					cur_dev->qos_bcms[i].vec.vec_a,
+					cur_dev->qos_bcms[i].vec.vec_b);
+	}
+
+	return 0;
+}
+
+static int msm_bus_rm_vote_qos_bcms(struct msm_bus_node_device_type *node)
+{
+	struct msm_bus_node_device_type *cur_dev = NULL;
+	struct msm_bus_node_device_type *cur_bcm = NULL;
+	int i;
+	struct device *dev = NULL;
+
+	if (!node || (!to_msm_bus_node(node->node_info->bus_device)))
+		return -ENXIO;
+
+	cur_dev = node;
+
+	for (i = 0; i < cur_dev->num_qos_bcms; i++) {
+		dev = bus_find_device(&msm_bus_type, NULL,
+				(void *) &cur_dev->qos_bcms[i].qos_bcm_id,
+					msm_bus_device_match_adhoc);
+
+		if (!dev) {
+			MSM_BUS_ERR("Can't find dev node for %d",
+					cur_dev->qos_bcms[i].qos_bcm_id);
+			return -ENODEV;
+		}
+
+		cur_bcm = to_msm_bus_node(dev);
+		if (cur_bcm->node_vec[ACTIVE_CTX].vec_a != 0 ||
+			cur_bcm->node_vec[ACTIVE_CTX].vec_b != 0 ||
+			cur_bcm->node_vec[DUAL_CTX].vec_a != 0 ||
+			cur_bcm->node_vec[DUAL_CTX].vec_b != 0)
+			return 0;
+
+		bcm_commit_single_req(cur_bcm, 0, 0);
+	}
+
+	return 0;
+}
+
+int msm_bus_enable_limiter(struct msm_bus_node_device_type *node_dev,
+				int enable, uint64_t lim_bw)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *bus_node_dev;
+
+	if (!node_dev) {
+		MSM_BUS_ERR("No device specified");
+		ret = -ENXIO;
+		goto exit_enable_limiter;
+	}
+
+	if (!node_dev->ap_owned) {
+		MSM_BUS_ERR("Device is not AP owned %d",
+						node_dev->node_info->id);
+		ret = -ENXIO;
+		goto exit_enable_limiter;
+	}
+
+	bus_node_dev = to_msm_bus_node(node_dev->node_info->bus_device);
+	if (!bus_node_dev) {
+		MSM_BUS_ERR("Unable to get bus device info for %d",
+			node_dev->node_info->id);
+		ret = -ENXIO;
+		goto exit_enable_limiter;
+	}
+	if (bus_node_dev->fabdev &&
+		bus_node_dev->fabdev->noc_ops.limit_mport) {
+		if (ret < 0) {
+			MSM_BUS_ERR("Can't Enable QoS clk %d",
+				node_dev->node_info->id);
+			goto exit_enable_limiter;
+		}
+		bus_node_dev->fabdev->noc_ops.limit_mport(
+				node_dev,
+				bus_node_dev->fabdev->qos_base,
+				bus_node_dev->fabdev->base_offset,
+				bus_node_dev->fabdev->qos_off,
+				bus_node_dev->fabdev->qos_freq,
+				enable, lim_bw);
+	}
+
+exit_enable_limiter:
+	return ret;
+}
+
+static int msm_bus_dev_init_qos(struct device *dev, void *data)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *node_dev = NULL;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!node_dev) {
+		MSM_BUS_ERR("%s: Unable to get node device info", __func__);
+		ret = -ENXIO;
+		goto exit_init_qos;
+	}
+
+	MSM_BUS_DBG("Device = %d", node_dev->node_info->id);
+
+	if (node_dev->node_info->qos_params.defer_init_qos) {
+		node_dev->node_info->qos_params.defer_init_qos = false;
+		node_dev->node_info->defer_qos = true;
+		goto exit_init_qos;
+	}
+
+	if (node_dev->ap_owned) {
+		struct msm_bus_node_device_type *bus_node_info;
+
+		bus_node_info =
+			to_msm_bus_node(node_dev->node_info->bus_device);
+
+		if (!bus_node_info) {
+			MSM_BUS_ERR("%s: Unable to get bus device info for %d",
+				__func__,
+				node_dev->node_info->id);
+			ret = -ENXIO;
+			goto exit_init_qos;
+		}
+
+		if (bus_node_info->fabdev &&
+			bus_node_info->fabdev->noc_ops.qos_init) {
+			int ret = 0;
+
+			if (node_dev->ap_owned) {
+				if (bus_node_info->fabdev->bypass_qos_prg)
+					goto exit_init_qos;
+
+				ret = msm_bus_vote_qos_bcms(node_dev);
+				ret = msm_bus_enable_node_qos_clk(node_dev);
+				if (ret < 0) {
+					MSM_BUS_DBG("Can't Enable QoS clk %d\n",
+					node_dev->node_info->id);
+					node_dev->node_info->defer_qos = true;
+					goto exit_init_qos;
+				}
+
+				bus_node_info->fabdev->noc_ops.qos_init(
+					node_dev,
+					bus_node_info->fabdev->qos_base,
+					bus_node_info->fabdev->base_offset,
+					bus_node_info->fabdev->qos_off,
+					bus_node_info->fabdev->qos_freq);
+				ret = msm_bus_disable_node_qos_clk(node_dev);
+				ret = msm_bus_rm_vote_qos_bcms(node_dev);
+				node_dev->node_info->defer_qos = false;
+			}
+		} else
+			MSM_BUS_ERR("%s: Skipping QOS init for %d",
+				__func__, node_dev->node_info->id);
+	}
+exit_init_qos:
+	return ret;
+}
+
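+/*
+ * msm_bus_dev_sbm_config() gates a node's disable ports through the
+ * fabric's sbm_config op: the ports are reconnected (enable == true) only
+ * when the node has dual-context bandwidth votes and is currently
+ * disconnected, and disconnected only once all dual-context votes are
+ * gone. The node's regulators are enabled before reconnecting and disabled
+ * after the SBM has been reconfigured for disconnect.
+ */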
+static int msm_bus_dev_sbm_config(struct device *dev, bool enable)
+{
+	int ret = 0, idx = 0;
+	struct msm_bus_node_device_type *node_dev = NULL;
+	struct msm_bus_node_device_type *fab_dev = NULL;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!node_dev) {
+		MSM_BUS_ERR("%s: Unable to get node device info", __func__);
+		return -ENXIO;
+	}
+
+	if (!node_dev->node_info->num_disable_ports)
+		return 0;
+
+	if ((node_dev->node_bw[DUAL_CTX].sum_ab ||
+		node_dev->node_bw[DUAL_CTX].max_ib ||
+		!node_dev->is_connected) && !enable)
+		return 0;
+	else if (((!node_dev->node_bw[DUAL_CTX].sum_ab &&
+		!node_dev->node_bw[DUAL_CTX].max_ib) ||
+		node_dev->is_connected) && enable)
+		return 0;
+
+	if (enable) {
+		for (idx = 0; idx < node_dev->num_regs; idx++) {
+			if (!node_dev->node_regs[idx].reg)
+				node_dev->node_regs[idx].reg =
+				devm_regulator_get(dev,
+				node_dev->node_regs[idx].name);
+
+			if ((IS_ERR_OR_NULL(node_dev->node_regs[idx].reg)))
+				return -ENXIO;
+			ret = regulator_enable(node_dev->node_regs[idx].reg);
+			if (ret) {
+				MSM_BUS_ERR("%s: Failed to enable reg:%s\n",
+				__func__, node_dev->node_regs[idx].name);
+				return ret;
+			}
+		}
+		node_dev->is_connected = true;
+	}
+
+	fab_dev = to_msm_bus_node(node_dev->node_info->bus_device);
+	if (!fab_dev) {
+		MSM_BUS_ERR("%s: Unable to get bus device info for %d",
+			__func__,
+			node_dev->node_info->id);
+		return -ENXIO;
+	}
+
+	if (fab_dev->fabdev &&
+			fab_dev->fabdev->noc_ops.sbm_config) {
+		ret = fab_dev->fabdev->noc_ops.sbm_config(
+			node_dev,
+			fab_dev->fabdev->qos_base,
+			fab_dev->fabdev->sbm_offset,
+			enable);
+	}
+
+	if (!enable) {
+		for (idx = 0; idx < node_dev->num_regs; idx++) {
+			if (!node_dev->node_regs[idx].reg)
+				node_dev->node_regs[idx].reg =
+				devm_regulator_get(dev,
+					node_dev->node_regs[idx].name);
+
+			if ((IS_ERR_OR_NULL(node_dev->node_regs[idx].reg)))
+				return -ENXIO;
+			ret = regulator_disable(node_dev->node_regs[idx].reg);
+			if (ret) {
+				MSM_BUS_ERR("%s: Failed to disable reg:%s\n",
+				__func__, node_dev->node_regs[idx].name);
+		off += scnprintf((buf + off), PAGE_SIZE - off,
+			}
+		}
+		node_dev->is_connected = false;
+	}
+	return ret;
+}
+
+	off += scnprintf((buf + off), PAGE_SIZE - off,
+			struct msm_bus_node_device_type *pdata)
+{
+	struct msm_bus_fab_device_type *fabdev;
+	struct msm_bus_node_device_type *node_dev = NULL;
+	int ret = 0;
+	off += scnprintf((buf + off), PAGE_SIZE - off,
+	node_dev = to_msm_bus_node(dev);
+	if (!node_dev) {
+		MSM_BUS_ERR("%s: Unable to get bus device info", __func__);
+		ret = -ENXIO;
+		goto exit_fabric_init;
+	}
+
+	if (node_dev->node_info->virt_dev) {
+		MSM_BUS_ERR("%s: Skip Fab init for virtual device %d", __func__,
+						node_dev->node_info->id);
+		goto exit_fabric_init;
+	}
+
+	fabdev = devm_kzalloc(dev, sizeof(struct msm_bus_fab_device_type),
+								GFP_KERNEL);
+	if (!fabdev) {
+		MSM_BUS_ERR("Fabric alloc failed\n");
+		ret = -ENOMEM;
+		goto exit_fabric_init;
+	}
+
+	node_dev->fabdev = fabdev;
+	fabdev->pqos_base = pdata->fabdev->pqos_base;
+	fabdev->qos_range = pdata->fabdev->qos_range;
+	fabdev->base_offset = pdata->fabdev->base_offset;
+	fabdev->qos_off = pdata->fabdev->qos_off;
+	fabdev->qos_freq = pdata->fabdev->qos_freq;
+	fabdev->bus_type = pdata->fabdev->bus_type;
+	fabdev->bypass_qos_prg = pdata->fabdev->bypass_qos_prg;
+	fabdev->sbm_offset = pdata->fabdev->sbm_offset;
+	msm_bus_fab_init_noc_ops(node_dev);
+
+	fabdev->qos_base = devm_ioremap(dev,
+				fabdev->pqos_base, fabdev->qos_range);
+	if (!fabdev->qos_base) {
+		MSM_BUS_ERR("%s: Error remapping address 0x%zx: bus device %d",
+			__func__,
+			 (size_t)fabdev->pqos_base, node_dev->node_info->id);
+		ret = -ENOMEM;
+		goto exit_fabric_init;
+	}
+
+exit_fabric_init:
+	return ret;
+}
+
+static int msm_bus_bcm_init(struct device *dev,
+			struct msm_bus_node_device_type *pdata)
+{
+	struct msm_bus_bcm_device_type *bcmdev;
+	struct msm_bus_node_device_type *node_dev = NULL;
+	struct bcm_db aux_data = {0};
+	int ret = 0;
+	int i = 0;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!node_dev) {
+		ret = -ENXIO;
+		goto exit_bcm_init;
+	}
+
+	bcmdev = devm_kzalloc(dev, sizeof(struct msm_bus_bcm_device_type),
+								GFP_KERNEL);
+	if (!bcmdev) {
+		ret = -ENOMEM;
+		goto exit_bcm_init;
+	}
+
+	node_dev->bcmdev = bcmdev;
+	bcmdev->name = pdata->bcmdev->name;
+	if (!cmd_db_read_aux_data_len(bcmdev->name)) {
+		MSM_BUS_ERR("%s: Error getting bcm info, bcm:%s",
+			__func__, bcmdev->name);
+		ret = -ENXIO;
+		goto exit_bcm_init;
+	}
+
+	cmd_db_read_aux_data(bcmdev->name, (u8 *)&aux_data,
+						sizeof(struct bcm_db));
+	bcmdev->addr = cmd_db_read_addr(bcmdev->name);
+	bcmdev->width = (uint32_t)aux_data.width;
+	bcmdev->clk_domain = aux_data.clk_domain;
+	bcmdev->unit_size = aux_data.unit_size;
+	bcmdev->type = 0;
+	bcmdev->num_bus_devs = 0;
+
+	/* TODO: count the number of VCDs; initialize the list heads */
+	for (i = 0; i < VCD_MAX_CNT; i++)
+		INIT_LIST_HEAD(&bcm_query_list_inorder[i]);
+
+exit_bcm_init:
+	return ret;
+}
+
+static int msm_bus_rsc_init(struct platform_device *pdev,
+			struct device *dev,
+			struct msm_bus_node_device_type *pdata)
+{
+	struct msm_bus_rsc_device_type *rscdev;
+	struct msm_bus_node_device_type *node_dev = NULL;
+	int ret = 0;
+	int i = 0;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!node_dev) {
+		ret = -ENXIO;
+		goto exit_rsc_init;
+	}
+
+	rscdev = devm_kzalloc(dev, sizeof(struct msm_bus_rsc_device_type),
+								GFP_KERNEL);
+	if (!rscdev) {
+		ret = -ENOMEM;
+		goto exit_rsc_init;
+	}
+
+	node_dev->rscdev = rscdev;
+	rscdev->req_state = pdata->rscdev->req_state;
+
+	for (i = 0; i < MSM_BUS_RSC_COUNT; i++) {
+		if (rsc_clients[i].rsc_id == node_dev->node_info->id) {
+			rscdev->mbox = rsc_clients[i].client;
+
+			if (IS_ERR_OR_NULL(rscdev->mbox)) {
+				MSM_BUS_ERR("%s: Failed to get mbox:%s",
+					__func__, node_dev->node_info->name);
+			}
+			break;
+		}
+	}
+
+	/* TODO: count the number of VCDs; initialize the list heads */
+	for (i = 0; i < VCD_MAX_CNT; i++)
+		INIT_LIST_HEAD(&rscdev->bcm_clist[i]);
+
+exit_rsc_init:
+	return ret;
+}
+
+static int msm_bus_postcon_setup(struct device *bus_dev, void *data)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	struct msm_bus_rsc_device_type *rscdev;
+
+	bus_node = to_msm_bus_node(bus_dev);
+	if (!bus_node) {
+		MSM_BUS_ERR("%s: Can't get device info", __func__);
+		return -ENODEV;
+	}
+
+	if (bus_node->node_info->is_rsc_dev) {
+		rscdev = bus_node->rscdev;
+		rscdev->cmdlist_active = devm_kcalloc(bus_dev,
+					rscdev->num_bcm_devs,
+					sizeof(struct tcs_cmd), GFP_KERNEL);
+		if (!rscdev->cmdlist_active)
+			return -ENOMEM;
+
+		rscdev->cmdlist_wake = devm_kcalloc(bus_dev,
+					rscdev->num_bcm_devs,
+					sizeof(struct tcs_cmd), GFP_KERNEL);
+		if (!rscdev->cmdlist_wake)
+			return -ENOMEM;
+
+		rscdev->cmdlist_sleep = devm_kcalloc(bus_dev,
+					rscdev->num_bcm_devs,
+					sizeof(struct tcs_cmd),	GFP_KERNEL);
+		if (!rscdev->cmdlist_sleep)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int msm_bus_init_clk(struct device *bus_dev,
+				struct msm_bus_node_device_type *pdata)
+{
+	unsigned int ctx;
+	struct msm_bus_node_device_type *node_dev = to_msm_bus_node(bus_dev);
+	int i;
+
+	for (ctx = 0; ctx < NUM_CTX; ctx++) {
+		if (!IS_ERR_OR_NULL(pdata->clk[ctx].clk)) {
+			node_dev->clk[ctx].clk = pdata->clk[ctx].clk;
+			node_dev->clk[ctx].enable_only_clk =
+					pdata->clk[ctx].enable_only_clk;
+			node_dev->clk[ctx].setrate_only_clk =
+					pdata->clk[ctx].setrate_only_clk;
+			node_dev->clk[ctx].enable = false;
+			node_dev->clk[ctx].dirty = false;
+			strlcpy(node_dev->clk[ctx].reg_name,
+				pdata->clk[ctx].reg_name, MAX_REG_NAME);
+			node_dev->clk[ctx].reg = NULL;
+			bus_get_reg(&node_dev->clk[ctx], bus_dev);
+			MSM_BUS_DBG("%s: Valid node clk node %d ctx %d\n",
+				__func__, node_dev->node_info->id, ctx);
+		}
+	}
+
+	if (!IS_ERR_OR_NULL(pdata->bus_qos_clk.clk)) {
+		node_dev->bus_qos_clk.clk = pdata->bus_qos_clk.clk;
+		node_dev->bus_qos_clk.enable_only_clk =
+					pdata->bus_qos_clk.enable_only_clk;
+		node_dev->bus_qos_clk.setrate_only_clk =
+					pdata->bus_qos_clk.setrate_only_clk;
+		node_dev->bus_qos_clk.enable = false;
+		strlcpy(node_dev->bus_qos_clk.reg_name,
+			pdata->bus_qos_clk.reg_name, MAX_REG_NAME);
+		node_dev->bus_qos_clk.reg = NULL;
+		MSM_BUS_DBG("%s: Valid bus qos clk node %d\n", __func__,
+						node_dev->node_info->id);
+	}
+
+	if (pdata->num_node_qos_clks) {
+		node_dev->num_node_qos_clks = pdata->num_node_qos_clks;
+		node_dev->node_qos_clks = devm_kzalloc(bus_dev,
+			(node_dev->num_node_qos_clks * sizeof(struct nodeclk)),
+			GFP_KERNEL);
+		if (!node_dev->node_qos_clks)
+			return -ENOMEM;
+
+		for (i = 0; i < pdata->num_node_qos_clks; i++) {
+			node_dev->node_qos_clks[i].clk =
+					pdata->node_qos_clks[i].clk;
+			node_dev->node_qos_clks[i].enable_only_clk =
+					pdata->node_qos_clks[i].enable_only_clk;
+			node_dev->node_qos_clks[i].setrate_only_clk =
+				pdata->node_qos_clks[i].setrate_only_clk;
+			node_dev->node_qos_clks[i].enable = false;
+			strlcpy(node_dev->node_qos_clks[i].reg_name,
+				pdata->node_qos_clks[i].reg_name, MAX_REG_NAME);
+			node_dev->node_qos_clks[i].reg = NULL;
+			MSM_BUS_DBG("%s: Valid qos clk[%d] node %d %d Reg%s\n",
+					__func__, i,
+					node_dev->node_info->id,
+					node_dev->num_node_qos_clks,
+					node_dev->node_qos_clks[i].reg_name);
+		}
+	}
+
+	return 0;
+}
+
+static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata,
+				struct device *bus_dev)
+{
+	int ret = 0, i = 0;
+	struct msm_bus_node_info_type *node_info = NULL;
+	struct msm_bus_node_info_type *pdata_node_info = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	bus_node = to_msm_bus_node(bus_dev);
+
+	if (!bus_node || !pdata) {
+		ret = -ENXIO;
+		MSM_BUS_ERR("%s: Invalid pointers pdata %p, bus_node %p",
+			__func__, pdata, bus_node);
+		goto exit_copy_node_info;
+	}
+
+	node_info = bus_node->node_info;
+	pdata_node_info = pdata->node_info;
+
+	node_info->name = pdata_node_info->name;
+	node_info->id =  pdata_node_info->id;
+	node_info->bcm_req_idx = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_bcm_devs,
+			GFP_KERNEL);
+	if (!node_info->bcm_req_idx) {
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	for (i = 0; i < pdata_node_info->num_bcm_devs; i++)
+		node_info->bcm_req_idx[i] = -1;
+
+	node_info->bus_device_id = pdata_node_info->bus_device_id;
+	node_info->mas_rpm_id = pdata_node_info->mas_rpm_id;
+	node_info->slv_rpm_id = pdata_node_info->slv_rpm_id;
+	node_info->num_connections = pdata_node_info->num_connections;
+	node_info->num_blist = pdata_node_info->num_blist;
+	node_info->num_bcm_devs = pdata_node_info->num_bcm_devs;
+	node_info->num_rsc_devs = pdata_node_info->num_rsc_devs;
+	node_info->num_qports = pdata_node_info->num_qports;
+	node_info->num_disable_ports = pdata_node_info->num_disable_ports;
+	node_info->disable_ports = pdata_node_info->disable_ports;
+	node_info->virt_dev = pdata_node_info->virt_dev;
+	node_info->is_fab_dev = pdata_node_info->is_fab_dev;
+	node_info->is_bcm_dev = pdata_node_info->is_bcm_dev;
+	node_info->is_rsc_dev = pdata_node_info->is_rsc_dev;
+	node_info->qos_params.prio_dflt = pdata_node_info->qos_params.prio_dflt;
+	node_info->qos_params.limiter.bw =
+				pdata_node_info->qos_params.limiter.bw;
+	node_info->qos_params.limiter.sat =
+				pdata_node_info->qos_params.limiter.sat;
+	node_info->qos_params.limiter_en =
+				pdata_node_info->qos_params.limiter_en;
+	node_info->qos_params.reg.low_prio =
+				pdata_node_info->qos_params.reg.low_prio;
+	node_info->qos_params.reg.hi_prio =
+				pdata_node_info->qos_params.reg.hi_prio;
+	node_info->qos_params.reg.bw =
+				pdata_node_info->qos_params.reg.bw;
+	node_info->qos_params.reg.sat =
+				pdata_node_info->qos_params.reg.sat;
+	node_info->qos_params.reg_mode.read =
+				pdata_node_info->qos_params.reg_mode.read;
+	node_info->qos_params.reg_mode.write =
+				pdata_node_info->qos_params.reg_mode.write;
+	node_info->qos_params.urg_fwd_en =
+				pdata_node_info->qos_params.urg_fwd_en;
+	node_info->qos_params.defer_init_qos =
+				pdata_node_info->qos_params.defer_init_qos;
+	node_info->agg_params.buswidth = pdata_node_info->agg_params.buswidth;
+	node_info->agg_params.agg_scheme =
+					pdata_node_info->agg_params.agg_scheme;
+	node_info->agg_params.vrail_comp =
+					pdata_node_info->agg_params.vrail_comp;
+	node_info->agg_params.num_aggports =
+				pdata_node_info->agg_params.num_aggports;
+	node_info->agg_params.num_util_levels =
+				pdata_node_info->agg_params.num_util_levels;
+	node_info->agg_params.util_levels = devm_kzalloc(bus_dev,
+			sizeof(struct node_util_levels_type) *
+			node_info->agg_params.num_util_levels,
+			GFP_KERNEL);
+	if (!node_info->agg_params.util_levels) {
+		MSM_BUS_ERR("%s: Agg util level alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+	memcpy(node_info->agg_params.util_levels,
+		pdata_node_info->agg_params.util_levels,
+		sizeof(struct node_util_levels_type) *
+			pdata_node_info->agg_params.num_util_levels);
+
+	node_info->dev_connections = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_connections,
+			GFP_KERNEL);
+	if (!node_info->dev_connections) {
+		MSM_BUS_ERR("%s:Bus dev connections alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->connections = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_connections,
+			GFP_KERNEL);
+	if (!node_info->connections) {
+		MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->connections,
+		pdata_node_info->connections,
+		sizeof(int) * pdata_node_info->num_connections);
+
+	node_info->black_connections = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_blist,
+			GFP_KERNEL);
+	if (!node_info->black_connections) {
+		MSM_BUS_ERR("%s: Bus black connections alloc failed\n",
+			__func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->bl_cons = devm_kzalloc(bus_dev,
+			pdata_node_info->num_blist * sizeof(int),
+			GFP_KERNEL);
+	if (!node_info->bl_cons) {
+		MSM_BUS_ERR("%s:Bus black list connections alloc failed\n",
+					__func__);
+		devm_kfree(bus_dev, node_info->black_connections);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->bl_cons,
+		pdata_node_info->bl_cons,
+		sizeof(int) * pdata_node_info->num_blist);
+
+	node_info->bcm_devs = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_bcm_devs,
+			GFP_KERNEL);
+	if (!node_info->bcm_devs) {
+		MSM_BUS_ERR("%s:Bcm dev connections alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->bcm_dev_ids = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_bcm_devs,
+			GFP_KERNEL);
+	if (!node_info->bcm_dev_ids) {
+		MSM_BUS_ERR("%s: Bcm dev ids alloc failed\n", __func__);
+		devm_kfree(bus_dev, node_info->bcm_devs);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->bcm_dev_ids,
+		pdata_node_info->bcm_dev_ids,
+		sizeof(int) * pdata_node_info->num_bcm_devs);
+
+	node_info->rsc_devs = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_rsc_devs,
+			GFP_KERNEL);
+	if (!node_info->rsc_devs) {
+		MSM_BUS_ERR("%s:rsc dev connections alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->rsc_dev_ids = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_rsc_devs,
+			GFP_KERNEL);
+	if (!node_info->rsc_dev_ids) {
+		MSM_BUS_ERR("%s: Rsc dev ids alloc failed\n", __func__);
+		devm_kfree(bus_dev, node_info->rsc_devs);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->rsc_dev_ids,
+		pdata_node_info->rsc_dev_ids,
+		sizeof(int) * pdata_node_info->num_rsc_devs);
+
+	node_info->qport = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_qports,
+			GFP_KERNEL);
+	if (!node_info->qport) {
+		MSM_BUS_ERR("%s:Bus qport allocation failed\n", __func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		devm_kfree(bus_dev, node_info->bl_cons);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->qport,
+		pdata_node_info->qport,
+		sizeof(int) * pdata_node_info->num_qports);
+
+exit_copy_node_info:
+	return ret;
+}
+
+static struct device *msm_bus_device_init(
+			struct msm_bus_node_device_type *pdata)
+{
+	struct device *bus_dev = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+	struct msm_bus_node_info_type *node_info = NULL;
+	int ret = -ENODEV, i = 0;
+
+	/*
+	 * Init here so we can use devm calls
+	 */
+
+	bus_node = kzalloc(sizeof(struct msm_bus_node_device_type), GFP_KERNEL);
+	if (!bus_node) {
+		ret = -ENOMEM;
+		goto err_device_init;
+	}
+	bus_dev = &bus_node->dev;
+	device_initialize(bus_dev);
+
+	node_info = devm_kzalloc(bus_dev,
+			sizeof(struct msm_bus_node_info_type), GFP_KERNEL);
+	if (!node_info) {
+		ret = -ENOMEM;
+		goto err_put_device;
+	}
+
+	bus_node->node_info = node_info;
+	bus_node->ap_owned = pdata->ap_owned;
+	bus_node->dirty = false;
+	bus_node->num_qos_bcms = pdata->num_qos_bcms;
+	if (bus_node->num_qos_bcms) {
+		bus_node->qos_bcms = devm_kzalloc(bus_dev,
+					(sizeof(struct qos_bcm_type) *
+					bus_node->num_qos_bcms), GFP_KERNEL);
+		if (!bus_node->qos_bcms) {
+			ret = -ENOMEM;
+			goto err_put_device;
+		}
+		for (i = 0; i < bus_node->num_qos_bcms; i++) {
+			bus_node->qos_bcms[i].qos_bcm_id =
+					pdata->qos_bcms[i].qos_bcm_id;
+			bus_node->qos_bcms[i].vec.vec_a =
+					pdata->qos_bcms[i].vec.vec_a;
+			bus_node->qos_bcms[i].vec.vec_b =
+					pdata->qos_bcms[i].vec.vec_b;
+		}
+	}
+	bus_node->num_regs = pdata->num_regs;
+	if (bus_node->num_regs)
+		bus_node->node_regs = pdata->node_regs;
+
+	bus_dev->of_node = pdata->of_node;
+
+	ret = msm_bus_copy_node_info(pdata, bus_dev);
+	if (ret)
+		goto err_put_device;
+
+	bus_dev->bus = &msm_bus_type;
+	dev_set_name(bus_dev, "%s", bus_node->node_info->name);
+
+	ret = device_add(bus_dev);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error registering device %d",
+				__func__, pdata->node_info->id);
+		goto err_put_device;
+	}
+	device_create_file(bus_dev, &dev_attr_bw);
+	INIT_LIST_HEAD(&bus_node->devlist);
+	return bus_dev;
+
+err_put_device:
+	put_device(bus_dev);
+	bus_dev = NULL;
+	kfree(bus_node);
+err_device_init:
+	return ERR_PTR(ret);
+}
+
+static int msm_bus_setup_dev_conn(struct device *bus_dev, void *data)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	struct msm_bus_node_device_type *bcm_node = NULL;
+	struct msm_bus_node_device_type *rsc_node = NULL;
+	int ret = 0;
+	int j;
+	struct msm_bus_node_device_type *fab;
+
+	bus_node = to_msm_bus_node(bus_dev);
+	if (!bus_node) {
+		MSM_BUS_ERR("%s: Can't get device info", __func__);
+		ret = -ENODEV;
+		goto exit_setup_dev_conn;
+	}
+
+	/* Setup parent bus device for this node */
+	if (!bus_node->node_info->is_fab_dev &&
+		!bus_node->node_info->is_bcm_dev &&
+		!bus_node->node_info->is_rsc_dev) {
+		struct device *bus_parent_device =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->bus_device_id,
+				msm_bus_device_match_adhoc);
+
+		if (!bus_parent_device) {
+			MSM_BUS_ERR("%s: Error finding parent for node %d, parent %d",
+				__func__,
+				bus_node->node_info->id,
+				bus_node->node_info->bus_device_id);
+			ret = -ENXIO;
+			goto exit_setup_dev_conn;
+		}
+		bus_node->node_info->bus_device = bus_parent_device;
+		fab = to_msm_bus_node(bus_parent_device);
+		list_add_tail(&bus_node->dev_link, &fab->devlist);
+	}
+
+	bus_node->node_info->is_traversed = false;
+
+	for (j = 0; j < bus_node->node_info->num_connections; j++) {
+		bus_node->node_info->dev_connections[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->connections[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->dev_connections[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d",
+				__func__, bus_node->node_info->connections[j],
+				 bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+	}
+
+	for (j = 0; j < bus_node->node_info->num_blist; j++) {
+		bus_node->node_info->black_connections[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->bl_cons[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->black_connections[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d\n",
+				__func__, bus_node->node_info->bl_cons[j],
+				bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+	}
+
+	for (j = 0; j < bus_node->node_info->num_bcm_devs; j++) {
+		bus_node->node_info->bcm_devs[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->bcm_dev_ids[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->bcm_devs[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d",
+				__func__, bus_node->node_info->bcm_dev_ids[j],
+				 bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+		bcm_node = to_msm_bus_node(bus_node->node_info->bcm_devs[j]);
+		bcm_node->bcmdev->num_bus_devs++;
+	}
+
+	for (j = 0; j < bus_node->node_info->num_rsc_devs; j++) {
+		bus_node->node_info->rsc_devs[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->rsc_dev_ids[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->rsc_devs[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d",
+				__func__, bus_node->node_info->rsc_dev_ids[j],
+				 bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+		rsc_node = to_msm_bus_node(bus_node->node_info->rsc_devs[j]);
+		rsc_node->rscdev->num_bcm_devs++;
+	}
+
+exit_setup_dev_conn:
+	return ret;
+}
+
+static int msm_bus_node_debug(struct device *bus_dev, void *data)
+{
+	int j;
+	int ret = 0;
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	bus_node = to_msm_bus_node(bus_dev);
+	if (!bus_node) {
+		MSM_BUS_ERR("%s: Can't get device info", __func__);
+		ret = -ENODEV;
+		goto exit_node_debug;
+	}
+
+	MSM_BUS_DBG("Device = %d buswidth %u", bus_node->node_info->id,
+				bus_node->node_info->agg_params.buswidth);
+	for (j = 0; j < bus_node->node_info->num_connections; j++) {
+		struct msm_bus_node_device_type *bdev =
+		to_msm_bus_node(bus_node->node_info->dev_connections[j]);
+		MSM_BUS_DBG("\n\t Connection[%d] %d", j, bdev->node_info->id);
+	}
+
+	if (bus_node->node_info->is_fab_dev)
+		msm_bus_floor_init(bus_dev);
+
+exit_node_debug:
+	return ret;
+}
+
+static int msm_bus_free_dev(struct device *dev, void *data)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	bus_node = to_msm_bus_node(dev);
+
+	if (bus_node)
+		MSM_BUS_ERR("\n%s: Removing device %d", __func__,
+						bus_node->node_info->id);
+	device_unregister(dev);
+	kfree(bus_node);
+	return 0;
+}
+
+int msm_bus_device_remove(struct platform_device *pdev)
+{
+	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_free_dev);
+	return 0;
+}
+
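+/*
+ * Probe creates one child device per node from the parsed platform data
+ * (with extra per-type init for fabric, BCM and RSC nodes), wires up the
+ * parent/connection/BCM/RSC device pointers, allocates the per-RSC TCS
+ * command lists, and finally attempts QoS programming, which is deferred
+ * to the first transaction if it fails here.
+ */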
+static int msm_bus_device_probe(struct platform_device *pdev)
+{
+	unsigned int i;
+	int ret = 0;
+	struct msm_bus_device_node_registration *pdata;
+
+	MSM_BUS_ERR("msm_bus: Probe started");
+	/* If possible, get pdata from device-tree */
+	if (pdev->dev.of_node)
+		pdata = msm_bus_of_to_pdata(pdev);
+	else {
+		pdata = (struct msm_bus_device_node_registration *)
+			pdev->dev.platform_data;
+	}
+
+	MSM_BUS_ERR("msm_bus: DT Parsing complete");
+
+	if (IS_ERR_OR_NULL(pdata)) {
+		MSM_BUS_ERR("No platform data found");
+		ret = -ENODATA;
+		goto exit_device_probe;
+	}
+
+	for (i = 0; i < pdata->num_devices; i++) {
+		struct device *node_dev = NULL;
+
+		node_dev = msm_bus_device_init(&pdata->info[i]);
+
+		if (IS_ERR(node_dev)) {
+			MSM_BUS_ERR("%s: Error during dev init for %d",
+				__func__, pdata->info[i].node_info->id);
+			ret = PTR_ERR(node_dev);
+			goto exit_device_probe;
+		}
+
+		ret = msm_bus_init_clk(node_dev, &pdata->info[i]);
+		if (ret) {
+			MSM_BUS_ERR("\n Failed to init bus clk. ret %d", ret);
+			msm_bus_device_remove(pdev);
+			goto exit_device_probe;
+		}
+		/*Is this a fabric device ?*/
+		if (pdata->info[i].node_info->is_fab_dev) {
+			MSM_BUS_DBG("%s: %d is a fab", __func__,
+						pdata->info[i].node_info->id);
+			ret = msm_bus_fabric_init(node_dev, &pdata->info[i]);
+			if (ret) {
+				MSM_BUS_ERR("%s: Error initializing fab %d",
+					__func__, pdata->info[i].node_info->id);
+				goto exit_device_probe;
+			}
+		}
+		if (pdata->info[i].node_info->is_bcm_dev) {
+			ret = msm_bus_bcm_init(node_dev, &pdata->info[i]);
+			if (ret) {
+				MSM_BUS_ERR("%s: Error initializing bcm %d",
+					__func__, pdata->info[i].node_info->id);
+				goto exit_device_probe;
+			}
+		}
+		if (pdata->info[i].node_info->is_rsc_dev) {
+			ret = msm_bus_rsc_init(pdev, node_dev, &pdata->info[i]);
+			if (ret) {
+				MSM_BUS_ERR("%s: Error initializing rsc %d",
+					__func__, pdata->info[i].node_info->id);
+				goto exit_device_probe;
+			}
+		}
+	}
+
+	ret = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+						msm_bus_setup_dev_conn);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error setting up dev connections", __func__);
+		goto exit_device_probe;
+	}
+
+	ret = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+						msm_bus_postcon_setup);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error post connection setup", __func__);
+		goto exit_device_probe;
+	}
+
+	/*
+	 * Setup the QoS for the nodes, don't check the error codes as we
+	 * defer QoS programming to the first transaction in cases of failure
+	 * and we want to continue the probe.
+	 */
+	ret = bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_dev_init_qos);
+
+	/* Register the arb layer ops */
+	msm_bus_arb_setops_adhoc(&arb_ops);
+	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_node_debug);
+
+	devm_kfree(&pdev->dev, pdata->info);
+	devm_kfree(&pdev->dev, pdata);
+exit_device_probe:
+	return ret;
+}
+
+static int msm_bus_device_rules_probe(struct platform_device *pdev)
+{
+	struct bus_rule_type *rule_data = NULL;
+	int num_rules = 0;
+
+	num_rules = msm_bus_of_get_static_rules(pdev, &rule_data);
+
+	if (!rule_data)
+		goto exit_rules_probe;
+
+	msm_rule_register(num_rules, rule_data, NULL);
+	static_rules.num_rules = num_rules;
+	static_rules.rules = rule_data;
+	pdev->dev.platform_data = &static_rules;
+
+exit_rules_probe:
+	return 0;
+}
+
+int msm_bus_device_rules_remove(struct platform_device *pdev)
+{
+	struct static_rules_type *static_rules = NULL;
+
+	static_rules = pdev->dev.platform_data;
+	if (static_rules)
+		msm_rule_unregister(static_rules->num_rules,
+					static_rules->rules, NULL);
+	return 0;
+}
+
+static int msm_bus_rsc_probe(struct platform_device *pdev)
+{
+	int i = 0;
+	int ret = 0;
+	uint32_t rsc_id = 0;
+
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,msm-bus-id",
+								&rsc_id);
+	if (ret) {
+		MSM_BUS_ERR("unable to find msm bus id\n");
+		return ret;
+	}
+
+	for (i = 0; i < MSM_BUS_RSC_COUNT - 1; i++) {
+		if (!rsc_clients[i].rsc_id) {
+			rsc_clients[i].rsc_id = rsc_id;
+			rsc_clients[i].client = &pdev->dev;
+			if (IS_ERR_OR_NULL(rsc_clients[i].client)) {
+				rsc_clients[i].rsc_id = 0;
+				rsc_clients[i].client = NULL;
+			}
+			break;
+		}
+	}
+	return 0;
+}
+
+int msm_bus_rsc_remove(struct platform_device *pdev)
+{
+	int i;
+
+	for (i = 0; i < MSM_BUS_RSC_COUNT - 1; i++) {
+		rsc_clients[i].rsc_id = 0;
+		rsc_clients[i].client = NULL;
+	}
+	return 0;
+}
+
+static const struct of_device_id rules_match[] = {
+	{.compatible = "qcom,msm-bus-static-bw-rules"},
+	{}
+};
+
+static struct platform_driver msm_bus_rules_driver = {
+	.probe = msm_bus_device_rules_probe,
+	.remove = msm_bus_device_rules_remove,
+	.driver = {
+		.name = "msm_bus_rules_device",
+		.of_match_table = rules_match,
+	},
+};
+
+static const struct of_device_id fabric_match[] = {
+	{.compatible = "qcom,msm-bus-device"},
+	{}
+};
+
+static struct platform_driver msm_bus_device_driver = {
+	.probe = msm_bus_device_probe,
+	.remove = msm_bus_device_remove,
+	.driver = {
+		.name = "msm_bus_device",
+		.of_match_table = fabric_match,
+	},
+};
+
+static const struct of_device_id rsc_match[] = {
+	{.compatible = "qcom,msm-bus-rsc"},
+	{}
+};
+
+static struct platform_driver msm_bus_rsc_driver = {
+	.probe = msm_bus_rsc_probe,
+	.remove = msm_bus_rsc_remove,
+	.driver = {
+		.name = "msm_bus_rsc",
+		.of_match_table = rsc_match,
+	},
+};
+
+int __init msm_bus_rsc_init_driver(void)
+{
+	int rc;
+
+	rc =  platform_driver_register(&msm_bus_rsc_driver);
+	if (rc)
+		MSM_BUS_ERR("Failed to register msm bus rsc device driver");
+
+	return rc;
+}
+
+
+int __init msm_bus_device_init_driver(void)
+{
+	int rc;
+
+	MSM_BUS_ERR("msm_bus_fabric_rpmh_init_driver\n");
+	rc =  platform_driver_register(&msm_bus_device_driver);
+
+	if (rc) {
+		MSM_BUS_ERR("Failed to register bus device driver");
+		return rc;
+	}
+	return platform_driver_register(&msm_bus_rules_driver);
+}
+
+int __init msm_bus_device_late_init(void)
+{
+	commit_late_init_data(true);
+	MSM_BUS_ERR("msm_bus_late_init: Remove handoff bw requests\n");
+	init_time = false;
+	return commit_late_init_data(false);
+}
+core_initcall(msm_bus_rsc_init_driver);
+subsys_initcall(msm_bus_device_init_driver);
+late_initcall_sync(msm_bus_device_late_init);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc.h b/drivers/soc/qcom/msm_bus/msm_bus_noc.h
new file mode 100644
index 0000000..a93f6ff
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_noc.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_NOC_H
+#define _ARCH_ARM_MACH_MSM_BUS_NOC_H
+
+enum msm_bus_noc_qos_mode_type {
+	NOC_QOS_MODE_FIXED = 0,
+	NOC_QOS_MODE_LIMITER,
+	NOC_QOS_MODE_BYPASS,
+	NOC_QOS_MODE_REGULATOR,
+	NOC_QOS_MODE_MAX,
+};
+
+enum msm_bus_noc_qos_mode_perm {
+	NOC_QOS_PERM_MODE_FIXED = (1 << NOC_QOS_MODE_FIXED),
+	NOC_QOS_PERM_MODE_LIMITER = (1 << NOC_QOS_MODE_LIMITER),
+	NOC_QOS_PERM_MODE_BYPASS = (1 << NOC_QOS_MODE_BYPASS),
+	NOC_QOS_PERM_MODE_REGULATOR = (1 << NOC_QOS_MODE_REGULATOR),
+};
+
+#define NOC_QOS_MODES_ALL_PERM (NOC_QOS_PERM_MODE_FIXED | \
+	NOC_QOS_PERM_MODE_LIMITER | NOC_QOS_PERM_MODE_BYPASS | \
+	NOC_QOS_PERM_MODE_REGULATOR)
+
+struct msm_bus_noc_commit {
+	struct msm_bus_node_hw_info *mas;
+	struct msm_bus_node_hw_info *slv;
+};
+
+struct msm_bus_noc_info {
+	void __iomem *base;
+	uint32_t base_addr;
+	uint32_t nmasters;
+	uint32_t nqos_masters;
+	uint32_t nslaves;
+	uint32_t qos_freq; /* QOS Clock in KHz */
+	uint32_t qos_baseoffset;
+	uint32_t qos_delta;
+	uint32_t *mas_modes;
+	uint32_t sbm_offset;
+	struct msm_bus_noc_commit cdata[NUM_CTX];
+};
+
+struct msm_bus_noc_qos_priority {
+	uint32_t high_prio;
+	uint32_t low_prio;
+	uint32_t read_prio;
+	uint32_t write_prio;
+	uint32_t p1;
+	uint32_t p0;
+};
+
+struct msm_bus_noc_qos_bw {
+	uint64_t bw; /* Bandwidth in bytes per second */
+	uint32_t ws; /* Window size in nano seconds */
+};
+
+void msm_bus_noc_init(struct msm_bus_noc_info *ninfo);
+uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode);
+void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta,
+	struct msm_bus_noc_qos_priority *qprio);
+void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off,
+	uint32_t qos_freq, uint32_t mport, uint32_t qos_delta,
+	uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw);
+#endif /* _ARCH_ARM_MACH_MSM_BUS_NOC_H */
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c
new file mode 100644
index 0000000..93d7642
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c
@@ -0,0 +1,583 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: NOC: %s(): " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_adhoc.h"
+
+/* NOC_QOS generic */
+#define __CLZ(x) ((8 * sizeof(uint32_t)) - 1 - __fls(x))
+#define SAT_SCALE 16	/* 16 bytes minimum for saturation */
+#define BW_SCALE  256	/* 1/256 byte per cycle unit */
+#define QOS_DEFAULT_BASEOFFSET		0x00003000
+#define QOS_DEFAULT_DELTA		0x80
+#define MAX_BW_FIELD (NOC_QOS_BWn_BW_BMSK >> NOC_QOS_BWn_BW_SHFT)
+#define MAX_SAT_FIELD (NOC_QOS_SATn_SAT_BMSK >> NOC_QOS_SATn_SAT_SHFT)
+#define MIN_SAT_FIELD	1
+#define MIN_BW_FIELD	1
+
+#define NOC_QOS_REG_BASE(b, o)		((b) + (o))
+
+#define NOC_QOS_PRIORITYn_ADDR(b, o, n, d)	\
+	(NOC_QOS_REG_BASE(b, o) + 0x8 + (d) * (n))
+enum noc_qos_id_priorityn {
+	NOC_QOS_PRIORITYn_RMSK		= 0x0000000f,
+	NOC_QOS_PRIORITYn_MAXn		= 32,
+	NOC_QOS_PRIORITYn_P1_BMSK	= 0xc,
+	NOC_QOS_PRIORITYn_P1_SHFT	= 0x2,
+	NOC_QOS_PRIORITYn_P0_BMSK	= 0x3,
+	NOC_QOS_PRIORITYn_P0_SHFT	= 0x0,
+};
+
+#define NOC_QOS_MODEn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0xC + (d) * (n))
+enum noc_qos_id_moden_rmsk {
+	NOC_QOS_MODEn_RMSK		= 0x00000003,
+	NOC_QOS_MODEn_MAXn		= 32,
+	NOC_QOS_MODEn_MODE_BMSK		= 0x3,
+	NOC_QOS_MODEn_MODE_SHFT		= 0x0,
+};
+
+#define NOC_QOS_BWn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0x10 + (d) * (n))
+enum noc_qos_id_bwn {
+	NOC_QOS_BWn_RMSK		= 0x0000ffff,
+	NOC_QOS_BWn_MAXn		= 32,
+	NOC_QOS_BWn_BW_BMSK		= 0xffff,
+	NOC_QOS_BWn_BW_SHFT		= 0x0,
+};
+
+/* QOS Saturation registers */
+#define NOC_QOS_SATn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0x14 + (d) * (n))
+enum noc_qos_id_saturationn {
+	NOC_QOS_SATn_RMSK		= 0x000003ff,
+	NOC_QOS_SATn_MAXn		= 32,
+	NOC_QOS_SATn_SAT_BMSK		= 0x3ff,
+	NOC_QOS_SATn_SAT_SHFT		= 0x0,
+};
+
+static int noc_div(uint64_t *a, uint32_t b)
+{
+	if ((*a > 0) && (*a < b)) {
+		*a = 0;
+		return 1;
+	} else {
+		return do_div(*a, b);
+	}
+}
+
+/*
+ * Calculate the bandwidth the hardware is using from the register values.
+ * The returned bandwidth is in bytes/sec.
+ */
+static uint64_t noc_bw(uint32_t bw_field, uint32_t qos_freq)
+{
+	uint64_t res;
+	uint32_t rem, scale;
+
+	res = 2 * qos_freq * bw_field;
+	scale = BW_SCALE * 1000;
+	rem = noc_div(&res, scale);
+	MSM_BUS_DBG("NOC: Calculated bw: %llu\n", res * 1000000ULL);
+	return res * 1000000ULL;
+}
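+
+/*
+ * Illustrative trace of noc_bw() (example values only): with
+ * qos_freq = 19200 KHz and bw_field = 0x100, res = 2 * 19200 * 256 =
+ * 9830400, divided by BW_SCALE * 1000 = 256000 gives 38, i.e. roughly
+ * 38 MB/s after the final conversion to bytes/sec.
+ */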
+
+/**
+ * Calculate the max BW in Bytes/s for a given time-base.
+ */
+static uint64_t noc_bw_ceil(int bw_field, uint32_t qos_freq_khz)
+{
+	uint64_t bw_temp = 2 * qos_freq_khz * bw_field;
+	uint32_t scale = 1000 * BW_SCALE;
+
+	noc_div(&bw_temp, scale);
+	return bw_temp * 1000000;
+}
+#define MAX_BW(timebase) noc_bw_ceil(MAX_BW_FIELD, (timebase))
+
+/*
+ * Calculate the window size (ws) the hardware is using from the register
+ * values. The returned window size is in nanoseconds.
+ */
+static uint32_t noc_ws(uint64_t bw, uint32_t sat, uint32_t qos_freq)
+{
+	if (bw && qos_freq) {
+		uint32_t bwf = bw * qos_freq;
+		uint64_t scale = 1000000000000LL * BW_SCALE *
+			SAT_SCALE * sat;
+		noc_div(&scale, bwf);
+		MSM_BUS_DBG("NOC: Calculated ws: %llu\n", scale);
+		return scale;
+	}
+
+	return 0;
+}
+#define MAX_WS(bw, timebase) noc_ws((bw), MAX_SAT_FIELD, (timebase))
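+
+/*
+ * Note on noc_ws()/MAX_WS(): the window size in ns is proportional to
+ * BW_SCALE * SAT_SCALE * sat and inversely proportional to the product
+ * of the granted bandwidth (bytes/sec) and the QoS clock (KHz), so
+ * MAX_WS() gives the largest window the 10-bit saturation field can
+ * express for a given bandwidth and time-base.
+ */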
+
+/* Calculate the bandwidth field value for the requested bandwidth */
+static uint32_t noc_bw_field(uint64_t bw_bps, uint32_t qos_freq_khz)
+{
+	uint32_t bw_field = 0;
+
+	if (bw_bps) {
+		uint32_t rem;
+		uint64_t bw_capped = min_t(uint64_t, bw_bps,
+						MAX_BW(qos_freq_khz));
+		uint64_t bwc = bw_capped * BW_SCALE;
+		uint64_t qf = 2 * qos_freq_khz * 1000;
+
+		rem = noc_div(&bwc, qf);
+		bw_field = (uint32_t)max_t(unsigned long, bwc, MIN_BW_FIELD);
+		bw_field = (uint32_t)min_t(unsigned long, bw_field,
+								MAX_BW_FIELD);
+	}
+
+	MSM_BUS_DBG("NOC: bw_field: %u\n", bw_field);
+	return bw_field;
+}
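+
+/*
+ * Illustrative trace of noc_bw_field() (example values only): requesting
+ * bw_bps = 400000000 (400 MB/s) with qos_freq_khz = 19200 gives
+ * bwc = 400000000 * 256 and qf = 2 * 19200 * 1000, so
+ * bw_field = 102400000000 / 38400000 = 2666, which already lies within
+ * [MIN_BW_FIELD, MAX_BW_FIELD].
+ */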
+
+static uint32_t noc_sat_field(uint64_t bw, uint32_t ws, uint32_t qos_freq)
+{
+	uint32_t sat_field = 0;
+
+	if (bw) {
+		/* Limit to max bw and scale bw to 100 KB increments */
+		uint64_t tbw, tscale;
+		uint64_t bw_scaled = min_t(uint64_t, bw, MAX_BW(qos_freq));
+		uint32_t rem = noc_div(&bw_scaled, 100000);
+
+		/*
+		 *	SATURATION =
+		 *	(BW [MBps] * integration window [us] *
+		 *		time base frequency [MHz]) / (256 * 16)
+		 */
+		tbw = bw_scaled * ws * qos_freq;
+		tscale = BW_SCALE * SAT_SCALE * 1000000LL;
+		rem = noc_div(&tbw, tscale);
+		sat_field = (uint32_t)max_t(unsigned long, tbw, MIN_SAT_FIELD);
+		sat_field = (uint32_t)min_t(unsigned long, sat_field,
+							MAX_SAT_FIELD);
+	}
+
+	MSM_BUS_DBG("NOC: sat_field: %d\n", sat_field);
+	return sat_field;
+}
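+
+/*
+ * Illustrative trace of noc_sat_field() (example values only): for
+ * bw = 400000000 bytes/sec, ws = 10000 and qos_freq = 19200 KHz, bw is
+ * first scaled to 4000 (100 KB units); tbw = 4000 * 10000 * 19200 and
+ * tscale = 256 * 16 * 1000000, giving sat_field = 187 after clamping
+ * to [MIN_SAT_FIELD, MAX_SAT_FIELD].
+ */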
+
+static void noc_set_qos_mode(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta, uint8_t mode,
+		uint8_t perm_mode)
+{
+	if (mode < NOC_QOS_MODE_MAX &&
+		((1 << mode) & perm_mode)) {
+		uint32_t reg_val;
+
+		reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta)) & NOC_QOS_MODEn_RMSK;
+		writel_relaxed(((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK))) |
+			(mode & NOC_QOS_MODEn_MODE_BMSK)),
+			NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta));
+	}
+	/* Ensure qos mode is set before exiting */
+	wmb();
+}
+
+static void noc_set_qos_priority(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		struct msm_bus_noc_qos_priority *priority)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_PRIORITYn_RMSK;
+	val = priority->p1 << NOC_QOS_PRIORITYn_P1_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P1_BMSK))) |
+		(val & NOC_QOS_PRIORITYn_P1_BMSK)),
+		NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport,
+								qos_delta))
+		& NOC_QOS_PRIORITYn_RMSK;
+	writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P0_BMSK))) |
+		(priority->p0 & NOC_QOS_PRIORITYn_P0_BMSK)),
+		NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta));
+	/* Ensure qos priority is set before exiting */
+	wmb();
+}
+
+static void msm_bus_noc_set_qos_bw(void __iomem *base, uint32_t qos_off,
+		uint32_t qos_freq, uint32_t mport, uint32_t qos_delta,
+		uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw)
+{
+	uint32_t reg_val, val, mode;
+
+	if (!qos_freq) {
+		MSM_BUS_DBG("Zero QoS Freq\n");
+		return;
+	}
+
+	/*
+	 * The BW/SAT registers are only programmed when Limiter or
+	 * Regulator mode is supported.
+	 */
+	if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
+		NOC_QOS_PERM_MODE_REGULATOR)) {
+		uint32_t bw_val = noc_bw_field(qbw->bw, qos_freq);
+		uint32_t sat_val = noc_sat_field(qbw->bw, qbw->ws,
+			qos_freq);
+
+		MSM_BUS_DBG("NOC: BW: perm_mode: %d bw_val: %d, sat_val: %d\n",
+			perm_mode, bw_val, sat_val);
+		/*
+		 * If in Limiter/Regulator mode, switch to Fixed mode first
+		 * and clear the QoS accumulator.
+		 */
+		mode = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK;
+		if (mode == NOC_QOS_MODE_REGULATOR || mode ==
+			NOC_QOS_MODE_LIMITER) {
+			reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(
+				base, qos_off, mport, qos_delta));
+			val = NOC_QOS_MODE_FIXED;
+			writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
+				| (val & NOC_QOS_MODEn_MODE_BMSK),
+				NOC_QOS_MODEn_ADDR(base, qos_off, mport,
+								qos_delta));
+		}
+
+		reg_val = readl_relaxed(NOC_QOS_BWn_ADDR(base, qos_off, mport,
+								qos_delta));
+		val = bw_val << NOC_QOS_BWn_BW_SHFT;
+		writel_relaxed(((reg_val & (~(NOC_QOS_BWn_BW_BMSK))) |
+			(val & NOC_QOS_BWn_BW_BMSK)),
+			NOC_QOS_BWn_ADDR(base, qos_off, mport, qos_delta));
+
+		MSM_BUS_DBG("NOC: BW: Wrote value: 0x%x\n", ((reg_val &
+			(~NOC_QOS_BWn_BW_BMSK)) | (val &
+			NOC_QOS_BWn_BW_BMSK)));
+
+		reg_val = readl_relaxed(NOC_QOS_SATn_ADDR(base, qos_off,
+			mport, qos_delta));
+		val = sat_val << NOC_QOS_SATn_SAT_SHFT;
+		writel_relaxed(((reg_val & (~(NOC_QOS_SATn_SAT_BMSK))) |
+			(val & NOC_QOS_SATn_SAT_BMSK)),
+			NOC_QOS_SATn_ADDR(base, qos_off, mport, qos_delta));
+
+		MSM_BUS_DBG("NOC: SAT: Wrote value: 0x%x\n", ((reg_val &
+			(~NOC_QOS_SATn_SAT_BMSK)) | (val &
+			NOC_QOS_SATn_SAT_BMSK)));
+
+		/* Set mode back to what it was initially */
+		reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta));
+		writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
+			| (mode & NOC_QOS_MODEn_MODE_BMSK),
+			NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta));
+		/* Ensure that all writes for bandwidth registers have
+		 * completed before returning
+		 */
+		wmb();
+	}
+}
+
+uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode)
+{
+	if (perm_mode == NOC_QOS_MODES_ALL_PERM)
+		return readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK;
+	else
+		return 31 - __CLZ(mode &
+			NOC_QOS_MODES_ALL_PERM);
+}
+
+void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta,
+	struct msm_bus_noc_qos_priority *priority)
+{
+	priority->p1 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off,
+		mport, qos_delta)) & NOC_QOS_PRIORITYn_P1_BMSK) >>
+		NOC_QOS_PRIORITYn_P1_SHFT;
+
+	priority->p0 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off,
+		mport, qos_delta)) & NOC_QOS_PRIORITYn_P0_BMSK) >>
+		NOC_QOS_PRIORITYn_P0_SHFT;
+}
+
+void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off,
+	uint32_t qos_freq,
+	uint32_t mport, uint32_t qos_delta, uint8_t perm_mode,
+	struct msm_bus_noc_qos_bw *qbw)
+{
+	if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
+		NOC_QOS_PERM_MODE_REGULATOR)) {
+		uint32_t bw_val = readl_relaxed(NOC_QOS_BWn_ADDR(
+			base, qos_off, mport, qos_delta)) & NOC_QOS_BWn_BW_BMSK;
+		uint32_t sat = readl_relaxed(NOC_QOS_SATn_ADDR(
+			base, qos_off, mport, qos_delta))
+						& NOC_QOS_SATn_SAT_BMSK;
+
+		qbw->bw = noc_bw(bw_val, qos_freq);
+		qbw->ws = noc_ws(qbw->bw, sat, qos_freq);
+	} else {
+		qbw->bw = 0;
+		qbw->ws = 0;
+	}
+}
+
+static bool msm_bus_noc_update_bw_reg(int mode)
+{
+	bool ret = false;
+
+	if ((mode == NOC_QOS_MODE_LIMITER) ||
+			(mode == NOC_QOS_MODE_REGULATOR))
+		ret = true;
+
+	return ret;
+}
+
+static int msm_bus_noc_qos_init(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	struct msm_bus_noc_qos_priority prio;
+	int ret = 0;
+	int i;
+
+	prio.p1 = info->node_info->qos_params.prio1;
+	prio.p0 = info->node_info->qos_params.prio0;
+
+	if (!info->node_info->qport) {
+		MSM_BUS_DBG("No QoS Ports to init\n");
+		ret = 0;
+		goto err_qos_init;
+	}
+
+	for (i = 0; i < info->node_info->num_qports; i++) {
+		if (info->node_info->qos_params.mode != NOC_QOS_MODE_BYPASS) {
+			noc_set_qos_priority(qos_base, qos_off,
+					info->node_info->qport[i], qos_delta,
+					&prio);
+
+			if (info->node_info->qos_params.mode !=
+							NOC_QOS_MODE_FIXED) {
+				struct msm_bus_noc_qos_bw qbw;
+
+				qbw.ws = info->node_info->qos_params.ws;
+				qbw.bw = 0;
+				msm_bus_noc_set_qos_bw(qos_base, qos_off,
+					qos_freq,
+					info->node_info->qport[i],
+					qos_delta,
+					info->node_info->qos_params.mode,
+					&qbw);
+			}
+		}
+
+		noc_set_qos_mode(qos_base, qos_off, info->node_info->qport[i],
+				qos_delta, info->node_info->qos_params.mode,
+				(1 << info->node_info->qos_params.mode));
+	}
+err_qos_init:
+	return ret;
+}
+
+static int msm_bus_noc_set_bw(struct msm_bus_node_device_type *dev,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	int ret = 0;
+	uint64_t bw = 0;
+	int i;
+	struct msm_bus_node_info_type *info = dev->node_info;
+
+	if (info && info->num_qports &&
+		((info->qos_params.mode == NOC_QOS_MODE_REGULATOR) ||
+		(info->qos_params.mode ==
+			NOC_QOS_MODE_LIMITER))) {
+		struct msm_bus_noc_qos_bw qos_bw;
+
+		bw = msm_bus_div64(dev->node_bw[ACTIVE_CTX].sum_ab,
+				info->num_qports);
+
+		for (i = 0; i < info->num_qports; i++) {
+			if (!info->qport) {
+				MSM_BUS_DBG("No qos ports to update!\n");
+				break;
+			}
+
+			qos_bw.bw = bw;
+			qos_bw.ws = info->qos_params.ws;
+			msm_bus_noc_set_qos_bw(qos_base, qos_off, qos_freq,
+				info->qport[i], qos_delta,
+				(1 << info->qos_params.mode), &qos_bw);
+			MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n",
+				qos_bw.ws);
+		}
+	}
+	return ret;
+}
+
+static int msm_bus_noc_set_lim_mode(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				u64 lim_bw)
+{
+	int i;
+
+	if (info && info->node_info->num_qports) {
+		struct msm_bus_noc_qos_bw qos_bw;
+
+		if (lim_bw != info->node_info->lim_bw) {
+			for (i = 0; i < info->node_info->num_qports; i++) {
+				qos_bw.bw = lim_bw;
+				qos_bw.ws = info->node_info->qos_params.ws;
+				msm_bus_noc_set_qos_bw(qos_base,
+					qos_off, qos_freq,
+					info->node_info->qport[i], qos_delta,
+					(1 << NOC_QOS_MODE_LIMITER), &qos_bw);
+			}
+			info->node_info->lim_bw = lim_bw;
+		}
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			noc_set_qos_mode(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					NOC_QOS_MODE_LIMITER,
+					(1 << NOC_QOS_MODE_LIMITER));
+		}
+	}
+
+	return 0;
+}
+
+static int msm_bus_noc_set_reg_mode(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				u64 lim_bw)
+{
+	int i;
+
+	if (info && info->node_info->num_qports) {
+		struct msm_bus_noc_qos_priority prio;
+		struct msm_bus_noc_qos_bw qos_bw;
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			prio.p1 =
+				info->node_info->qos_params.reg_prio1;
+			prio.p0 =
+				info->node_info->qos_params.reg_prio0;
+			noc_set_qos_priority(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					&prio);
+		}
+
+		if (lim_bw != info->node_info->lim_bw) {
+			for (i = 0; i < info->node_info->num_qports; i++) {
+				qos_bw.bw = lim_bw;
+				qos_bw.ws = info->node_info->qos_params.ws;
+				msm_bus_noc_set_qos_bw(qos_base, qos_off,
+					qos_freq,
+					info->node_info->qport[i], qos_delta,
+					(1 << NOC_QOS_MODE_REGULATOR), &qos_bw);
+			}
+			info->node_info->lim_bw = lim_bw;
+		}
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			noc_set_qos_mode(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					NOC_QOS_MODE_REGULATOR,
+					(1 << NOC_QOS_MODE_REGULATOR));
+		}
+	}
+	return 0;
+}
+
+static int msm_bus_noc_set_def_mode(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				u64 lim_bw)
+{
+	int i;
+
+	for (i = 0; i < info->node_info->num_qports; i++) {
+		if (info->node_info->qos_params.mode ==
+						NOC_QOS_MODE_FIXED) {
+			struct msm_bus_noc_qos_priority prio;
+
+			prio.p1 =
+				info->node_info->qos_params.prio1;
+			prio.p0 =
+				info->node_info->qos_params.prio0;
+			noc_set_qos_priority(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta, &prio);
+		}
+		noc_set_qos_mode(qos_base, qos_off,
+			info->node_info->qport[i],
+			qos_delta,
+			info->node_info->qos_params.mode,
+			(1 << info->node_info->qos_params.mode));
+	}
+	return 0;
+}
+
+static int msm_bus_noc_limit_mport(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				int enable_lim, u64 lim_bw)
+{
+	int ret = 0;
+
+	if (!(info && info->node_info->num_qports)) {
+		MSM_BUS_ERR("Invalid Node info or no Qports to program");
+		ret = -ENXIO;
+		goto exit_limit_mport;
+	}
+
+	if (lim_bw) {
+		switch (enable_lim) {
+		case THROTTLE_REG:
+			msm_bus_noc_set_reg_mode(info, qos_base, qos_off,
+						qos_delta, qos_freq, lim_bw);
+			break;
+		case THROTTLE_ON:
+			msm_bus_noc_set_lim_mode(info, qos_base, qos_off,
+						qos_delta, qos_freq, lim_bw);
+			break;
+		default:
+			msm_bus_noc_set_def_mode(info, qos_base, qos_off,
+						qos_delta, qos_freq, lim_bw);
+			break;
+		}
+	} else
+		msm_bus_noc_set_def_mode(info, qos_base, qos_off,
+					qos_delta, qos_freq, lim_bw);
+
+exit_limit_mport:
+	return ret;
+}
+
+int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	if (!bus_dev)
+		return -ENODEV;
+
+	bus_dev->fabdev->noc_ops.qos_init = msm_bus_noc_qos_init;
+	bus_dev->fabdev->noc_ops.set_bw = msm_bus_noc_set_bw;
+	bus_dev->fabdev->noc_ops.limit_mport = msm_bus_noc_limit_mport;
+	bus_dev->fabdev->noc_ops.update_bw_reg = msm_bus_noc_update_bw_reg;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_bus_noc_set_ops);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c
new file mode 100644
index 0000000..d6a0ad2
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: NOC: %s(): " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_rpmh.h"
+
+/* NOC_QOS generic */
+#define __CLZ(x) ((8 * sizeof(uint32_t)) - 1 - __fls(x))
+#define SAT_SCALE 16	/* 16 bytes minimum for saturation */
+#define BW_SCALE  256	/* 1/256 byte per cycle unit */
+#define QOS_DEFAULT_BASEOFFSET		0x00003000
+#define QOS_DEFAULT_DELTA		0x80
+#define MAX_BW_FIELD (NOC_QOS_BWn_BW_BMSK >> NOC_QOS_BWn_BW_SHFT)
+#define MAX_SAT_FIELD (NOC_QOS_SATn_SAT_BMSK >> NOC_QOS_SATn_SAT_SHFT)
+#define MIN_SAT_FIELD	1
+#define MIN_BW_FIELD	1
+#define READ_TIMEOUT_MS	msecs_to_jiffies(1)
+#define READ_DELAY_US	10
+
+#define NOC_QOS_REG_BASE(b, o)		((b) + (o))
+
+/* Sideband Manager disable macros */
+#define DISABLE_SBM_FLAGOUTCLR0_LOW_OFF		0x80
+#define DISABLE_SBM_FLAGOUTCLR0_HIGH_OFF	0x84
+#define DISABLE_SBM_FLAGOUTSET0_LOW_OFF		0x88
+#define DISABLE_SBM_FLAGOUTSET0_HIGH_OFF	0x8C
+#define DISABLE_SBM_FLAGOUTSTATUS0_LOW_OFF	0x90
+#define DISABLE_SBM_FLAGOUTSTATUS0_HIGH_OFF	0x94
+#define DISABLE_SBM_SENSEIN0_LOW_OFF		0x100
+#define DISABLE_SBM_SENSEIN0_HIGH_OFF		0x104
+
+#define DISABLE_SBM_REG_BASE(b, o, d)	((b) + (o) + (d))
+
+#define NOC_QOS_MAINCTL_LOWn_ADDR(b, o, n, d)	\
+	(NOC_QOS_REG_BASE(b, o) + 0x8 + (d) * (n))
+enum noc_qos_id_mainctl_lown {
+	NOC_QOS_MCTL_DFLT_PRIOn_BMSK	= 0x00000070,
+	NOC_QOS_MCTL_DFLT_PRIOn_SHFT	= 0x4,
+	NOC_QOS_MCTL_URGFWD_ENn_BMSK	= 0x00000008,
+	NOC_QOS_MCTL_URGFWD_ENn_SHFT	= 0x3,
+	NOC_QOS_MCTL_LIMIT_ENn_BMSK	= 0x00000001,
+	NOC_QOS_MCTL_LIMIT_ENn_SHFT	= 0x0,
+};
+
+#define NOC_QOS_LIMITBWn_ADDR(b, o, n, d)	\
+	(NOC_QOS_REG_BASE(b, o) + 0x18 + (d) * (n))
+enum noc_qos_id_limitbwn {
+	NOC_QOS_LIMITBW_BWn_BMSK	= 0x000007FF,
+	NOC_QOS_LIMITBW_BWn_SHFT	= 0x0,
+	NOC_QOS_LIMITBW_SATn_BMSK	= 0x03FF0000,
+	NOC_QOS_LIMITBW_SATn_SHFT	= 0x11,
+};
+
+#define NOC_QOS_REGUL0CTLn_ADDR(b, o, n, d)	\
+	(NOC_QOS_REG_BASE(b, o) + 0x40 + (d) * (n))
+enum noc_qos_id_regul0ctln {
+	NOC_QOS_REGUL0CTL_HI_PRIOn_BMSK	= 0x00007000,
+	NOC_QOS_REGUL0CTL_HI_PRIOn_SHFT	= 0xC,
+	NOC_QOS_REGUL0CTL_LW_PRIOn_BMSK	= 0x00000700,
+	NOC_QOS_REGUL0CTL_LW_PRIOn_SHFT	= 0x8,
+	NOC_QOS_REGUL0CTL_WRENn_BMSK	= 0x00000002,
+	NOC_QOS_REGUL0CTL_WRENn_SHFT	= 0x1,
+	NOC_QOS_REGUL0CTL_RDENn_BMSK	= 0x00000001,
+	NOC_QOS_REGUL0CTL_RDENn_SHFT	= 0x0,
+};
+
+#define NOC_QOS_REGUL0BWn_ADDR(b, o, n, d)	\
+	(NOC_QOS_REG_BASE(b, o) + 0x48 + (d) * (n))
+enum noc_qos_id_regul0bwbwn {
+	NOC_QOS_REGUL0BW_BWn_BMSK	= 0x000007FF,
+	NOC_QOS_REGUL0BW_BWn_SHFT	= 0x0,
+	NOC_QOS_REGUL0BW_SATn_BMSK	= 0x03FF0000,
+	NOC_QOS_REGUL0BW_SATn_SHFT	= 0x11,
+};
+
+#define NOC_QOS_MODEn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0xC + (d) * (n))
+enum noc_qos_id_moden_rmsk {
+	NOC_QOS_MODEn_RMSK		= 0x00000003,
+	NOC_QOS_MODEn_MAXn		= 32,
+	NOC_QOS_MODEn_MODE_BMSK		= 0x3,
+	NOC_QOS_MODEn_MODE_SHFT		= 0x0,
+};
+
+#define NOC_QOS_BWn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0x10 + (d) * (n))
+enum noc_qos_id_bwn {
+	NOC_QOS_BWn_RMSK		= 0x0000ffff,
+	NOC_QOS_BWn_MAXn		= 32,
+	NOC_QOS_BWn_BW_BMSK		= 0xffff,
+	NOC_QOS_BWn_BW_SHFT		= 0x0,
+};
+
+/* QOS Saturation registers */
+#define NOC_QOS_SATn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0x14 + (d) * (n))
+enum noc_qos_id_saturationn {
+	NOC_QOS_SATn_RMSK		= 0x000003ff,
+	NOC_QOS_SATn_MAXn		= 32,
+	NOC_QOS_SATn_SAT_BMSK		= 0x3ff,
+	NOC_QOS_SATn_SAT_SHFT		= 0x0,
+};
+
+static int noc_div(uint64_t *a, uint32_t b)
+{
+	if ((*a > 0) && (*a < b)) {
+		*a = 0;
+		return 1;
+	} else {
+		return do_div(*a, b);
+	}
+}
+
+/*
+ * Calculate the bandwidth the hardware is using from the register values.
+ * The returned bandwidth is in bytes/sec.
+ */
+static uint64_t noc_bw(uint32_t bw_field, uint32_t qos_freq)
+{
+	uint64_t res;
+	uint32_t rem, scale;
+
+	res = 2 * qos_freq * bw_field;
+	scale = BW_SCALE * 1000;
+	rem = noc_div(&res, scale);
+	MSM_BUS_DBG("NOC: Calculated bw: %llu\n", res * 1000000ULL);
+	return res * 1000000ULL;
+}
+
+/**
+ * Calculate the max BW in Bytes/s for a given time-base.
+ */
+#define MAX_BW(timebase) noc_bw_ceil(MAX_BW_FIELD, (timebase))
+
+/*
+ * Calculate the window size (ws) the hardware is using from the register
+ * values. The returned window size is in nanoseconds.
+ */
+static uint32_t noc_ws(uint64_t bw, uint32_t sat, uint32_t qos_freq)
+{
+	if (bw && qos_freq) {
+		uint32_t bwf = bw * qos_freq;
+		uint64_t scale = 1000000000000LL * BW_SCALE *
+			SAT_SCALE * sat;
+		noc_div(&scale, bwf);
+		MSM_BUS_DBG("NOC: Calculated ws: %llu\n", scale);
+		return scale;
+	}
+
+	return 0;
+}
+#define MAX_WS(bw, timebase) noc_ws((bw), MAX_SAT_FIELD, (timebase))
+
+static void noc_set_qos_dflt_prio(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		uint32_t prio)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = prio << NOC_QOS_MCTL_DFLT_PRIOn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_MCTL_DFLT_PRIOn_BMSK))) |
+		(val & NOC_QOS_MCTL_DFLT_PRIOn_BMSK)),
+		NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure qos priority is set before exiting */
+	wmb();
+}
+
+static void noc_set_qos_limiter(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		struct msm_bus_noc_limiter *lim, uint32_t lim_en)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport,
+		qos_delta));
+
+	writel_relaxed((reg_val & (~(NOC_QOS_MCTL_LIMIT_ENn_BMSK))),
+		NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure we disable limiter before config */
+	wmb();
+
+	reg_val = readl_relaxed(NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = lim->bw << NOC_QOS_LIMITBW_BWn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_LIMITBW_BWn_BMSK))) |
+		(val & NOC_QOS_LIMITBW_BWn_BMSK)),
+		NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = lim->sat << NOC_QOS_LIMITBW_SATn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_LIMITBW_SATn_BMSK))) |
+		(val & NOC_QOS_LIMITBW_SATn_BMSK)),
+		NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure qos limiter settings in place before possibly enabling */
+	wmb();
+
+	reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = lim_en << NOC_QOS_MCTL_LIMIT_ENn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_MCTL_LIMIT_ENn_BMSK))) |
+		(val & NOC_QOS_MCTL_LIMIT_ENn_BMSK)),
+		NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure qos limiter writes take place before exiting */
+	wmb();
+}
+
+static void noc_set_qos_regulator(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		struct msm_bus_noc_regulator *reg,
+		struct msm_bus_noc_regulator_mode *reg_mode)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport,
+		qos_delta)) & (NOC_QOS_REGUL0CTL_WRENn_BMSK |
+						NOC_QOS_REGUL0CTL_RDENn_BMSK);
+
+	writel_relaxed((reg_val & (~(NOC_QOS_REGUL0CTL_WRENn_BMSK |
+						NOC_QOS_REGUL0CTL_RDENn_BMSK))),
+		NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure qos regulator is disabled before configuring */
+	wmb();
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_REGUL0CTL_HI_PRIOn_BMSK;
+	val = reg->hi_prio << NOC_QOS_REGUL0CTL_HI_PRIOn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0CTL_HI_PRIOn_BMSK))) |
+		(val & NOC_QOS_REGUL0CTL_HI_PRIOn_BMSK)),
+		NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_REGUL0CTL_LW_PRIOn_BMSK;
+	val = reg->low_prio << NOC_QOS_REGUL0CTL_LW_PRIOn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0CTL_LW_PRIOn_BMSK))) |
+		(val & NOC_QOS_REGUL0CTL_LW_PRIOn_BMSK)),
+		NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0BWn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_REGUL0BW_BWn_BMSK;
+	val = reg->bw << NOC_QOS_REGUL0BW_BWn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0BW_BWn_BMSK))) |
+		(val & NOC_QOS_REGUL0BW_BWn_BMSK)),
+		NOC_QOS_REGUL0BWn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0BWn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_REGUL0BW_SATn_BMSK;
+	val = reg->sat << NOC_QOS_REGUL0BW_SATn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0BW_SATn_BMSK))) |
+		(val & NOC_QOS_REGUL0BW_SATn_BMSK)),
+		NOC_QOS_REGUL0BWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure regulator is configured before possibly enabling */
+	wmb();
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = reg_mode->write << NOC_QOS_REGUL0CTL_WRENn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0CTL_WRENn_BMSK))) |
+		(val & NOC_QOS_REGUL0CTL_WRENn_BMSK)),
+		NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = reg_mode->read << NOC_QOS_REGUL0CTL_RDENn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0CTL_RDENn_BMSK))) |
+		(val & NOC_QOS_REGUL0CTL_RDENn_BMSK)),
+		NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure regulator is ready before exiting */
+	wmb();
+}
+
+static void noc_set_qos_forwarding(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		bool urg_fwd_en)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = (urg_fwd_en ? 1:0) << NOC_QOS_MCTL_URGFWD_ENn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_MCTL_URGFWD_ENn_BMSK))) |
+		(val & NOC_QOS_MCTL_URGFWD_ENn_BMSK)),
+		NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure qos priority is set before exiting */
+	wmb();
+}
+
+void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off,
+	uint32_t qos_freq,
+	uint32_t mport, uint32_t qos_delta, uint8_t perm_mode,
+	struct msm_bus_noc_qos_bw *qbw)
+{
+	if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
+		NOC_QOS_PERM_MODE_REGULATOR)) {
+		uint32_t bw_val = readl_relaxed(NOC_QOS_BWn_ADDR(
+			base, qos_off, mport, qos_delta)) & NOC_QOS_BWn_BW_BMSK;
+		uint32_t sat = readl_relaxed(NOC_QOS_SATn_ADDR(
+			base, qos_off, mport, qos_delta))
+						& NOC_QOS_SATn_SAT_BMSK;
+
+		qbw->bw = noc_bw(bw_val, qos_freq);
+		qbw->ws = noc_ws(qbw->bw, sat, qos_freq);
+	} else {
+		qbw->bw = 0;
+		qbw->ws = 0;
+	}
+}
+
+static int msm_bus_noc_qos_init(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	struct msm_bus_noc_qos_params *qos_params;
+	int ret = 0;
+	int i;
+
+	qos_params = &info->node_info->qos_params;
+
+	if (!info->node_info->qport) {
+		MSM_BUS_DBG("No QoS Ports to init\n");
+		ret = 0;
+		goto err_qos_init;
+	}
+
+	for (i = 0; i < info->node_info->num_qports; i++) {
+		noc_set_qos_dflt_prio(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					qos_params->prio_dflt);
+
+		noc_set_qos_limiter(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					&qos_params->limiter,
+					qos_params->limiter_en);
+
+		noc_set_qos_regulator(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					&qos_params->reg,
+					&qos_params->reg_mode);
+
+		noc_set_qos_forwarding(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					qos_params->urg_fwd_en);
+	}
+err_qos_init:
+	return ret;
+}
+
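+/*
+ * Disconnect or reconnect the ports listed in node_info->disable_ports
+ * from the NoC sideband manager. Disabling sets the port bit in
+ * FLAGOUTSET0 and polls SENSEIN0 until the bit reads back as set;
+ * enabling clears it via FLAGOUTCLR0 and polls until the bit reads back
+ * as clear. Each port is given READ_TIMEOUT_MS (about 1 ms) to settle,
+ * polled every READ_DELAY_US (10 us).
+ */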
+static int msm_bus_noc_sbm_config(struct msm_bus_node_device_type *node_dev,
+				void __iomem *noc_base, uint32_t sbm_offset,
+				bool enable)
+{
+	int ret = 0, idx;
+	unsigned long j, j_timeout;
+	uint32_t flagset_offset, flagclr_offset, sense_offset;
+
+	for (idx = 0; idx < node_dev->node_info->num_disable_ports; idx++) {
+		uint32_t disable_port = node_dev->node_info->disable_ports[idx];
+		uint32_t reg_val = 0;
+
+		if (disable_port >= 64) {
+			return -EINVAL;
+		} else if (disable_port < 32) {
+			flagset_offset = DISABLE_SBM_FLAGOUTSET0_LOW_OFF;
+			flagclr_offset = DISABLE_SBM_FLAGOUTCLR0_LOW_OFF;
+			sense_offset = DISABLE_SBM_SENSEIN0_LOW_OFF;
+		} else {
+			flagset_offset = DISABLE_SBM_FLAGOUTSET0_HIGH_OFF;
+			flagclr_offset = DISABLE_SBM_FLAGOUTCLR0_HIGH_OFF;
+			sense_offset = DISABLE_SBM_SENSEIN0_HIGH_OFF;
+			disable_port = disable_port - 32;
+		}
+
+		if (enable) {
+			reg_val |= 0x1 << disable_port;
+			writel_relaxed(reg_val, DISABLE_SBM_REG_BASE(noc_base,
+					sbm_offset, flagclr_offset));
+			/* Ensure SBM reconnect took place */
+			wmb();
+
+			j = jiffies;
+			j_timeout = j + READ_TIMEOUT_MS;
+			while (((0x1 << disable_port) &
+				readl_relaxed(DISABLE_SBM_REG_BASE(noc_base,
+				sbm_offset, sense_offset)))) {
+				udelay(READ_DELAY_US);
+				j = jiffies;
+				if (time_after(j, j_timeout)) {
+					MSM_BUS_ERR("%s: SBM enable timeout.\n",
+								 __func__);
+					goto sbm_timeout;
+				}
+			}
+		} else {
+			reg_val |= 0x1 << disable_port;
+			writel_relaxed(reg_val, DISABLE_SBM_REG_BASE(noc_base,
+					sbm_offset, flagset_offset));
+			/* Ensure SBM disconnect took place */
+			wmb();
+
+			j = jiffies;
+			j_timeout = j + READ_TIMEOUT_MS;
+			while (!((0x1 << disable_port) &
+				readl_relaxed(DISABLE_SBM_REG_BASE(noc_base,
+				sbm_offset, sense_offset)))) {
+				udelay(READ_DELAY_US);
+				j = jiffies;
+				if (time_after(j, j_timeout)) {
+					MSM_BUS_ERR("%s: SBM disable timeout.\n"
+								, __func__);
+					goto sbm_timeout;
+				}
+			}
+		}
+	}
+	return ret;
+
+sbm_timeout:
+	return -ETIME;
+}
+
+int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	if (!bus_dev)
+		return -ENODEV;
+
+	bus_dev->fabdev->noc_ops.qos_init = msm_bus_noc_qos_init;
+	bus_dev->fabdev->noc_ops.sbm_config = msm_bus_noc_sbm_config;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_bus_noc_set_ops);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of.c b/drivers/soc/qcom/msm_bus/msm_bus_of.c
new file mode 100644
index 0000000..3d025c4
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of.c
@@ -0,0 +1,742 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+
+static const char * const hw_sel_name[] = {"RPM", "NoC", "BIMC", NULL};
+static const char * const mode_sel_name[] = {"Fixed", "Limiter", "Bypass",
+						"Regulator", NULL};
+
+static int get_num(const char *const str[], const char *name)
+{
+	int i = 0;
+
+	do {
+		if (!strcmp(name, str[i]))
+			return i;
+
+		i++;
+	} while (str[i] != NULL);
+
+	pr_err("Error: string %s not found\n", name);
+	return -EINVAL;
+}
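+
+/*
+ * Illustrative use of get_num() (example values only):
+ * get_num(hw_sel_name, "NoC") returns 1, while an unrecognized string
+ * such as "XYZ" returns -EINVAL.
+ */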
+
+static struct msm_bus_scale_pdata *get_pdata(struct device *dev,
+	struct device_node *of_node)
+{
+	struct msm_bus_scale_pdata *pdata = NULL;
+	struct msm_bus_paths *usecase = NULL;
+	struct msm_bus_lat_vectors *usecase_lat = NULL;
+	int i = 0, j, ret, num_usecases = 0, num_paths, len;
+	const uint32_t *vec_arr = NULL;
+	bool mem_err = false;
+
+	if (!dev) {
+		pr_err("Error: Null device\n");
+		return NULL;
+	}
+
+	pdata = devm_kzalloc(dev, sizeof(struct msm_bus_scale_pdata),
+		GFP_KERNEL);
+	if (!pdata) {
+		mem_err = true;
+		goto err;
+	}
+
+	ret = of_property_read_string(of_node, "qcom,msm-bus,name",
+		&pdata->name);
+	if (ret) {
+		pr_err("Error: Client name not found\n");
+		goto err;
+	}
+
+	ret = of_property_read_u32(of_node, "qcom,msm-bus,num-cases",
+		&num_usecases);
+	if (ret) {
+		pr_err("Error: num-usecases not found\n");
+		goto err;
+	}
+
+	pdata->num_usecases = num_usecases;
+
+	if (of_property_read_bool(of_node, "qcom,msm-bus,active-only"))
+		pdata->active_only = 1;
+	else {
+		pr_debug("active_only flag absent.\n");
+		pr_debug("Using dual context by default\n");
+	}
+
+	pdata->alc = of_property_read_bool(of_node, "qcom,msm-bus,alc-voter");
+
+	if (pdata->alc) {
+		usecase_lat = devm_kzalloc(dev,
+				(sizeof(struct msm_bus_lat_vectors) *
+				pdata->num_usecases), GFP_KERNEL);
+		if (!usecase_lat) {
+			mem_err = true;
+			goto err;
+		}
+
+		vec_arr = of_get_property(of_node,
+					"qcom,msm-bus,vectors-alc", &len);
+		if (vec_arr == NULL) {
+			pr_err("Error: Lat vector array not found\n");
+			goto err;
+		}
+
+		if (len != num_usecases * sizeof(uint32_t) * 2) {
+			pr_err("Error: Length-error on getting vectors\n");
+			goto err;
+		}
+
+		for (i = 0; i < num_usecases; i++) {
+			int index = i * 2;
+
+			usecase_lat[i].fal_ns = (uint64_t)
+				be32_to_cpu(vec_arr[index]);
+			usecase_lat[i].idle_t_ns = (uint64_t)
+				be32_to_cpu(vec_arr[index + 1]);
+		}
+
+		pdata->usecase_lat = usecase_lat;
+		return pdata;
+	}
+
+	usecase = devm_kzalloc(dev, (sizeof(struct msm_bus_paths) *
+		pdata->num_usecases), GFP_KERNEL);
+	if (!usecase) {
+		mem_err = true;
+		goto err;
+	}
+
+	ret = of_property_read_u32(of_node, "qcom,msm-bus,num-paths",
+		&num_paths);
+	if (ret) {
+		pr_err("Error: num_paths not found\n");
+		goto err;
+	}
+
+	vec_arr = of_get_property(of_node, "qcom,msm-bus,vectors-KBps", &len);
+	if (vec_arr == NULL) {
+		pr_err("Error: Vector array not found\n");
+		goto err;
+	}
+
+	if (len != num_usecases * num_paths * sizeof(uint32_t) * 4) {
+		pr_err("Error: Length-error on getting vectors\n");
+		goto err;
+	}
+
+	for (i = 0; i < num_usecases; i++) {
+		usecase[i].num_paths = num_paths;
+		usecase[i].vectors = devm_kzalloc(dev, num_paths *
+			sizeof(struct msm_bus_vectors), GFP_KERNEL);
+		if (!usecase[i].vectors) {
+			mem_err = true;
+			pr_err("Error: Mem alloc failure in vectors\n");
+			goto err;
+		}
+
+		for (j = 0; j < num_paths; j++) {
+			int index = ((i * num_paths) + j) * 4;
+
+			usecase[i].vectors[j].src = be32_to_cpu(vec_arr[index]);
+			usecase[i].vectors[j].dst =
+				be32_to_cpu(vec_arr[index + 1]);
+			usecase[i].vectors[j].ab = (uint64_t)
+				KBTOB(be32_to_cpu(vec_arr[index + 2]));
+			usecase[i].vectors[j].ib = (uint64_t)
+				KBTOB(be32_to_cpu(vec_arr[index + 3]));
+		}
+	}
+
+	pdata->usecase = usecase;
+	return pdata;
+err:
+	if (mem_err) {
+		for (; i > 0; i--)
+			devm_kfree(dev, usecase[i-1].vectors);
+	}
+
+	return NULL;
+}
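+
+/*
+ * Illustrative client node layout parsed by get_pdata() (property values
+ * are examples only):
+ *
+ *	qcom,msm-bus,name = "example-client";
+ *	qcom,msm-bus,num-cases = <2>;
+ *	qcom,msm-bus,num-paths = <1>;
+ *	qcom,msm-bus,vectors-KBps =
+ *		<mas-id slv-id 0 0>,
+ *		<mas-id slv-id 100000 200000>;
+ *
+ * Each vector is read as <src dst ab ib>, with ab/ib specified in KBps
+ * and converted to bytes/sec via KBTOB().
+ */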
+
+/**
+ * msm_bus_cl_get_pdata() - Generate bus client data from the device tree
+ * provided by clients.
+ *
+ * @pdev: Platform device whose device-tree node describes the bus client
+ *
+ * The function returns a valid pointer to the allocated bus-scale-pdata
+ * if the vectors were correctly read from the client's device node.
+ * Any error in reading or parsing the device node will return NULL
+ * to the caller.
+ */
+struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev)
+{
+	struct device_node *of_node;
+	struct msm_bus_scale_pdata *pdata = NULL;
+
+	if (!pdev) {
+		pr_err("Error: Null Platform device\n");
+		return NULL;
+	}
+
+	of_node = pdev->dev.of_node;
+	pdata = get_pdata(&pdev->dev, of_node);
+	if (!pdata) {
+		pr_err("client has to provide missing entry for successful registration\n");
+		return NULL;
+	}
+
+	return pdata;
+}
+EXPORT_SYMBOL(msm_bus_cl_get_pdata);
+
+/**
+ * msm_bus_cl_get_pdata_from_dev() - Generate bus client data from the
+ * device tree provided by clients.
+ *
+ * @dev: Device whose device-tree node describes the bus client
+ *
+ * The function returns a valid pointer to the allocated bus-scale-pdata
+ * if the vectors were correctly read from the client's device node.
+ * Any error in reading or parsing the device node will return NULL
+ * to the caller.
+ */
+struct msm_bus_scale_pdata *msm_bus_cl_get_pdata_from_dev(struct device *dev)
+{
+	struct device_node *of_node;
+	struct msm_bus_scale_pdata *pdata = NULL;
+
+	of_node = dev->of_node;
+
+	if (!of_node)
+		return NULL;
+
+	pdata = get_pdata(dev, of_node);
+	if (!pdata) {
+		pr_err("client has to provide missing entry for successful registration\n");
+		return NULL;
+	}
+
+	return pdata;
+}
+EXPORT_SYMBOL(msm_bus_cl_get_pdata_from_dev);
+
+/**
+ * msm_bus_pdata_from_node() - Generate bus client data from a device tree
+ * node provided by clients. This function should be used when a client
+ * driver needs to register multiple bus-clients from a single device-tree
+ * node associated with the platform-device.
+ *
+ * @pdev: Platform device associated with the device-tree node
+ * @of_node: The subnode containing information about the bus scaling data
+ *
+ * The function returns a valid pointer to the allocated bus-scale-pdata
+ * if the vectors were correctly read from the client's device node.
+ * Any error in reading or parsing the device node will return NULL
+ * to the caller.
+ */
+struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+		struct platform_device *pdev, struct device_node *of_node)
+{
+	struct msm_bus_scale_pdata *pdata = NULL;
+
+	if (!pdev) {
+		pr_err("Error: Null Platform device\n");
+		return NULL;
+	}
+
+	if (!of_node) {
+		pr_err("Error: Null of_node passed to bus driver\n");
+		return NULL;
+	}
+
+	pdata = get_pdata(&pdev->dev, of_node);
+	if (!pdata) {
+		pr_err("client has to provide missing entry for successful registration\n");
+		return NULL;
+	}
+
+	return pdata;
+}
+EXPORT_SYMBOL(msm_bus_pdata_from_node);
+
+static int *get_arr(struct platform_device *pdev,
+		const struct device_node *node, const char *prop,
+		int *nports)
+{
+	int size = 0, ret;
+	int *arr = NULL;
+
+	if (of_get_property(node, prop, &size)) {
+		*nports = size / sizeof(int);
+	} else {
+		pr_debug("Property %s not available\n", prop);
+		*nports = 0;
+		return NULL;
+	}
+
+	if (!size) {
+		*nports = 0;
+		return NULL;
+	}
+
+	arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(arr)) {
+		pr_err("Error: Failed to alloc mem for %s\n", prop);
+		return NULL;
+	}
+
+	ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+	if (ret) {
+		pr_err("Error in reading property: %s\n", prop);
+		goto err;
+	}
+
+	return arr;
+err:
+	devm_kfree(&pdev->dev, arr);
+	return NULL;
+}
+
+static u64 *get_th_params(struct platform_device *pdev,
+		const struct device_node *node, const char *prop,
+		int *nports)
+{
+	int size = 0, ret;
+	u64 *ret_arr = NULL;
+	int *arr = NULL;
+	int i;
+
+	if (of_get_property(node, prop, &size)) {
+		*nports = size / sizeof(int);
+	} else {
+		pr_debug("Property %s not available\n", prop);
+		*nports = 0;
+		return NULL;
+	}
+
+	if (!size) {
+		*nports = 0;
+		return NULL;
+	}
+
+	ret_arr = devm_kzalloc(&pdev->dev, (*nports * sizeof(u64)),
+							GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(ret_arr)) {
+		pr_err("Error: Failed to alloc mem for ret arr %s\n", prop);
+		return NULL;
+	}
+
+	arr = kzalloc(size, GFP_KERNEL);
+	if ((ZERO_OR_NULL_PTR(arr))) {
+		pr_err("Error: Failed to alloc temp mem for %s\n", prop);
+		return NULL;
+	}
+
+	ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+	if (ret) {
+		pr_err("Error in reading property: %s\n", prop);
+		goto err;
+	}
+
+	for (i = 0; i < *nports; i++)
+		ret_arr[i] = (uint64_t)KBTOB(arr[i]);
+
+	MSM_BUS_DBG("%s: num entries %d prop %s", __func__, *nports, prop);
+
+	for (i = 0; i < *nports; i++)
+		MSM_BUS_DBG("Th %d val %llu", i, ret_arr[i]);
+
+	kfree(arr);
+	return ret_arr;
+err:
+	kfree(arr);
+	devm_kfree(&pdev->dev, ret_arr);
+	return NULL;
+}
+
+static struct msm_bus_node_info *get_nodes(struct device_node *of_node,
+	struct platform_device *pdev,
+	struct msm_bus_fabric_registration *pdata)
+{
+	struct msm_bus_node_info *info;
+	struct device_node *child_node = NULL;
+	int i = 0, ret;
+	int num_bw = 0;
+	u32 temp;
+
+	for_each_child_of_node(of_node, child_node) {
+		i++;
+	}
+
+	pdata->len = i;
+	info = devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_node_info) *
+			pdata->len, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(info)) {
+		pr_err("Failed to alloc memory for nodes: %d\n", pdata->len);
+		goto err;
+	}
+
+	i = 0;
+	child_node = NULL;
+	for_each_child_of_node(of_node, child_node) {
+		const char *sel_str;
+
+		ret = of_property_read_string(child_node, "label",
+			&info[i].name);
+		if (ret)
+			pr_err("Error reading node label\n");
+
+		ret = of_property_read_u32(child_node, "cell-id", &info[i].id);
+		if (ret) {
+			pr_err("Error reading node id\n");
+			goto err;
+		}
+
+		if (of_property_read_bool(child_node, "qcom,gateway"))
+			info[i].gateway = 1;
+
+		of_property_read_u32(child_node, "qcom,mas-hw-id",
+			&info[i].mas_hw_id);
+
+		of_property_read_u32(child_node, "qcom,slv-hw-id",
+			&info[i].slv_hw_id);
+		info[i].masterp = get_arr(pdev, child_node,
+					"qcom,masterp", &info[i].num_mports);
+		/* No need to store number of qports */
+		info[i].qport = get_arr(pdev, child_node,
+					"qcom,qport", &ret);
+		pdata->nmasters += info[i].num_mports;
+
+
+		info[i].slavep = get_arr(pdev, child_node,
+					"qcom,slavep", &info[i].num_sports);
+		pdata->nslaves += info[i].num_sports;
+
+
+		info[i].tier = get_arr(pdev, child_node,
+					"qcom,tier", &info[i].num_tiers);
+
+		if (of_property_read_bool(child_node, "qcom,ahb"))
+			info[i].ahb = 1;
+
+		ret = of_property_read_string(child_node, "qcom,hw-sel",
+			&sel_str);
+		if (ret)
+			info[i].hw_sel = 0;
+		else {
+			ret =  get_num(hw_sel_name, sel_str);
+			if (ret < 0) {
+				pr_err("Invalid hw-sel\n");
+				goto err;
+			}
+
+			info[i].hw_sel = ret;
+		}
+
+		of_property_read_u32(child_node, "qcom,buswidth",
+			&info[i].buswidth);
+		of_property_read_u32(child_node, "qcom,ws", &info[i].ws);
+
+		info[i].dual_conf =
+			of_property_read_bool(child_node, "qcom,dual-conf");
+
+
+		info[i].th = get_th_params(pdev, child_node, "qcom,thresh",
+						&info[i].num_thresh);
+
+		info[i].bimc_bw = get_th_params(pdev, child_node,
+						"qcom,bimc,bw", &num_bw);
+
+		if (num_bw != info[i].num_thresh) {
+			pr_err("%s:num_bw %d must equal num_thresh %d\n",
+				__func__, num_bw, info[i].num_thresh);
+			pr_err("%s:Err setting up dual conf for %s\n",
+				__func__, info[i].name);
+			goto err;
+		}
+
+		of_property_read_u32(child_node, "qcom,bimc,gp",
+			&info[i].bimc_gp);
+		of_property_read_u32(child_node, "qcom,bimc,thmp",
+			&info[i].bimc_thmp);
+
+		ret = of_property_read_string(child_node, "qcom,mode-thresh",
+			&sel_str);
+		if (ret)
+			info[i].mode_thresh = 0;
+		else {
+			ret = get_num(mode_sel_name, sel_str);
+			if (ret < 0) {
+				pr_err("Unknown mode :%s\n", sel_str);
+				goto err;
+			}
+
+			info[i].mode_thresh = ret;
+			MSM_BUS_DBG("AXI: THreshold mode set: %d\n",
+					info[i].mode_thresh);
+		}
+
+		ret = of_property_read_string(child_node, "qcom,mode",
+				&sel_str);
+
+		if (ret)
+			info[i].mode = 0;
+		else {
+			ret = get_num(mode_sel_name, sel_str);
+			if (ret < 0) {
+				pr_err("Unknown mode :%s\n", sel_str);
+				goto err;
+			}
+
+			info[i].mode = ret;
+		}
+
+		info[i].nr_lim =
+			of_property_read_bool(child_node, "qcom,nr-lim");
+
+		ret = of_property_read_u32(child_node, "qcom,ff",
+							&info[i].ff);
+		if (ret) {
+			pr_debug("fudge factor not present %d\n", info[i].id);
+			info[i].ff = 0;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,floor-bw",
+						&temp);
+		if (ret) {
+			pr_debug("fabdev floor bw not present %d\n",
+							info[i].id);
+			info[i].floor_bw = 0;
+		} else {
+			info[i].floor_bw = KBTOB(temp);
+		}
+
+		info[i].rt_mas =
+			of_property_read_bool(child_node, "qcom,rt-mas");
+
+		ret = of_property_read_string(child_node, "qcom,perm-mode",
+			&sel_str);
+		if (ret)
+			info[i].perm_mode = 0;
+		else {
+			ret = get_num(mode_sel_name, sel_str);
+			if (ret < 0)
+				goto err;
+
+			info[i].perm_mode = 1 << ret;
+		}
+
+		of_property_read_u32(child_node, "qcom,prio-lvl",
+			&info[i].prio_lvl);
+		of_property_read_u32(child_node, "qcom,prio-rd",
+			&info[i].prio_rd);
+		of_property_read_u32(child_node, "qcom,prio-wr",
+			&info[i].prio_wr);
+		of_property_read_u32(child_node, "qcom,prio0", &info[i].prio0);
+		of_property_read_u32(child_node, "qcom,prio1", &info[i].prio1);
+		ret = of_property_read_string(child_node, "qcom,slaveclk-dual",
+			&info[i].slaveclk[DUAL_CTX]);
+		if (!ret)
+			pr_debug("Got slaveclk_dual: %s\n",
+				info[i].slaveclk[DUAL_CTX]);
+		else
+			info[i].slaveclk[DUAL_CTX] = NULL;
+
+		ret = of_property_read_string(child_node,
+			"qcom,slaveclk-active", &info[i].slaveclk[ACTIVE_CTX]);
+		if (!ret)
+			pr_debug("Got slaveclk_active\n");
+		else
+			info[i].slaveclk[ACTIVE_CTX] = NULL;
+
+		ret = of_property_read_string(child_node, "qcom,memclk-dual",
+			&info[i].memclk[DUAL_CTX]);
+		if (!ret)
+			pr_debug("Got memclk_dual\n");
+		else
+			info[i].memclk[DUAL_CTX] = NULL;
+
+		ret = of_property_read_string(child_node, "qcom,memclk-active",
+			&info[i].memclk[ACTIVE_CTX]);
+		if (!ret)
+			pr_debug("Got memclk_active\n");
+		else
+			info[i].memclk[ACTIVE_CTX] = NULL;
+
+		ret = of_property_read_string(child_node, "qcom,iface-clk-node",
+			&info[i].iface_clk_node);
+		if (!ret)
+			pr_debug("Got iface_clk_node\n");
+		else
+			info[i].iface_clk_node = NULL;
+
+		pr_debug("Node name: %s\n", info[i].name);
+		i++;
+	}
+
+	pr_debug("Bus %d added: %d masters\n", pdata->id, pdata->nmasters);
+	pr_debug("Bus %d added: %d slaves\n", pdata->id, pdata->nslaves);
+	return info;
+err:
+	return NULL;
+}
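+
+/*
+ * Illustrative child (master/slave) node consumed by get_nodes()
+ * (property values are examples only):
+ *
+ *	mas-example {
+ *		cell-id = <1>;
+ *		label = "mas-example";
+ *		qcom,masterp = <0>;
+ *		qcom,qport = <0>;
+ *		qcom,buswidth = <8>;
+ *		qcom,hw-sel = "NoC";
+ *	};
+ */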
+
+void msm_bus_of_get_nfab(struct platform_device *pdev,
+		struct msm_bus_fabric_registration *pdata)
+{
+	struct device_node *of_node;
+	int ret, nfab = 0;
+
+	if (!pdev) {
+		pr_err("Error: Null platform device\n");
+		return;
+	}
+
+	of_node = pdev->dev.of_node;
+	ret = of_property_read_u32(of_node, "qcom,nfab",
+		&nfab);
+	if (!ret)
+		pr_debug("Fab_of: Read number of buses: %u\n", nfab);
+
+	msm_bus_board_set_nfab(pdata, nfab);
+}
+
+struct msm_bus_fabric_registration
+	*msm_bus_of_get_fab_data(struct platform_device *pdev)
+{
+	struct device_node *of_node;
+	struct msm_bus_fabric_registration *pdata;
+	bool mem_err = false;
+	int ret = 0;
+	const char *sel_str;
+	u32 temp;
+
+	if (!pdev) {
+		pr_err("Error: Null platform device\n");
+		return NULL;
+	}
+
+	of_node = pdev->dev.of_node;
+	pdata = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_fabric_registration), GFP_KERNEL);
+	if (!pdata) {
+		mem_err = true;
+		goto err;
+	}
+
+	ret = of_property_read_string(of_node, "label", &pdata->name);
+	if (ret) {
+		pr_err("Error: label not found\n");
+		goto err;
+	}
+	pr_debug("Fab_of: Read name: %s\n", pdata->name);
+
+	ret = of_property_read_u32(of_node, "cell-id",
+		&pdata->id);
+	if (ret) {
+		pr_err("Error: num-usecases not found\n");
+		goto err;
+	}
+	pr_debug("Fab_of: Read id: %u\n", pdata->id);
+
+	if (of_property_read_bool(of_node, "qcom,ahb"))
+		pdata->ahb = 1;
+
+	ret = of_property_read_string(of_node, "qcom,fabclk-dual",
+		&pdata->fabclk[DUAL_CTX]);
+	if (ret) {
+		pr_debug("fabclk_dual not available\n");
+		pdata->fabclk[DUAL_CTX] = NULL;
+	} else
+		pr_debug("Fab_of: Read clk dual ctx: %s\n",
+			pdata->fabclk[DUAL_CTX]);
+	ret = of_property_read_string(of_node, "qcom,fabclk-active",
+		&pdata->fabclk[ACTIVE_CTX]);
+	if (ret) {
+		pr_debug("Error: fabclk_active not available\n");
+		pdata->fabclk[ACTIVE_CTX] = NULL;
+	} else
+		pr_debug("Fab_of: Read clk act ctx: %s\n",
+			pdata->fabclk[ACTIVE_CTX]);
+
+	ret = of_property_read_u32(of_node, "qcom,ntieredslaves",
+		&pdata->ntieredslaves);
+	if (ret) {
+		pr_err("Error: ntieredslaves not found\n");
+		goto err;
+	}
+
+	ret = of_property_read_u32(of_node, "qcom,qos-freq", &pdata->qos_freq);
+	if (ret)
+		pr_debug("qos_freq not available\n");
+
+	ret = of_property_read_string(of_node, "qcom,hw-sel", &sel_str);
+	if (ret) {
+		pr_err("Error: hw_sel not found\n");
+		goto err;
+	} else {
+		ret = get_num(hw_sel_name, sel_str);
+		if (ret < 0)
+			goto err;
+
+		pdata->hw_sel = ret;
+	}
+
+	if (of_property_read_bool(of_node, "qcom,virt"))
+		pdata->virt = true;
+
+	ret = of_property_read_u32(of_node, "qcom,qos-baseoffset",
+						&pdata->qos_baseoffset);
+	if (ret)
+		pr_debug("%s:qos_baseoffset not available\n", __func__);
+
+	ret = of_property_read_u32(of_node, "qcom,qos-delta",
+						&pdata->qos_delta);
+	if (ret)
+		pr_debug("%s:qos_delta not available\n", __func__);
+
+	if (of_property_read_bool(of_node, "qcom,rpm-en"))
+		pdata->rpm_enabled = 1;
+
+	ret = of_property_read_u32(of_node, "qcom,nr-lim-thresh",
+						&temp);
+
+	if (ret) {
+		pr_err("nr-lim threshold not specified\n");
+		pdata->nr_lim_thresh = 0;
+	} else {
+		pdata->nr_lim_thresh = KBTOB(temp);
+	}
+
+	ret = of_property_read_u32(of_node, "qcom,eff-fact",
+						&pdata->eff_fact);
+	if (ret) {
+		pr_err("Fab eff-factor not present\n");
+		pdata->eff_fact = 0;
+	}
+
+	pdata->info = get_nodes(of_node, pdev, pdata);
+	return pdata;
+err:
+	return NULL;
+}
+EXPORT_SYMBOL(msm_bus_of_get_fab_data);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
new file mode 100644
index 0000000..bc5e70e
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
@@ -0,0 +1,886 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+
+#define DEFAULT_QOS_FREQ	19200
+#define DEFAULT_UTIL_FACT	100
+#define DEFAULT_VRAIL_COMP	100
+#define DEFAULT_AGG_SCHEME	AGG_SCHEME_LEG
+
+static int get_qos_mode(struct platform_device *pdev,
+			struct device_node *node, const char *qos_mode)
+{
+	static char const *qos_names[] = {"fixed", "limiter",
+						"bypass", "regulator"};
+	int i = 0;
+	int ret = -1;
+
+	if (!qos_mode)
+		goto exit_get_qos_mode;
+
+	for (i = 0; i < ARRAY_SIZE(qos_names); i++) {
+		if (!strcmp(qos_mode, qos_names[i]))
+			break;
+	}
+	if (i == ARRAY_SIZE(qos_names))
+		dev_err(&pdev->dev, "Cannot match mode qos %s using Bypass\n",
+				qos_mode);
+	else
+		ret = i;
+
+exit_get_qos_mode:
+	return ret;
+}
+
+static int *get_arr(struct platform_device *pdev,
+		struct device_node *node, const char *prop,
+		int *nports)
+{
+	int size = 0, ret;
+	int *arr = NULL;
+
+	if (of_get_property(node, prop, &size)) {
+		*nports = size / sizeof(int);
+	} else {
+		dev_dbg(&pdev->dev, "Property %s not available\n", prop);
+		*nports = 0;
+		return NULL;
+	}
+
+	arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if ((size > 0) && ZERO_OR_NULL_PTR(arr))
+		return NULL;
+
+	ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+	if (ret) {
+		dev_err(&pdev->dev, "Error in reading property: %s\n", prop);
+		goto arr_err;
+	}
+
+	return arr;
+arr_err:
+	devm_kfree(&pdev->dev, arr);
+	return NULL;
+}
+
+static struct msm_bus_fab_device_type *get_fab_device_info(
+		struct device_node *dev_node,
+		struct platform_device *pdev)
+{
+	struct msm_bus_fab_device_type *fab_dev;
+	int ret;
+	struct resource *res;
+	const char *base_name;
+
+	fab_dev = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_fab_device_type),
+			GFP_KERNEL);
+	if (!fab_dev)
+		return NULL;
+
+	ret = of_property_read_string(dev_node, "qcom,base-name", &base_name);
+	if (ret) {
+		dev_err(&pdev->dev, "Error: Unable to get base address name\n");
+		goto fab_dev_err;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, base_name);
+	if (!res) {
+		dev_err(&pdev->dev, "Error getting qos base addr %s\n",
+								base_name);
+		goto fab_dev_err;
+	}
+	fab_dev->pqos_base = res->start;
+	fab_dev->qos_range = resource_size(res);
+	fab_dev->bypass_qos_prg = of_property_read_bool(dev_node,
+						"qcom,bypass-qos-prg");
+
+	ret = of_property_read_u32(dev_node, "qcom,base-offset",
+			&fab_dev->base_offset);
+	if (ret)
+		dev_dbg(&pdev->dev, "Bus base offset is missing\n");
+
+	ret = of_property_read_u32(dev_node, "qcom,qos-off",
+			&fab_dev->qos_off);
+	if (ret)
+		dev_dbg(&pdev->dev, "Bus qos off is missing\n");
+
+
+	ret = of_property_read_u32(dev_node, "qcom,bus-type",
+						&fab_dev->bus_type);
+	if (ret) {
+		dev_warn(&pdev->dev, "Bus type is missing\n");
+		goto fab_dev_err;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,qos-freq",
+						&fab_dev->qos_freq);
+	if (ret) {
+		dev_dbg(&pdev->dev, "Bus qos freq is missing\n");
+		fab_dev->qos_freq = DEFAULT_QOS_FREQ;
+	}
+
+	return fab_dev;
+
+fab_dev_err:
+	devm_kfree(&pdev->dev, fab_dev);
+	fab_dev = NULL;
+	return NULL;
+}
+
+static void get_qos_params(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_info_type *node_info)
+{
+	const char *qos_mode = NULL;
+	int ret;
+	unsigned int temp;
+
+	ret = of_property_read_string(dev_node, "qcom,qos-mode", &qos_mode);
+
+	if (ret)
+		node_info->qos_params.mode = -1;
+	else
+		node_info->qos_params.mode = get_qos_mode(pdev, dev_node,
+								qos_mode);
+
+	of_property_read_u32(dev_node, "qcom,prio-lvl",
+					&node_info->qos_params.prio_lvl);
+
+	of_property_read_u32(dev_node, "qcom,prio1",
+						&node_info->qos_params.prio1);
+
+	of_property_read_u32(dev_node, "qcom,prio0",
+						&node_info->qos_params.prio0);
+
+	of_property_read_u32(dev_node, "qcom,reg-prio1",
+					&node_info->qos_params.reg_prio1);
+
+	of_property_read_u32(dev_node, "qcom,reg-prio0",
+					&node_info->qos_params.reg_prio0);
+
+	of_property_read_u32(dev_node, "qcom,prio-rd",
+					&node_info->qos_params.prio_rd);
+
+	of_property_read_u32(dev_node, "qcom,prio-wr",
+						&node_info->qos_params.prio_wr);
+
+	of_property_read_u32(dev_node, "qcom,gp",
+						&node_info->qos_params.gp);
+
+	of_property_read_u32(dev_node, "qcom,thmp",
+						&node_info->qos_params.thmp);
+
+	of_property_read_u32(dev_node, "qcom,ws",
+						&node_info->qos_params.ws);
+
+	ret = of_property_read_u32(dev_node, "qcom,bw_buffer", &temp);
+
+	if (ret)
+		node_info->qos_params.bw_buffer = 0;
+	else
+		node_info->qos_params.bw_buffer = KBTOB(temp);
+
+}
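+
+/*
+ * Illustrative QoS properties consumed by get_qos_params() (values are
+ * examples only):
+ *
+ *	qcom,qos-mode = "fixed";
+ *	qcom,prio1 = <1>;
+ *	qcom,prio0 = <1>;
+ *	qcom,ws = <10000>;
+ *
+ * Absent properties simply leave the corresponding qos_params fields at
+ * zero (or -1 for the mode).
+ */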
+
+static int msm_bus_of_parse_clk_array(struct device_node *dev_node,
+			struct device_node *gdsc_node,
+			struct platform_device *pdev, struct nodeclk **clk_arr,
+			int *num_clks, int id)
+{
+	int ret = 0;
+	int idx = 0;
+	struct property *prop;
+	const char *clk_name;
+	int clks = 0;
+
+	clks = of_property_count_strings(dev_node, "clock-names");
+	if (clks < 0) {
+		dev_err(&pdev->dev, "No qos clks node %d\n", id);
+		ret = clks;
+		goto exit_of_parse_clk_array;
+	}
+
+	*num_clks = clks;
+	*clk_arr = devm_kzalloc(&pdev->dev,
+			(clks * sizeof(struct nodeclk)), GFP_KERNEL);
+
+	if (!(*clk_arr)) {
+		ret = -ENOMEM;
+		*num_clks = 0;
+		goto exit_of_parse_clk_array;
+	}
+
+	of_property_for_each_string(dev_node, "clock-names", prop, clk_name) {
+		char gdsc_string[MAX_REG_NAME];
+
+		(*clk_arr)[idx].clk = of_clk_get_by_name(dev_node, clk_name);
+
+		if (IS_ERR_OR_NULL((*clk_arr)[idx].clk)) {
+			dev_err(&pdev->dev,
+				"Failed to get clk %s for bus%d\n", clk_name,
+									id);
+			continue;
+		}
+		if (strnstr(clk_name, "no-rate", strlen(clk_name)))
+			(*clk_arr)[idx].enable_only_clk = true;
+
+		scnprintf(gdsc_string, MAX_REG_NAME, "%s-supply", clk_name);
+
+		if (of_find_property(gdsc_node, gdsc_string, NULL))
+			scnprintf((*clk_arr)[idx].reg_name,
+				MAX_REG_NAME, "%s", clk_name);
+		else
+			scnprintf((*clk_arr)[idx].reg_name,
+					MAX_REG_NAME, "%c", '\0');
+
+		idx++;
+	}
+exit_of_parse_clk_array:
+	return ret;
+}
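+
+/*
+ * Illustrative clock description consumed by msm_bus_of_parse_clk_array()
+ * (names are examples only):
+ *
+ *	clock-names = "bus_clk", "bus_a_clk-no-rate";
+ *	clocks = <&clkprov 0>, <&clkprov 1>;
+ *
+ * A clock name containing "no-rate" marks that clock as enable-only, and
+ * a matching "<name>-supply" property on the gdsc node causes the clock
+ * name to be recorded as that clock's regulator name.
+ */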
+
+static void get_agg_params(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_info_type *node_info)
+{
+	int ret;
+
+
+	ret = of_property_read_u32(dev_node, "qcom,buswidth",
+					&node_info->agg_params.buswidth);
+	if (ret) {
+		dev_dbg(&pdev->dev, "Using default 8 byte buswidth, node %d\n",
+			node_info->id);
+		node_info->agg_params.buswidth = 8;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,agg-ports",
+				   &node_info->agg_params.num_aggports);
+	if (ret)
+		node_info->agg_params.num_aggports = node_info->num_qports;
+
+	ret = of_property_read_u32(dev_node, "qcom,agg-scheme",
+					&node_info->agg_params.agg_scheme);
+	if (ret) {
+		if (node_info->is_fab_dev)
+			node_info->agg_params.agg_scheme = DEFAULT_AGG_SCHEME;
+		else
+			node_info->agg_params.agg_scheme = AGG_SCHEME_NONE;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,vrail-comp",
+					&node_info->agg_params.vrail_comp);
+	if (ret) {
+		if (node_info->is_fab_dev)
+			node_info->agg_params.vrail_comp = DEFAULT_VRAIL_COMP;
+		else
+			node_info->agg_params.vrail_comp = 0;
+	}
+
+	if (node_info->agg_params.agg_scheme == AGG_SCHEME_1) {
+		uint32_t len = 0;
+		const uint32_t *util_levels;
+		int i, index = 0;
+
+		util_levels =
+			of_get_property(dev_node, "qcom,util-levels", &len);
+		if (!util_levels)
+			goto err_get_agg_params;
+
+		node_info->agg_params.num_util_levels =
+					len / (sizeof(uint32_t) * 2);
+		node_info->agg_params.util_levels = devm_kzalloc(&pdev->dev,
+			(node_info->agg_params.num_util_levels *
+			sizeof(struct node_util_levels_type)), GFP_KERNEL);
+
+		if (IS_ERR_OR_NULL(node_info->agg_params.util_levels))
+			goto err_get_agg_params;
+
+		for (i = 0; i < node_info->agg_params.num_util_levels; i++) {
+			node_info->agg_params.util_levels[i].threshold =
+				KBTOB(be32_to_cpu(util_levels[index++]));
+			node_info->agg_params.util_levels[i].util_fact =
+					be32_to_cpu(util_levels[index++]);
+			dev_dbg(&pdev->dev, "[%d]:Thresh:%llu util_fact:%d\n",
+				i,
+				node_info->agg_params.util_levels[i].threshold,
+				node_info->agg_params.util_levels[i].util_fact);
+		}
+	} else {
+		uint32_t util_fact;
+
+		ret = of_property_read_u32(dev_node, "qcom,util-fact",
+								&util_fact);
+		if (ret) {
+			if (node_info->is_fab_dev)
+				util_fact = DEFAULT_UTIL_FACT;
+			else
+				util_fact = 0;
+		}
+
+		if (util_fact) {
+			node_info->agg_params.num_util_levels = 1;
+			node_info->agg_params.util_levels =
+			devm_kzalloc(&pdev->dev,
+				(node_info->agg_params.num_util_levels *
+				sizeof(struct node_util_levels_type)),
+				GFP_KERNEL);
+			if (IS_ERR_OR_NULL(node_info->agg_params.util_levels))
+				goto err_get_agg_params;
+			node_info->agg_params.util_levels[0].util_fact =
+								util_fact;
+		}
+
+	}
+
+	return;
+err_get_agg_params:
+	node_info->agg_params.num_util_levels = 0;
+	node_info->agg_params.agg_scheme = DEFAULT_AGG_SCHEME;
+}
+
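+/*
+ * Allocate and populate the common node description from devicetree:
+ * cell-id, label, QoS ports, connection and blacklist cell-id lists,
+ * the parent bus device, RPM master/slave ids, and the aggregation and
+ * QoS parameters. Returns NULL if a mandatory property is missing.
+ */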
+static struct msm_bus_node_info_type *get_node_info_data(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev)
+{
+	struct msm_bus_node_info_type *node_info;
+	int ret;
+	int size;
+	int i;
+	struct device_node *con_node;
+	struct device_node *bus_dev;
+
+	node_info = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_node_info_type),
+			GFP_KERNEL);
+	if (!node_info) {
+		dev_err(&pdev->dev,
+			"Error: Unable to allocate memory for node_info\n");
+		return NULL;
+	}
+
+	ret = of_property_read_u32(dev_node, "cell-id", &node_info->id);
+	if (ret) {
+		dev_warn(&pdev->dev, "Bus node is missing cell-id\n");
+		goto node_info_err;
+	}
+	ret = of_property_read_string(dev_node, "label", &node_info->name);
+	if (ret) {
+		dev_warn(&pdev->dev, "Bus node is missing name\n");
+		goto node_info_err;
+	}
+	node_info->qport = get_arr(pdev, dev_node, "qcom,qport",
+			&node_info->num_qports);
+
+	if (of_get_property(dev_node, "qcom,connections", &size)) {
+		node_info->num_connections = size / sizeof(int);
+		node_info->connections = devm_kzalloc(&pdev->dev, size,
+				GFP_KERNEL);
+		if (!node_info->connections)
+			goto node_info_err;
+	} else {
+		node_info->num_connections = 0;
+		node_info->connections = NULL;
+	}
+
+	for (i = 0; i < node_info->num_connections; i++) {
+		con_node = of_parse_phandle(dev_node, "qcom,connections", i);
+		if (IS_ERR_OR_NULL(con_node))
+			goto node_info_err;
+
+		if (of_property_read_u32(con_node, "cell-id",
+				&node_info->connections[i]))
+			goto node_info_err;
+		of_node_put(con_node);
+	}
+
+	if (of_get_property(dev_node, "qcom,blacklist", &size)) {
+		node_info->num_blist = size / sizeof(u32);
+		node_info->bl_cons = devm_kzalloc(&pdev->dev, size,
+				GFP_KERNEL);
+		if (!node_info->bl_cons)
+			goto node_info_err;
+	} else {
+		node_info->num_blist = 0;
+		node_info->bl_cons = NULL;
+	}
+
+	for (i = 0; i < node_info->num_blist; i++) {
+		con_node = of_parse_phandle(dev_node, "qcom,blacklist", i);
+		if (IS_ERR_OR_NULL(con_node))
+			goto node_info_err;
+
+		if (of_property_read_u32(con_node, "cell-id",
+				&node_info->bl_cons[i]))
+			goto node_info_err;
+		of_node_put(con_node);
+	}
+
+	bus_dev = of_parse_phandle(dev_node, "qcom,bus-dev", 0);
+	if (!IS_ERR_OR_NULL(bus_dev)) {
+		if (of_property_read_u32(bus_dev, "cell-id",
+			&node_info->bus_device_id)) {
+			dev_err(&pdev->dev, "Can't find bus device. Node %d\n",
+					node_info->id);
+			goto node_info_err;
+		}
+
+		of_node_put(bus_dev);
+	} else {
+		dev_dbg(&pdev->dev, "Can't find bdev phandle for %d",
+					node_info->id);
+	}
+
+	node_info->is_fab_dev = of_property_read_bool(dev_node, "qcom,fab-dev");
+	node_info->virt_dev = of_property_read_bool(dev_node, "qcom,virt-dev");
+
+	ret = of_property_read_u32(dev_node, "qcom,mas-rpm-id",
+						&node_info->mas_rpm_id);
+	if (ret) {
+		dev_dbg(&pdev->dev, "mas rpm id is missing\n");
+		node_info->mas_rpm_id = -1;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,slv-rpm-id",
+						&node_info->slv_rpm_id);
+	if (ret) {
+		dev_dbg(&pdev->dev, "slv rpm id is missing\n");
+		node_info->slv_rpm_id = -1;
+	}
+
+	get_agg_params(dev_node, pdev, node_info);
+	get_qos_params(dev_node, pdev, node_info);
+
+	return node_info;
+
+node_info_err:
+	devm_kfree(&pdev->dev, node_info);
+	node_info = NULL;
+	return NULL;
+}
+
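+/*
+ * Fill in the runtime device data for one bus node. Fabric devices get
+ * their register/QoS description plus the mandatory "bus_clk" and
+ * "bus_a_clk" handles (dual and active context) and an optional
+ * "bus_qos_clk"; masters and slaves get the optional "node_clk",
+ * "bus_qos_clk" and the per-node QoS clock array. GDSC supplies are
+ * recorded by name when the corresponding *-supply property exists.
+ */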
+static int get_bus_node_device_data(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_device_type * const node_device)
+{
+	bool enable_only;
+	bool setrate_only;
+	struct device_node *qos_clk_node;
+
+	node_device->node_info = get_node_info_data(dev_node, pdev);
+	if (IS_ERR_OR_NULL(node_device->node_info)) {
+		dev_err(&pdev->dev, "Error: Node info missing\n");
+		return -ENODATA;
+	}
+	node_device->ap_owned = of_property_read_bool(dev_node,
+							"qcom,ap-owned");
+
+	if (node_device->node_info->is_fab_dev) {
+		dev_dbg(&pdev->dev, "Dev %d\n", node_device->node_info->id);
+
+		if (!node_device->node_info->virt_dev) {
+			node_device->fabdev =
+				get_fab_device_info(dev_node, pdev);
+			if (IS_ERR_OR_NULL(node_device->fabdev)) {
+				dev_err(&pdev->dev,
+					"Error: Fabric device info missing\n");
+				devm_kfree(&pdev->dev, node_device->node_info);
+				return -ENODATA;
+			}
+		}
+
+		enable_only = of_property_read_bool(dev_node,
+							"qcom,enable-only-clk");
+		node_device->clk[DUAL_CTX].enable_only_clk = enable_only;
+		node_device->clk[ACTIVE_CTX].enable_only_clk = enable_only;
+
+		/*
+		 * Doesn't make sense to have a clk handle you can't enable or
+		 * set rate on.
+		 */
+		if (!enable_only) {
+			setrate_only = of_property_read_bool(dev_node,
+						"qcom,setrate-only-clk");
+			node_device->clk[DUAL_CTX].setrate_only_clk =
+								setrate_only;
+			node_device->clk[ACTIVE_CTX].setrate_only_clk =
+								setrate_only;
+		}
+
+		node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node,
+							"bus_clk");
+
+		if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk)) {
+			int ret;
+
+			dev_err(&pdev->dev,
+				"%s:Failed to get bus clk for bus%d ctx%d\n",
+				__func__, node_device->node_info->id,
+								DUAL_CTX);
+			ret = (IS_ERR(node_device->clk[DUAL_CTX].clk) ?
+			PTR_ERR(node_device->clk[DUAL_CTX].clk) : -ENXIO);
+			return ret;
+		}
+
+		if (of_find_property(dev_node, "bus-gdsc-supply", NULL))
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%s", "bus-gdsc");
+		else
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+		node_device->clk[ACTIVE_CTX].clk = of_clk_get_by_name(dev_node,
+							"bus_a_clk");
+		if (IS_ERR_OR_NULL(node_device->clk[ACTIVE_CTX].clk)) {
+			int ret;
+
+			dev_err(&pdev->dev,
+				"Failed to get bus clk for bus%d ctx%d\n",
+				 node_device->node_info->id, ACTIVE_CTX);
+			ret = (IS_ERR(node_device->clk[ACTIVE_CTX].clk) ?
+			PTR_ERR(node_device->clk[ACTIVE_CTX].clk) : -ENXIO);
+			return ret;
+		}
+
+		if (of_find_property(dev_node, "bus-a-gdsc-supply", NULL))
+			scnprintf(node_device->clk[ACTIVE_CTX].reg_name,
+				MAX_REG_NAME, "%s", "bus-a-gdsc");
+		else
+			scnprintf(node_device->clk[ACTIVE_CTX].reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+		node_device->bus_qos_clk.clk = of_clk_get_by_name(dev_node,
+							"bus_qos_clk");
+
+		if (IS_ERR_OR_NULL(node_device->bus_qos_clk.clk)) {
+			dev_dbg(&pdev->dev,
+				"%s:Failed to get bus qos clk for %d",
+				__func__, node_device->node_info->id);
+			scnprintf(node_device->bus_qos_clk.reg_name,
+					MAX_REG_NAME, "%c", '\0');
+		} else {
+			if (of_find_property(dev_node, "bus-qos-gdsc-supply",
+								NULL))
+				scnprintf(node_device->bus_qos_clk.reg_name,
+					MAX_REG_NAME, "%s", "bus-qos-gdsc");
+			else
+				scnprintf(node_device->bus_qos_clk.reg_name,
+					MAX_REG_NAME, "%c", '\0');
+		}
+
+		qos_clk_node = of_get_child_by_name(dev_node,
+						"qcom,node-qos-clks");
+
+		if (qos_clk_node) {
+			if (msm_bus_of_parse_clk_array(qos_clk_node, dev_node,
+						pdev,
+						&node_device->node_qos_clks,
+						&node_device->num_node_qos_clks,
+						node_device->node_info->id)) {
+				dev_info(&pdev->dev,
+					"Bypass QoS programming\n");
+				node_device->fabdev->bypass_qos_prg = true;
+			}
+			of_node_put(qos_clk_node);
+		}
+	} else {
+		node_device->bus_qos_clk.clk = of_clk_get_by_name(dev_node,
+							"bus_qos_clk");
+
+		if (IS_ERR_OR_NULL(node_device->bus_qos_clk.clk))
+			dev_dbg(&pdev->dev,
+				"%s:Failed to get bus qos clk for mas%d",
+				__func__, node_device->node_info->id);
+
+		if (of_find_property(dev_node, "bus-qos-gdsc-supply",
+									NULL))
+			scnprintf(node_device->bus_qos_clk.reg_name,
+				MAX_REG_NAME, "%s", "bus-qos-gdsc");
+		else
+			scnprintf(node_device->bus_qos_clk.reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+		enable_only = of_property_read_bool(dev_node,
+							"qcom,enable-only-clk");
+		node_device->clk[DUAL_CTX].enable_only_clk = enable_only;
+		node_device->bus_qos_clk.enable_only_clk = enable_only;
+
+		/*
+		 * Doesn't make sense to have a clk handle you can't enable or
+		 * set rate on.
+		 */
+		if (!enable_only) {
+			setrate_only = of_property_read_bool(dev_node,
+						"qcom,setrate-only-clk");
+			node_device->clk[DUAL_CTX].setrate_only_clk =
+								setrate_only;
+			node_device->clk[ACTIVE_CTX].setrate_only_clk =
+								setrate_only;
+		}
+
+		qos_clk_node = of_get_child_by_name(dev_node,
+						"qcom,node-qos-clks");
+
+		if (qos_clk_node) {
+			if (msm_bus_of_parse_clk_array(qos_clk_node, dev_node,
+						pdev,
+						&node_device->node_qos_clks,
+						&node_device->num_node_qos_clks,
+						node_device->node_info->id)) {
+				dev_info(&pdev->dev,
+					"Bypass QoS programming\n");
+				node_device->fabdev->bypass_qos_prg = true;
+			}
+			of_node_put(qos_clk_node);
+		}
+
+		node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node,
+							"node_clk");
+
+		if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk))
+			dev_dbg(&pdev->dev,
+				"%s:Failed to get bus clk for bus%d ctx%d",
+				__func__, node_device->node_info->id,
+								DUAL_CTX);
+
+		if (of_find_property(dev_node, "node-gdsc-supply", NULL))
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%s", "node-gdsc");
+		else
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+	}
+	return 0;
+}
+
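+/*
+ * Top-level devicetree translation: walk every child node of the bus
+ * driver's DT node, build a msm_bus_node_device_type entry for each,
+ * and return the resulting registration table (or NULL on any failure).
+ * The parsed topology is dumped at dev_dbg level for debugging.
+ */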
+struct msm_bus_device_node_registration
+	*msm_bus_of_to_pdata(struct platform_device *pdev)
+{
+	struct device_node *of_node, *child_node;
+	struct msm_bus_device_node_registration *pdata;
+	unsigned int i = 0, j;
+	int ret;
+
+	if (!pdev) {
+		pr_err("Error: Null platform device\n");
+		return NULL;
+	}
+
+	of_node = pdev->dev.of_node;
+
+	pdata = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_device_node_registration),
+			GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	pdata->num_devices = of_get_child_count(of_node);
+
+	pdata->info = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_node_device_type) *
+			pdata->num_devices, GFP_KERNEL);
+
+	if (!pdata->info)
+		goto node_reg_err;
+
+	ret = 0;
+	for_each_child_of_node(of_node, child_node) {
+		ret = get_bus_node_device_data(child_node, pdev,
+				&pdata->info[i]);
+		if (ret) {
+			dev_err(&pdev->dev, "Error: unable to initialize bus nodes\n");
+			goto node_reg_err_1;
+		}
+		pdata->info[i].of_node = child_node;
+		i++;
+	}
+
+	dev_dbg(&pdev->dev, "bus topology:\n");
+	for (i = 0; i < pdata->num_devices; i++) {
+		dev_dbg(&pdev->dev, "id %d\nnum_qports %d\nnum_connections %d",
+				pdata->info[i].node_info->id,
+				pdata->info[i].node_info->num_qports,
+				pdata->info[i].node_info->num_connections);
+		dev_dbg(&pdev->dev, "\nbus_device_id %d\n buswidth %d\n",
+				pdata->info[i].node_info->bus_device_id,
+				pdata->info[i].node_info->agg_params.buswidth);
+		for (j = 0; j < pdata->info[i].node_info->num_connections;
+									j++) {
+			dev_dbg(&pdev->dev, "connection[%d]: %d\n", j,
+				pdata->info[i].node_info->connections[j]);
+		}
+		for (j = 0; j < pdata->info[i].node_info->num_blist;
+									 j++) {
+			dev_dbg(&pdev->dev, "black_listed_node[%d]: %d\n", j,
+				pdata->info[i].node_info->bl_cons[j]);
+		}
+		if (pdata->info[i].fabdev)
+			dev_dbg(&pdev->dev, "base_addr %zu\nbus_type %d\n",
+				(size_t)pdata->info[i].fabdev->pqos_base,
+					pdata->info[i].fabdev->bus_type);
+	}
+	return pdata;
+
+node_reg_err_1:
+	devm_kfree(&pdev->dev, pdata->info);
+node_reg_err:
+	devm_kfree(&pdev->dev, pdata);
+	pdata = NULL;
+	return NULL;
+}
+
+static int msm_bus_of_get_ids(struct platform_device *pdev,
+			struct device_node *dev_node, int **dev_ids,
+			int *num_ids, char *prop_name)
+{
+	int ret = 0;
+	int size, i;
+	struct device_node *rule_node;
+	int *ids = NULL;
+
+	if (of_get_property(dev_node, prop_name, &size)) {
+		*num_ids = size / sizeof(int);
+		ids = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+		if (!ids) {
+			ret = -ENOMEM;
+			goto exit_get_ids;
+		}
+	} else {
+		dev_err(&pdev->dev, "No rule nodes, skipping node\n");
+		ret = -ENXIO;
+		goto exit_get_ids;
+	}
+
+	*dev_ids = ids;
+	for (i = 0; i < *num_ids; i++) {
+		rule_node = of_parse_phandle(dev_node, prop_name, i);
+		if (IS_ERR_OR_NULL(rule_node)) {
+			dev_err(&pdev->dev, "Can't get rule node id\n");
+			ret = -ENXIO;
+			goto err_get_ids;
+		}
+
+		if (of_property_read_u32(rule_node, "cell-id",
+				&ids[i])) {
+			dev_err(&pdev->dev, "Can't get rule node id\n");
+			ret = -ENXIO;
+			goto err_get_ids;
+		}
+		of_node_put(rule_node);
+	}
+exit_get_ids:
+	return ret;
+err_get_ids:
+	devm_kfree(&pdev->dev, ids);
+	of_node_put(rule_node);
+	ids = NULL;
+	return ret;
+}
+
+int msm_bus_of_get_static_rules(struct platform_device *pdev,
+					struct bus_rule_type **static_rules)
+{
+	int ret = 0;
+	struct device_node *of_node, *child_node;
+	int num_rules = 0;
+	int rule_idx = 0;
+	int bw_fld = 0;
+	int i;
+	struct bus_rule_type *local_rule = NULL;
+
+	of_node = pdev->dev.of_node;
+	num_rules = of_get_child_count(of_node);
+	local_rule = devm_kzalloc(&pdev->dev,
+				sizeof(struct bus_rule_type) * num_rules,
+				GFP_KERNEL);
+
+	if (IS_ERR_OR_NULL(local_rule)) {
+		ret = -ENOMEM;
+		goto exit_static_rules;
+	}
+
+	*static_rules = local_rule;
+	for_each_child_of_node(of_node, child_node) {
+		ret = msm_bus_of_get_ids(pdev, child_node,
+			&local_rule[rule_idx].src_id,
+			&local_rule[rule_idx].num_src,
+			"qcom,src-nodes");
+
+		ret = msm_bus_of_get_ids(pdev, child_node,
+			&local_rule[rule_idx].dst_node,
+			&local_rule[rule_idx].num_dst,
+			"qcom,dest-node");
+
+		ret = of_property_read_u32(child_node, "qcom,src-field",
+				&local_rule[rule_idx].src_field);
+		if (ret) {
+			dev_err(&pdev->dev, "src-field missing\n");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,src-op",
+				&local_rule[rule_idx].op);
+		if (ret) {
+			dev_err(&pdev->dev, "src-op missing\n");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,mode",
+				&local_rule[rule_idx].mode);
+		if (ret) {
+			dev_err(&pdev->dev, "mode missing\n");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,thresh", &bw_fld);
+		if (ret) {
+			dev_err(&pdev->dev, "thresh missing\n");
+			ret = -ENXIO;
+			goto err_static_rules;
+		} else
+			local_rule[rule_idx].thresh = KBTOB(bw_fld);
+
+		ret = of_property_read_u32(child_node, "qcom,dest-bw",
+								&bw_fld);
+		if (ret)
+			local_rule[rule_idx].dst_bw = 0;
+		else
+			local_rule[rule_idx].dst_bw = KBTOB(bw_fld);
+
+		rule_idx++;
+	}
+	ret = rule_idx;
+exit_static_rules:
+	return ret;
+err_static_rules:
+	for (i = 0; i < num_rules; i++) {
+		if (!IS_ERR_OR_NULL(local_rule[i].src_id))
+			devm_kfree(&pdev->dev, local_rule[i].src_id);
+		if (!IS_ERR_OR_NULL(local_rule[i].dst_node))
+			devm_kfree(&pdev->dev, local_rule[i].dst_node);
+	}
+	devm_kfree(&pdev->dev, local_rule);
+	*static_rules = NULL;
+	return ret;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
new file mode 100644
index 0000000..3a4b770
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+#include "msm_bus_rpmh.h"
+
+#define DEFAULT_QOS_FREQ	19200
+#define DEFAULT_UTIL_FACT	100
+#define DEFAULT_VRAIL_COMP	100
+#define DEFAULT_AGG_SCHEME	AGG_SCHEME_LEG
+
+static int *get_arr(struct platform_device *pdev,
+		struct device_node *node, const char *prop,
+		int *nports)
+{
+	int size = 0, ret;
+	int *arr = NULL;
+
+	if (of_get_property(node, prop, &size)) {
+		*nports = size / sizeof(int);
+	} else {
+		dev_dbg(&pdev->dev, "Property %s not available\n", prop);
+		*nports = 0;
+		return NULL;
+	}
+
+	arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(arr)) {
+		dev_err(&pdev->dev, "Error: Failed to alloc mem for %s\n",
+				prop);
+		return NULL;
+	}
+
+	ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+	if (ret) {
+		dev_err(&pdev->dev, "Error in reading property: %s\n", prop);
+		goto arr_err;
+	}
+
+	return arr;
+arr_err:
+	devm_kfree(&pdev->dev, arr);
+	return NULL;
+}
+
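+/*
+ * Parse the RSC-specific data for a node marked "qcom,rsc-dev". Only
+ * "qcom,req_state" is read here; it defaults to 2 when absent.
+ */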
+static struct msm_bus_rsc_device_type *get_rsc_device_info(
+		struct device_node *dev_node,
+		struct platform_device *pdev)
+{
+	struct msm_bus_rsc_device_type *rsc_dev;
+
+	rsc_dev = devm_kzalloc(&pdev->dev, sizeof(*rsc_dev),
+			GFP_KERNEL);
+	if (!rsc_dev)
+		return NULL;
+
+	if (of_property_read_u32(dev_node, "qcom,req_state",
+			&rsc_dev->req_state)) {
+		dev_dbg(&pdev->dev, "req_state missing, using default\n");
+		rsc_dev->req_state = 2;
+	}
+
+	return rsc_dev;
+}
+
+static struct msm_bus_bcm_device_type *get_bcm_device_info(
+		struct device_node *dev_node,
+		struct platform_device *pdev)
+{
+	struct msm_bus_bcm_device_type *bcm_dev;
+
+	bcm_dev = devm_kzalloc(&pdev->dev, sizeof(*bcm_dev),
+			GFP_KERNEL);
+	if (!bcm_dev)
+		return NULL;
+
+	if (of_property_read_string(dev_node, "qcom,bcm-name",
+							&bcm_dev->name)) {
+		devm_kfree(&pdev->dev, bcm_dev);
+		return NULL;
+	}
+
+	return bcm_dev;
+}
+
+static struct msm_bus_fab_device_type *get_fab_device_info(
+		struct device_node *dev_node,
+		struct platform_device *pdev)
+{
+	struct msm_bus_fab_device_type *fab_dev;
+	struct resource *res;
+	const char *base_name;
+
+	fab_dev = devm_kzalloc(&pdev->dev, sizeof(*fab_dev),
+			GFP_KERNEL);
+	if (!fab_dev)
+		return NULL;
+
+	if (of_property_read_string(dev_node, "qcom,base-name", &base_name)) {
+		devm_kfree(&pdev->dev, fab_dev);
+		return NULL;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, base_name);
+	if (!res) {
+		devm_kfree(&pdev->dev, fab_dev);
+		return NULL;
+	}
+	fab_dev->pqos_base = res->start;
+	fab_dev->qos_range = resource_size(res);
+	fab_dev->bypass_qos_prg = of_property_read_bool(dev_node,
+						"qcom,bypass-qos-prg");
+
+	if (of_property_read_u32(dev_node, "qcom,base-offset",
+			&fab_dev->base_offset))
+		dev_dbg(&pdev->dev, "Bus base offset is missing\n");
+
+	if (of_property_read_u32(dev_node, "qcom,sbm-offset",
+			&fab_dev->sbm_offset))
+		dev_dbg(&pdev->dev, "sbm disable offset is missing\n");
+
+	if (of_property_read_u32(dev_node, "qcom,qos-off",
+			&fab_dev->qos_off))
+		dev_dbg(&pdev->dev, "Bus qos off is missing\n");
+
+	if (of_property_read_u32(dev_node, "qcom,bus-type",
+						&fab_dev->bus_type))
+		dev_warn(&pdev->dev, "Bus type is missing\n");
+
+	if (of_property_read_u32(dev_node, "qcom,qos-freq",
+						&fab_dev->qos_freq)) {
+		dev_dbg(&pdev->dev, "Bus qos freq is missing\n");
+		fab_dev->qos_freq = DEFAULT_QOS_FREQ;
+	}
+
+	return fab_dev;
+}
+
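+/*
+ * RPMh-path QoS properties: "qcom,prio" is the default priority,
+ * "qcom,lim-params" is a <bw sat> pair for the limiter,
+ * "qcom,qos-reg-params" is a <low_prio hi_prio bw sat> quadruple and
+ * "qcom,qos-reg-mode" a <read write> pair for the regulator mode.
+ * Urgent forwarding and deferred QoS init are plain boolean flags.
+ */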
+static void get_qos_params(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_info_type *node_info)
+{
+	const uint32_t *vec_arr;
+	int len;
+
+	of_property_read_u32(dev_node, "qcom,prio",
+					&node_info->qos_params.prio_dflt);
+
+	vec_arr = of_get_property(dev_node, "qcom,lim-params", &len);
+	if (vec_arr != NULL && len == sizeof(uint32_t) * 2) {
+		node_info->qos_params.limiter.bw = be32_to_cpu(vec_arr[0]);
+		node_info->qos_params.limiter.sat = be32_to_cpu(vec_arr[1]);
+	}
+
+	node_info->qos_params.limiter_en = of_property_read_bool(dev_node,
+						"qcom,lim-en");
+
+	vec_arr = of_get_property(dev_node, "qcom,qos-reg-params", &len);
+	if (vec_arr != NULL && len == sizeof(uint32_t) * 4) {
+		node_info->qos_params.reg.low_prio = be32_to_cpu(vec_arr[0]);
+		node_info->qos_params.reg.hi_prio = be32_to_cpu(vec_arr[1]);
+		node_info->qos_params.reg.bw = be32_to_cpu(vec_arr[2]);
+		node_info->qos_params.reg.sat = be32_to_cpu(vec_arr[3]);
+	}
+
+	vec_arr = of_get_property(dev_node, "qcom,qos-reg-mode", &len);
+	if (vec_arr != NULL && len == sizeof(uint32_t) * 2) {
+		node_info->qos_params.reg_mode.read = be32_to_cpu(vec_arr[0]);
+		node_info->qos_params.reg_mode.write = be32_to_cpu(vec_arr[1]);
+	}
+
+	node_info->qos_params.urg_fwd_en = of_property_read_bool(dev_node,
+						"qcom,forwarding");
+
+	node_info->qos_params.defer_init_qos = of_property_read_bool(dev_node,
+						"qcom,defer-init-qos");
+}
+
+static int msm_bus_of_parse_clk_array(struct device_node *dev_node,
+			struct device_node *gdsc_node,
+			struct platform_device *pdev, struct nodeclk **clk_arr,
+			int *num_clks, int id)
+{
+	int ret = 0;
+	int idx = 0;
+	struct property *prop;
+	const char *clk_name;
+	int clks = 0;
+
+	clks = of_property_count_strings(dev_node, "clock-names");
+	if (clks < 0)
+		return clks;
+
+	*num_clks = clks;
+	*clk_arr = devm_kcalloc(&pdev->dev, clks, sizeof(*(*clk_arr)),
+				GFP_KERNEL);
+	if (!(*clk_arr)) {
+		*num_clks = 0;
+		return -ENOMEM;
+	}
+	of_property_for_each_string(dev_node, "clock-names", prop, clk_name) {
+		char gdsc_string[MAX_REG_NAME];
+
+		(*clk_arr)[idx].clk = of_clk_get_by_name(dev_node, clk_name);
+		if (IS_ERR_OR_NULL((*clk_arr)[idx].clk)) {
+			*num_clks = 0;
+			goto exit_of_parse_clk_array;
+		}
+		if (strnstr(clk_name, "no-rate", strlen(clk_name)))
+			(*clk_arr)[idx].enable_only_clk = true;
+
+		scnprintf(gdsc_string, sizeof(gdsc_string), "%s-supply",
+			clk_name);
+
+		if (of_find_property(gdsc_node, gdsc_string, NULL))
+			strlcpy((*clk_arr)[idx].reg_name, clk_name,
+				MAX_REG_NAME);
+		idx++;
+	}
+exit_of_parse_clk_array:
+	return ret;
+}
+
+static void get_agg_params(
+		struct device_node * const dev_node,
+		struct msm_bus_node_info_type *node_info)
+{
+	if (of_property_read_u32(dev_node, "qcom,buswidth",
+					&node_info->agg_params.buswidth))
+		node_info->agg_params.buswidth = 8;
+
+	node_info->agg_params.num_aggports = node_info->num_qports;
+	of_property_read_u32(dev_node, "qcom,agg-ports",
+				   &node_info->agg_params.num_aggports);
+
+	if (of_property_read_u32(dev_node, "qcom,agg-scheme",
+					&node_info->agg_params.agg_scheme))
+		node_info->agg_params.agg_scheme =
+		(node_info->is_fab_dev) ? DEFAULT_AGG_SCHEME : AGG_SCHEME_NONE;
+
+	if (of_property_read_u32(dev_node, "qcom,vrail-comp",
+					&node_info->agg_params.vrail_comp))
+		node_info->agg_params.vrail_comp =
+		(node_info->is_fab_dev) ? DEFAULT_VRAIL_COMP : 0;
+}
+
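+/*
+ * Resolve a phandle-list property (e.g. "qcom,connections") into an
+ * array of the referenced nodes' "cell-id" values. *count and *data
+ * are left untouched when the property is absent, which is not an
+ * error.
+ */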
+static int read_cell_ids(struct device *dev, struct device_node *const dev_node,
+		const char *name,  u32 **data, u32 *count)
+{
+	u32 size;
+	int i, ret;
+
+	if (!of_get_property(dev_node, name, &size))
+		return 0;
+
+	*count = size / sizeof(u32);
+	*data = devm_kcalloc(dev, *count, sizeof(u32), GFP_KERNEL);
+	if (!*data)
+		return -ENOMEM;
+
+	for (i = 0; i < *count; i++) {
+		struct device_node *node;
+
+		node = of_parse_phandle(dev_node, name, i);
+		if (IS_ERR_OR_NULL(node))
+			return -ENOENT;
+
+		ret = of_property_read_u32(node, "cell-id", &(*data)[i]);
+
+		of_node_put(node);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static struct msm_bus_node_info_type *get_node_info_data(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev)
+{
+	struct msm_bus_node_info_type *node_info;
+	int ret;
+	int num_disable_ports;
+	struct device_node *bus_dev;
+
+	node_info = devm_kzalloc(&pdev->dev, sizeof(*node_info),
+			GFP_KERNEL);
+	if (!node_info)
+		return NULL;
+
+	if (of_property_read_u32(dev_node, "cell-id", &node_info->id))
+		return NULL;
+
+	if (of_property_read_string(dev_node, "label", &node_info->name))
+		return NULL;
+
+	node_info->qport = get_arr(pdev, dev_node, "qcom,qport",
+			&node_info->num_qports);
+
+	num_disable_ports = of_property_count_elems_of_size(dev_node,
+			 "qcom,disable-ports", sizeof(uint32_t));
+
+	if (num_disable_ports > 0) {
+		node_info->num_disable_ports = num_disable_ports;
+		node_info->disable_ports = devm_kcalloc(&pdev->dev,
+			num_disable_ports, sizeof(uint32_t),
+							GFP_KERNEL);
+		if (!node_info->disable_ports)
+			return NULL;
+		of_property_read_u32_array(dev_node, "qcom,disable-ports",
+					node_info->disable_ports,
+					node_info->num_disable_ports);
+	}
+
+	if (read_cell_ids(&pdev->dev, dev_node, "qcom,connections",
+		&node_info->connections, &node_info->num_connections))
+		return NULL;
+
+	if (read_cell_ids(&pdev->dev, dev_node, "qcom,blacklist",
+		&node_info->bl_cons, &node_info->num_blist))
+		return NULL;
+
+	bus_dev = of_parse_phandle(dev_node, "qcom,bus-dev", 0);
+	if (!IS_ERR_OR_NULL(bus_dev)) {
+		ret = of_property_read_u32(bus_dev, "cell-id",
+			&node_info->bus_device_id);
+		of_node_put(bus_dev);
+		if (ret) {
+			dev_err(&pdev->dev, "Can't find bus device. Node %d\n",
+					node_info->id);
+			return NULL;
+		}
+	} else
+		dev_dbg(&pdev->dev, "Can't find bdev phandle for %d",
+					node_info->id);
+
+	if (read_cell_ids(&pdev->dev, dev_node, "qcom,bcms",
+		&node_info->bcm_dev_ids, &node_info->num_bcm_devs))
+		return NULL;
+
+	if (read_cell_ids(&pdev->dev, dev_node, "qcom,rscs",
+		&node_info->rsc_dev_ids, &node_info->num_rsc_devs))
+		return NULL;
+
+	node_info->is_bcm_dev = of_property_read_bool(dev_node, "qcom,bcm-dev");
+	node_info->is_rsc_dev = of_property_read_bool(dev_node, "qcom,rsc-dev");
+	node_info->is_fab_dev = of_property_read_bool(dev_node, "qcom,fab-dev");
+	node_info->virt_dev = of_property_read_bool(dev_node, "qcom,virt-dev");
+
+	get_agg_params(dev_node, node_info);
+	get_qos_params(dev_node, pdev, node_info);
+
+	return node_info;
+}
+
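+/*
+ * RPMh variant of the per-node device setup. BCM, RSC and fabric nodes
+ * pick up their type-specific data; for other nodes "qcom,node-qos-bcms"
+ * is parsed as <bcm-id vec_a vec_b> triplets (values in KB, converted
+ * with KBTOB()), regulators are listed in "node-reg-names", and the
+ * optional "node_clk" handle and "node-gdsc" supply are looked up.
+ */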
+static int get_bus_node_device_data(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_device_type * const node_device)
+{
+	bool enable_only;
+	bool setrate_only;
+	int num_elems = 0, num_bcms = 0, i = 0, ret = 0, num_regs = 0;
+	uint32_t *vec_arr = NULL;
+	struct qos_bcm_type *qos_bcms = NULL;
+	struct device_node *qos_clk_node = NULL;
+	const char *reg_name;
+	struct property *prop;
+
+	node_device->node_info = get_node_info_data(dev_node, pdev);
+	if (IS_ERR_OR_NULL(node_device->node_info)) {
+		dev_err(&pdev->dev, "Error: Node info missing\n");
+		return -ENODATA;
+	}
+	node_device->ap_owned = of_property_read_bool(dev_node,
+							"qcom,ap-owned");
+
+	if (node_device->node_info->is_bcm_dev) {
+		node_device->bcmdev = get_bcm_device_info(dev_node, pdev);
+		if (!node_device->bcmdev)
+			return -ENODATA;
+	}
+
+	if (node_device->node_info->is_rsc_dev) {
+		node_device->rscdev = get_rsc_device_info(dev_node, pdev);
+		if (!node_device->rscdev)
+			return -ENODATA;
+	}
+
+	if (node_device->node_info->is_fab_dev) {
+		dev_dbg(&pdev->dev, "Dev %d\n", node_device->node_info->id);
+
+		if (!node_device->node_info->virt_dev) {
+			node_device->fabdev =
+				get_fab_device_info(dev_node, pdev);
+			if (!node_device->fabdev)
+				return -ENODATA;
+		}
+	} else {
+		num_elems = of_property_count_elems_of_size(dev_node,
+					"qcom,node-qos-bcms", sizeof(uint32_t));
+
+		if (num_elems > 0) {
+			if (num_elems % 3 != 0) {
+				pr_err("Error: Length-error on getting vectors\n");
+				return -ENODATA;
+			}
+
+			vec_arr = devm_kcalloc(&pdev->dev, num_elems,
+						sizeof(*vec_arr), GFP_KERNEL);
+			if (!vec_arr)
+				return -ENOMEM;
+
+			ret = of_property_read_u32_array(dev_node,
+						"qcom,node-qos-bcms", vec_arr,
+								num_elems);
+			if (ret) {
+				pr_err("Error: problem reading qos-bcm vectors\n");
+				return ret;
+			}
+			num_bcms = num_elems / 3;
+			node_device->num_qos_bcms = num_bcms;
+
+			qos_bcms = devm_kcalloc(&pdev->dev, num_bcms,
+						sizeof(*qos_bcms), GFP_KERNEL);
+			if (!qos_bcms)
+				return -ENOMEM;
+
+			for (i = 0; i < num_bcms; i++) {
+				int index = i * 3;
+
+				qos_bcms[i].qos_bcm_id = vec_arr[index];
+				qos_bcms[i].vec.vec_a =
+					(uint64_t)KBTOB(vec_arr[index + 1]);
+				qos_bcms[i].vec.vec_b =
+					(uint64_t)KBTOB(vec_arr[index + 2]);
+			}
+			node_device->qos_bcms = qos_bcms;
+		}
+
+		enable_only = of_property_read_bool(dev_node,
+							"qcom,enable-only-clk");
+		node_device->clk[DUAL_CTX].enable_only_clk = enable_only;
+
+		/*
+		 * Doesn't make sense to have a clk handle you can't enable or
+		 * set rate on.
+		 */
+		if (!enable_only) {
+			setrate_only = of_property_read_bool(dev_node,
+						"qcom,setrate-only-clk");
+			node_device->clk[DUAL_CTX].setrate_only_clk =
+								setrate_only;
+			node_device->clk[ACTIVE_CTX].setrate_only_clk =
+								setrate_only;
+		}
+
+		qos_clk_node = of_get_child_by_name(dev_node,
+						"qcom,node-qos-clks");
+
+		if (qos_clk_node) {
+			if (msm_bus_of_parse_clk_array(qos_clk_node, dev_node,
+						pdev,
+						&node_device->node_qos_clks,
+						&node_device->num_node_qos_clks,
+						node_device->node_info->id)) {
+				dev_dbg(&pdev->dev, "Bypass QoS programming");
+				node_device->fabdev->bypass_qos_prg = true;
+			}
+			of_node_put(qos_clk_node);
+		}
+		node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node,
+							"node_clk");
+
+		if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk))
+			dev_dbg(&pdev->dev,
+				"%s:Failed to get bus clk for bus%d ctx%d",
+				__func__, node_device->node_info->id,
+								DUAL_CTX);
+
+		if (of_find_property(dev_node, "node-gdsc-supply", NULL))
+			strlcpy(node_device->clk[DUAL_CTX].reg_name,
+				"node-gdsc", MAX_REG_NAME);
+
+		num_regs = of_property_count_strings(dev_node,
+							"node-reg-names");
+		if (num_regs > 0) {
+			i = 0;
+			node_device->num_regs = num_regs;
+			node_device->node_regs = devm_kcalloc(&pdev->dev,
+				num_regs, sizeof(*node_device->node_regs),
+								GFP_KERNEL);
+			if (!node_device->node_regs)
+				return -ENOMEM;
+
+			of_property_for_each_string(dev_node, "node-reg-names",
+							prop, reg_name) {
+				strlcpy(node_device->node_regs[i].name,
+					reg_name, MAX_REG_NAME);
+				i++;
+			}
+		}
+	}
+	return 0;
+}
+
+struct msm_bus_device_node_registration
+	*msm_bus_of_to_pdata(struct platform_device *pdev)
+{
+	struct device_node *of_node, *child_node;
+	struct msm_bus_device_node_registration *pdata;
+	unsigned int i = 0, j;
+	int ret;
+
+	of_node = pdev->dev.of_node;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	pdata->num_devices = of_get_child_count(of_node);
+
+	pdata->info = devm_kcalloc(&pdev->dev, pdata->num_devices,
+				sizeof(*pdata->info), GFP_KERNEL);
+	if (!pdata->info)
+		return NULL;
+
+	ret = 0;
+	for_each_child_of_node(of_node, child_node) {
+		ret = get_bus_node_device_data(child_node, pdev,
+				&pdata->info[i]);
+		if (ret)
+			return NULL;
+		pdata->info[i].of_node = child_node;
+		i++;
+	}
+
+	dev_dbg(&pdev->dev, "bus topology:\n");
+	for (i = 0; i < pdata->num_devices; i++) {
+		dev_dbg(&pdev->dev, "id %d\n", pdata->info[i].node_info->id);
+		dev_dbg(&pdev->dev, "num_qports %d\n",
+					pdata->info[i].node_info->num_qports);
+		dev_dbg(&pdev->dev, "num_connections %d\n",
+				pdata->info[i].node_info->num_connections);
+		dev_dbg(&pdev->dev, "bus_device_id %d\n",
+				pdata->info[i].node_info->bus_device_id);
+		dev_dbg(&pdev->dev, "buswidth %d\n",
+				pdata->info[i].node_info->agg_params.buswidth);
+		for (j = 0; j < pdata->info[i].node_info->num_connections;
+									j++) {
+			dev_dbg(&pdev->dev, "connection[%d]: %d\n", j,
+				pdata->info[i].node_info->connections[j]);
+		}
+		for (j = 0; j < pdata->info[i].node_info->num_blist;
+									 j++) {
+			dev_dbg(&pdev->dev, "black_listed_node[%d]: %d\n", j,
+				pdata->info[i].node_info->bl_cons[j]);
+		}
+		if (pdata->info[i].fabdev) {
+			dev_dbg(&pdev->dev, "base_addr %zu\n",
+				(size_t)pdata->info[i].fabdev->pqos_base);
+			dev_dbg(&pdev->dev, "bus_type %d\n",
+					pdata->info[i].fabdev->bus_type);
+		}
+	}
+	return pdata;
+}
+
+static int msm_bus_of_get_ids(struct platform_device *pdev,
+			struct device_node *dev_node, int **dev_ids,
+			int *num_ids, char *prop_name)
+{
+	int i;
+	struct device_node *rule_node;
+	int *ids = NULL;
+
+	*num_ids = of_property_count_elems_of_size(dev_node, prop_name,
+							sizeof(u32));
+	if (*num_ids <= 0) {
+		dev_err(&pdev->dev, "No rule nodes, skipping node\n");
+		return -ENXIO;
+	}
+
+	ids = devm_kcalloc(&pdev->dev, *num_ids, sizeof(*ids), GFP_KERNEL);
+	if (!ids)
+		return -ENOMEM;
+
+	*dev_ids = ids;
+	for (i = 0; i < *num_ids; i++) {
+		rule_node = of_parse_phandle(dev_node, prop_name, i);
+		if (IS_ERR_OR_NULL(rule_node))
+			goto err_get_ids;
+
+		if (of_property_read_u32(rule_node, "cell-id",
+				&ids[i]))
+			goto err_get_ids;
+
+		of_node_put(rule_node);
+	}
+
+	return 0;
+err_get_ids:
+	dev_err(&pdev->dev, "Can't get rule node id\n");
+	devm_kfree(&pdev->dev, ids);
+	of_node_put(rule_node);
+	return -ENXIO;
+}
+
+int msm_bus_of_get_static_rules(struct platform_device *pdev,
+					struct bus_rule_type **static_rules)
+{
+	int ret = 0;
+	struct device_node *of_node, *child_node;
+	int num_rules = 0;
+	int rule_idx = 0;
+	int bw_fld = 0;
+	int i;
+	struct bus_rule_type *local_rule = NULL;
+
+	of_node = pdev->dev.of_node;
+	num_rules = of_get_child_count(of_node);
+	local_rule = devm_kcalloc(&pdev->dev, num_rules,
+				sizeof(*local_rule), GFP_KERNEL);
+
+	if (!local_rule)
+		return -ENOMEM;
+
+	*static_rules = local_rule;
+	for_each_child_of_node(of_node, child_node) {
+		ret = msm_bus_of_get_ids(pdev, child_node,
+			&local_rule[rule_idx].src_id,
+			&local_rule[rule_idx].num_src,
+			"qcom,src-nodes");
+
+		ret = msm_bus_of_get_ids(pdev, child_node,
+			&local_rule[rule_idx].dst_node,
+			&local_rule[rule_idx].num_dst,
+			"qcom,dest-node");
+
+		ret = of_property_read_u32(child_node, "qcom,src-field",
+				&local_rule[rule_idx].src_field);
+		if (ret) {
+			dev_err(&pdev->dev, "src-field missing\n");
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,src-op",
+				&local_rule[rule_idx].op);
+		if (ret) {
+			dev_err(&pdev->dev, "src-op missing\n");
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,mode",
+				&local_rule[rule_idx].mode);
+		if (ret) {
+			dev_err(&pdev->dev, "mode missing\n");
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,thresh", &bw_fld);
+		if (ret) {
+			dev_err(&pdev->dev, "thresh missing\n");
+			goto err_static_rules;
+		} else
+			local_rule[rule_idx].thresh = KBTOB(bw_fld);
+
+		ret = of_property_read_u32(child_node, "qcom,dest-bw",
+								&bw_fld);
+		if (ret)
+			local_rule[rule_idx].dst_bw = 0;
+		else
+			local_rule[rule_idx].dst_bw = KBTOB(bw_fld);
+
+		rule_idx++;
+	}
+	return rule_idx;
+err_static_rules:
+	for (i = 0; i < num_rules; i++) {
+		devm_kfree(&pdev->dev, local_rule[i].src_id);
+		devm_kfree(&pdev->dev, local_rule[i].dst_node);
+	}
+	devm_kfree(&pdev->dev, local_rule);
+	*static_rules = NULL;
+	return ret;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c b/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c
new file mode 100644
index 0000000..d78fd8b5f
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include "msm_bus_core.h"
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <soc/qcom/rpm-smd.h>
+
+/* Stubs for backward compatibility */
+void msm_bus_rpm_set_mt_mask(void)
+{
+}
+
+bool msm_bus_rpm_is_mem_interleaved(void)
+{
+	return true;
+}
+
+struct commit_data {
+	struct msm_bus_node_hw_info *mas_arb;
+	struct msm_bus_node_hw_info *slv_arb;
+};
+
+#ifdef CONFIG_DEBUG_FS
+void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size,
+	void *cdata, int nmasters, int nslaves, int ntslaves)
+{
+	int c;
+	struct commit_data *cd = (struct commit_data *)cdata;
+
+	*curr += scnprintf(buf + *curr, max_size - *curr, "\nMas BW:\n");
+	for (c = 0; c < nmasters; c++)
+		*curr += scnprintf(buf + *curr, max_size - *curr,
+			"%d: %llu\t", cd->mas_arb[c].hw_id,
+			cd->mas_arb[c].bw);
+	*curr += scnprintf(buf + *curr, max_size - *curr, "\nSlave BW:\n");
+	for (c = 0; c < nslaves; c++) {
+		*curr += scnprintf(buf + *curr, max_size - *curr,
+		"%d: %llu\t", cd->slv_arb[c].hw_id,
+		cd->slv_arb[c].bw);
+	}
+}
+#endif
+
+static int msm_bus_rpm_compare_cdata(
+	struct msm_bus_fabric_registration *fab_pdata,
+	struct commit_data *cd1, struct commit_data *cd2)
+{
+	size_t n;
+	int ret;
+
+	n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nmasters * 2;
+	ret = memcmp(cd1->mas_arb, cd2->mas_arb, n);
+	if (ret) {
+		MSM_BUS_DBG("Master Arb Data not equal\n");
+		return ret;
+	}
+
+	n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nslaves * 2;
+	ret = memcmp(cd1->slv_arb, cd2->slv_arb, n);
+	if (ret) {
+		MSM_BUS_DBG("Slave Arb Data not equal\n");
+		return ret;
+	}
+
+	return 0;
+}
+
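+/*
+ * Send one bandwidth key/value pair to the RPM for a single master or
+ * slave. The local ACTIVE/DUAL context is mapped to the RPM active/sleep
+ * set. When @valid is false an empty KVP is sent instead, which
+ * invalidates any previous vote for that resource.
+ */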
+static int msm_bus_rpm_req(int ctx, uint32_t rsc_type, uint32_t key,
+	struct msm_bus_node_hw_info *hw_info, bool valid)
+{
+	struct msm_rpm_request *rpm_req;
+	int ret = 0, msg_id;
+
+	if (ctx == ACTIVE_CTX)
+		ctx = MSM_RPM_CTX_ACTIVE_SET;
+	else if (ctx == DUAL_CTX)
+		ctx = MSM_RPM_CTX_SLEEP_SET;
+
+	rpm_req = msm_rpm_create_request(ctx, rsc_type, hw_info->hw_id, 1);
+	if (rpm_req == NULL) {
+		MSM_BUS_WARN("RPM: Couldn't create RPM Request\n");
+		return -ENXIO;
+	}
+
+	if (valid) {
+		ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)
+			&hw_info->bw, (int)(sizeof(uint64_t)));
+		if (ret) {
+			MSM_BUS_WARN("RPM: Add KVP failed for RPM Req:%u\n",
+				rsc_type);
+			goto free_rpm_request;
+		}
+
+		MSM_BUS_DBG("Added Key: %d, Val: %llu, size: %zu\n", key,
+			hw_info->bw, sizeof(uint64_t));
+	} else {
+		/* Invalidate RPM requests */
+		ret = msm_rpm_add_kvp_data(rpm_req, 0, NULL, 0);
+		if (ret) {
+			MSM_BUS_WARN("RPM: Add KVP failed for RPM Req:%u\n",
+				rsc_type);
+			goto free_rpm_request;
+		}
+	}
+
+	msg_id = msm_rpm_send_request(rpm_req);
+	if (!msg_id) {
+		MSM_BUS_WARN("RPM: No message ID for req\n");
+		ret = -ENXIO;
+		goto free_rpm_request;
+	}
+
+	ret = msm_rpm_wait_for_ack(msg_id);
+	if (ret) {
+		MSM_BUS_WARN("RPM: Ack failed\n");
+		goto free_rpm_request;
+	}
+
+free_rpm_request:
+	msm_rpm_free_request(rpm_req);
+
+	return ret;
+}
+
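+/*
+ * Walk the commit data for one context and push every dirty master and
+ * slave bandwidth entry to the RPM, clearing the dirty flag on success.
+ * Iteration stops at the first request that fails.
+ */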
+static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration
+	*fab_pdata, int ctx, void *rpm_data,
+	struct commit_data *cd, bool valid)
+{
+	int i, status = 0, rsc_type, key;
+
+	MSM_BUS_DBG("Context: %d\n", ctx);
+	rsc_type = RPM_BUS_MASTER_REQ;
+	key = RPM_MASTER_FIELD_BW;
+	for (i = 0; i < fab_pdata->nmasters; i++) {
+		if (!cd->mas_arb[i].dirty)
+			continue;
+
+		MSM_BUS_DBG("MAS HWID: %d, BW: %llu DIRTY: %d\n",
+			cd->mas_arb[i].hw_id,
+			cd->mas_arb[i].bw,
+			cd->mas_arb[i].dirty);
+		status = msm_bus_rpm_req(ctx, rsc_type, key,
+			&cd->mas_arb[i], valid);
+		if (status) {
+			MSM_BUS_ERR("RPM: Req fail: mas:%d, bw:%llu\n",
+				cd->mas_arb[i].hw_id,
+				cd->mas_arb[i].bw);
+			break;
+		}
+		cd->mas_arb[i].dirty = false;
+	}
+
+	rsc_type = RPM_BUS_SLAVE_REQ;
+	key = RPM_SLAVE_FIELD_BW;
+	for (i = 0; i < fab_pdata->nslaves; i++) {
+		if (!cd->slv_arb[i].dirty)
+			continue;
+
+		MSM_BUS_DBG("SLV HWID: %d, BW: %llu DIRTY: %d\n",
+			cd->slv_arb[i].hw_id,
+			cd->slv_arb[i].bw,
+			cd->slv_arb[i].dirty);
+		status = msm_bus_rpm_req(ctx, rsc_type, key,
+			&cd->slv_arb[i], valid);
+		if (status) {
+			MSM_BUS_ERR("RPM: Req fail: slv:%d, bw:%llu\n",
+				cd->slv_arb[i].hw_id,
+				cd->slv_arb[i].bw);
+			break;
+		}
+		cd->slv_arb[i].dirty = false;
+	}
+
+	return status;
+}
+
+/*
+ * msm_bus_remote_hw_commit() - Commit the arbitration data to RPM
+ * @fabric: Fabric for which the data should be committed
+ */
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+	*fab_pdata, void *hw_data, void **cdata)
+{
+	int ret;
+	bool valid;
+	struct commit_data *dual_cd, *act_cd;
+	void *rpm_data = hw_data;
+
+	MSM_BUS_DBG("\nReached RPM Commit\n");
+	dual_cd = (struct commit_data *)cdata[DUAL_CTX];
+	act_cd = (struct commit_data *)cdata[ACTIVE_CTX];
+
+	/*
+	 * If the arb data for active set and sleep set is
+	 * different, commit both sets.
+	 * If the arb data for active set and sleep set is
+	 * the same, invalidate the sleep set.
+	 */
+	ret = msm_bus_rpm_compare_cdata(fab_pdata, act_cd, dual_cd);
+	if (!ret)
+		/* Invalidate sleep set.*/
+		valid = false;
+	else
+		valid = true;
+
+	ret = msm_bus_rpm_commit_arb(fab_pdata, DUAL_CTX, rpm_data,
+		dual_cd, valid);
+	if (ret)
+		MSM_BUS_ERR("Error committing fabric:%d in %d ctx\n",
+			fab_pdata->id, DUAL_CTX);
+
+	valid = true;
+	ret = msm_bus_rpm_commit_arb(fab_pdata, ACTIVE_CTX, rpm_data, act_cd,
+		valid);
+	if (ret)
+		MSM_BUS_ERR("Error committing fabric:%d in %d ctx\n",
+			fab_pdata->id, ACTIVE_CTX);
+
+	return ret;
+}
+
+int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo)
+{
+	if (!pdata->ahb)
+		pdata->rpm_enabled = 1;
+	return 0;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
new file mode 100644
index 0000000..232ffd9
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_RPMH_H
+#define _ARCH_ARM_MACH_MSM_BUS_RPMH_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+
+#define VCD_MAX_CNT 16
+
+struct msm_bus_node_device_type;
+
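+/*
+ * Per-client vote bookkeeping for one hop along a request path:
+ * instantaneous (ib) and average (ab) bandwidth votes per context,
+ * their query-only counterparts, and the link to the next device on
+ * the path.
+ */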
+struct link_node {
+	uint64_t lnode_ib[NUM_CTX];
+	uint64_t lnode_ab[NUM_CTX];
+	uint64_t query_ib[NUM_CTX];
+	uint64_t query_ab[NUM_CTX];
+	uint64_t alc_idx[NUM_CTX];
+	int next;
+	struct device *next_dev;
+	struct list_head link;
+	uint32_t in_use;
+	const char *cl_name;
+	unsigned int bus_dev_id;
+};
+
+/* New types introduced for adhoc topology */
+struct msm_bus_noc_ops {
+	int (*qos_init)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq);
+	int (*set_bw)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq);
+	int (*limit_mport)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq, int enable_lim,
+			uint64_t lim_bw);
+	bool (*update_bw_reg)(int mode);
+	int (*sbm_config)(struct msm_bus_node_device_type *node_dev,
+			void __iomem *noc_base, uint32_t sbm_offset,
+			bool enable);
+};
+
+struct nodebw {
+	uint64_t sum_ab;
+	uint64_t last_sum_ab;
+	uint64_t last_max_ib;
+	uint64_t max_ib;
+	uint64_t max_ab;
+	uint64_t sum_query_ab;
+	uint64_t max_query_ib;
+	uint64_t max_query_ab;
+	uint64_t max_alc;
+	uint64_t cur_clk_hz;
+	uint32_t util_used;
+	uint32_t vrail_used;
+};
+
+struct nodevector {
+	uint64_t vec_a;
+	uint64_t vec_b;
+	uint64_t query_vec_a;
+	uint64_t query_vec_b;
+};
+
+struct node_regulator {
+	char name[MAX_REG_NAME];
+	struct regulator *reg;
+};
+
+struct qos_bcm_type {
+	int qos_bcm_id;
+	struct nodevector vec;
+};
+
+struct msm_bus_rsc_device_type {
+	struct device *mbox;
+	struct list_head bcm_clist[VCD_MAX_CNT];
+	int req_state;
+	uint32_t acv[NUM_CTX];
+	uint32_t query_acv[NUM_CTX];
+	struct tcs_cmd *cmdlist_active;
+	struct tcs_cmd *cmdlist_wake;
+	struct tcs_cmd *cmdlist_sleep;
+	int num_bcm_devs;
+};
+
+struct msm_bus_bcm_device_type {
+	const char *name;
+	uint32_t width;
+	uint32_t clk_domain;
+	uint32_t type;
+	uint32_t unit_size;
+	uint32_t addr;
+	uint32_t drv_id;
+	int num_bus_devs;
+};
+
+struct msm_bus_fab_device_type {
+	void __iomem *qos_base;
+	phys_addr_t pqos_base;
+	size_t qos_range;
+	uint32_t base_offset;
+	uint32_t qos_freq;
+	uint32_t qos_off;
+	uint32_t sbm_offset;
+	struct msm_bus_noc_ops noc_ops;
+	enum msm_bus_hw_sel bus_type;
+	bool bypass_qos_prg;
+};
+
+struct msm_bus_noc_limiter {
+	uint32_t bw;
+	uint32_t sat;
+};
+
+struct msm_bus_noc_regulator {
+	uint32_t low_prio;
+	uint32_t hi_prio;
+	uint32_t bw;
+	uint32_t sat;
+};
+
+struct msm_bus_noc_regulator_mode {
+	uint32_t read;
+	uint32_t write;
+};
+
+struct msm_bus_noc_qos_params {
+	uint32_t prio_dflt;
+	struct msm_bus_noc_limiter limiter;
+	bool limiter_en;
+	struct msm_bus_noc_regulator reg;
+	struct msm_bus_noc_regulator_mode reg_mode;
+	bool urg_fwd_en;
+	bool defer_init_qos;
+};
+
+struct node_util_levels_type {
+	uint64_t threshold;
+	uint32_t util_fact;
+};
+
+struct node_agg_params_type {
+	uint32_t agg_scheme;
+	uint32_t num_aggports;
+	unsigned int buswidth;
+	uint32_t vrail_comp;
+	uint32_t num_util_levels;
+	struct node_util_levels_type *util_levels;
+};
+
+struct msm_bus_node_info_type {
+	const char *name;
+	unsigned int id;
+	int mas_rpm_id;
+	int slv_rpm_id;
+	int num_ports;
+	int num_qports;
+	int *qport;
+	struct msm_bus_noc_qos_params qos_params;
+	unsigned int num_connections;
+	unsigned int num_blist;
+	unsigned int num_bcm_devs;
+	unsigned int num_rsc_devs;
+	bool is_fab_dev;
+	bool virt_dev;
+	bool is_bcm_dev;
+	bool is_rsc_dev;
+	bool is_traversed;
+	unsigned int *connections;
+	unsigned int *bl_cons;
+	unsigned int *bcm_dev_ids;
+	unsigned int *rsc_dev_ids;
+	struct device **dev_connections;
+	struct device **black_connections;
+	struct device **bcm_devs;
+	struct device **rsc_devs;
+	int *bcm_req_idx;
+	unsigned int bus_device_id;
+	struct device *bus_device;
+	struct rule_update_path_info rule;
+	uint64_t lim_bw;
+	bool defer_qos;
+	uint32_t *disable_ports;
+	int num_disable_ports;
+	struct node_agg_params_type agg_params;
+};
+
+struct msm_bus_node_device_type {
+	struct msm_bus_node_info_type *node_info;
+	struct msm_bus_fab_device_type *fabdev;
+	struct msm_bus_bcm_device_type *bcmdev;
+	struct msm_bus_rsc_device_type *rscdev;
+	int num_lnodes;
+	struct link_node *lnode_list;
+	struct nodebw node_bw[NUM_CTX];
+	struct nodevector node_vec[NUM_CTX];
+	struct list_head link;
+	struct list_head query_link;
+	struct nodeclk clk[NUM_CTX];
+	struct nodeclk bus_qos_clk;
+	uint32_t num_node_qos_clks;
+	struct nodeclk *node_qos_clks;
+	uint32_t num_qos_bcms;
+	struct qos_bcm_type *qos_bcms;
+	uint32_t num_regs;
+	struct node_regulator *node_regs;
+	unsigned int ap_owned;
+	struct device_node *of_node;
+	struct device dev;
+	bool dirty;
+	bool updated;
+	bool query_dirty;
+	struct list_head dev_link;
+	struct list_head devlist;
+	bool is_connected;
+};
+
+static inline struct msm_bus_node_device_type *to_msm_bus_node(struct device *d)
+{
+	return container_of(d, struct msm_bus_node_device_type, dev);
+}
+
+
+int msm_bus_enable_limiter(struct msm_bus_node_device_type *nodedev,
+				int throttle_en, uint64_t lim_bw);
+int msm_bus_commit_data(struct list_head *clist);
+int bcm_remove_handoff_req(struct device *dev, void *data);
+int commit_late_init_data(bool lock);
+int msm_bus_query_gen(struct list_head *qlist,
+				struct msm_bus_tcs_usecase *tcs_usecase);
+void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
+					size_t new_size, gfp_t flags);
+
+extern struct msm_bus_device_node_registration
+	*msm_bus_of_to_pdata(struct platform_device *pdev);
+extern void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops);
+extern int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_of_get_static_rules(struct platform_device *pdev,
+					struct bus_rule_type **static_rule);
+extern int msm_rules_update_path(struct list_head *input_list,
+				struct list_head *output_list);
+extern void print_all_rules(void);
+#ifdef CONFIG_DEBUG_BUS_VOTER
+int msm_bus_floor_init(struct device *dev);
+#else
+static inline int msm_bus_floor_init(struct device *dev)
+{
+	return 0;
+}
+#endif /* CONFIG_DEBUG_BUS_VOTER */
+#endif /* _ARCH_ARM_MACH_MSM_BUS_RPMH_H */
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rules.c b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
new file mode 100644
index 0000000..ce5c9cd
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
@@ -0,0 +1,719 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/list_sort.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_bus_rules.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/msm-bus.h>
+#include <trace/events/trace_msm_bus.h>
+
+struct node_vote_info {
+	int id;
+	u64 ib;
+	u64 ab;
+	u64 clk;
+};
+
+struct rules_def {
+	int rule_id;
+	int num_src;
+	int state;
+	struct node_vote_info *src_info;
+	struct bus_rule_type rule_ops;
+	bool state_change;
+	struct list_head link;
+};
+
+struct rule_node_info {
+	int id;
+	void *data;
+	struct raw_notifier_head rule_notify_list;
+	struct rules_def *cur_rule;
+	int num_rules;
+	struct list_head node_rules;
+	struct list_head link;
+	struct rule_apply_rcm_info apply;
+};
+
+static DEFINE_MUTEX(msm_bus_rules_lock);
+static LIST_HEAD(node_list);
+static struct rule_node_info *get_node(u32 id, void *data);
+static int node_rules_compare(void *priv, struct list_head *a,
+					struct list_head *b);
+
+#define LE(op1, op2)	(op1 <= op2)
+#define LT(op1, op2)	(op1 < op2)
+#define GE(op1, op2)	(op1 >= op2)
+#define GT(op1, op2)	(op1 > op2)
+#define NB_ID		(0x201)
+
+static struct rule_node_info *get_node(u32 id, void *data)
+{
+	struct rule_node_info *node_it = NULL;
+	struct rule_node_info *node_match = NULL;
+
+	list_for_each_entry(node_it, &node_list, link) {
+		if (node_it->id == id) {
+			if (id == NB_ID) {
+				if (node_it->data == data) {
+					node_match = node_it;
+					break;
+				}
+			} else {
+				node_match = node_it;
+				break;
+			}
+		}
+	}
+	return node_match;
+}
+
+static struct rule_node_info *gen_node(u32 id, void *data)
+{
+	struct rule_node_info *node_it = NULL;
+	struct rule_node_info *node_match = NULL;
+
+	list_for_each_entry(node_it, &node_list, link) {
+		if (node_it->id == id) {
+			node_match = node_it;
+			break;
+		}
+	}
+
+	if (!node_match) {
+		node_match = kzalloc(sizeof(struct rule_node_info), GFP_KERNEL);
+		if (!node_match)
+			goto exit_node_match;
+
+		node_match->id = id;
+		node_match->cur_rule = NULL;
+		node_match->num_rules = 0;
+		node_match->data = data;
+		list_add_tail(&node_match->link, &node_list);
+		INIT_LIST_HEAD(&node_match->node_rules);
+		RAW_INIT_NOTIFIER_HEAD(&node_match->rule_notify_list);
+		pr_debug("Added new node %d to list\n", id);
+	}
+exit_node_match:
+	return node_match;
+}
+
+static bool do_compare_op(u64 op1, u64 op2, int op)
+{
+	bool ret = false;
+
+	switch (op) {
+	case OP_LE:
+		ret = LE(op1, op2);
+		break;
+	case OP_LT:
+		ret = LT(op1, op2);
+		break;
+	case OP_GT:
+		ret = GT(op1, op2);
+		break;
+	case OP_GE:
+		ret = GE(op1, op2);
+		break;
+	case OP_NOOP:
+		ret = true;
+		break;
+	default:
+		pr_info("Invalid OP %d\n", op);
+		break;
+	}
+	return ret;
+}
+
+static void update_src_id_vote(struct rule_update_path_info *inp_node,
+				struct rule_node_info *rule_node)
+{
+	struct rules_def *rule;
+	int i;
+
+	list_for_each_entry(rule, &rule_node->node_rules, link) {
+		for (i = 0; i < rule->num_src; i++) {
+			if (rule->src_info[i].id == inp_node->id) {
+				rule->src_info[i].ib = inp_node->ib;
+				rule->src_info[i].ab = inp_node->ab;
+				rule->src_info[i].clk = inp_node->clk;
+			}
+		}
+	}
+}
+
+static u64 get_field(struct rules_def *rule, int src_id)
+{
+	u64 field = 0;
+	int i;
+
+	for (i = 0; i < rule->num_src; i++) {
+		switch (rule->rule_ops.src_field) {
+		case FLD_IB:
+			field += rule->src_info[i].ib;
+			break;
+		case FLD_AB:
+			field += rule->src_info[i].ab;
+			break;
+		case FLD_CLK:
+			field += rule->src_info[i].clk;
+			break;
+		}
+	}
+
+	return field;
+}
+
+static bool check_rule(struct rules_def *rule,
+			struct rule_update_path_info *inp)
+{
+	bool ret = false;
+
+	if (!rule)
+		return ret;
+
+	switch (rule->rule_ops.op) {
+	case OP_LE:
+	case OP_LT:
+	case OP_GT:
+	case OP_GE:
+	{
+		u64 src_field = get_field(rule, inp->id);
+
+		ret = do_compare_op(src_field, rule->rule_ops.thresh,
+							rule->rule_ops.op);
+		break;
+	}
+	default:
+		pr_err("Unsupported op %d\n", rule->rule_ops.op);
+		break;
+	}
+	return ret;
+}
+
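+/*
+ * Re-evaluate every rule on @node that lists the updated source node,
+ * moving it between RULE_STATE_APPLIED and RULE_STATE_NOT_APPLIED and
+ * flagging state_change so apply_rule() can act on the transition.
+ */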
+static void match_rule(struct rule_update_path_info *inp_node,
+			struct rule_node_info *node)
+{
+	struct rules_def *rule;
+	int i;
+
+	list_for_each_entry(rule, &node->node_rules, link) {
+		for (i = 0; i < rule->num_src; i++) {
+			if (rule->src_info[i].id != inp_node->id)
+				continue;
+
+			if (check_rule(rule, inp_node)) {
+				trace_bus_rules_matches(
+				(node->cur_rule ?
+					node->cur_rule->rule_id : -1),
+				inp_node->id, inp_node->ab,
+				inp_node->ib, inp_node->clk);
+				if (rule->state ==
+					RULE_STATE_NOT_APPLIED)
+					rule->state_change = true;
+				rule->state = RULE_STATE_APPLIED;
+			} else {
+				if (rule->state ==
+					RULE_STATE_APPLIED)
+					rule->state_change = true;
+				rule->state = RULE_STATE_NOT_APPLIED;
+			}
+		}
+	}
+}
+
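+/*
+ * Select the node's current rule: the first rule in list order whose
+ * state is RULE_STATE_APPLIED. Notifier (NB_ID) nodes have their
+ * notifier chain called on every state change; other nodes queue a
+ * throttle request on @output_list when the current rule changes, and
+ * node_rules_compare() against the previous rule decides whether the
+ * throttle must be applied after the clock commit.
+ */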
+static void apply_rule(struct rule_node_info *node,
+			struct list_head *output_list)
+{
+	struct rules_def *rule;
+	struct rules_def *last_rule;
+
+	last_rule = node->cur_rule;
+	node->cur_rule = NULL;
+	list_for_each_entry(rule, &node->node_rules, link) {
+		if ((rule->state == RULE_STATE_APPLIED) &&
+						!node->cur_rule)
+			node->cur_rule = rule;
+
+		if (node->id == NB_ID) {
+			if (rule->state_change) {
+				rule->state_change = false;
+				raw_notifier_call_chain(&node->rule_notify_list,
+					rule->state, (void *)&rule->rule_ops);
+			}
+		} else {
+			if ((rule->state == RULE_STATE_APPLIED) &&
+			     (node->cur_rule &&
+				(node->cur_rule->rule_id == rule->rule_id))) {
+				node->apply.id = rule->rule_ops.dst_node[0];
+				node->apply.throttle = rule->rule_ops.mode;
+				node->apply.lim_bw = rule->rule_ops.dst_bw;
+				node->apply.after_clk_commit = false;
+				if (last_rule != node->cur_rule)
+					list_add_tail(&node->apply.link,
+								output_list);
+				if (last_rule) {
+					if (node_rules_compare(NULL,
+						&last_rule->link,
+						&node->cur_rule->link) == -1)
+						node->apply.after_clk_commit =
+									true;
+				}
+			}
+			rule->state_change = false;
+		}
+	}
+}
+
+int msm_rules_update_path(struct list_head *input_list,
+			struct list_head *output_list)
+{
+	int ret = 0;
+	struct rule_update_path_info  *inp_node;
+	struct rule_node_info *node_it = NULL;
+
+	mutex_lock(&msm_bus_rules_lock);
+	list_for_each_entry(inp_node, input_list, link) {
+		list_for_each_entry(node_it, &node_list, link) {
+			update_src_id_vote(inp_node, node_it);
+			match_rule(inp_node, node_it);
+		}
+	}
+
+	list_for_each_entry(node_it, &node_list, link)
+		apply_rule(node_it, output_list);
+	mutex_unlock(&msm_bus_rules_lock);
+	return ret;
+}
+
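+/*
+ * Comparison ops are treated as equivalent for sorting when their
+ * encodings differ by at most one; any other op must match exactly.
+ */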
+static bool ops_equal(int op1, int op2)
+{
+	bool ret = false;
+
+	switch (op1) {
+	case OP_GT:
+	case OP_GE:
+	case OP_LT:
+	case OP_LE:
+		if (abs(op1 - op2) <= 1)
+			ret = true;
+		break;
+	default:
+		ret = (op1 == op2);
+	}
+
+	return ret;
+}
+
+static bool is_throttle_rule(int mode)
+{
+	bool ret = true;
+
+	if (mode == THROTTLE_OFF)
+		ret = false;
+
+	return ret;
+}
+
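+/*
+ * list_sort() comparator: orders a node's rules so that throttling rules
+ * precede THROTTLE_OFF rules, and rules with the same mode and comparable
+ * ops are ordered by threshold.
+ */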
+static int node_rules_compare(void *priv, struct list_head *a,
+					struct list_head *b)
+{
+	struct rules_def *ra = container_of(a, struct rules_def, link);
+	struct rules_def *rb = container_of(b, struct rules_def, link);
+	int ret = -1;
+	int64_t th_diff = 0;
+
+	if (ra->rule_ops.mode == rb->rule_ops.mode) {
+		if (ops_equal(ra->rule_ops.op, rb->rule_ops.op)) {
+			if ((ra->rule_ops.op == OP_LT) ||
+				(ra->rule_ops.op == OP_LE)) {
+				th_diff = ra->rule_ops.thresh -
+						rb->rule_ops.thresh;
+				if (th_diff > 0)
+					ret = 1;
+				else
+					ret = -1;
+			} else if ((ra->rule_ops.op == OP_GT) ||
+					(ra->rule_ops.op == OP_GE)) {
+				th_diff = rb->rule_ops.thresh -
+							ra->rule_ops.thresh;
+				if (th_diff > 0)
+					ret = 1;
+				else
+					ret = -1;
+			}
+		} else {
+			ret = ra->rule_ops.op - rb->rule_ops.op;
+		}
+	} else if (is_throttle_rule(ra->rule_ops.mode) &&
+				is_throttle_rule(rb->rule_ops.mode)) {
+		if (ra->rule_ops.mode == THROTTLE_ON)
+			ret = -1;
+		else
+			ret = 1;
+	} else if ((ra->rule_ops.mode == THROTTLE_OFF) &&
+		is_throttle_rule(rb->rule_ops.mode)) {
+		ret = 1;
+	} else if (is_throttle_rule(ra->rule_ops.mode) &&
+		(rb->rule_ops.mode == THROTTLE_OFF)) {
+		ret = -1;
+	}
+
+	return ret;
+}
+
+static void print_rules(struct rule_node_info *node_it)
+{
+	struct rules_def *node_rule = NULL;
+	int i;
+
+	if (!node_it) {
+		pr_err("%s: no node found\n", __func__);
+		return;
+	}
+
+	pr_info("\n Now printing rules for Node %d  cur rule %d\n",
+			node_it->id,
+			(node_it->cur_rule ? node_it->cur_rule->rule_id : -1));
+	list_for_each_entry(node_rule, &node_it->node_rules, link) {
+		pr_info("\n num Rules %d  rule Id %d\n",
+				node_it->num_rules, node_rule->rule_id);
+		pr_info("Rule: src_field %d\n", node_rule->rule_ops.src_field);
+		for (i = 0; i < node_rule->rule_ops.num_src; i++)
+			pr_info("Rule: src %d\n",
+					node_rule->rule_ops.src_id[i]);
+		for (i = 0; i < node_rule->rule_ops.num_dst; i++)
+			pr_info("Rule: dst %d dst_bw %llu\n",
+						node_rule->rule_ops.dst_node[i],
+						node_rule->rule_ops.dst_bw);
+		pr_info("Rule: thresh %llu op %d mode %d State %d\n",
+					node_rule->rule_ops.thresh,
+					node_rule->rule_ops.op,
+					node_rule->rule_ops.mode,
+					node_rule->state);
+	}
+}
+
+void print_all_rules(void)
+{
+	struct rule_node_info *node_it = NULL;
+
+	mutex_lock(&msm_bus_rules_lock);
+	list_for_each_entry(node_it, &node_list, link)
+		print_rules(node_it);
+	mutex_unlock(&msm_bus_rules_lock);
+}
+
+void print_rules_buf(char *buf, int max_buf)
+{
+	struct rule_node_info *node_it = NULL;
+	struct rules_def *node_rule = NULL;
+	int i;
+	int cnt = 0;
+
+	mutex_lock(&msm_bus_rules_lock);
+	list_for_each_entry(node_it, &node_list, link) {
+		cnt += scnprintf(buf + cnt, max_buf - cnt,
+			"\n Now printing rules for Node %d cur_rule %d\n",
+			node_it->id,
+			(node_it->cur_rule ? node_it->cur_rule->rule_id : -1));
+		list_for_each_entry(node_rule, &node_it->node_rules, link) {
+			cnt += scnprintf(buf + cnt, max_buf - cnt,
+				"\nNum Rules:%d ruleId %d STATE:%d change:%d\n",
+				node_it->num_rules, node_rule->rule_id,
+				node_rule->state, node_rule->state_change);
+			cnt += scnprintf(buf + cnt, max_buf - cnt,
+				"Src_field %d\n",
+				node_rule->rule_ops.src_field);
+			for (i = 0; i < node_rule->rule_ops.num_src; i++)
+				cnt += scnprintf(buf + cnt, max_buf - cnt,
+					"Src %d Cur Ib %llu Ab %llu\n",
+					node_rule->rule_ops.src_id[i],
+					node_rule->src_info[i].ib,
+					node_rule->src_info[i].ab);
+			for (i = 0; i < node_rule->rule_ops.num_dst; i++)
+				cnt += scnprintf(buf + cnt, max_buf - cnt,
+					"Dst %d dst_bw %llu\n",
+					node_rule->rule_ops.dst_node[0],
+					node_rule->rule_ops.dst_bw);
+			cnt += scnprintf(buf + cnt, max_buf - cnt,
+					"Thresh %llu op %d mode %d\n",
+					node_rule->rule_ops.thresh,
+					node_rule->rule_ops.op,
+					node_rule->rule_ops.mode);
+		}
+	}
+	mutex_unlock(&msm_bus_rules_lock);
+}
+
+static int copy_rule(struct bus_rule_type *src, struct rules_def *node_rule,
+			struct notifier_block *nb)
+{
+	int i;
+	int ret = 0;
+
+	memcpy(&node_rule->rule_ops, src,
+				sizeof(struct bus_rule_type));
+	node_rule->rule_ops.src_id = kzalloc(
+			(sizeof(int) * node_rule->rule_ops.num_src),
+							GFP_KERNEL);
+	if (!node_rule->rule_ops.src_id) {
+		pr_err("%s: Failed to allocate src_id\n", __func__);
+		return -ENOMEM;
+	}
+	memcpy(node_rule->rule_ops.src_id, src->src_id,
+				sizeof(int) * src->num_src);
+
+	if (!nb) {
+		node_rule->rule_ops.dst_node = kzalloc(
+			(sizeof(int) * node_rule->rule_ops.num_dst),
+						GFP_KERNEL);
+		if (!node_rule->rule_ops.dst_node)
+			return -ENOMEM;
+		memcpy(node_rule->rule_ops.dst_node, src->dst_node,
+						sizeof(int) * src->num_dst);
+	}
+
+	node_rule->num_src = src->num_src;
+	node_rule->src_info = kzalloc(
+		(sizeof(struct node_vote_info) * node_rule->rule_ops.num_src),
+							GFP_KERNEL);
+	if (!node_rule->src_info) {
+		pr_err("%s: Failed to allocate src_info\n", __func__);
+		return -ENOMEM;
+	}
+	for (i = 0; i < src->num_src; i++)
+		node_rule->src_info[i].id = src->src_id[i];
+
+	return ret;
+}
+
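+/*
+ * Add the given rules to their destination rule nodes (or to the shared
+ * NB_ID node when a notifier block is supplied), then sort the rule list
+ * and register the notifier block if one was given.
+ */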
+static bool __rule_register(int num_rules, struct bus_rule_type *rule,
+					struct notifier_block *nb)
+{
+	struct rule_node_info *node = NULL;
+	int i, j;
+	struct rules_def *node_rule = NULL;
+	int num_dst = 0;
+	bool reg_success = true;
+
+	if (num_rules <= 0)
+		return false;
+
+	for (i = 0; i < num_rules; i++) {
+		if (nb)
+			num_dst = 1;
+		else
+			num_dst = rule[i].num_dst;
+
+		for (j = 0; j < num_dst; j++) {
+			int id = 0;
+
+			if (nb)
+				id = NB_ID;
+			else
+				id = rule[i].dst_node[j];
+
+			node = gen_node(id, nb);
+			if (!node) {
+				pr_err("Error getting rule node\n");
+				reg_success = false;
+				goto exit_rule_register;
+			}
+			node_rule = kzalloc(sizeof(struct rules_def),
+						GFP_KERNEL);
+			if (!node_rule) {
+				reg_success = false;
+				goto exit_rule_register;
+			}
+
+			if (copy_rule(&rule[i], node_rule, nb)) {
+				pr_err("Error copying rule\n");
+				reg_success = false;
+				goto exit_rule_register;
+			}
+
+			node_rule->rule_id = node->num_rules++;
+			if (nb)
+				node->data = nb;
+
+			list_add_tail(&node_rule->link, &node->node_rules);
+		}
+	}
+	list_sort(NULL, &node->node_rules, node_rules_compare);
+	if (nb && nb != node->rule_notify_list.head)
+		raw_notifier_chain_register(&node->rule_notify_list, nb);
+exit_rule_register:
+	return reg_success;
+}
+
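+/*
+ * Return 0 when two rules have identical sources, destinations, op,
+ * threshold and destination bandwidth; non-zero otherwise.
+ */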
+static int comp_rules(struct bus_rule_type *rulea, struct bus_rule_type *ruleb)
+{
+	int ret = 1;
+
+	if (rulea->num_src == ruleb->num_src)
+		ret = memcmp(rulea->src_id, ruleb->src_id,
+				(sizeof(int) * rulea->num_src));
+	if (!ret && (rulea->num_dst == ruleb->num_dst))
+		ret = memcmp(rulea->dst_node, ruleb->dst_node,
+				(sizeof(int) * rulea->num_dst));
+	if (ret || (rulea->dst_bw != ruleb->dst_bw) ||
+		(rulea->op != ruleb->op) || (rulea->thresh != ruleb->thresh))
+		ret = 1;
+	return ret;
+}
+
+void msm_rule_register(int num_rules, struct bus_rule_type *rule,
+					struct notifier_block *nb)
+{
+	if (!rule || num_rules <= 0)
+		return;
+
+	mutex_lock(&msm_bus_rules_lock);
+	__rule_register(num_rules, rule, nb);
+	mutex_unlock(&msm_bus_rules_lock);
+}
+
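+/*
+ * Remove matching rules from their rule nodes, unregister the notifier
+ * once its node has no rules left, and free any now-empty rule nodes.
+ */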
+static bool __rule_unregister(int num_rules, struct bus_rule_type *rule,
+					struct notifier_block *nb)
+{
+	int i = 0;
+	struct rule_node_info *node = NULL;
+	struct rule_node_info *node_tmp = NULL;
+	struct rules_def *node_rule;
+	struct rules_def *node_rule_tmp;
+	bool match_found = false;
+
+	if (num_rules <= 0)
+		return false;
+
+	if (nb) {
+		node = get_node(NB_ID, nb);
+		if (!node) {
+			pr_err("%s: Can't find node\n", __func__);
+			goto exit_unregister_rule;
+		}
+		match_found = true;
+		list_for_each_entry_safe(node_rule, node_rule_tmp,
+					&node->node_rules, link) {
+			if (comp_rules(&node_rule->rule_ops,
+					&rule[i]) == 0) {
+				list_del(&node_rule->link);
+				kfree(node_rule);
+				match_found = true;
+				node->num_rules--;
+				list_sort(NULL,
+					&node->node_rules,
+					node_rules_compare);
+				break;
+			}
+		}
+		if (!node->num_rules)
+			raw_notifier_chain_unregister(
+					&node->rule_notify_list, nb);
+	} else {
+		for (i = 0; i < num_rules; i++) {
+			match_found = false;
+
+			list_for_each_entry(node, &node_list, link) {
+				list_for_each_entry_safe(node_rule,
+				node_rule_tmp, &node->node_rules, link) {
+					if (comp_rules(&node_rule->rule_ops,
+						&rule[i]) != 0)
+						continue;
+					list_del(&node_rule->link);
+					kfree(node_rule);
+					match_found = true;
+					node->num_rules--;
+					list_sort(NULL,
+						&node->node_rules,
+						node_rules_compare);
+					break;
+				}
+			}
+		}
+	}
+
+	list_for_each_entry_safe(node, node_tmp,
+					&node_list, link) {
+		if (!node->num_rules) {
+			pr_debug("Deleting Rule node %d\n", node->id);
+			list_del(&node->link);
+			kfree(node);
+		}
+	}
+exit_unregister_rule:
+	return match_found;
+}
+
+void msm_rule_unregister(int num_rules, struct bus_rule_type *rule,
+					struct notifier_block *nb)
+{
+	if (!rule || num_rules <= 0)
+		return;
+
+	mutex_lock(&msm_bus_rules_lock);
+	__rule_unregister(num_rules, rule, nb);
+	mutex_unlock(&msm_bus_rules_lock);
+}
+
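+/*
+ * Replace an existing rule under the rules lock: unregister the old rule
+ * and, if registering the new one fails, attempt to restore the old rule.
+ */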
+bool msm_rule_update(struct bus_rule_type *old_rule,
+			struct bus_rule_type *new_rule,
+			struct notifier_block *nb)
+{
+	bool rc = true;
+
+	if (!old_rule || !new_rule)
+		return false;
+	mutex_lock(&msm_bus_rules_lock);
+	if (!__rule_unregister(1, old_rule, nb)) {
+		rc = false;
+		goto exit_rule_update;
+	}
+
+	if (!__rule_register(1, new_rule, nb)) {
+		/*
+		 * Registering new rule has failed for some reason, attempt
+		 * to re-register the old rule and return error.
+		 */
+		__rule_register(1, old_rule, nb);
+		rc = false;
+	}
+exit_rule_update:
+	mutex_unlock(&msm_bus_rules_lock);
+	return rc;
+}
+
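+/*
+ * Force a re-evaluation of the rules for a node by issuing a zero
+ * bandwidth vote through a temporary bus-scale client.
+ */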
+void msm_rule_evaluate_rules(int node)
+{
+	struct msm_bus_client_handle *handle;
+
+	handle = msm_bus_scale_register(node, node, "tmp-rm", false);
+	if (!handle)
+		return;
+	msm_bus_scale_update_bw(handle, 0, 0);
+	msm_bus_scale_unregister(handle);
+}
+
+bool msm_rule_are_rules_registered(void)
+{
+	bool ret = false;
+
+	mutex_lock(&msm_bus_rules_lock);
+	if (list_empty(&node_list))
+		ret = false;
+	else
+		ret = true;
+	mutex_unlock(&msm_bus_rules_lock);
+	return ret;
+}
+
diff --git a/drivers/soc/qcom/msm_bus/msm_buspm_coresight_adhoc.c b/drivers/soc/qcom/msm_bus/msm_buspm_coresight_adhoc.c
new file mode 100644
index 0000000..bedd2f9
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_buspm_coresight_adhoc.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/coresight.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/list.h>
+
+struct msmbus_coresight_adhoc_clock_drvdata {
+	const char			*csdev_name;
+	struct clk			*clk;
+	struct list_head		 list;
+};
+
+struct msmbus_coresight_adhoc_drvdata {
+	struct device			*dev;
+	struct coresight_device		*csdev;
+	struct coresight_desc		*desc;
+	struct list_head		 clocks;
+};
+
+static int msmbus_coresight_enable_adhoc(struct coresight_device *csdev)
+{
+	struct msmbus_coresight_adhoc_clock_drvdata *clk;
+	struct msmbus_coresight_adhoc_drvdata *drvdata =
+		dev_get_drvdata(csdev->dev.parent);
+	long rate;
+
+	list_for_each_entry(clk, &drvdata->clocks, list) {
+		if (!strcmp(dev_name(&csdev->dev), clk->csdev_name)) {
+			rate = clk_round_rate(clk->clk, 1L);
+			clk_set_rate(clk->clk, rate);
+			return clk_prepare_enable(clk->clk);
+		}
+	}
+
+	return -ENOENT;
+}
+
+static void msmbus_coresight_disable_adhoc(struct coresight_device *csdev)
+{
+	struct msmbus_coresight_adhoc_clock_drvdata *clk;
+	struct msmbus_coresight_adhoc_drvdata *drvdata =
+		dev_get_drvdata(csdev->dev.parent);
+
+	list_for_each_entry(clk, &drvdata->clocks, list) {
+		if (!strcmp(dev_name(&csdev->dev), clk->csdev_name))
+			clk_disable_unprepare(clk->clk);
+	}
+}
+
+static const struct coresight_ops_source msmbus_coresight_adhoc_source_ops = {
+	.enable		= msmbus_coresight_enable_adhoc,
+	.disable	= msmbus_coresight_disable_adhoc,
+};
+
+static const struct coresight_ops msmbus_coresight_cs_ops = {
+	.source_ops	= &msmbus_coresight_adhoc_source_ops,
+};
+
+void msmbus_coresight_remove_adhoc(struct platform_device *pdev)
+{
+	struct msmbus_coresight_adhoc_clock_drvdata *clk, *next_clk;
+	struct msmbus_coresight_adhoc_drvdata *drvdata =
+		platform_get_drvdata(pdev);
+
+	msmbus_coresight_disable_adhoc(drvdata->csdev);
+	coresight_unregister(drvdata->csdev);
+	list_for_each_entry_safe(clk, next_clk, &drvdata->clocks, list) {
+		list_del(&clk->list);
+		devm_kfree(&pdev->dev, clk);
+	}
+	devm_kfree(&pdev->dev, drvdata->desc);
+	devm_kfree(&pdev->dev, drvdata);
+	platform_set_drvdata(pdev, NULL);
+}
+EXPORT_SYMBOL(msmbus_coresight_remove_adhoc);
+
+static int buspm_of_get_clk_adhoc(struct device_node *of_node,
+	struct msmbus_coresight_adhoc_drvdata *drvdata, const char *name)
+{
+	struct msmbus_coresight_adhoc_clock_drvdata *clk;
+
+	clk = devm_kzalloc(drvdata->dev, sizeof(*clk), GFP_KERNEL);
+
+	if (!clk)
+		return -ENOMEM;
+
+	clk->csdev_name = name;
+
+	clk->clk = of_clk_get_by_name(of_node, "bus_clk");
+	if (IS_ERR(clk->clk)) {
+		pr_err("Error: unable to get clock for coresight node %s\n",
+			name);
+		goto err;
+	}
+
+	list_add(&clk->list, &drvdata->clocks);
+	return 0;
+
+err:
+	devm_kfree(drvdata->dev, clk);
+	return -EINVAL;
+}
+
+int msmbus_coresight_init_adhoc(struct platform_device *pdev,
+		struct device_node *of_node)
+{
+	int ret;
+	struct device *dev = &pdev->dev;
+	struct coresight_platform_data *pdata;
+	struct msmbus_coresight_adhoc_drvdata *drvdata;
+	struct coresight_desc *desc;
+
+	pdata = of_get_coresight_platform_data(dev, of_node);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+
+	drvdata = platform_get_drvdata(pdev);
+	if (IS_ERR_OR_NULL(drvdata)) {
+		drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+		if (!drvdata)
+			return -ENOMEM;
+		INIT_LIST_HEAD(&drvdata->clocks);
+		drvdata->dev = &pdev->dev;
+		platform_set_drvdata(pdev, drvdata);
+	}
+	ret = buspm_of_get_clk_adhoc(of_node, drvdata, pdata->name);
+	if (ret) {
+		pr_err("Error getting clocks\n");
+		ret = -ENXIO;
+		goto err1;
+	}
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_BUS;
+	desc->ops = &msmbus_coresight_cs_ops;
+	desc->pdata = pdata;
+	desc->dev = &pdev->dev;
+	drvdata->desc = desc;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev)) {
+		pr_err("coresight: register failed\n");
+		ret = PTR_ERR(drvdata->csdev);
+		goto err0;
+	}
+
+	return 0;
+err0:
+	devm_kfree(dev, desc);
+err1:
+	devm_kfree(dev, drvdata);
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+EXPORT_SYMBOL(msmbus_coresight_init_adhoc);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM BusPM Adhoc CoreSight Driver");
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index c4004b0d..0d302ea 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -564,7 +564,6 @@
 		return;
 	}
 
-	timeout_data->timer.data = (unsigned long) timeout_data;
 	timeout_data->comm_type = comm_type;
 	timeout = jiffies + msecs_to_jiffies(timeout_vals[comm_type]);
 	mod_timer(&timeout_data->timer, timeout);
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index 81a274b..17af8ae 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -396,10 +396,10 @@
 	}
 }
 
-static void pet_task_wakeup(unsigned long data)
+static void pet_task_wakeup(struct timer_list *t)
 {
 	struct msm_watchdog_data *wdog_dd =
-		(struct msm_watchdog_data *)data;
+		from_timer(wdog_dd, t, pet_timer);
 	wdog_dd->timer_expired = true;
 	wdog_dd->timer_fired = sched_clock();
 	wake_up(&wdog_dd->pet_complete);
@@ -703,9 +703,7 @@
 	wdog_dd->user_pet_complete = true;
 	wdog_dd->user_pet_enabled = false;
 	wake_up_process(wdog_dd->watchdog_task);
-	init_timer(&wdog_dd->pet_timer);
-	wdog_dd->pet_timer.data = (unsigned long)wdog_dd;
-	wdog_dd->pet_timer.function = pet_task_wakeup;
+	timer_setup(&wdog_dd->pet_timer, pet_task_wakeup, 0);
 	wdog_dd->pet_timer.expires = jiffies + delay_time;
 	add_timer(&wdog_dd->pet_timer);
 
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 360b821..3d547f9 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2015, 2017-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1325,6 +1325,7 @@
 	.driver		= {
 		.name	= "spmi_pmic_arb",
 		.of_match_table = spmi_pmic_arb_match_table,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 	},
 };
 module_platform_driver(spmi_pmic_arb_driver);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index bb6e36a..4975e04 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -93,18 +93,18 @@
 #define GSI_TRB_ADDR_BIT_53_MASK	(1 << 21)
 #define GSI_TRB_ADDR_BIT_55_MASK	(1 << 23)
 
-#define	GSI_GENERAL_CFG_REG		(QSCRATCH_REG_OFFSET + 0xFC)
+#define	GSI_GENERAL_CFG_REG(offset) (QSCRATCH_REG_OFFSET + offset)
 #define	GSI_RESTART_DBL_PNTR_MASK	BIT(20)
 #define	GSI_CLK_EN_MASK			BIT(12)
 #define	BLOCK_GSI_WR_GO_MASK		BIT(1)
 #define	GSI_EN_MASK			BIT(0)
 
-#define GSI_DBL_ADDR_L(n)	((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
-#define GSI_DBL_ADDR_H(n)	((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
-#define GSI_RING_BASE_ADDR_L(n)	((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
-#define GSI_RING_BASE_ADDR_H(n)	((QSCRATCH_REG_OFFSET + 0x144) + (n*4))
+#define GSI_DBL_ADDR_L(offset, n)	((QSCRATCH_REG_OFFSET + offset) + (n*4))
+#define GSI_DBL_ADDR_H(offset, n)	((QSCRATCH_REG_OFFSET + offset) + (n*4))
+#define GSI_RING_BASE_ADDR_L(offset, n)	((QSCRATCH_REG_OFFSET + offset) + (n*4))
+#define GSI_RING_BASE_ADDR_H(offset, n)	((QSCRATCH_REG_OFFSET + offset) + (n*4))
 
-#define	GSI_IF_STS	(QSCRATCH_REG_OFFSET + 0x1A4)
+#define	GSI_IF_STS(offset)	(QSCRATCH_REG_OFFSET + offset)
 #define	GSI_WR_CTRL_STATE_MASK	BIT(15)
 
 #define DWC3_GEVNTCOUNT_EVNTINTRPTMASK		(1 << 31)
@@ -112,6 +112,16 @@
 #define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n)	(n << 16)
 #define DWC3_GEVENT_TYPE_GSI			0x3
 
+enum usb_gsi_reg {
+	GENERAL_CFG_REG,
+	DBL_ADDR_L,
+	DBL_ADDR_H,
+	RING_BASE_ADDR_L,
+	RING_BASE_ADDR_H,
+	IF_STS,
+	GSI_REG_MAX,
+};
+
 struct dwc3_msm_req_complete {
 	struct list_head list_item;
 	struct usb_request *req;
@@ -262,6 +272,8 @@
 	struct mutex suspend_resume_mutex;
 
 	enum usb_device_speed override_usb_speed;
+	u32			*gsi_reg;
+	int			gsi_reg_offset_cnt;
 };
 
 #define USB_HSPHY_3P3_VOL_MIN		3050000 /* uV */
@@ -926,8 +938,9 @@
 	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
 	int n = ep->ep_intr_num - 1;
 
-	dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
-			dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
+	dwc3_msm_write_reg(mdwc->base,
+		GSI_RING_BASE_ADDR_L(mdwc->gsi_reg[RING_BASE_ADDR_L], (n)),
+		dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
 
 	if (request->mapped_db_reg_phs_addr_lsb)
 		dma_unmap_resource(dwc->sysdev,
@@ -944,12 +957,16 @@
 		ep->name, request->db_reg_phs_addr_lsb,
 		(unsigned long long)request->mapped_db_reg_phs_addr_lsb);
 
-	dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n),
-			(u32)request->mapped_db_reg_phs_addr_lsb);
+	dwc3_msm_write_reg(mdwc->base,
+		GSI_DBL_ADDR_L(mdwc->gsi_reg[DBL_ADDR_L], (n)),
+		(u32)request->mapped_db_reg_phs_addr_lsb);
 	dev_dbg(mdwc->dev, "Ring Base Addr %d: %x (LSB)\n", n,
-			dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
+		dwc3_msm_read_reg(mdwc->base,
+			GSI_RING_BASE_ADDR_L(mdwc->gsi_reg[RING_BASE_ADDR_L],
+								(n))));
 	dev_dbg(mdwc->dev, "GSI DB Addr %d: %x (LSB)\n", n,
-			dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
+		dwc3_msm_read_reg(mdwc->base,
+			GSI_DBL_ADDR_L(mdwc->gsi_reg[DBL_ADDR_L], (n))));
 }
 
 /**
@@ -1282,14 +1299,18 @@
 	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
 
 	dwc3_msm_write_reg_field(mdwc->base,
-			GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
+		GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]),
+		GSI_CLK_EN_MASK, 1);
 	dwc3_msm_write_reg_field(mdwc->base,
-			GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
+		GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]),
+		GSI_RESTART_DBL_PNTR_MASK, 1);
 	dwc3_msm_write_reg_field(mdwc->base,
-			GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
+		GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]),
+		GSI_RESTART_DBL_PNTR_MASK, 0);
 	dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
 	dwc3_msm_write_reg_field(mdwc->base,
-			GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
+		GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]),
+		GSI_EN_MASK, 1);
 }
 
 /**
@@ -1308,7 +1329,8 @@
 	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
 
 	dwc3_msm_write_reg_field(mdwc->base,
-		GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
+		GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]),
+		BLOCK_GSI_WR_GO_MASK, block_db);
 }
 
 /**
@@ -1325,7 +1347,7 @@
 	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
 
 	while (dwc3_msm_read_reg_field(mdwc->base,
-		GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
+		GSI_IF_STS(mdwc->gsi_reg[IF_STS]), GSI_WR_CTRL_STATE_MASK)) {
 		if (!timeout--) {
 			dev_err(mdwc->dev,
 			"Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
@@ -3282,7 +3304,7 @@
 	struct dwc3	*dwc;
 	struct resource *res;
 	bool host_mode;
-	int ret = 0, i;
+	int ret = 0, size = 0, i;
 	u32 val;
 	unsigned long irq_type;
 
@@ -3435,6 +3457,29 @@
 	ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
 				&mdwc->num_gsi_event_buffers);
 
+	if (mdwc->num_gsi_event_buffers) {
+		of_get_property(node, "qcom,gsi-reg-offset", &size);
+		if (size) {
+			mdwc->gsi_reg = devm_kzalloc(dev, size, GFP_KERNEL);
+			if (!mdwc->gsi_reg)
+				return -ENOMEM;
+
+			mdwc->gsi_reg_offset_cnt =
+					(size / sizeof(*mdwc->gsi_reg));
+			if (mdwc->gsi_reg_offset_cnt != GSI_REG_MAX) {
+				dev_err(dev, "invalid reg offset count\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,gsi-reg-offset", mdwc->gsi_reg,
+				mdwc->gsi_reg_offset_cnt);
+		} else {
+			dev_err(dev, "missing qcom,gsi-reg-offset property\n");
+			return -EINVAL;
+		}
+	}
+
 	mdwc->use_pdc_interrupts = of_property_read_bool(node,
 				"qcom,use-pdc-interrupts");
 	dwc3_set_notifier(&dwc3_msm_notify_event);
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 5780fba..5ee1a29 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1404,17 +1404,39 @@
 	 */
 	if (!ncm_opts->bound) {
 		mutex_lock(&ncm_opts->lock);
+		ncm_opts->net = gether_setup_default();
+		if (IS_ERR(ncm_opts->net)) {
+			status = PTR_ERR(ncm_opts->net);
+			mutex_unlock(&ncm_opts->lock);
+			goto error;
+		}
 		gether_set_gadget(ncm_opts->net, cdev->gadget);
 		status = gether_register_netdev(ncm_opts->net);
 		mutex_unlock(&ncm_opts->lock);
-		if (status)
-			return status;
+		if (status) {
+			free_netdev(ncm_opts->net);
+			goto error;
+		}
 		ncm_opts->bound = true;
 	}
+
+	/* export host's Ethernet address in CDC format */
+	status = gether_get_host_addr_cdc(ncm_opts->net, ncm->ethaddr,
+				      sizeof(ncm->ethaddr));
+	if (status < 12) { /* strlen("01234567890a") */
+		ERROR(cdev, "%s: failed to get host eth addr, err %d\n",
+		__func__, status);
+		status = -EINVAL;
+		goto netdev_cleanup;
+	}
+	ncm->port.ioport = netdev_priv(ncm_opts->net);
+
 	us = usb_gstrings_attach(cdev, ncm_strings,
 				 ARRAY_SIZE(ncm_string_defs));
-	if (IS_ERR(us))
-		return PTR_ERR(us);
+	if (IS_ERR(us)) {
+		status = PTR_ERR(us);
+		goto netdev_cleanup;
+	}
 	ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id;
 	ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id;
 	ncm_data_intf.iInterface = us[STRING_DATA_IDX].id;
@@ -1514,7 +1536,10 @@
 		kfree(ncm->notify_req->buf);
 		usb_ep_free_request(ncm->notify, ncm->notify_req);
 	}
+netdev_cleanup:
+	gether_cleanup(netdev_priv(ncm_opts->net));
 
+error:
 	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
 
 	return status;
@@ -1562,8 +1587,6 @@
 	opts = container_of(f, struct f_ncm_opts, func_inst);
 	if (opts->bound)
 		gether_cleanup(netdev_priv(opts->net));
-	else
-		free_netdev(opts->net);
 	kfree(opts);
 }
 
@@ -1576,12 +1599,6 @@
 		return ERR_PTR(-ENOMEM);
 	mutex_init(&opts->lock);
 	opts->func_inst.free_func_inst = ncm_free_inst;
-	opts->net = gether_setup_default();
-	if (IS_ERR(opts->net)) {
-		struct net_device *net = opts->net;
-		kfree(opts);
-		return ERR_CAST(net);
-	}
 
 	config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
 
@@ -1604,6 +1621,8 @@
 static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
 {
 	struct f_ncm *ncm = func_to_ncm(f);
+	struct f_ncm_opts *opts = container_of(f->fi, struct f_ncm_opts,
+					func_inst);
 
 	DBG(c->cdev, "ncm unbind\n");
 
@@ -1614,13 +1633,15 @@
 
 	kfree(ncm->notify_req->buf);
 	usb_ep_free_request(ncm->notify, ncm->notify_req);
+
+	gether_cleanup(netdev_priv(opts->net));
+	opts->bound = false;
 }
 
 static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
 {
 	struct f_ncm		*ncm;
 	struct f_ncm_opts	*opts;
-	int status;
 
 	/* allocate and initialize one new instance */
 	ncm = kzalloc(sizeof(*ncm), GFP_KERNEL);
@@ -1630,20 +1651,9 @@
 	opts = container_of(fi, struct f_ncm_opts, func_inst);
 	mutex_lock(&opts->lock);
 	opts->refcnt++;
-
-	/* export host's Ethernet address in CDC format */
-	status = gether_get_host_addr_cdc(opts->net, ncm->ethaddr,
-				      sizeof(ncm->ethaddr));
-	if (status < 12) { /* strlen("01234567890a") */
-		kfree(ncm);
-		mutex_unlock(&opts->lock);
-		return ERR_PTR(-EINVAL);
-	}
 	ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr;
-
 	spin_lock_init(&ncm->lock);
 	ncm_reset_values(ncm);
-	ncm->port.ioport = netdev_priv(opts->net);
 	mutex_unlock(&opts->lock);
 	ncm->port.is_fixed = true;
 	ncm->port.supports_multi_frame = true;
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index 99790f9..0bca45b 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -89,8 +89,10 @@
 #define	MSM_BUS_BCM_CO1 7041
 #define	MSM_BUS_BCM_CO2 7042
 
+#define	MSM_BUS_RSC_FIRST 8000
 #define	MSM_BUS_RSC_APPS 8000
 #define	MSM_BUS_RSC_DISP 8001
+#define	MSM_BUS_RSC_LAST 8001
 
 #define	MSM_BUS_BCM_MC0_DISPLAY 27000
 #define	MSM_BUS_BCM_SH0_DISPLAY 27001
diff --git a/include/dt-bindings/msm/msm-bus-rule-ops.h b/include/dt-bindings/msm/msm-bus-rule-ops.h
new file mode 100644
index 0000000..12e7982
--- /dev/null
+++ b/include/dt-bindings/msm/msm-bus-rule-ops.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MSM_BUS_RULE_OPS_H
+#define __MSM_BUS_RULE_OPS_H
+
+#define FLD_IB	0
+#define FLD_AB	1
+#define FLD_CLK	2
+
+#define OP_LE	0
+#define OP_LT	1
+#define OP_GE	2
+#define OP_GT	3
+#define OP_NOOP	4
+
+#define RULE_STATE_NOT_APPLIED	0
+#define RULE_STATE_APPLIED	1
+
+#define THROTTLE_ON	0
+#define THROTTLE_OFF	1
+#define THROTTLE_REG	2
+
+
+#endif
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 147bdec..07f2d16 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -54,6 +54,7 @@
  *     cpu_present_mask - has bit 'cpu' set iff cpu is populated
  *     cpu_online_mask  - has bit 'cpu' set iff cpu available to scheduler
  *     cpu_active_mask  - has bit 'cpu' set iff cpu available to migration
+ *     cpu_isolated_mask- has bit 'cpu' set iff cpu isolated
  *
  *  If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
  *
@@ -90,29 +91,35 @@
 extern struct cpumask __cpu_online_mask;
 extern struct cpumask __cpu_present_mask;
 extern struct cpumask __cpu_active_mask;
+extern struct cpumask __cpu_isolated_mask;
 #define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
 #define cpu_online_mask   ((const struct cpumask *)&__cpu_online_mask)
 #define cpu_present_mask  ((const struct cpumask *)&__cpu_present_mask)
 #define cpu_active_mask   ((const struct cpumask *)&__cpu_active_mask)
+#define cpu_isolated_mask ((const struct cpumask *)&__cpu_isolated_mask)
 
 #if NR_CPUS > 1
 #define num_online_cpus()	cpumask_weight(cpu_online_mask)
 #define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
 #define num_present_cpus()	cpumask_weight(cpu_present_mask)
 #define num_active_cpus()	cpumask_weight(cpu_active_mask)
+#define num_isolated_cpus()	cpumask_weight(cpu_isolated_mask)
 #define cpu_online(cpu)		cpumask_test_cpu((cpu), cpu_online_mask)
 #define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
 #define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
 #define cpu_active(cpu)		cpumask_test_cpu((cpu), cpu_active_mask)
+#define cpu_isolated(cpu)	cpumask_test_cpu((cpu), cpu_isolated_mask)
 #else
 #define num_online_cpus()	1U
 #define num_possible_cpus()	1U
 #define num_present_cpus()	1U
 #define num_active_cpus()	1U
+#define num_isolated_cpus()	0U
 #define cpu_online(cpu)		((cpu) == 0)
 #define cpu_possible(cpu)	((cpu) == 0)
 #define cpu_present(cpu)	((cpu) == 0)
 #define cpu_active(cpu)		((cpu) == 0)
+#define cpu_isolated(cpu)	((cpu) != 0)
 #endif
 
 static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
@@ -777,6 +784,7 @@
 #define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
 #define for_each_online_cpu(cpu)   for_each_cpu((cpu), cpu_online_mask)
 #define for_each_present_cpu(cpu)  for_each_cpu((cpu), cpu_present_mask)
+#define for_each_isolated_cpu(cpu) for_each_cpu((cpu), cpu_isolated_mask)
 
 /* Wrappers for arch boot code to manipulate normally-constant masks */
 void init_cpu_present(const struct cpumask *src);
@@ -824,6 +832,15 @@
 		cpumask_clear_cpu(cpu, &__cpu_active_mask);
 }
 
+static inline void
+set_cpu_isolated(unsigned int cpu, bool isolated)
+{
+	if (isolated)
+		cpumask_set_cpu(cpu, &__cpu_isolated_mask);
+	else
+		cpumask_clear_cpu(cpu, &__cpu_isolated_mask);
+}
+
 
 /**
  * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
diff --git a/include/linux/msm-bus.h b/include/linux/msm-bus.h
index 9af4867..519682d 100644
--- a/include/linux/msm-bus.h
+++ b/include/linux/msm-bus.h
@@ -211,7 +211,6 @@
 		struct platform_device *pdev, struct device_node *of_node);
 struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev);
 struct msm_bus_scale_pdata *msm_bus_cl_get_pdata_from_dev(struct device *dev);
-void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata);
 #else
 static inline struct msm_bus_scale_pdata
 *msm_bus_cl_get_pdata(struct platform_device *pdev)
@@ -224,11 +223,11 @@
 {
 	return NULL;
 }
+#endif
 
 static inline void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata)
 {
 }
-#endif
 
 #ifdef CONFIG_DEBUG_BUS_VOTER
 int msm_bus_floor_vote_context(const char *name, u64 floor_hz,
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 5025cba..1e04b07 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -46,6 +46,7 @@
 	POWER_SUPPLY_CHARGE_TYPE_NONE,
 	POWER_SUPPLY_CHARGE_TYPE_TRICKLE,
 	POWER_SUPPLY_CHARGE_TYPE_FAST,
+	POWER_SUPPLY_CHARGE_TYPE_TAPER,
 };
 
 enum {
@@ -58,6 +59,9 @@
 	POWER_SUPPLY_HEALTH_COLD,
 	POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
 	POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE,
+	POWER_SUPPLY_HEALTH_WARM,
+	POWER_SUPPLY_HEALTH_COOL,
+	POWER_SUPPLY_HEALTH_HOT,
 };
 
 enum {
@@ -85,6 +89,62 @@
 	POWER_SUPPLY_SCOPE_DEVICE,
 };
 
+enum {
+	POWER_SUPPLY_DP_DM_UNKNOWN = 0,
+	POWER_SUPPLY_DP_DM_PREPARE = 1,
+	POWER_SUPPLY_DP_DM_UNPREPARE = 2,
+	POWER_SUPPLY_DP_DM_CONFIRMED_HVDCP3 = 3,
+	POWER_SUPPLY_DP_DM_DP_PULSE = 4,
+	POWER_SUPPLY_DP_DM_DM_PULSE = 5,
+	POWER_SUPPLY_DP_DM_DP0P6_DMF = 6,
+	POWER_SUPPLY_DP_DM_DP0P6_DM3P3 = 7,
+	POWER_SUPPLY_DP_DM_DPF_DMF = 8,
+	POWER_SUPPLY_DP_DM_DPR_DMR = 9,
+	POWER_SUPPLY_DP_DM_HVDCP3_SUPPORTED = 10,
+	POWER_SUPPLY_DP_DM_ICL_DOWN = 11,
+	POWER_SUPPLY_DP_DM_ICL_UP = 12,
+	POWER_SUPPLY_DP_DM_FORCE_5V = 13,
+	POWER_SUPPLY_DP_DM_FORCE_9V = 14,
+	POWER_SUPPLY_DP_DM_FORCE_12V = 15,
+};
+
+enum {
+	POWER_SUPPLY_PL_NONE,
+	POWER_SUPPLY_PL_USBIN_USBIN,
+	POWER_SUPPLY_PL_USBIN_USBIN_EXT,
+	POWER_SUPPLY_PL_USBMID_USBMID,
+};
+
+enum {
+	POWER_SUPPLY_CHARGER_SEC_NONE = 0,
+	POWER_SUPPLY_CHARGER_SEC_CP,
+	POWER_SUPPLY_CHARGER_SEC_PL,
+	POWER_SUPPLY_CHARGER_SEC_CP_PL,
+};
+
+enum {
+	POWER_SUPPLY_CP_NONE = 0,
+	POWER_SUPPLY_CP_HVDCP3,
+	POWER_SUPPLY_CP_PPS,
+	POWER_SUPPLY_CP_WIRELESS,
+};
+
+enum {
+	POWER_SUPPLY_CONNECTOR_TYPEC,
+	POWER_SUPPLY_CONNECTOR_MICRO_USB,
+};
+
+enum {
+	POWER_SUPPLY_PL_STACKED_BATFET,
+	POWER_SUPPLY_PL_NON_STACKED_BATFET,
+};
+
+enum {
+	POWER_SUPPLY_PD_INACTIVE = 0,
+	POWER_SUPPLY_PD_ACTIVE,
+	POWER_SUPPLY_PD_PPS_ACTIVE,
+};
+
 enum power_supply_property {
 	/* Properties of type `int' */
 	POWER_SUPPLY_PROP_STATUS = 0,
@@ -155,12 +215,112 @@
 	POWER_SUPPLY_PROP_USB_HC,
 	POWER_SUPPLY_PROP_USB_OTG,
 	POWER_SUPPLY_PROP_CHARGE_ENABLED,
+	POWER_SUPPLY_PROP_SET_SHIP_MODE,
+	POWER_SUPPLY_PROP_REAL_TYPE,
+	POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+	POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
+	POWER_SUPPLY_PROP_CAPACITY_RAW,
+	POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_STEP_CHARGING_STEP,
+	POWER_SUPPLY_PROP_PIN_ENABLED,
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_TRIM,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+	POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED,
+	POWER_SUPPLY_PROP_VCHG_LOOP_DBC_BYPASS,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW,
+	POWER_SUPPLY_PROP_HI_POWER,
+	POWER_SUPPLY_PROP_LOW_POWER,
+	POWER_SUPPLY_PROP_COOL_TEMP,
+	POWER_SUPPLY_PROP_WARM_TEMP,
+	POWER_SUPPLY_PROP_COLD_TEMP,
+	POWER_SUPPLY_PROP_HOT_TEMP,
+	POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+	POWER_SUPPLY_PROP_RESISTANCE,
+	POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE,
+	POWER_SUPPLY_PROP_RESISTANCE_ID, /* in Ohms */
+	POWER_SUPPLY_PROP_RESISTANCE_NOW,
+	POWER_SUPPLY_PROP_FLASH_CURRENT_MAX,
+	POWER_SUPPLY_PROP_UPDATE_NOW,
+	POWER_SUPPLY_PROP_ESR_COUNT,
+	POWER_SUPPLY_PROP_BUCK_FREQ,
+	POWER_SUPPLY_PROP_BOOST_CURRENT,
+	POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE,
+	POWER_SUPPLY_PROP_CHARGE_DONE,
+	POWER_SUPPLY_PROP_FLASH_ACTIVE,
+	POWER_SUPPLY_PROP_FLASH_TRIGGER,
+	POWER_SUPPLY_PROP_FORCE_TLIM,
+	POWER_SUPPLY_PROP_DP_DM,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE,
+	POWER_SUPPLY_PROP_CURRENT_QNOVO,
+	POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
+	POWER_SUPPLY_PROP_RERUN_AICL,
+	POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+	POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED,
+	POWER_SUPPLY_PROP_RESTRICTED_CHARGING,
+	POWER_SUPPLY_PROP_CURRENT_CAPABILITY,
+	POWER_SUPPLY_PROP_TYPEC_MODE,
+	POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION, /* 0: N/C, 1: CC1, 2: CC2 */
+	POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
+	POWER_SUPPLY_PROP_TYPEC_SRC_RP,
+	POWER_SUPPLY_PROP_PD_ALLOWED,
+	POWER_SUPPLY_PROP_PD_ACTIVE,
+	POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
+	POWER_SUPPLY_PROP_PD_CURRENT_MAX,
+	POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED,
+	POWER_SUPPLY_PROP_CHARGER_TEMP,
+	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+	POWER_SUPPLY_PROP_PARALLEL_DISABLE,
+	POWER_SUPPLY_PROP_PE_START,
+	POWER_SUPPLY_PROP_SOC_REPORTING_READY,
+	POWER_SUPPLY_PROP_DEBUG_BATTERY,
+	POWER_SUPPLY_PROP_FCC_DELTA,
+	POWER_SUPPLY_PROP_ICL_REDUCTION,
+	POWER_SUPPLY_PROP_PARALLEL_MODE,
+	POWER_SUPPLY_PROP_DIE_HEALTH,
+	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
+	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
+	POWER_SUPPLY_PROP_HW_CURRENT_MAX,
+	POWER_SUPPLY_PROP_PR_SWAP,
+	POWER_SUPPLY_PROP_CC_STEP,
+	POWER_SUPPLY_PROP_CC_STEP_SEL,
+	POWER_SUPPLY_PROP_SW_JEITA_ENABLED,
+	POWER_SUPPLY_PROP_PD_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
+	POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CONNECTOR_TYPE,
+	POWER_SUPPLY_PROP_PARALLEL_BATFET_MODE,
+	POWER_SUPPLY_PROP_PARALLEL_FCC_MAX,
+	POWER_SUPPLY_PROP_MIN_ICL,
+	POWER_SUPPLY_PROP_MOISTURE_DETECTED,
+	POWER_SUPPLY_PROP_BATT_PROFILE_VERSION,
+	POWER_SUPPLY_PROP_BATT_FULL_CURRENT,
+	POWER_SUPPLY_PROP_RECHARGE_SOC,
+	POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED,
+	POWER_SUPPLY_PROP_SMB_EN_MODE,
+	POWER_SUPPLY_PROP_SMB_EN_REASON,
+	POWER_SUPPLY_PROP_ESR_ACTUAL,
+	POWER_SUPPLY_PROP_ESR_NOMINAL,
+	POWER_SUPPLY_PROP_SOH,
+	POWER_SUPPLY_PROP_CLEAR_SOH,
+	POWER_SUPPLY_PROP_FORCE_RECHARGE,
+	POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE,
+	POWER_SUPPLY_PROP_TOGGLE_STAT,
+	POWER_SUPPLY_PROP_MAIN_FCC_MAX,
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
 	POWER_SUPPLY_PROP_MODEL_NAME,
 	POWER_SUPPLY_PROP_MANUFACTURER,
 	POWER_SUPPLY_PROP_SERIAL_NUMBER,
+	POWER_SUPPLY_PROP_BATTERY_TYPE,
+	POWER_SUPPLY_PROP_CYCLE_COUNTS,
 };
 
 enum power_supply_type {
@@ -176,6 +336,16 @@
 	POWER_SUPPLY_TYPE_USB_PD,		/* Power Delivery Port */
 	POWER_SUPPLY_TYPE_USB_PD_DRP,		/* PD Dual Role Port */
 	POWER_SUPPLY_TYPE_APPLE_BRICK_ID,	/* Apple Charging Method */
+	POWER_SUPPLY_TYPE_USB_HVDCP,		/* High Voltage DCP */
+	POWER_SUPPLY_TYPE_USB_HVDCP_3,		/* Efficient High Voltage DCP */
+	POWER_SUPPLY_TYPE_WIRELESS,		/* Accessory Charger Adapters */
+	POWER_SUPPLY_TYPE_USB_FLOAT,		/* Floating charger */
+	POWER_SUPPLY_TYPE_BMS,			/* Battery Monitor System */
+	POWER_SUPPLY_TYPE_PARALLEL,		/* Parallel Path */
+	POWER_SUPPLY_TYPE_MAIN,			/* Main Path */
+	POWER_SUPPLY_TYPE_WIPOWER,		/* Wipower */
+	POWER_SUPPLY_TYPE_UFP,			/* Type-C UFP */
+	POWER_SUPPLY_TYPE_DFP,			/* Type-C DFP */
 };
 
 enum power_supply_usb_type {
@@ -191,6 +361,37 @@
 	POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID,	/* Apple Charging Method */
 };
 
+/* Indicates USB Type-C CC connection status */
+enum power_supply_typec_mode {
+	POWER_SUPPLY_TYPEC_NONE,
+
+	/* Acting as source */
+	POWER_SUPPLY_TYPEC_SINK,		/* Rd only */
+	POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE,	/* Rd/Ra */
+	POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY,/* Rd/Rd */
+	POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER,	/* Ra/Ra */
+	POWER_SUPPLY_TYPEC_POWERED_CABLE_ONLY,	/* Ra only */
+
+	/* Acting as sink */
+	POWER_SUPPLY_TYPEC_SOURCE_DEFAULT,	/* Rp default */
+	POWER_SUPPLY_TYPEC_SOURCE_MEDIUM,	/* Rp 1.5A */
+	POWER_SUPPLY_TYPEC_SOURCE_HIGH,		/* Rp 3A */
+	POWER_SUPPLY_TYPEC_NON_COMPLIANT,
+};
+
+enum power_supply_typec_src_rp {
+	POWER_SUPPLY_TYPEC_SRC_RP_STD,
+	POWER_SUPPLY_TYPEC_SRC_RP_1P5A,
+	POWER_SUPPLY_TYPEC_SRC_RP_3A
+};
+
+enum power_supply_typec_power_role {
+	POWER_SUPPLY_TYPEC_PR_NONE,		/* CC lines in high-Z */
+	POWER_SUPPLY_TYPEC_PR_DUAL,
+	POWER_SUPPLY_TYPEC_PR_SINK,
+	POWER_SUPPLY_TYPEC_PR_SOURCE,
+};
+
 enum power_supply_notifier_events {
 	PSY_EVENT_PROP_CHANGED,
 };
diff --git a/include/linux/show_mem_notifier.h b/include/linux/show_mem_notifier.h
new file mode 100644
index 0000000..17d32fe
--- /dev/null
+++ b/include/linux/show_mem_notifier.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/notifier.h>
+
+int show_mem_notifier_register(struct notifier_block *nb);
+
+int show_mem_notifier_unregister(struct notifier_block *nb);
+
+void show_mem_call_notifiers(void);
diff --git a/include/trace/events/trace_msm_bus.h b/include/trace/events/trace_msm_bus.h
new file mode 100644
index 0000000..e21917c
--- /dev/null
+++ b/include/trace/events/trace_msm_bus.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msm_bus
+
+#if !defined(_TRACE_MSM_BUS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MSM_BUS_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(bus_update_request,
+
+	TP_PROTO(int sec, int nsec, const char *name, int src, int dest,
+		unsigned long long ab, unsigned long long ib),
+
+	TP_ARGS(sec, nsec, name, src, dest, ab, ib),
+
+	TP_STRUCT__entry(
+		__field(int, sec)
+		__field(int, nsec)
+		__string(name, name)
+		__field(int, src)
+		__field(int, dest)
+		__field(u64, ab)
+		__field(u64, ib)
+	),
+
+	TP_fast_assign(
+		__entry->sec = sec;
+		__entry->nsec = nsec;
+		__assign_str(name, name);
+		__entry->src = src;
+		__entry->dest = dest;
+		__entry->ab = ab;
+		__entry->ib = ib;
+	),
+
+	TP_printk("time= %u.%09u name=%s src=%d dest=%d ab=%llu ib=%llu",
+		__entry->sec,
+		__entry->nsec,
+		__get_str(name),
+		__entry->src,
+		__entry->dest,
+		(unsigned long long)__entry->ab,
+		(unsigned long long)__entry->ib)
+);
+
+TRACE_EVENT(bus_update_request_end,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+	),
+
+	TP_printk("client-name=%s", __get_str(name))
+);
+
+TRACE_EVENT(bus_bimc_config_limiter,
+
+	TP_PROTO(int mas_id, unsigned long long cur_lim_bw),
+
+	TP_ARGS(mas_id, cur_lim_bw),
+
+	TP_STRUCT__entry(
+		__field(int, mas_id)
+		__field(u64, cur_lim_bw)
+	),
+
+	TP_fast_assign(
+		__entry->mas_id = mas_id;
+		__entry->cur_lim_bw = cur_lim_bw;
+	),
+
+	TP_printk("Master=%d cur_lim_bw=%llu",
+		__entry->mas_id,
+		(unsigned long long)__entry->cur_lim_bw)
+);
+
+TRACE_EVENT(bus_avail_bw,
+
+	TP_PROTO(unsigned long long cur_bimc_bw, unsigned long long cur_mdp_bw),
+
+	TP_ARGS(cur_bimc_bw, cur_mdp_bw),
+
+	TP_STRUCT__entry(
+		__field(u64, cur_bimc_bw)
+		__field(u64, cur_mdp_bw)
+	),
+
+	TP_fast_assign(
+		__entry->cur_bimc_bw = cur_bimc_bw;
+		__entry->cur_mdp_bw = cur_mdp_bw;
+	),
+
+	TP_printk("cur_bimc_bw = %llu cur_mdp_bw = %llu",
+		(unsigned long long)__entry->cur_bimc_bw,
+		(unsigned long long)__entry->cur_mdp_bw)
+);
+
+TRACE_EVENT(bus_rules_matches,
+
+	TP_PROTO(int node_id, int rule_id, unsigned long long node_ab,
+		unsigned long long node_ib, unsigned long long node_clk),
+
+	TP_ARGS(node_id, rule_id, node_ab, node_ib, node_clk),
+
+	TP_STRUCT__entry(
+		__field(int, node_id)
+		__field(int, rule_id)
+		__field(u64, node_ab)
+		__field(u64, node_ib)
+		__field(u64, node_clk)
+	),
+
+	TP_fast_assign(
+		__entry->node_id = node_id;
+		__entry->rule_id = rule_id;
+		__entry->node_ab = node_ab;
+		__entry->node_ib = node_ib;
+		__entry->node_clk = node_clk;
+	),
+
+	TP_printk("Rule match node%d rule%d node-ab%llu:ib%llu:clk%llu",
+		__entry->node_id, __entry->rule_id,
+		(unsigned long long)__entry->node_ab,
+		(unsigned long long)__entry->node_ib,
+		(unsigned long long)__entry->node_clk)
+);
+
+TRACE_EVENT(bus_bke_params,
+
+	TP_PROTO(u32 gc, u32 gp, u32 thl, u32 thm, u32 thh),
+
+	TP_ARGS(gc, gp, thl, thm, thh),
+
+	TP_STRUCT__entry(
+		__field(u32, gc)
+		__field(u32, gp)
+		__field(u32, thl)
+		__field(u32, thm)
+		__field(u32, thh)
+	),
+
+	TP_fast_assign(
+		__entry->gc = gc;
+		__entry->gp = gp;
+		__entry->thl = thl;
+		__entry->thm = thm;
+		__entry->thh = thh;
+	),
+
+	TP_printk("BKE Params GC=0x%x GP=0x%x THL=0x%x THM=0x%x THH=0x%x",
+		__entry->gc, __entry->gp, __entry->thl, __entry->thm,
+			__entry->thh)
+);
+
+TRACE_EVENT(bus_client_status,
+
+	TP_PROTO(const char *name, int src, int dest,
+		unsigned long long ab, unsigned long long ib, int active_only),
+
+	TP_ARGS(name, src, dest, ab, ib, active_only),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(int, src)
+		__field(int, dest)
+		__field(u64, ab)
+		__field(u64, ib)
+		__field(int, active_only)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->src = src;
+		__entry->dest = dest;
+		__entry->ab = ab;
+		__entry->ib = ib;
+		__entry->active_only = active_only;
+	),
+
+	TP_printk("name=%s src=%d dest=%d ab=%llu ib=%llu active_only=%d",
+		__get_str(name),
+		__entry->src,
+		__entry->dest,
+		(unsigned long long)__entry->ab,
+		(unsigned long long)__entry->ib,
+		__entry->active_only)
+);
+
+TRACE_EVENT(bus_agg_bw,
+
+	TP_PROTO(unsigned int node_id, int rpm_id, int ctx_set,
+		unsigned long long agg_ab),
+
+	TP_ARGS(node_id, rpm_id, ctx_set, agg_ab),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, node_id)
+		__field(int, rpm_id)
+		__field(int, ctx_set)
+		__field(u64, agg_ab)
+	),
+
+	TP_fast_assign(
+		__entry->node_id = node_id;
+		__entry->rpm_id = rpm_id;
+		__entry->ctx_set = ctx_set;
+		__entry->agg_ab = agg_ab;
+	),
+
+	TP_printk("node_id:%u rpm_id:%d rpm_ctx:%d agg_ab:%llu",
+		__entry->node_id,
+		__entry->rpm_id,
+		__entry->ctx_set,
+		(unsigned long long)__entry->agg_ab)
+);
+#endif
+#define TRACE_INCLUDE_FILE trace_msm_bus
+#include <trace/define_trace.h>
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index ed0a120..dfb82aa 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -110,11 +110,16 @@
 	SNDRV_HWDEP_IFACE_LINE6,	/* Line6 USB processors */
 	SNDRV_HWDEP_IFACE_FW_MOTU,	/* MOTU FireWire series */
 	SNDRV_HWDEP_IFACE_FW_FIREFACE,	/* RME Fireface series */
+	SNDRV_HWDEP_IFACE_AUDIO_BE,	/* Backend Audio Control */
+	SNDRV_HWDEP_IFACE_AUDIO_CODEC,  /* codec Audio Control */
 
 	/* Don't forget to change the following: */
-	SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_FIREFACE
+	SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_AUDIO_CODEC
 };
 
+#define SNDRV_HWDEP_IFACE_AUDIO_BE SNDRV_HWDEP_IFACE_AUDIO_BE
+#define SNDRV_HWDEP_IFACE_AUDIO_CODEC SNDRV_HWDEP_IFACE_AUDIO_CODEC
+
 struct snd_hwdep_info {
 	unsigned int device;		/* WR: device number */
 	int card;			/* R: card number */
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index e9da492..50084b7 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -261,8 +261,15 @@
 
 struct snd_enc_wma {
 	__u32 super_block_align; /* WMA Type-specific data */
+	__u32 bits_per_sample;
+	__u32 channelmask;
+	__u32 encodeopt;
+	__u32 encodeopt1;
+	__u32 encodeopt2;
+	__u32 avg_bit_rate;
 };
 
+#define SND_ENC_WMA_EXTENTED_SUPPORT
 
 /**
  * struct snd_enc_vorbis
@@ -337,12 +344,23 @@
 	__s32 reserved[15];	/* Can be used for SND_AUDIOCODEC_BESPOKE */
 } __attribute__((packed, aligned(4)));
 
+struct snd_dec_flac {
+	__u16 sample_size;
+	__u16 min_blk_size;
+	__u16 max_blk_size;
+	__u16 min_frame_size;
+	__u16 max_frame_size;
+} __attribute__((packed, aligned(4)));
+
+#define SND_DEC_FLAC_SUPPORTED
+
 union snd_codec_options {
 	struct snd_enc_wma wma;
 	struct snd_enc_vorbis vorbis;
 	struct snd_enc_real real;
 	struct snd_enc_flac flac;
 	struct snd_enc_generic generic;
+	struct snd_dec_flac flac_dec;
 } __attribute__((packed, aligned(4)));
 
 /** struct snd_codec_desc - description of codec capabilities
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5224704..ffb6553 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2231,6 +2231,9 @@
 struct cpumask __cpu_active_mask __read_mostly;
 EXPORT_SYMBOL(__cpu_active_mask);
 
+struct cpumask __cpu_isolated_mask __read_mostly;
+EXPORT_SYMBOL(__cpu_isolated_mask);
+
 void init_cpu_present(const struct cpumask *src)
 {
 	cpumask_copy(&__cpu_present_mask, src);
@@ -2246,6 +2249,11 @@
 	cpumask_copy(&__cpu_online_mask, src);
 }
 
+void init_cpu_isolated(const struct cpumask *src)
+{
+	cpumask_copy(&__cpu_isolated_mask, src);
+}
+
 /*
  * Activate the first processor.
  */
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 10e6d8b..c18dadd 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -150,6 +150,11 @@
 	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
 		msg, raw_smp_processor_id(), current->comm,
 		task_pid_nr(current), lock);
+#ifdef CONFIG_DEBUG_SPINLOCK_BITE_ON_BUG
+	msm_trigger_wdog_bite();
+#elif defined(CONFIG_DEBUG_SPINLOCK_PANIC_ON_BUG)
+	BUG();
+#endif
 	dump_stack();
 }
 
diff --git a/kernel/trace/ipc_logging_debug.c b/kernel/trace/ipc_logging_debug.c
index 6ef25e5..0a767a7 100644
--- a/kernel/trace/ipc_logging_debug.c
+++ b/kernel/trace/ipc_logging_debug.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2015, 2017-2018, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -70,18 +70,19 @@
 	struct dentry *d = file->f_path.dentry;
 	char *buffer;
 	int bsize;
-	int srcu_idx;
 	int r;
 
-	r = debugfs_use_file_start(d, &srcu_idx);
-	if (!r) {
-		ilctxt = file->private_data;
-		r = kref_get_unless_zero(&ilctxt->refcount) ? 0 : -EIO;
-	}
-	debugfs_use_file_finish(srcu_idx);
+	r = debugfs_file_get(d);
 	if (r)
 		return r;
 
+	ilctxt = file->private_data;
+	r = kref_get_unless_zero(&ilctxt->refcount) ? 0 : -EIO;
+	if (r) {
+		debugfs_file_put(d);
+		return r;
+	}
+
 	buffer = kmalloc(count, GFP_KERNEL);
 	if (!buffer) {
 		bsize = -ENOMEM;
@@ -102,6 +103,7 @@
 
 done:
 	ipc_log_context_put(ilctxt);
+	debugfs_file_put(d);
 	return bsize;
 }
 
diff --git a/lib/Makefile b/lib/Makefile
index ca3f7eb..3e2f6c3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -33,6 +33,7 @@
 lib-y	+= kobject.o klist.o
 obj-y	+= lockref.o
 
+KASAN_SANITIZE_find_bit.o := n
 obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
diff --git a/mm/Makefile b/mm/Makefile
index 26ef77a..7332f89 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -39,7 +39,7 @@
 			   mm_init.o mmu_context.o percpu.o slab_common.o \
 			   compaction.o vmacache.o \
 			   interval_tree.o list_lru.o workingset.o \
-			   debug.o $(mmu-y)
+			   debug.o $(mmu-y) showmem.o
 
 obj-y += init-mm.o
 
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 97db0e8..c20dbd6 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -192,7 +192,7 @@
 	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
 }
 
-void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
+void free_bootmem_late(unsigned long physaddr, unsigned long size)
 {
 	unsigned long cursor, end;
 
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 439af3b..2bd550e 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -81,7 +81,7 @@
  * down, but we are still initializing the system.  Pages are given directly
  * to the page allocator, no bootmem metadata is updated because it is gone.
  */
-void __init free_bootmem_late(unsigned long addr, unsigned long size)
+void free_bootmem_late(unsigned long addr, unsigned long size)
 {
 	unsigned long cursor, end;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7ae2649..a2bd843 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1264,7 +1264,7 @@
 	local_irq_restore(flags);
 }
 
-static void __init __free_pages_boot_core(struct page *page, unsigned int order)
+static void __free_pages_boot_core(struct page *page, unsigned int order)
 {
 	unsigned int nr_pages = 1 << order;
 	struct page *p = page;
@@ -1338,7 +1338,7 @@
 #endif
 
 
-void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
+void __free_pages_bootmem(struct page *page, unsigned long pfn,
 							unsigned int order)
 {
 	if (early_page_uninitialised(pfn))
diff --git a/mm/showmem.c b/mm/showmem.c
new file mode 100644
index 0000000..62bea47
--- /dev/null
+++ b/mm/showmem.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+
+ATOMIC_NOTIFIER_HEAD(show_mem_notifier);
+
+int show_mem_notifier_register(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&show_mem_notifier, nb);
+}
+
+int show_mem_notifier_unregister(struct notifier_block *nb)
+{
+	return  atomic_notifier_chain_unregister(&show_mem_notifier, nb);
+}
+
+void show_mem_call_notifiers(void)
+{
+	atomic_notifier_call_chain(&show_mem_notifier, 0, NULL);
+}
+
+static int show_mem_notifier_get(void *dat, u64 *val)
+{
+	show_mem_call_notifiers();
+	*val = 0;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(show_mem_notifier_debug_ops, show_mem_notifier_get,
+				NULL, "%llu\n");
+
+int show_mem_notifier_debugfs_register(void)
+{
+	debugfs_create_file("show_mem_notifier", 0664, NULL, NULL,
+				&show_mem_notifier_debug_ops);
+
+	return 0;
+}
+late_initcall(show_mem_notifier_debugfs_register);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 4791aa8..ab2f280 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1480,6 +1480,9 @@
 		    (ut[i].family != prev_family))
 			return -EINVAL;
 
+		if (ut[i].mode >= XFRM_MODE_MAX)
+			return -EINVAL;
+
 		prev_family = ut[i].family;
 
 		switch (ut[i].family) {